Compare commits


39 Commits

Author SHA1 Message Date
Preston Van Loon
3891507012 Update libp2p, fix builds. Tests fail 2023-03-06 16:04:32 -06:00
terencechain
78d49fda13 Minor cleanup to forkchoice pkg (#12078)
* Minor cleanup to forkchoice pkg

* Rm implementation

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-06 17:01:36 +00:00
Nishant Das
3aaba7c065 Remove Feature Flag From Prater (#12082) 2023-03-06 09:56:53 +00:00
terencechain
b8a1bcdfe3 Circuit breaker: lower max builder epoch missed slots to 5 (#12076)
* Circuit breaker: lower max builder epoch missed slots to 5

* Change equality to >=

* Fix tests

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-05 19:10:41 +00:00
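
A minimal sketch of the gate described in #12076 above, with hypothetical names standing in for Prysm's actual config values: flipping the comparison to >= means that reaching the lowered threshold of 5 missed slots, not just exceeding it, trips the breaker.

```go
package main

import "fmt"

// maxBuilderEpochMissedSlots mirrors the lowered threshold of 5; the
// name is a stand-in, not Prysm's real identifier.
const maxBuilderEpochMissedSlots = 5

// builderCircuitBreakerTripped uses >= as the commit does, so the
// client falls back to local block building once the threshold is hit.
func builderCircuitBreakerTripped(missedSlotsInEpoch uint64) bool {
	return missedSlotsInEpoch >= maxBuilderEpochMissedSlots
}

func main() {
	fmt.Println(builderCircuitBreakerTripped(4)) // false
	fmt.Println(builderCircuitBreakerTripped(5)) // true: equality now trips
}
```
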
Potuz
93514de00f Proposer head v5 (#12075)
* Use ShouldOverrideFCU in regular sync

* fix build

* fix tests

* add feature flag gate in updatehead

* fix rpc tests

* fix grpc tests

* deepsource

* add locks and reuse isNewProposer

* flip flag

* Fix ticker for late blocks

* implement get_proposer_head in GetBeaconBlock

* fix unit tests
2023-03-04 20:19:23 -03:00
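
The heart of #12075 shows up later in this compare in forkchoiceUpdateWithExecution. A condensed, illustrative sketch of that decision, assuming only the two forkchoice calls it relies on; the freestanding shape and fake types below are not Prysm's code.

```go
package main

import "fmt"

// proposerHeadForkchoice captures the two calls the gate needs; in
// Prysm these are methods on the ForkChoicer interface.
type proposerHeadForkchoice interface {
	GetProposerHead() [32]byte
	ShouldOverrideFCU() bool
}

// shouldUpdateHead condenses the late-block reorg gate: a node about
// to propose may keep its previous head when forkchoice indicates the
// late incoming block should be reorged out.
func shouldUpdateHead(fc proposerHeadForkchoice, isNewHead, isNewProposer, reorgEnabled, proposingNow bool, newHeadRoot [32]byte) bool {
	if !isNewHead {
		return false
	}
	if !isNewProposer || !reorgEnabled {
		return true
	}
	if proposingNow {
		// Proposing this slot: only advance if the new head is the
		// block forkchoice wants us to build on.
		return fc.GetProposerHead() == newHeadRoot
	}
	// Proposing next slot: hold the old head if we plan to override FCU.
	return !fc.ShouldOverrideFCU()
}

type fakeFC struct{ override bool }

func (f fakeFC) GetProposerHead() [32]byte { return [32]byte{} }
func (f fakeFC) ShouldOverrideFCU() bool   { return f.override }

func main() {
	// Next-slot proposer with a planned FCU override keeps the old head.
	fmt.Println(shouldUpdateHead(fakeFC{override: true}, true, true, true, false, [32]byte{1})) // false
}
```
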
terencechain
86a883aa19 Add capella fork epoch for Goerli (#12073)
* Add capella fork epoch for Goerli

* update pr

---------

Co-authored-by: nisdas <nishdas93@gmail.com>
2023-03-04 09:43:20 +00:00
Preston Van Loon
c000e8fde5 Raise the max grpc message size to a very large value by default (#12072)
* Raise the max grpc message size to a very large value by default

* unused import

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-02 19:04:58 +00:00
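
For context on #12072, a generic grpc-go sketch of how such a default is raised on both ends; the value below is a placeholder, not necessarily the one Prysm picked.

```go
package main

import (
	"math"

	"google.golang.org/grpc"
)

func main() {
	// grpc-go's default receive limit is 4 MB; raising it requires a
	// server option and, for large responses, a matching client option.
	maxMsgSize := math.MaxInt32 // placeholder "very large" value

	server := grpc.NewServer(grpc.MaxRecvMsgSize(maxMsgSize))
	defer server.Stop()

	conn, err := grpc.Dial("localhost:4000",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)))
	if err == nil {
		defer conn.Close()
	}
}
```
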
dependabot[bot]
75e5887f07 Bump golang.org/x/net from 0.6.0 to 0.7.0 (#12063)
* Bump golang.org/x/net from 0.6.0 to 0.7.0

Bumps [golang.org/x/net](https://github.com/golang/net) from 0.6.0 to 0.7.0.
- [Release notes](https://github.com/golang/net/releases)
- [Commits](https://github.com/golang/net/compare/v0.6.0...v0.7.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

* gazelle

* go mod tidy

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-02 17:37:16 +00:00
Radosław Kapka
4ca3c5b058 Add slot to proposal error logs (#12071)
* Add slot to proposal error logs

* remove one field
2023-03-02 16:33:39 +00:00
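
Prysm logs through logrus, so the change in #12071 amounts to attaching a structured field; a hedged sketch, where the field name and message are illustrative.

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	// Attaching the slot as a structured field lets operators tie a
	// failed proposal back to a specific slot.
	err := errors.New("could not sign block")
	logrus.WithField("slot", 123456).WithError(err).Error("Failed to propose block")
}
```
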
Radosław Kapka
25d06d41be Fix type name in field_trie_helpers error message (#12070)
(cherry picked from commit bc9330c329)
2023-03-02 14:33:22 +00:00
Potuz
0a87210514 Forkchoice external locks v2 (#12036)
* write locks

* fix forkchoice tests

* blockchain locks

* lock on IsOptimistic

* use forkchoice instead of chaininfo within savehead

* Use forkchoice HasNode to check if a block is consistent with finality

* interface fix

* Use forkchoice HasNode to check if a block is consistent with finality

* interface fix

* fix tests

* remove VerifyFinalizedBlkDescendant

* don't write lock wrappers

* fix validateBeaconBlock

* Terence's review and more missing locks

* add lock for InForkChoice

* lock head on fillMissingBlockPayload

* fix lock on IsOptimisticForRoot

* fix lock in fillMissingBlockPayloadId

* extra comments

* lock proposerBoost on spectests

* nishant's review

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-03-02 09:10:52 -03:00
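
The pattern #12036 applies throughout the chain-info code (visible in the diff further down) is external locking: callers take the forkchoice read lock, query, and copy the result out. Distilled to plain sync.RWMutex; the store type here is illustrative.

```go
package main

import (
	"fmt"
	"sync"
)

// store stands in for the forkchoice store; with external locking the
// caller, not the store, is responsible for synchronization.
type store struct {
	mu       sync.RWMutex
	headRoot [32]byte
}

// HeadRoot mirrors the RLock / defer RUnlock / copy-out shape the diff
// below adds to methods like FinalizedCheckpt and IsOptimistic.
func (s *store) HeadRoot() [32]byte {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.headRoot // [32]byte is copied by value, safe after unlock
}

func main() {
	s := &store{headRoot: [32]byte{0xde, 0xad}}
	fmt.Printf("%#x\n", s.HeadRoot()[:2])
}
```
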
Nishant Das
196798eacc Update Deps For Capella (#12067)
* update

* gzl

* Using zstd workaround from @tals, per github.com/bazelbuild/rules_go/issues/3411

* gaz

* hacky patch

---------

Co-authored-by: rkapka <rkapka@wp.pl>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-02 03:31:41 +00:00
Radosław Kapka
17fe935343 Deprecate --interop-genesis-state (#12008)
* Deprecate `--interop-genesis-state`

* better pattern

* fix errors

* test fix

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-01 19:39:17 +00:00
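
Prysm's CLI is built on urfave/cli, where the usual deprecation pattern keeps the flag parseable but hidden, warning when it is set. A hedged sketch of that pattern, not Prysm's exact code.

```go
package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// The old flag stays registered so existing scripts do not break, but
// it is hidden from --help and only triggers a warning.
var deprecatedInteropGenesisState = &cli.StringFlag{
	Name:   "interop-genesis-state",
	Usage:  "DEPRECATED: this flag no longer has any effect",
	Hidden: true,
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{deprecatedInteropGenesisState},
		Action: func(c *cli.Context) error {
			if c.IsSet("interop-genesis-state") {
				log.Println("--interop-genesis-state is deprecated and ignored")
			}
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```
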
Radosław Kapka
ac4483417d Redesign voluntary exits pool (#11898)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-03-01 17:44:00 +01:00
Potuz
0d3fb0a32b lock head on fillMissingBlockPayload (#12068) 2023-03-01 13:09:10 +00:00
Nishant Das
3d337b07e1 Remove Ropsten Testnet Config (#12058)
* remove support for ropsten testnet

* add deprecated flag for ropsten

---------

Co-authored-by: P <1674920+peterbitfly@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-03-01 13:26:32 +08:00
Raul Jordan
11b90e1f63 Store Blinded Beacon Blocks by Default for New Prysm Databases (#11591)
* fresh database checks

* gaz

* fix up tests

* fix test

* build

* bellatrix pass tests

* fix flags

* gaz

* fix up rpc test

* gaz

* building

* refactor, remove boolean blindness

* kv blocks

* fix up ensure coverage

* e2e default

* add missing case

* nishant feedback:

* log

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-03-01 00:07:23 +00:00
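
A sketch of the decision #11591 describes, with entirely hypothetical helper names (IsFreshDatabase, HasBlindedMarker, SetBlindedMarker are stand-ins, not Prysm's DB API): only databases created after the change opt into blinded storage, so existing nodes keep their current behavior.

```go
package blockstore

// blockDB abstracts the two facts the decision needs; the method set
// is hypothetical.
type blockDB interface {
	IsFreshDatabase() bool
	HasBlindedMarker() bool
	SetBlindedMarker() error
}

// shouldSaveBlinded reports whether blocks should be persisted in
// blinded form. Fresh databases record a marker and opt in; databases
// that predate the change keep storing full blocks.
func shouldSaveBlinded(db blockDB) (bool, error) {
	if db.HasBlindedMarker() {
		return true, nil
	}
	if db.IsFreshDatabase() {
		return true, db.SetBlindedMarker()
	}
	return false, nil
}
```
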
Preston Van Loon
3c73bac798 Update protoc-gen-go-cast to suppress tool output (#12062)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-28 23:04:22 +00:00
Preston Van Loon
91fee5db17 Update distroless base images (#12061)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-28 22:50:37 +00:00
Preston Van Loon
155b0c161e Bazel: cleanup .bazelrc file (#12059)
* Reorganize bazelrc and intro new flags

* move cross compilation toolchain into its own bazelrc

* Restore build_tests_only
2023-02-28 21:36:43 +00:00
Nishant Das
a7010d817d Fix Scenario Test Failures (#12056)
* fix scenario failures

* fix up

* continue fixing
2023-02-28 23:21:15 +08:00
james-prysm
c0dd233a1c E2E: beacon api post attester duties (#11899)
* adding new post request util and attester duties test

* adding in status checks

* fixing error

* increasing size of request

* fixing gofmt
2023-02-28 13:35:02 +08:00
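
The duties test above exercises a POST endpoint; per the public beacon-API spec, attester duties are requested by POSTing validator indices for an epoch. A hedged, standalone sketch — host, port, and epoch are placeholders, and Prysm's e2e helpers are not shown.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// POST /eth/v1/validator/duties/attester/{epoch} with a JSON array
	// of validator indices (as decimal strings).
	body := bytes.NewBufferString(`["0","1","2"]`)
	resp, err := http.Post(
		"http://localhost:3500/eth/v1/validator/duties/attester/100",
		"application/json", body)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(out))
}
```
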
Preston Van Loon
c391fad258 Update rules docker to v0.25.0 (#12054)
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-02-27 23:46:10 +00:00
Radosław Kapka
e92b546a36 Return proposer reward from ProcessSyncAggregate (#12047) 2023-02-27 19:04:23 +01:00
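
For context on #12047: the proposer's sync-aggregate cut is a fixed fraction of each participant reward, so returning it from ProcessSyncAggregate saves callers a recomputation. A worked sketch of the spec formula, with an illustrative input.

```go
package main

import "fmt"

// Altair spec weights.
const (
	proposerWeight    = 8
	weightDenominator = 64
)

// proposerRewardPerBit is the spec formula for the proposer's share of
// one participant's sync-committee reward, in Gwei.
func proposerRewardPerBit(participantReward uint64) uint64 {
	return participantReward * proposerWeight / (weightDenominator - proposerWeight)
}

func main() {
	fmt.Println(proposerRewardPerBit(1000)) // 142: 1000*8/56, floored
}
```
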
Patrice Vignola
765345ac3a Remove the gRPC fallback client from the validator REST API (#12051) 2023-02-27 12:46:34 +00:00
Potuz
ec13d52f03 Remove VerifyFinalizedBlkDescendant (#12046)
* Use forkchoice HasNode to check if a block is consistent with finality

* interface fix

* fix tests

* remove VerifyFinalizedBlkDescendant

* fix validateBeaconBlock
2023-02-25 13:39:13 -03:00
Manu NALEPA
08ebc99bc3 Add (lack of) REST implementation for GetFeeRecipientByPubKey (#11991)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-02-24 21:43:52 +00:00
james-prysm
8918e8c274 prysmctl: check capella fork for blstoexecutionchange (#12039)
* adding in a check for current fork to disable submission prior

* fixing linting

* fixing error return

* updating error contains in test

* fixing unit test

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-24 16:28:32 +00:00
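
A sketch of the gate #12039 adds, using Prysm's real params, slots, and primitives packages but an illustrative function shape: refuse submission while the chain is still pre-Capella.

```go
package prysmctl

import (
	"errors"

	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
)

// checkCapellaActive returns an error if the current slot is still
// before the configured Capella fork epoch, since BLSToExecutionChange
// messages are only valid from Capella onward.
func checkCapellaActive(currentSlot primitives.Slot) error {
	if slots.ToEpoch(currentSlot) < params.BeaconConfig().CapellaForkEpoch {
		return errors.New("BLSToExecutionChange cannot be submitted before the Capella fork")
	}
	return nil
}
```
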
Patrice Vignola
0e4185b40c Remove the StreamDuties endpoint (#12044)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-02-24 15:56:31 +00:00
Nishant Das
383edb3553 Fix Validator Participation Evaluator (#12045)
* fix bug

* remove proto changes
2023-02-24 10:39:06 +00:00
james-prysm
40589905bc Prysm V4: Web3signer changes for capella (#12001)
* adding in changes for capella

* fixing metrics

* updating the web3signer version
2023-02-24 06:21:39 +00:00
Potuz
ae76240f83 Use forkchoice HasNode to check if a block is consistent with finality (#12040)
* Use forkchoice HasNode to check if a block is consistent with finality

* interface fix

* fix tests
2023-02-23 21:21:34 +00:00
Radosław Kapka
096cf7b8c1 Test invalid change inclusion in blstoexec pool (#12037)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-23 17:18:37 +00:00
james-prysm
7c3027801b Prysm v4 - mark old endpoints for deprecation (#11997)
* updating some protos

* updating proto endpoints

* updating generated code

* fixing linting

* updating protos

* updating based on feedback. also removing unused storage protos

* adding in deprecation notice on the server functions

* Update proto/prysm/v1alpha1/beacon_chain.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/beacon_chain.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/health.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/validator.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/debug.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/eth/v1/beacon_chain.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/health.proto

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update proto/eth/service/beacon_chain_service.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/eth/service/beacon_chain_service.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/eth/service/beacon_chain_service.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* addressing feedback

* rewording comment

* updating comments

---------

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-23 16:32:03 +00:00
Raul Jordan
8f6d5ff075 Web-ui-version-v2.0.3-commit (#12038)
Co-authored-by: james-prysm <james-prysm@users.noreply.github.com>
2023-02-23 10:11:42 -06:00
james-prysm
c379c9ea47 Prysm V4 - Deprecate web UI endpoints (#12025)
* wip adding deprecation to web api endpoints

* wip adding more deprecation notices

* adding in a few more deprecation markers

* updating order of comments
2023-02-23 15:24:06 +00:00
Radosław Kapka
75e8f85ba5 Handle EL offline/unavailable in getSyncingStatus (#12031)
* Handle EL offline/unavailable in `getSyncingStatus`

* fix type

* better name

* test fix
2023-02-23 12:09:56 +00:00
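
For reference, a hedged sketch of the response shape getSyncingStatus serves; el_offline is the beacon-API field for an unreachable execution client. The field set follows the public spec, not Prysm's internal types.

```go
package node

// syncingResponse mirrors /eth/v1/node/syncing. Numeric fields are
// decimal strings per the beacon-API convention.
type syncingResponse struct {
	Data struct {
		HeadSlot     string `json:"head_slot"`
		SyncDistance string `json:"sync_distance"`
		IsSyncing    bool   `json:"is_syncing"`
		IsOptimistic bool   `json:"is_optimistic"`
		ElOffline    bool   `json:"el_offline"`
	} `json:"data"`
}
```
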
Nishant Das
8d82ca08ab Allow Testing Of Withdrawals On E2E Mainnet (#12027) 2023-02-23 12:04:53 +01:00
Radosław Kapka
29f645f0cc Add Eth-Consensus-Version header to SubmitBlindedBlock (#12029)
* Add `Eth-Consensus-Version` header to `SubmitBlindedBlock`

* bring back function

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-22 17:42:05 +00:00
211 changed files with 6321 additions and 5657 deletions

.bazelrc

@@ -1,9 +1,9 @@
# Print warnings for tests with inappropriate test size or timeout.
test --test_verbose_timeout_warnings
# Only build test targets when running bazel test //...
test --build_tests_only
test --test_output=errors
# Import bazelrc presets
import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# E2E run with debug gotag
test:e2e --define gotags=debug
@@ -11,21 +11,9 @@ test:e2e --define gotags=debug
# Clearly indicate that coverage is enabled to disable certain nogo checks.
coverage --define=coverage_enabled=1
# Fix for rules_docker. See: https://github.com/bazelbuild/rules_docker/issues/842
build --host_force_python=PY2
run --host_force_python=PY2
# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
# is required within the sandbox. Network sandboxing only works on linux.
build --sandbox_default_allow_network=false
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
run --incompatible_strict_action_env
build --define blst_disabled=false
run --define blst_disabled=false
@@ -68,42 +56,6 @@ build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# multi-arch cross-compiling toolchain configs:
#-----------------------------------------------
build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain
build:cross --host_platform=@io_bazel_rules_go//go/toolchain:linux_amd64
build:cross --host_crosstool_top=@prysm_toolchains//:hostonly_toolchain
# linux_amd64 config for cross compiler toolchain, not strictly necessary since host/exec env is amd64
build:linux_amd64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64_cgo
# osx_amd64 config for cross compiler toolchain
build:osx_amd64 --config=cross
build:osx_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64_cgo
build:osx_amd64 --compiler=osxcross
# osx_arm64 config for cross compiler toolchain
build:osx_arm64 --config=cross
build:osx_arm64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64_cgo
build:osx_arm64 --compiler=osxcross
build:osx_arm64 --cpu=aarch64
# windows
build:windows_amd64 --config=cross
build:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64_cgo
build:windows_amd64 --compiler=mingw-w64
# linux_arm64 config for cross compiler toolchain
build:linux_arm64 --config=cross
build:linux_arm64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo
build:linux_arm64 --copt=-funsafe-math-optimizations
build:linux_arm64 --copt=-ftree-vectorize
build:linux_arm64 --copt=-fomit-frame-pointer
build:linux_arm64 --cpu=aarch64
build:linux_arm64 --compiler=clang
build:linux_arm64 --copt=-march=armv8-a
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
@@ -111,123 +63,5 @@ build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s
# windows debug
build:windows_amd64_debug --config=windows_amd64
build:windows_amd64_debug --config=debug
# osx_amd64 debug config
build:osx_amd64_debug --config=debug
build:osx_amd64_debug --config=osx_amd64
# osx_arm64 debug config
build:osx_arm64_debug --config=debug
build:osx_arm64_debug --config=osx_arm64
# linux_arm64_debug
build:linux_arm64_debug --config=linux_arm64
build:linux_arm64_debug --config=debug
# linux_amd64_debug
build:linux_amd64_debug --config=linux_amd64
build:linux_amd64_debug --config=debug
# Docker Sandbox Configs
#-----------------------
# Note all docker sandbox configs must run from a linux x86_64 host
# build:docker-sandbox --experimental_docker_image=gcr.io/prysmaticlabs/rbe-worker:latest
build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
build:docker-sandbox --define=EXECUTOR=remote
build:docker-sandbox --experimental_docker_verbose
build:docker-sandbox --experimental_enable_docker_sandbox
build:docker-sandbox --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:docker-sandbox --host_javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --host_platform=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --extra_toolchains=@prysm_toolchains//:cc-toolchain-multiarch
# windows_amd64 docker sandbox build config
build:windows_amd64_docker --config=docker-sandbox --config=windows_amd64
build:windows_amd64_docker_debug --config=windows_amd64_docker --config=debug
# osx_amd64 docker sandbox build config
build:osx_amd64_docker --config=docker-sandbox --config=osx_amd64
build:osx_amd64_docker_debug --config=osx_amd64_docker --config=debug
# osx_arm64 docker sandbox build config
build:osx_arm64_docker --config=docker-sandbox --config=osx_arm64
build:osx_arm64_docker_debug --config=osx_arm64_docker --config=debug
# linux_arm64 docker sandbox build config
build:linux_arm64_docker --config=docker-sandbox --config=linux_arm64
build:linux_arm64_docker_debug --config=linux_arm64_docker --config=debug
# linux_amd64 docker sandbox build config
build:linux_amd64_docker --config=docker-sandbox --config=linux_amd64
build:linux_amd64_docker_debug --config=linux_amd64_docker --config=debug
# Remote Build Execution
#-----------------------
# Originally from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/bazel-2.0.0.bazelrc
#
# Depending on how many machines are in the remote execution instance, setting
# this higher can make builds faster by allowing more jobs to run in parallel.
# Setting it too high can result in jobs that timeout, however, while waiting
# for a remote machine to execute them.
build:remote --jobs=50
# Set several flags related to specifying the platform, toolchain and java
# properties.
# These flags should only be used as is for the rbe-ubuntu16-04 container
# and need to be adapted to work with other toolchain containers.
build:remote --host_javabase=@rbe_ubuntu_clang//java:jdk
build:remote --javabase=@rbe_ubuntu_clang//java:jdk
build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
# Platform flags:
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
build:remote --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
build:remote --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:remote --host_platform=@rbe_ubuntu_clang//config:platform
build:remote --platforms=@rbe_ubuntu_clang//config:platform
# Starting with Bazel 0.27.0 strategies do not need to be explicitly
# defined. See https://github.com/bazelbuild/bazel/issues/7480
build:remote --define=EXECUTOR=remote
# Enable remote execution so actions are performed on the remote systems.
# build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote --incompatible_strict_action_env=true
# Set a higher timeout value, just in case.
build:remote --remote_timeout=3600
# Enable authentication. This will pick up application default credentials by
# default. You can use --google_credentials=some_file.json to use a service
# account credential instead.
# build:remote --google_default_credentials=true
# Enable build without the bytes
# See: https://github.com/bazelbuild/bazel/issues/6862
build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
# Set bazel gotag
build --define gotags=bazel


@@ -76,9 +76,8 @@ http_archive(
http_archive(
name = "io_bazel_rules_docker",
sha256 = "1f4e59843b61981a96835dc4ac377ad4da9f8c334ebe5e0bb3f58f80c09735f4",
strip_prefix = "rules_docker-0.19.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.19.0/rules_docker-v0.19.0.tar.gz"],
sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
)
http_archive(
@@ -122,32 +121,36 @@ load(
"container_pull",
)
# Pulled gcr.io/distroless/cc-debian11:latest on 2022-02-23
container_pull(
name = "cc_image_base",
digest = "sha256:41036fc7ed8df0f6addc18484cef0c94a85867508967789f947e11ffd5ff0cc8",
name = "cc_image_base_amd64",
digest = "sha256:2a0daf90a7deb78465bfca3ef2eee6e91ce0a5706059f05d79d799a51d339523",
registry = "gcr.io",
repository = "distroless/cc",
repository = "distroless/cc-debian11",
)
# Pulled gcr.io/distroless/cc-debian11:debug on 2022-02-23
container_pull(
name = "cc_debug_image_base",
digest = "sha256:6865ad48467c89c3c3524d4c426f52ad12d9ab7dec31fad31fae69da40eb6445",
name = "cc_debug_image_base_amd64",
digest = "sha256:7bd596f5f200588f13a69c268eea6ce428b222b67cd7428d6a7fef95e75c052a",
registry = "gcr.io",
repository = "distroless/cc",
repository = "distroless/cc-debian11",
)
# Pulled from gcr.io/distroless/base-debian11:latest on 2022-02-23
container_pull(
name = "go_image_base",
digest = "sha256:b9b124f955961599e72630654107a0cf04e08e6fa777fa250b8f840728abd770",
name = "go_image_base_amd64",
digest = "sha256:34e682800774ecbd0954b1663d90238505f1ba5543692dbc75feef7dd4839e90",
registry = "gcr.io",
repository = "distroless/base",
repository = "distroless/base-debian11",
)
# Pulled from gcr.io/distroless/base-debian11:debug on 2022-02-23
container_pull(
name = "go_debug_image_base",
digest = "sha256:65668d2b78d25df3d8ccf5a778d017fcaba513b52078c700083eaeef212b9979",
name = "go_debug_image_base_amd64",
digest = "sha256:0f503c6bfd207793bc416f20a35bf6b75d769a903c48f180ad73f60f7b60d7bd",
registry = "gcr.io",
repository = "distroless/base",
repository = "distroless/base-debian11",
)
container_pull(
@@ -282,9 +285,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "82b01a48b143fe0f2fb7fb5f5dd385c1f934335a12d7954f08b1d45d77427b5e",
strip_prefix = "eth2-networks-674f7a1d01d9c18345456eab76e3871b3df2126b",
url = "https://github.com/eth-clients/eth2-networks/archive/674f7a1d01d9c18345456eab76e3871b3df2126b.tar.gz",
sha256 = "2701e1e1a3ec10c673fe7dbdbbe6f02c8ae8c922aebbf6e720d8c72d5458aafe",
strip_prefix = "eth2-networks-7b4897888cebef23801540236f73123e21774954",
url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
)
http_archive(
@@ -315,9 +318,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "b2226874526805d64c29e5053fa28e511b57c0860585d6d59777ee81ff4859ca",
sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.2/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
],
)


@@ -295,7 +295,10 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
@@ -322,7 +325,10 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")


@@ -263,6 +263,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -288,6 +289,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),


@@ -33,7 +33,7 @@ type ChainInfoFetcher interface {
// HeadUpdater defines a common interface for methods in blockchain service
// which allow to update the head info
type HeadUpdater interface {
UpdateHead(context.Context) error
UpdateHead(context.Context, primitives.Slot)
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -84,7 +84,7 @@ type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
InForkchoice([32]byte) bool
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
}
@@ -96,24 +96,32 @@ type OptimisticModeFetcher interface {
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().FinalizedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().PreviousJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().JustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// BestJustifiedCheckpt returns the best justified checkpoint from store.
func (s *Service) BestJustifiedCheckpt() *ethpb.Checkpoint {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().BestJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
@@ -277,6 +285,8 @@ func (s *Service) CurrentFork() *ethpb.Fork {
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
@@ -289,6 +299,8 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
return s.cfg.ForkChoiceStore.Tips()
}
@@ -330,6 +342,8 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
headRoot := s.head.root
s.headLock.RUnlock()
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
if err == nil {
return optimistic, nil
@@ -345,6 +359,8 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
// IsFinalized returns true if the input root is finalized.
// It first checks latest finalized root then checks finalized root index in DB.
func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
if s.ForkChoicer().FinalizedCheckpoint().Root == root {
return true
}
@@ -356,10 +372,21 @@ func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
}
// InForkchoice returns true if the given root is found in forkchoice
// This in particular means that the blockroot is a descendant of the
// finalized checkpoint
func (s *Service) InForkchoice(root [32]byte) bool {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
return s.ForkChoicer().HasNode(root)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
s.ForkChoicer().RLock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
s.ForkChoicer().RUnlock()
if err == nil {
return optimistic, nil
}


@@ -26,7 +26,7 @@ var (
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
errNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
)
// An invalid block is the block that fails state transition based on the core protocol rules.


@@ -5,11 +5,13 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
)
func (s *Service) isNewProposer() bool {
_, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* root */)
func (s *Service) isNewProposer(slot primitives.Slot) bool {
_, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
return ok
}
@@ -40,28 +42,58 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
return headState, newHeadBlock, nil
}
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte) error {
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) error {
isNewHead := s.isNewHead(newHeadRoot)
if !isNewHead && !s.isNewProposer() {
isNewProposer := s.isNewProposer(proposingSlot)
if !isNewHead && !isNewProposer {
return nil
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
var headState state.BeaconState
var headBlock interfaces.ReadOnlySignedBeaconBlock
var headRoot [32]byte
var err error
shouldUpdate := isNewHead
if isNewHead && isNewProposer && !features.Get().DisableReorgLateBlocks {
if proposingSlot == s.CurrentSlot() {
proposerHead := s.ForkChoicer().GetProposerHead()
if proposerHead != newHeadRoot {
shouldUpdate = false
}
} else if s.ForkChoicer().ShouldOverrideFCU() {
shouldUpdate = false
}
}
if shouldUpdate {
headRoot = newHeadRoot
headState, headBlock, err = s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
} else {
// We are guaranteed that the head block is the parent
// of the incoming block. We do not process the slot
// because it will be processed anyway in notifyForkchoiceUpdate
headState = s.headState(ctx)
headRoot = s.headRoot()
headBlock, err = s.headBlock()
if err != nil {
return errors.Wrap(err, "could not get head block")
}
}
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headRoot: headRoot,
headBlock: headBlock.Block(),
})
if err != nil {
return err
return errors.Wrap(err, "could not notify forkchoice update")
}
if isNewHead {
if shouldUpdate {
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}


@@ -21,10 +21,10 @@ import (
func TestService_isNewProposer(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.Equal(t, false, service.isNewProposer())
require.Equal(t, false, service.isNewProposer(service.CurrentSlot()+1))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
require.Equal(t, true, service.isNewProposer())
require.Equal(t, true, service.isNewProposer(service.CurrentSlot()+1))
}
func TestService_isNewHead(t *testing.T) {
@@ -75,7 +75,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, service.headRoot()))
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1))
hookErr := "could not notify forkchoice update"
invalidStateErr := "could not get state summary: could not find block in DB"
require.LogsDoNotContain(t, hook, invalidStateErr)
@@ -83,7 +83,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}))
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1))
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
@@ -107,7 +107,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1))
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
@@ -124,7 +124,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1))
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
@@ -134,7 +134,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{}))
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1))
require.Equal(t, service.headRoot(), headRoot)
}
@@ -184,7 +184,9 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
// Set head to be the same but proposing next slot
service.head.root = r
service.head.block = sb
service.head.state = st
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r))
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1))
}


@@ -52,6 +52,7 @@ type head struct {
// This saves head info to the local service cache, it also saves the
// new head root to the DB.
// Caller of the method MUST acquire a lock on forkchoice.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.ReadOnlySignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
@@ -122,7 +123,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
reorgDistance.Observe(float64(dis))
reorgDepth.Observe(float64(dep))
isOptimistic, err := s.IsOptimistic(ctx)
isOptimistic, err := s.ForkChoicer().IsOptimistic(newHeadRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}


@@ -6,7 +6,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/attestation"
@@ -37,7 +36,7 @@ import (
//
// # Update latest messages for attesting indices
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, disparity time.Duration) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
defer span.End()
@@ -63,7 +62,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error
genesisTime := uint64(s.genesisTime.Unix())
// Verify attestation target is from current epoch or previous epoch.
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Add(disparity).Unix()), tgt); err != nil {
return err
}
@@ -72,11 +71,11 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error
return errors.Wrap(err, "could not verify attestation beacon block")
}
// Note that LMG GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
// Note that LMD GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
// validate_aggregate_proof.go and validate_beacon_attestation.go
// Verify attestations can only affect the fork choice of subsequent slots.
if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, disparity); err != nil {
return err
}


@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -118,7 +117,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := service.OnAttestation(ctx, tt.a)
err := service.OnAttestation(ctx, tt.a, 0)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
@@ -155,7 +154,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0]))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
func TestStore_SaveCheckpointState(t *testing.T) {
@@ -324,96 +323,3 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
}
func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.ErrorContains(t, "Root and finalized store are not consistent", err)
}
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: r32}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)
}
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r32}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, bState, r32))
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, jc))
_, err = service.cfg.ForkChoiceStore.Head(ctx)
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)
}


@@ -107,6 +107,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
return err
}
// Verify that the parent block is in forkchoice
if !s.ForkChoicer().HasNode(b.ParentRoot()) {
return ErrNotDescendantOfFinalized
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
@@ -209,7 +214,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
if err := s.forkchoiceUpdateWithExecution(ctx, headRoot); err != nil {
if err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
return err
}
@@ -567,6 +572,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it is the caller's responsibility to ensure the slashing object is valid.
// This function requires a write lock on forkchoice.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
for _, slashing := range slashings {
indices := blocks.SlashableAttesterIndices(slashing)
@@ -661,15 +667,15 @@ func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *ev
break
}
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case ti := <-ticker.C:
if err := s.fillMissingBlockPayloadId(ctx, ti); err != nil {
case <-ticker.C():
if err := s.fillMissingBlockPayloadId(ctx); err != nil {
log.WithError(err).Error("Could not fill missing payload ID")
}
case <-s.ctx.Done():
case <-ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
@@ -677,36 +683,32 @@ func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *ev
}()
}
// Returns true if time `t` is halfway through the slot in sec.
func atHalfSlot(t time.Time) bool {
s := params.BeaconConfig().SecondsPerSlot
return uint64(t.Second())%s == s/2
}
func (s *Service) fillMissingBlockPayloadId(ctx context.Context, ti time.Time) error {
if !atHalfSlot(ti) {
return nil
}
if s.CurrentSlot() == s.cfg.ForkChoiceStore.HighestReceivedBlockSlot() {
func (s *Service) fillMissingBlockPayloadId(ctx context.Context) error {
s.ForkChoicer().RLock()
highestReceivedSlot := s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
s.ForkChoicer().RUnlock()
if s.CurrentSlot() == highestReceivedSlot {
return nil
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
if has && id == [8]byte{} {
missedPayloadIDFilledCount.Inc()
headBlock, err := s.headBlock()
if err != nil {
return err
} else {
if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: s.headState(ctx),
headRoot: s.headRoot(),
headBlock: headBlock.Block(),
}); err != nil {
return err
}
}
if !has || id != [8]byte{} {
return nil
}
return nil
missedPayloadIDFilledCount.Inc()
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
return err
}
headState := s.headState(ctx)
headRoot := s.headRoot()
s.headLock.RUnlock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: headRoot,
headBlock: headBlock.Block(),
})
return err
}


@@ -1,7 +1,6 @@
package blockchain
import (
"bytes"
"context"
"fmt"
@@ -14,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v3/math"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
@@ -71,10 +69,6 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
return errors.New("could not reconstruct parent state")
}
if err := s.VerifyFinalizedBlkDescendant(ctx, parentRoot); err != nil {
return err
}
has, err := s.cfg.StateGen.HasState(ctx, parentRoot)
if err != nil {
return err
@@ -88,35 +82,6 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
return nil
}
// VerifyFinalizedBlkDescendant validates if input block root is a descendant of the
// current finalized block root.
func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyFinalizedBlkDescendant")
defer span.End()
finalized := s.ForkChoicer().FinalizedCheckpoint()
fRoot := s.ensureRootNotZeros(finalized.Root)
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
}
bFinalizedRoot, err := s.ancestor(ctx, root[:], fSlot)
if err != nil {
return errors.Wrap(err, "could not get finalized block root")
}
if bFinalizedRoot == nil {
return fmt.Errorf("no finalized block known for block %#x", bytesutil.Trunc(root[:]))
}
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), fSlot, bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)
return invalidBlock{error: err}
}
return nil
}
// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b interfaces.ReadOnlyBeaconBlock) error {
@@ -272,7 +237,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
return nil
}
if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
return errNotDescendantOfFinalized
return ErrNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
}


@@ -65,6 +65,10 @@ func TestStore_OnBlock(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
ojc := &ethpb.Checkpoint{}
stfcs, root, err := prepareForkchoiceState(ctx, 0, validGenesisRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := util.NewBeaconBlock()
@@ -73,11 +77,16 @@ func TestStore_OnBlock(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, random)
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
randomParentRoot2 := roots[1]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
stfcs, root, err = prepareForkchoiceState(ctx, 2, bytesutil.ToBytes32(randomParentRoot2),
validGenesisRoot, [32]byte{'r'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
tests := []struct {
name string
@@ -108,10 +117,11 @@ func TestStore_OnBlock(t *testing.T) {
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot[:]
b.Block.Slot = 2
return b
}(),
s: st.Copy(),
wantErrString: "is not a descendant of the current finalized block",
wantErrString: "not descendant of finalized checkpoint",
},
{
name: "same slot as finalized block",
@@ -441,7 +451,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.Equal(t, errNotDescendantOfFinalized.Error(), err.Error())
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}
// blockTree1 constructs the following tree:
@@ -707,87 +717,6 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
assert.Equal(t, root, r, "Did not get wanted justified root")
}
func TestVerifyBlkDescendant(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
b := util.NewBeaconBlock()
b.Block.Slot = 32
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b)
b1 := util.NewBeaconBlock()
b1.Block.Slot = 32
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b1)
type args struct {
parentRoot [32]byte
finalizedRoot [32]byte
}
tests := []struct {
name string
args args
wantedErr string
invalidBlockRoot bool
}{
{
name: "could not get finalized block in block service cache",
args: args{
finalizedRoot: [32]byte{'a'},
},
wantedErr: "block not found in cache or db",
},
{
name: "could not get finalized block root in DB",
args: args{
finalizedRoot: r,
parentRoot: [32]byte{'a'},
},
wantedErr: "could not get finalized block root",
},
{
name: "is not descendant",
args: args{
finalizedRoot: r1,
parentRoot: r,
},
wantedErr: "is not a descendant of the current finalized block slot",
invalidBlockRoot: true,
},
{
name: "is descendant",
args: args{
finalizedRoot: r,
parentRoot: r,
},
},
}
for _, tt := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: tt.args.finalizedRoot, Epoch: 1}))
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
if tt.invalidBlockRoot {
require.Equal(t, true, IsInvalidBlock(err))
}
} else if err != nil {
assert.NoError(t, err)
}
}
}
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsNoDB()
@@ -2140,63 +2069,14 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
// Check that the head is still INVALID and the node is optimistic
// We use onBlockBatch here because the valid chain is missing in forkchoice
require.NoError(t, service.onBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wsb}, [][32]byte{root}))
// Check that the head is now VALID and the node is not optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
// Import blocks 21--23
for i := 21; i < 24; i++ {
driftGenesisTime(service, int64(i), 0)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
}
// Head should still be INVALID and the node is optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
// Import block 24, it should justify Epoch 3 and become HEAD, the node
// recovers
driftGenesisTime(service, 24, 0)
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 24)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
service.ForkChoicer().SetBalancesByRooter(service.cfg.StateGen.ActiveNonSlashedBalancesByRoot)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
headRoot, err = service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, root, bytesutil.ToBytes32(headRoot))
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, primitives.Epoch(3), sjc.Epoch)
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
@@ -2284,7 +2164,7 @@ func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.fillMissingBlockPayloadId(ctx, time.Unix(int64(params.BeaconConfig().SecondsPerSlot/2), 0)))
require.NoError(t, service.fillMissingBlockPayloadId(ctx), 0)
}
// Helper function to simulate the block being on time or delayed for proposer

View File

@@ -11,7 +11,9 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -19,6 +21,10 @@ import (
"go.opencensus.io/trace"
)
// reorgLateBlockCountAttestations is how long before the end of the slot we begin counting
// attestations to decide whether to reorg the incoming block.
const reorgLateBlockCountAttestations = 2 * time.Second
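For orientation, a minimal sketch (not part of the diff) of how this constant drives the negative-offset ticker set up in spawnProcessAttestationsRoutine below; genesisTime and the surrounding wiring are assumed:

// Fires reorgLateBlockCountAttestations (2s) before each slot boundary,
// i.e. at 10s into a 12s mainnet slot, leaving a window to count
// attestations before deciding whether to reorg a late incoming block.
pat := slots.NewSlotTickerWithOffset(genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
defer pat.Done() // assumes the ticker exposes Done() for shutdown
for slot := range pat.C() {
	_ = slot // count attestations / update head here
}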
// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
@@ -29,7 +35,7 @@ type AttestationStateFetcher interface {
type AttestationReceiver interface {
AttestationStateFetcher
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
InForkchoice([32]byte) bool
}
// AttestationTargetState returns the pre state of attestation.
@@ -60,33 +66,6 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
return nil
}
// VerifyFinalizedConsistency verifies input root is consistent with finalized store.
// When the input root is not consistent with the finalized store, we know it is not
// on the finalized checkpoint chain leading to the current canonical chain and should be rejected accordingly.
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root has an ancestor that aligns with the finalized checkpoint.
// In this case, we could exit early to save on additional computation.
blockRoot := bytesutil.ToBytes32(root)
if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
return nil
}
f := s.FinalizedCheckpt()
ss, err := slots.EpochStart(f.Epoch)
if err != nil {
return err
}
r, err := s.ancestor(ctx, root, ss)
if err != nil {
return err
}
if !bytes.Equal(f.Root, r) {
return errors.New("Root and finalized store are not consistent")
}
return nil
}
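With this helper deleted, consistency with finality is expressed through the new InForkchoice method added to the interface above: every node tracked by forkchoice descends from the finalized checkpoint. A hedged sketch of a caller-side check (hypothetical call site, not taken from this diff):

if !s.chain.InForkchoice(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) {
	// The root is unknown to forkchoice, so it cannot descend from the
	// finalized checkpoint; reject it.
	return errors.New("block root is not in forkchoice, inconsistent with finalized checkpoint")
}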
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
// Wait for state to be initialized.
@@ -115,32 +94,40 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
}
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
pat := slots.NewSlotTickerWithOffset(s.genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
return
case <-pat.C():
s.ForkChoicer().Lock()
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
s.ForkChoicer().Unlock()
case <-st.C():
s.ForkChoicer().Lock()
if err := s.ForkChoicer().NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("Could not process new slot")
log.WithError(err).Error("could not process new slot")
}
if err := s.UpdateHead(s.ctx); err != nil {
log.WithError(err).Error("Could not process attestations and update head")
}
s.UpdateHead(s.ctx, s.CurrentSlot())
s.ForkChoicer().Unlock()
}
}
}()
}
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// It requires no external inputs.
func (s *Service) UpdateHead(ctx context.Context) error {
// Only one process can process attestations and update head at a time.
s.processAttestationsLock.Lock()
defer s.processAttestationsLock.Unlock()
// The caller of this function MUST hold a lock in forkchoice
func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) {
start := time.Now()
s.processAttestations(ctx)
// This function is only called at 10 seconds or 0 seconds into the slot
disparity := params.BeaconNetworkConfig().MaximumGossipClockDisparity
if !features.Get().DisableReorgLateBlocks {
disparity += reorgLateBlockCountAttestations
}
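// Worked example (illustrative, assuming mainnet values): MaximumGossipClockDisparity
// is 500ms and the reorg window is 2s, so disparity = 2.5s. The tick at 10s into a
// 12s slot can therefore already count attestations from the current slot.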
s.processAttestations(ctx, disparity)
processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
start = time.Now()
@@ -158,21 +145,20 @@ func (s *Service) UpdateHead(ctx context.Context) error {
}).Debug("Head changed due to attestations")
}
s.headLock.RUnlock()
if err := s.forkchoiceUpdateWithExecution(ctx, newHeadRoot); err != nil {
return err
if err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot); err != nil {
log.WithError(err).Error("could not update forkchoice")
}
return nil
}
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context) {
func (s *Service) processAttestations(ctx context.Context, disparity time.Duration) {
atts := s.cfg.AttPool.ForkchoiceAttestations()
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.Data.Slot + 1
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
continue
}
@@ -190,7 +176,7 @@ func (s *Service) processAttestations(ctx context.Context) {
continue
}
if err := s.receiveAttestationNoPubsub(ctx, a); err != nil {
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
log.WithFields(logrus.Fields{
"slot": a.Data.Slot,
"committeeIndex": a.Data.CommitteeIndex,
@@ -207,11 +193,11 @@ func (s *Service) processAttestations(ctx context.Context) {
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save latest head info
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error {
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation, disparity time.Duration) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
defer span.End()
if err := s.OnAttestation(ctx, att); err != nil {
if err := s.OnAttestation(ctx, att, disparity); err != nil {
return errors.Wrap(err, "could not process attestation")
}

View File

@@ -120,7 +120,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx)
service.processAttestations(ctx, 0)
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
}
@@ -183,10 +183,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
service.head.root = r // Old head
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.NoError(t, err, service.UpdateHead(ctx))
service.UpdateHead(ctx, 0)
require.Equal(t, tRoot, service.headRoot())
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
require.Equal(t, tRoot, service.head.root) // Validate head is the new one
}
func TestService_UpdateHead_NoAtts(t *testing.T) {
@@ -236,9 +235,8 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.Equal(t, 3, fcs.NodeCount())
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, err, service.UpdateHead(ctx))
service.UpdateHead(ctx, 0)
require.Equal(t, r, service.headRoot())
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
require.Equal(t, r, service.head.root) // Validate head is the new one
}

View File

@@ -8,6 +8,7 @@ import (
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
@@ -45,6 +46,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the new block.
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
@@ -63,11 +67,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
// Reports on block and fork choice metrics.
finalized := s.FinalizedCheckpt()
cp := s.ForkChoicer().FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
// Log block sync status.
justified := s.CurrentJustifiedCheckpt()
cp = s.ForkChoicer().JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
@@ -90,6 +96,9 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the incoming newly received block batches, one by one.
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
err := errors.Wrap(err, "could not process block in batch")
@@ -114,14 +123,15 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
})
// Reports on blockCopy and fork choice metrics.
finalized := s.FinalizedCheckpt()
cp := s.ForkChoicer().FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
}
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
finalized := s.FinalizedCheckpt()
finalized := s.ForkChoicer().FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore
}
@@ -142,6 +152,8 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
}
@@ -179,11 +191,12 @@ func (s *Service) handleBlockBLSToExecChanges(blk interfaces.ReadOnlyBeaconBlock
// This checks whether it's time to start saving hot state to DB.
// It's time when there's `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
// Requires a read lock on forkchoice
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality primitives.Epoch
finalized := s.FinalizedCheckpt()
finalized := s.ForkChoicer().FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore
}

View File

@@ -97,7 +97,8 @@ func TestService_ReceiveBlock(t *testing.T) {
),
},
check: func(t *testing.T, s *Service) {
pending := s.cfg.ExitPool.PendingExits(genesis, 1, true /* no limit */)
pending, err := s.cfg.ExitPool.PendingExits()
require.NoError(t, err)
if len(pending) != 0 {
t.Errorf(
"Did not mark the correct number of exits. Got %d pending but wanted %d",

View File

@@ -44,20 +44,19 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot primitives.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
processAttestationsLock sync.Mutex
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot primitives.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
}
// config options for the service.
@@ -200,6 +199,8 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
@@ -426,6 +427,8 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
s.originBlockRoot = genesisBlkRoot
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
log.WithError(err).Fatal("Could not process genesis block for fork choice")
}
@@ -446,6 +449,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
// 1.) Check fork choice store.
// 2.) Check DB.
// Checking 1.) is ten times faster than checking 2.)
// This function requires a lock in forkchoice.
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
if s.cfg.ForkChoiceStore.HasNode(root) {
return true

View File

@@ -20,11 +20,13 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -19,11 +19,13 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
)
@@ -32,6 +34,7 @@ var ErrNilState = errors.New("nil state")
// ChainService defines the mock interface for testing
type ChainService struct {
NotFinalized bool
Optimistic bool
ValidAttestation bool
ValidatorsRoot [32]byte
@@ -389,11 +392,6 @@ func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}
}
// VerifyFinalizedBlkDescendant mocks VerifyBlkDescendant and always returns nil.
func (s *ChainService) VerifyFinalizedBlkDescendant(_ context.Context, _ [32]byte) error {
return s.VerifyBlkDescendantErr
}
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
@@ -402,14 +400,6 @@ func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attes
return nil
}
// VerifyFinalizedConsistency mocks VerifyFinalizedConsistency and always returns nil.
func (s *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) error {
if !bytes.Equal(r, s.FinalizedCheckPoint.Root) {
return errors.New("Root and finalized store are not consistent")
}
return nil
}
// ChainHeads mocks the same method in the chain service.
func (_ *ChainService) ChainHeads() ([][32]byte, []primitives.Slot) {
return [][32]byte{
@@ -459,6 +449,11 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
return s.Optimistic, nil
}
// InForkchoice mocks the same method in the chain service
func (s *ChainService) InForkchoice(_ [32]byte) bool {
return !s.NotFinalized
}
// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
s.OptimisticCheckRootReceived = root
@@ -466,7 +461,17 @@ func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bo
}
// UpdateHead mocks the same method in the chain service.
func (s *ChainService) UpdateHead(_ context.Context) error { return nil }
func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
if err != nil {
logrus.WithError(err).Error("could not update head")
}
err = s.ForkChoicer().InsertNode(ctx, st, root)
if err != nil {
logrus.WithError(err).Error("could not insert node to forkchoice")
}
}
// ReceiveAttesterSlashing mocks the same method in the chain service.
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
@@ -475,3 +480,37 @@ func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterS
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
return s.FinalizedRoots[blockRoot]
}
// prepareForkchoiceState prepares a beacon state with the given data to mock
// insert into forkchoice
func prepareForkchoiceState(
_ context.Context,
slot primitives.Slot,
blockRoot [32]byte,
parentRoot [32]byte,
payloadHash [32]byte,
justified *ethpb.Checkpoint,
finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
executionHeader := &enginev1.ExecutionPayloadHeader{
BlockHash: payloadHash[:],
}
base := &ethpb.BeaconStateBellatrix{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: make([][]byte, 1),
CurrentJustifiedCheckpoint: justified,
FinalizedCheckpoint: finalized,
LatestExecutionPayloadHeader: executionHeader,
LatestBlockHeader: blockHeader,
}
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
return st, blockRoot, err
}

View File

@@ -44,49 +44,51 @@ import (
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
// else:
// decrease_balance(state, participant_index, participant_reward)
func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, error) {
s, votedKeys, err := processSyncAggregate(ctx, s, sync)
func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, uint64, error) {
s, votedKeys, reward, err := processSyncAggregate(ctx, s, sync)
if err != nil {
return nil, errors.Wrap(err, "could not filter sync committee votes")
return nil, 0, errors.Wrap(err, "could not filter sync committee votes")
}
if err := VerifySyncCommitteeSig(s, votedKeys, sync.SyncCommitteeSignature); err != nil {
return nil, errors.Wrap(err, "could not verify sync committee signature")
return nil, 0, errors.Wrap(err, "could not verify sync committee signature")
}
return s, nil
return s, reward, nil
}
// processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except
// verifying the BLS signatures. It returns the modified beacon state and the list of validators'
// public keys that voted, for future signature verification.
// verifying the BLS signatures. It returns the modified beacon state, the list of validators'
// public keys that voted (for future signature verification) and the proposer reward for including
// sync aggregate messages.
func processSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (
state.BeaconState,
[]bls.PublicKey,
uint64,
error) {
currentSyncCommittee, err := s.CurrentSyncCommittee()
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
if currentSyncCommittee == nil {
return nil, nil, errors.New("nil current sync committee in state")
return nil, nil, 0, errors.New("nil current sync committee in state")
}
committeeKeys := currentSyncCommittee.Pubkeys
if sync.SyncCommitteeBits.Len() > uint64(len(committeeKeys)) {
return nil, nil, errors.New("bits length exceeds committee length")
return nil, nil, 0, errors.New("bits length exceeds committee length")
}
votedKeys := make([]bls.PublicKey, 0, len(committeeKeys))
activeBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
proposerReward, participantReward, err := SyncRewards(activeBalance)
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
earnedProposerReward := uint64(0)
@@ -94,29 +96,29 @@ func processSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.
vIdx, exists := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[i]))
// Impossible scenario.
if !exists {
return nil, nil, errors.New("validator public key does not exist in state")
return nil, nil, 0, errors.New("validator public key does not exist in state")
}
if sync.SyncCommitteeBits.BitAt(i) {
pubKey, err := bls.PublicKeyFromBytes(committeeKeys[i])
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
votedKeys = append(votedKeys, pubKey)
if err := helpers.IncreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
return nil, nil, 0, err
}
earnedProposerReward += proposerReward
} else {
if err := helpers.DecreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
return nil, nil, 0, err
}
}
}
if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
return nil, nil, err
return nil, nil, 0, err
}
return s, votedKeys, err
return s, votedKeys, earnedProposerReward, err
}
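To make the new return value concrete: the loop above adds proposerReward once per set participation bit, so the accumulated value is just the per-bit reward times the participant count. A minimal sketch (hypothetical helper, not in the diff):

// expectedProposerReward mirrors the accumulation in processSyncAggregate.
// bits is the aggregate's SyncCommitteeBits (github.com/prysmaticlabs/go-bitfield).
func expectedProposerReward(proposerReward uint64, bits bitfield.Bitvector512) uint64 {
	return proposerReward * bits.Count()
}

The perfect-participation test later in this diff pins the accumulated reward at 72192 for its generated state.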
// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -53,8 +54,10 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
beaconState, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
var reward uint64
beaconState, reward, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
assert.Equal(t, uint64(72192), reward)
// Use a non-sync committee index to compare profitability.
syncCommittee := make(map[primitives.ValidatorIndex]bool)
@@ -127,7 +130,7 @@ func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
_, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
_, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
require.ErrorContains(t, "invalid sync committee signature", err)
}
@@ -164,7 +167,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
_, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
_, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
}
@@ -189,7 +192,7 @@ func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
SyncCommitteeBits: syncBits,
}
require.NoError(t, beaconState.UpdateBalancesAtIndex(idx, 0))
st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
require.Equal(t, 511, len(votedKeys))
require.DeepEqual(t, committeeKeys[0], votedKeys[0].Marshal())
@@ -212,7 +215,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
SyncCommitteeBits: syncBits,
}
st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
votedMap := make(map[[fieldparams.BLSPubkeyLength]byte]bool)
for _, key := range votedKeys {

View File

@@ -116,7 +116,7 @@ func VerifyExitAndSignature(
return nil
}
// verifyExitConditions implements the spec defined validation for voluntary exits(excluding signatures).
// verifyExitConditions implements the spec defined validation for voluntary exits (excluding signatures).
//
// Spec pseudocode definition:
//

View File

@@ -351,7 +351,7 @@ func ProcessBlockForStateRoot(
if err != nil {
return nil, errors.Wrap(err, "could not get sync aggregate from block")
}
state, err = altair.ProcessSyncAggregate(ctx, state, sa)
state, _, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return nil, errors.Wrap(err, "process_sync_aggregate failed")
}

View File

@@ -178,7 +178,6 @@ func SlashValidator(
if err != nil {
return nil, errors.Wrap(err, "could not get proposer idx")
}
// In phase 0, the proposer is the whistleblower.
whistleBlowerIdx := proposerIdx
whistleblowerReward := validator.EffectiveBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
proposerReward := whistleblowerReward / proposerRewardQuotient

View File

@@ -18,7 +18,6 @@ go_library(
"log.go",
"migration.go",
"migration_archived_index.go",
"migration_blinded_beacon_blocks.go",
"migration_block_slot_index.go",
"migration_state_validators.go",
"schema.go",

View File

@@ -10,7 +10,6 @@ import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -272,6 +271,22 @@ func (s *Store) SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
return s.SaveBlocks(ctx, []interfaces.ReadOnlySignedBeaconBlock{signed})
}
// This function determines if we should save beacon blocks in the DB in blinded format by checking
// if a `saveBlindedBeaconBlocks` key exists in the database. Otherwise, we check whether the
// last stored block is blinded, and then write that `saveBlindedBeaconBlocks` key
// to the DB for future checks.
func (s *Store) shouldSaveBlinded(ctx context.Context) (bool, error) {
var saveBlinded bool
if err := s.db.View(func(tx *bolt.Tx) error {
metadataBkt := tx.Bucket(chainMetadataBucket)
saveBlinded = len(metadataBkt.Get(saveBlindedBeaconBlocksKey)) > 0
return nil
}); err != nil {
return false, err
}
return saveBlinded, nil
}
// SaveBlocks via bulk updates to the db.
func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlocks")
@@ -287,7 +302,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySigned
if err != nil {
return err
}
enc, err := marshalBlock(ctx, blk)
enc, err := s.marshalBlock(ctx, blk)
if err != nil {
return err
}
@@ -296,6 +311,10 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySigned
indicesByBucket := createBlockIndicesFromBlock(ctx, blk.Block())
indicesForBlocks[i] = indicesByBucket
}
saveBlinded, err := s.shouldSaveBlinded(ctx)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
for i, blk := range blks {
@@ -305,7 +324,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySigned
if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
return errors.Wrap(err, "could not update DB indices")
}
if features.Get().EnableOnlyBlindedBeaconBlocks {
if saveBlinded {
blindedBlock, err := blk.ToBlinded()
if err != nil {
if !errors.Is(err, blocks.ErrUnsupportedVersion) {
@@ -809,50 +828,71 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
return blocks.NewSignedBeaconBlock(rawBlock)
}
// marshal versioned beacon block from struct type down to bytes.
func marshalBlock(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
func (s *Store) marshalBlock(
ctx context.Context,
blk interfaces.ReadOnlySignedBeaconBlock,
) ([]byte, error) {
shouldBlind, err := s.shouldSaveBlinded(ctx)
if err != nil {
return nil, err
}
if shouldBlind {
return marshalBlockBlinded(ctx, blk)
}
return marshalBlockFull(ctx, blk)
}
// Encodes a full beacon block to the DB with its associated key.
func marshalBlockFull(
_ context.Context,
blk interfaces.ReadOnlySignedBeaconBlock,
) ([]byte, error) {
var encodedBlock []byte
var err error
blockToSave := blk
if features.Get().EnableOnlyBlindedBeaconBlocks {
blindedBlock, err := blk.ToBlinded()
switch {
case errors.Is(err, blocks.ErrUnsupportedVersion):
encodedBlock, err = blk.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal non-blinded block")
}
case err != nil:
return nil, errors.Wrap(err, "could not convert block to blinded format")
default:
encodedBlock, err = blindedBlock.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal blinded block")
}
blockToSave = blindedBlock
}
} else {
encodedBlock, err = blk.MarshalSSZ()
if err != nil {
return nil, err
}
encodedBlock, err = blk.MarshalSSZ()
if err != nil {
return nil, err
}
switch blockToSave.Version() {
switch blk.Version() {
case version.Capella:
if blockToSave.IsBlinded() {
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
}
return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
case version.Bellatrix:
if blockToSave.IsBlinded() {
return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
}
return snappy.Encode(nil, append(bellatrixKey, encodedBlock...)), nil
case version.Altair:
return snappy.Encode(nil, append(altairKey, encodedBlock...)), nil
case version.Phase0:
return snappy.Encode(nil, encodedBlock), nil
default:
return nil, errors.New("Unknown block version")
return nil, errors.New("unknown block version")
}
}
// Encodes a blinded beacon block with its associated key.
// If the block does not support blinding, we then encode it as a full
// block with its associated key by calling marshalBlockFull.
func marshalBlockBlinded(
ctx context.Context,
blk interfaces.ReadOnlySignedBeaconBlock,
) ([]byte, error) {
blindedBlock, err := blk.ToBlinded()
if err != nil {
switch {
case errors.Is(err, blocks.ErrUnsupportedVersion):
return marshalBlockFull(ctx, blk)
default:
return nil, errors.Wrap(err, "could not convert block to blinded format")
}
}
encodedBlock, err := blindedBlock.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal blinded block")
}
switch blk.Version() {
case version.Capella:
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
case version.Bellatrix:
return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
default:
return nil, fmt.Errorf("unsupported block version: %v", blk.Version())
}
}
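Reading blocks back inverts these writes: unmarshalBlock (shown truncated earlier in this file) snappy-decodes the stored value and dispatches on its key prefix. A hedged sketch of that dispatch over the keys defined in schema.go, assuming the value has already been snappy-decoded:

// versionFromEncoded reports the fork version and blindedness of a decoded
// block value by inspecting its leading key.
func versionFromEncoded(dec []byte) (v int, blinded bool) {
	switch {
	case bytes.HasPrefix(dec, capellaBlindKey):
		return version.Capella, true
	case bytes.HasPrefix(dec, capellaKey):
		return version.Capella, false
	case bytes.HasPrefix(dec, bellatrixBlindKey):
		return version.Bellatrix, true
	case bytes.HasPrefix(dec, bellatrixKey):
		return version.Bellatrix, false
	case bytes.HasPrefix(dec, altairKey):
		return version.Altair, false
	default:
		return version.Phase0, false // phase0 values carry no key prefix
	}
}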

View File

@@ -1,7 +1,6 @@
package kv
import (
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
)
@@ -10,7 +9,4 @@ func init() {
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
features.Init(&features.Flags{
EnableOnlyBlindedBeaconBlocks: true,
})
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/io/file"
bolt "go.etcd.io/bbolt"
)
@@ -194,7 +195,8 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
if err = prometheus.Register(createBoltCollector(kv.db)); err != nil {
return nil, err
}
if err = kv.checkNeedsResync(); err != nil {
// Set up the type of block storage depending on whether this is a fresh database.
if err := kv.setupBlockStorageType(ctx); err != nil {
return nil, err
}
return kv, nil
@@ -229,21 +231,60 @@ func (s *Store) DatabasePath() string {
return s.databasePath
}
func (s *Store) checkNeedsResync() error {
return s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(migrationsBucket)
hasDisabledFeature := !features.Get().EnableOnlyBlindedBeaconBlocks
if hasDisabledFeature && bkt.Get(migrationBlindedBeaconBlocksKey) != nil {
return fmt.Errorf(
"you have disabled the flag %s, and your node must resync to ensure your "+
"database is compatible. If you do not want to resync, please re-enable the %s flag",
features.EnableOnlyBlindedBeaconBlocks.Name,
features.EnableOnlyBlindedBeaconBlocks.Name,
)
func (s *Store) setupBlockStorageType(ctx context.Context) error {
// We check whether to save blinded beacon blocks by looking for a key in the DB;
// otherwise, we inspect the last stored block and set that key if it is blinded.
headBlock, err := s.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not get head block when setting up block storage type")
}
err = blocks.BeaconBlockIsNil(headBlock)
isNilBlk := err != nil
saveFull := features.Get().SaveFullExecutionPayloads
var saveBlinded bool
if err := s.db.Update(func(tx *bolt.Tx) error {
// If we have a key stating we wish to save blinded beacon blocks, then we set saveBlinded to true.
metadataBkt := tx.Bucket(chainMetadataBucket)
keyExists := len(metadataBkt.Get(saveBlindedBeaconBlocksKey)) > 0
if keyExists {
saveBlinded = true
return nil
}
// If the head block exists and is blinded, we update the key in the DB to
// say we wish to save all blocks as blinded.
if !isNilBlk && headBlock.IsBlinded() {
if err := metadataBkt.Put(saveBlindedBeaconBlocksKey, []byte{1}); err != nil {
return err
}
saveBlinded = true
}
if isNilBlk && !saveFull {
if err := metadataBkt.Put(saveBlindedBeaconBlocksKey, []byte{1}); err != nil {
return err
}
saveBlinded = true
}
return nil
})
}); err != nil {
return err
}
// If the user wants to save full execution payloads but their database is saving blinded blocks only,
// we then throw an error as the node should not start.
if saveFull && saveBlinded {
return fmt.Errorf(
"cannot use the %s flag with this existing database, as it has already been initialized to only store "+
"execution payload headers (aka blinded beacon blocks). If you want to use this flag, you must re-sync your node with a fresh "+
"database. We recommend using checkpoint sync https://docs.prylabs.network/docs/prysm-usage/checkpoint-sync/",
features.SaveFullExecutionPayloads.Name,
)
}
if saveFull {
log.Warn("Saving full beacon blocks to the database. For greater disk space savings, we recommend resyncing from an empty database with " +
"checkpoint sync to save only blinded beacon blocks by default")
}
return nil
}
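Condensed, the branches above reduce to a small decision rule; a hypothetical helper capturing it (names are illustrative, not from the diff):

// decideBlindedStorage mirrors setupBlockStorageType: an existing metadata key
// or an already-blinded head block locks the DB to blinded storage, a fresh DB
// defaults to blinded unless full payloads were requested, and requesting full
// payloads against a blinded DB is a fatal misconfiguration.
func decideBlindedStorage(keyExists, headIsBlinded, freshDB, saveFull bool) (bool, error) {
	saveBlinded := keyExists || headIsBlinded || (freshDB && !saveFull)
	if saveFull && saveBlinded {
		return false, errors.New("database already stores blinded blocks; resync to save full payloads")
	}
	return saveBlinded, nil
}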
func createBuckets(tx *bolt.Tx, buckets ...[]byte) error {

View File

@@ -2,10 +2,14 @@ package kv
import (
"context"
"fmt"
"testing"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
bolt "go.etcd.io/bbolt"
)
@@ -19,16 +23,169 @@ func setupDB(t testing.TB) *Store {
return db
}
func Test_checkNeedsResync(t *testing.T) {
store := setupDB(t)
resetFn := features.InitWithReset(&features.Flags{
EnableOnlyBlindedBeaconBlocks: false,
func Test_setupBlockStorageType(t *testing.T) {
ctx := context.Background()
t.Run("fresh database with feature enabled to store full blocks should store full blocks", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
store := setupDB(t)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, false, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)
})
t.Run("fresh database with default settings should store blinded", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: false,
})
defer resetFn()
store := setupDB(t)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
wantedBlk, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
require.DeepEqual(t, wantedBlk, retrievedBlk)
})
t.Run("existing database with blinded blocks but no key in metadata bucket should continue storing blinded blocks", func(t *testing.T) {
store := setupDB(t)
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(chainMetadataBucket).Put(saveBlindedBeaconBlocksKey, []byte{1})
}))
blk := util.NewBlindedBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayloadHeader.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)
// We then delete the key from the bucket.
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(chainMetadataBucket).Delete(saveBlindedBeaconBlocksKey)
}))
// Not a fresh database, has blinded blocks already and should continue being that way.
err = store.setupBlockStorageType(ctx)
require.NoError(t, err)
var shouldSaveBlinded bool
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(chainMetadataBucket)
shouldSaveBlinded = len(bkt.Get(saveBlindedBeaconBlocksKey)) > 0
return nil
}))
// Should have set the chain metadata bucket to save blinded
require.Equal(t, true, shouldSaveBlinded)
blkFull := util.NewBeaconBlockBellatrix()
blkFull.Block.Body.ExecutionPayload.BlockNumber = 2
wrappedBlock, err = blocks.NewSignedBeaconBlock(blkFull)
require.NoError(t, err)
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
wrappedBlinded, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
require.DeepEqual(t, wrappedBlinded, retrievedBlk)
})
t.Run("existing database with full blocks type should continue storing full blocks", func(t *testing.T) {
store := setupDB(t)
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(chainMetadataBucket).Delete(saveBlindedBeaconBlocksKey)
}))
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, false, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)
// Not a fresh database, has full blocks already and should continue being that way.
err = store.setupBlockStorageType(ctx)
require.NoError(t, err)
blk = util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 2
wrappedBlock, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, false, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)
})
t.Run("existing database with blinded blocks type should error if user enables full blocks feature flag", func(t *testing.T) {
store := setupDB(t)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
wantedBlk, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
require.DeepEqual(t, wantedBlk, retrievedBlk)
// Trying to enable full blocks with a database that is already storing blinded blocks should error.
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
err = store.setupBlockStorageType(ctx)
errMsg := "cannot use the %s flag with this existing database, as it has already been initialized"
require.ErrorContains(t, fmt.Sprintf(errMsg, features.SaveFullExecutionPayloads.Name), err)
})
defer resetFn()
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(migrationsBucket)
return bkt.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
}))
err := store.checkNeedsResync()
require.ErrorContains(t, "your node must resync", err)
}

View File

@@ -14,7 +14,6 @@ var migrations = []migration{
migrateArchivedIndex,
migrateBlockSlotIndex,
migrateStateValidators,
migrateBlindedBeaconBlocksEnabled,
}
// RunMigrations defined in the migrations array.

View File

@@ -1,27 +0,0 @@
package kv
import (
"bytes"
"context"
"github.com/prysmaticlabs/prysm/v3/config/features"
bolt "go.etcd.io/bbolt"
)
var migrationBlindedBeaconBlocksKey = []byte("blinded-beacon-blocks-enabled")
func migrateBlindedBeaconBlocksEnabled(ctx context.Context, db *bolt.DB) error {
if !features.Get().EnableOnlyBlindedBeaconBlocks {
return nil // Only write to the migrations bucket if the feature is enabled.
}
if updateErr := db.Update(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
if b := mb.Get(migrationBlindedBeaconBlocksKey); bytes.Equal(b, migrationCompleted) {
return nil // Migration already completed.
}
return mb.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
}); updateErr != nil {
return updateErr
}
return nil
}

View File

@@ -49,12 +49,12 @@ var (
// Below keys are used to identify objects are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.
altairKey = []byte("altair")
bellatrixKey = []byte("merge")
bellatrixBlindKey = []byte("blind-bellatrix")
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
altairKey = []byte("altair")
bellatrixKey = []byte("merge")
bellatrixBlindKey = []byte("blind-bellatrix")
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated

View File

@@ -129,9 +129,9 @@ go_test(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
"@com_github_ethereum_go_ethereum//beacon/engine:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/beacon:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_holiman_uint256//:go_default_library",

View File

@@ -240,7 +240,7 @@ func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
ttd := new(uint256.Int)
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
require.NoError(t, err)
require.ErrorContains(t, "missing required field 'parentHash' for Header", err)
require.Equal(t, false, reached)
}

View File

@@ -11,9 +11,9 @@ import (
"reflect"
"testing"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/beacon"
"github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
@@ -23,10 +23,10 @@ import (
func FuzzForkChoiceResponse(f *testing.F) {
valHash := common.Hash([32]byte{0xFF, 0x01})
payloadID := beacon.PayloadID([8]byte{0x01, 0xFF, 0xAA, 0x00, 0xEE, 0xFE, 0x00, 0x00})
payloadID := engine.PayloadID([8]byte{0x01, 0xFF, 0xAA, 0x00, 0xEE, 0xFE, 0x00, 0x00})
valErr := "asjajshjahsaj"
seed := &beacon.ForkChoiceResponse{
PayloadStatus: beacon.PayloadStatusV1{
seed := &engine.ForkChoiceResponse{
PayloadStatus: engine.PayloadStatusV1{
Status: "INVALID_TERMINAL_BLOCK",
LatestValidHash: &valHash,
ValidationError: &valErr,
@@ -37,7 +37,7 @@ func FuzzForkChoiceResponse(f *testing.F) {
assert.NoError(f, err)
f.Add(output)
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
gethResp := &beacon.ForkChoiceResponse{}
gethResp := &engine.ForkChoiceResponse{}
prysmResp := &execution.ForkchoiceUpdatedResponse{}
gethErr := json.Unmarshal(jsonBlob, gethResp)
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
@@ -49,14 +49,14 @@ func FuzzForkChoiceResponse(f *testing.F) {
gethBlob, gethErr := json.Marshal(gethResp)
prysmBlob, prysmErr := json.Marshal(prysmResp)
assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshaller return inconsistent errors")
newGethResp := &beacon.ForkChoiceResponse{}
newGethResp := &engine.ForkChoiceResponse{}
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
assert.NoError(t, newGethErr)
if newGethResp.PayloadStatus.Status == "UNKNOWN" {
return
}
newGethResp2 := &beacon.ForkChoiceResponse{}
newGethResp2 := &engine.ForkChoiceResponse{}
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
assert.NoError(t, newGethErr)
@@ -75,7 +75,7 @@ func FuzzForkChoiceResponse(f *testing.F) {
func FuzzExchangeTransitionConfiguration(f *testing.F) {
valHash := common.Hash([32]byte{0xFF, 0x01})
ttd := hexutil.Big(*big.NewInt(math.MaxInt))
seed := &beacon.TransitionConfigurationV1{
seed := &engine.TransitionConfigurationV1{
TerminalTotalDifficulty: &ttd,
TerminalBlockHash: valHash,
TerminalBlockNumber: hexutil.Uint64(math.MaxUint64),
@@ -85,7 +85,7 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {
assert.NoError(f, err)
f.Add(output)
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
gethResp := &beacon.TransitionConfigurationV1{}
gethResp := &engine.TransitionConfigurationV1{}
prysmResp := &pb.TransitionConfiguration{}
gethErr := json.Unmarshal(jsonBlob, gethResp)
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
@@ -103,11 +103,11 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {
if gethErr != nil {
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
}
newGethResp := &beacon.TransitionConfigurationV1{}
newGethResp := &engine.TransitionConfigurationV1{}
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
assert.NoError(t, newGethErr)
newGethResp2 := &beacon.TransitionConfigurationV1{}
newGethResp2 := &engine.TransitionConfigurationV1{}
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
assert.NoError(t, newGethErr)
})
@@ -115,7 +115,7 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {
func FuzzExecutionPayload(f *testing.F) {
logsBloom := [256]byte{'j', 'u', 'n', 'k'}
execData := &beacon.ExecutableData{
execData := &engine.ExecutableData{
ParentHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
FeeRecipient: common.Address([20]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}),
StateRoot: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
@@ -135,7 +135,7 @@ func FuzzExecutionPayload(f *testing.F) {
assert.NoError(f, err)
f.Add(output)
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
gethResp := &beacon.ExecutableData{}
gethResp := &engine.ExecutableData{}
prysmResp := &pb.ExecutionPayload{}
gethErr := json.Unmarshal(jsonBlob, gethResp)
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
@@ -147,10 +147,10 @@ func FuzzExecutionPayload(f *testing.F) {
gethBlob, gethErr := json.Marshal(gethResp)
prysmBlob, prysmErr := json.Marshal(prysmResp)
assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshaller return inconsistent errors")
newGethResp := &beacon.ExecutableData{}
newGethResp := &engine.ExecutableData{}
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
assert.NoError(t, newGethErr)
newGethResp2 := &beacon.ExecutableData{}
newGethResp2 := &engine.ExecutableData{}
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
assert.NoError(t, newGethErr)

View File

@@ -46,8 +46,6 @@ func New() *ForkChoice {
// NodeCount returns the current number of nodes in the Store.
func (f *ForkChoice) NodeCount() int {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
return len(f.store.nodeByRoot)
}
@@ -58,16 +56,9 @@ func (f *ForkChoice) Head(
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Head")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
calledHeadCount.Inc()
// Using the write lock here because subsequent calls to `updateBalances`, `applyProposerBoostScore`,
// `applyWeightChanges`, `updateBestDescendant`, and `head` require write operations on nodes.
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
if err := f.updateBalances(); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update balances")
}
@@ -94,8 +85,6 @@ func (f *ForkChoice) Head(
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch primitives.Epoch) {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessAttestation")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
for _, index := range validatorIndices {
// Validator indices will grow the vote cache.
@@ -161,7 +150,6 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
// updateCheckpoints update the checkpoints when inserting a new node.
func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkpoint) error {
f.store.checkpointsLock.Lock()
if jc.Epoch > f.store.justifiedCheckpoint.Epoch {
if jc.Epoch > f.store.bestJustifiedCheckpoint.Epoch {
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
@@ -175,7 +163,6 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: root}
if err := f.updateJustifiedBalances(ctx, root); err != nil {
f.store.checkpointsLock.Unlock()
return errors.Wrap(err, "could not update justified balances")
}
} else {
@@ -186,25 +173,18 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
}
jSlot, err := slots.EpochStart(currentJcp.Epoch)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
// Releasing here the checkpoints lock because
// AncestorRoot acquires a lock on nodes and that can
// cause a double lock.
f.store.checkpointsLock.Unlock()
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
return err
}
f.store.checkpointsLock.Lock()
if root == currentRoot {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: jcRoot}
if err := f.updateJustifiedBalances(ctx, jcRoot); err != nil {
f.store.checkpointsLock.Unlock()
return errors.Wrap(err, "could not update justified balances")
}
}
@@ -214,14 +194,12 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
jcRoot := bytesutil.ToBytes32(jc.Root)
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jcRoot}
if err := f.updateJustifiedBalances(ctx, jcRoot); err != nil {
f.store.checkpointsLock.Unlock()
return errors.Wrap(err, "could not update justified balances")
}
}
}
// Update finalization
if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
f.store.checkpointsLock.Unlock()
return nil
}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
@@ -231,29 +209,21 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: root}
if err := f.updateJustifiedBalances(ctx, root); err != nil {
f.store.checkpointsLock.Unlock()
return errors.Wrap(err, "could not update justified balances")
}
}
f.store.checkpointsLock.Unlock()
return f.store.prune(ctx)
}
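The comments deleted above documented a real hazard of per-field locking: checkpointsLock had to be released before AncestorRoot, which takes nodesLock, or two goroutines acquiring the locks in opposite orders could deadlock. An illustrative sketch of that failure mode (not code from this repository):

var checkpointsLock, nodesLock sync.RWMutex

func goroutineA() {
	checkpointsLock.Lock()
	defer checkpointsLock.Unlock()
	nodesLock.Lock() // waits for B
	defer nodesLock.Unlock()
}

func goroutineB() {
	nodesLock.Lock()
	defer nodesLock.Unlock()
	checkpointsLock.Lock() // waits for A: classic lock-order deadlock
	defer checkpointsLock.Unlock()
}

Funneling all access through one externally held lock, as this diff does, removes the ordering problem entirely.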
// HasNode returns true if the node exists in the fork choice store,
// false otherwise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
_, ok := f.store.nodeByRoot[root]
return ok
}
// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false
@@ -273,9 +243,6 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
// IsOptimistic returns true if the given root has been optimistically synced.
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
if f.store.allTipsAreInvalid {
return true, nil
}
@@ -293,9 +260,6 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
defer span.End()
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
@@ -317,12 +281,8 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
}
// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes. This function requires a lock in Store.nodesLock
// and votesLock
// validators' latest votes.
func (f *ForkChoice) updateBalances() error {
// lock checkpoints for the justified balances
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
newBalances := f.justifiedBalances
for index, vote := range f.votes {
@@ -367,7 +327,6 @@ func (f *ForkChoice) updateBalances() error {
return errors.Wrap(ErrNilNode, "could not update balances")
}
if currentNode.balance < oldBalance {
f.store.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
"oldBalance": oldBalance,
@@ -377,7 +336,6 @@ func (f *ForkChoice) updateBalances() error {
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
"previousProposerBoostScore": f.store.previousProposerBoostScore,
}).Warning("node with invalid balance, setting it to zero")
f.store.proposerBoostLock.RUnlock()
currentNode.balance = 0
} else {
currentNode.balance -= oldBalance
@@ -405,8 +363,6 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
// SetOptimisticToValid sets the node with the given root as a fully validated node
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set node to valid")
@@ -416,29 +372,21 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams
// BestJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) BestJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.bestJustifiedCheckpoint
}
// PreviousJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.prevJustifiedCheckpoint
}
// JustifiedCheckpoint of fork choice store.
func (f *ForkChoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.justifiedCheckpoint
}
// FinalizedCheckpoint of fork choice store.
func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.finalizedCheckpoint
}
@@ -451,11 +399,6 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.ValidatorIndex) {
f.votesLock.RLock()
defer f.votesLock.RUnlock()
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
// return early if the index was already included:
if f.store.slashedIndices[index] {
return
@@ -489,8 +432,6 @@ func (f *ForkChoice) UpdateJustifiedCheckpoint(ctx context.Context, jc *forkchoi
if jc == nil {
return errInvalidNilCheckpoint
}
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = jc
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}
@@ -505,8 +446,6 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
if fc == nil {
return errInvalidNilCheckpoint
}
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.finalizedCheckpoint = fc
return nil
}
@@ -516,9 +455,6 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
defer span.End()
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
n1, ok := f.store.nodeByRoot[r1]
if !ok || n1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
@@ -559,7 +495,7 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
}
}
// InsertOptimisticChain inserts all nodes corresponding to blocks in the slice
// InsertChain inserts all nodes corresponding to blocks in the slice
// `blocks`. This slice must be ordered from child to parent. It includes all
// blocks **except** the first one (that is the one with the highest slot
// number). All blocks are assumed to be a strict chain
@@ -601,8 +537,6 @@ func (f *ForkChoice) SetOriginRoot(root [32]byte) {
// CachedHeadRoot returns the last cached head root
func (f *ForkChoice) CachedHeadRoot() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node := f.store.headNode
if node == nil {
return [32]byte{}
@@ -612,8 +546,6 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
root := f.FinalizedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
@@ -625,8 +557,6 @@ func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
root := f.JustifiedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
@@ -692,8 +622,6 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
// Weight returns the weight of the given root if found on the store
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
@@ -702,7 +630,6 @@ func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
}
// updateJustifiedBalances updates the validator balances at the justified checkpoint pointed to by root.
// This function requires a lock on checkpointsLock being held by the caller.
func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte) error {
balances, err := f.balancesByRoot(ctx, root)
if err != nil {

View File

@@ -205,7 +205,6 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
f.store.nodesLock.Lock()
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
@@ -213,7 +212,6 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
f.store.nodesLock.Unlock()
r1 := [32]byte{'1'}
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: r1}

View File

@@ -21,8 +21,7 @@ const orphanLateBlockFirstThreshold = 4
const processAttestationsThreshold = 10
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node. This function requires a lock
// in Store.nodesLock
// using the current balance stored in each node.
func (n *Node) applyWeightChanges(ctx context.Context) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
@@ -43,7 +42,7 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
}
// updateBestDescendant updates the best descendant of this node and its
// children. This function assumes the caller has a lock on Store.nodesLock
// children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()

View File

@@ -27,8 +27,6 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
// The updated balance of each node is 100
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
@@ -55,8 +53,6 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
// The updated balance of each node is 100
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].weight = 400
s.nodeByRoot[indexToHash(2)].weight = 400
s.nodeByRoot[indexToHash(3)].weight = 400

View File

@@ -42,11 +42,9 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
}
// Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
f.store.checkpointsLock.RLock()
bjcp := f.store.bestJustifiedCheckpoint
jcp := f.store.justifiedCheckpoint
fcp := f.store.finalizedCheckpoint
f.store.checkpointsLock.RUnlock()
if bjcp.Epoch > jcp.Epoch {
finalizedSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
@@ -62,13 +60,11 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
return err
}
if r == fcp.Root {
f.store.checkpointsLock.Lock()
f.store.prevJustifiedCheckpoint = jcp
f.store.justifiedCheckpoint = bjcp
if err := f.updateJustifiedBalances(ctx, bjcp.Root); err != nil {
log.Error("could not update justified balances")
}
f.store.checkpointsLock.Unlock()
}
}
if !features.Get().DisablePullTips {

View File

@@ -8,34 +8,28 @@ import (
)
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [32]byte) ([][32]byte, error) {
s.nodesLock.RLock()
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok {
node, ok = s.nodeByRoot[parentRoot]
if !ok || node == nil {
s.nodesLock.RUnlock()
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
// return early if the parent is LVH
if node.payloadHash == payloadHash {
s.nodesLock.RUnlock()
return invalidRoots, nil
}
} else {
if node == nil {
s.nodesLock.RUnlock()
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
if node.parent.root != parentRoot {
s.nodesLock.RUnlock()
return invalidRoots, errInvalidParentRoot
}
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
s.nodesLock.RUnlock()
return invalidRoots, ctx.Err()
}
}
@@ -44,20 +38,16 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
if firstInvalid.parent == nil {
// return early if the invalid node was not imported
if node.root == parentRoot {
s.nodesLock.RUnlock()
return invalidRoots, nil
}
firstInvalid = node
}
s.nodesLock.RUnlock()
return s.removeNode(ctx, firstInvalid)
}
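For orientation, the traversal above is exercised by the proposer-boost test later in this diff. A sketch, assuming nodes a <- b <- c were inserted with payload hashes 'A', 'B', 'C' and the execution client reported 'A' as the last valid hash:

invalidRoots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
if err != nil {
	return err
}
// invalidRoots now holds the roots of b and c: every node past the last
// valid payload is pruned from the store and reported back so callers
// can mark those blocks invalid elsewhere.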
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
invalidRoots := make([][32]byte, 0)
if node == nil {
@@ -96,7 +86,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
}
}
invalidRoots = append(invalidRoots, node.root)
s.proposerBoostLock.Lock()
if node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
@@ -104,7 +93,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
s.proposerBoostLock.Unlock()
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return invalidRoots, nil

View File

@@ -237,19 +237,15 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = [32]byte{'c'}
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
f.store.proposerBoostLock.Unlock()
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
require.NoError(t, err)
f.store.proposerBoostLock.RLock()
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}
// This is a regression test (10565)

View File

@@ -10,23 +10,14 @@ import (
// resetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
func (f *ForkChoice) resetBoostedProposerRoot(_ context.Context) error {
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = [32]byte{}
f.store.proposerBoostLock.Unlock()
return nil
}
// applyProposerBoostScore applies the current proposer boost scores to the
// relevant nodes. This function requires a lock in Store.nodesLock.
// relevant nodes.
func (f *ForkChoice) applyProposerBoostScore() error {
s := f.store
s.proposerBoostLock.Lock()
defer s.proposerBoostLock.Unlock()
// acquire checkpoints lock for the justified balances
s.checkpointsLock.RLock()
defer s.checkpointsLock.RUnlock()
proposerScore := uint64(0)
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
@@ -53,7 +44,5 @@ func (f *ForkChoice) applyProposerBoostScore() error {
// ProposerBoost of fork choice store.
func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
s.proposerBoostLock.RLock()
defer s.proposerBoostLock.RUnlock()
return s.proposerBoostRoot
}
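The score applied above follows the consensus-spec proposer boost: a timely block earns a fraction of one slot committee's weight on top of its attestation weight. A hedged sketch of the arithmetic, using this diff's Store fields (the percentage split is the spec's formula, not necessarily this file's literal code):

// committeeWeight is total active balance / slots per epoch (see Store below).
proposerScore := (s.committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100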

View File

@@ -34,9 +34,6 @@ const orphanLateBlockProposingEarly = 2
func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
override = false
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
// We only need to override FCU if our current head is from the current
// slot. This differs from the spec implementation in that we assume
// that we will call this function in the previous slot to proposing.
@@ -61,9 +58,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
return
}
// Only reorg if we have been finalizing
f.store.checkpointsLock.RLock()
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
f.store.checkpointsLock.RUnlock()
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return
}
@@ -95,9 +90,6 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// This function needs to be called only when proposing a block and all
// attestation processing has already happened.
func (f *ForkChoice) GetProposerHead() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
head := f.store.headNode
if head == nil {
return [32]byte{}
@@ -121,9 +113,7 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
return head.root
}
// Only reorg if we have been finalizing
f.store.checkpointsLock.RLock()
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
f.store.checkpointsLock.RUnlock()
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return head.root
}
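Both guards read the same way. A worked example, assuming the spec value ReorgMaxEpochsSinceFinalization = 2: with finalizedEpoch = 10 and a head whose next slot falls in epoch 13, the check 13 > 10 + 2 trips and the node keeps the canonical head rather than attempting a proposer reorg; only chains that finalized within the last two epochs remain eligible.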

View File

@@ -14,12 +14,10 @@ import (
)
// head starts from justified root and then follows the best descendant links
// to find the best block for head. This function assumes a lock on s.nodesLock
// to find the best block for head.
func (s *Store) head(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
defer span.End()
s.checkpointsLock.RLock()
defer s.checkpointsLock.RUnlock()
if err := ctx.Err(); err != nil {
return [32]byte{}, err
@@ -71,9 +69,6 @@ func (s *Store) insert(ctx context.Context,
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
// Return if the block has been inserted into Store before.
if n, ok := s.nodeByRoot[root]; ok {
return n, nil
@@ -115,16 +110,12 @@ func (s *Store) insert(ctx context.Context,
currentSlot := slots.CurrentSlot(s.genesisTime)
boostThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
if currentSlot == slot && secondsIntoSlot < boostThreshold {
s.proposerBoostLock.Lock()
s.proposerBoostRoot = root
s.proposerBoostLock.Unlock()
}
// Update best descendants
s.checkpointsLock.RLock()
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
s.checkpointsLock.RUnlock()
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
return n, err
}
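The timeliness window above is SecondsPerSlot / IntervalsPerSlot, which on mainnet is 12 / 3 = 4 seconds: only a block for the current slot arriving within the first 4 seconds of that slot is assigned the proposer boost root. A condensed restatement (timely is a hypothetical name):

timely := currentSlot == slot && secondsIntoSlot < boostThreshold // 12/3 = 4s window on mainnet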
@@ -147,7 +138,7 @@ func (s *Store) insert(ctx context.Context,
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
// starting from `node` down to the finalized Node or to a leaf of the Fork
// choice store. This method assumes a lock on nodesLock.
// choice store.
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
if ctx.Err() != nil {
return ctx.Err()
@@ -173,12 +164,8 @@ func (s *Store) prune(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
defer span.End()
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.checkpointsLock.RLock()
finalizedRoot := s.finalizedCheckpoint.Root
finalizedEpoch := s.finalizedCheckpoint.Epoch
s.checkpointsLock.RUnlock()
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
if !ok || finalizedNode == nil {
return errors.WithMessage(errUnknownFinalizedRoot, fmt.Sprintf("%#x", finalizedRoot))
@@ -222,9 +209,6 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
var roots [][32]byte
var slots []primitives.Slot
s.nodesLock.RLock()
defer s.nodesLock.RUnlock()
for root, node := range s.nodeByRoot {
if len(node.children) == 0 {
roots = append(roots, root)
@@ -236,28 +220,14 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
// HighestReceivedBlockSlot returns the highest slot received by the forkchoice
func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
if f.store.highestReceivedNode == nil {
return 0
}
return f.store.highestReceivedNode.slot
}
// HighestReceivedBlockRoot returns the root of the highest-slot block received by forkchoice
func (f *ForkChoice) HighestReceivedBlockRoot() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
if f.store.highestReceivedNode == nil {
return [32]byte{}
}
return f.store.highestReceivedNode.root
}
// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
count := uint64(0)
lowerBound := slots.CurrentSlot(f.store.genesisTime)
var err error

View File

@@ -320,26 +320,6 @@ func TestStore_PruneMapsNodes(t *testing.T) {
}
func TestForkChoice_HighestReceivedBlockSlotRoot(t *testing.T) {
f := setup(1, 1)
s := f.store
_, err := s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.Equal(t, primitives.Slot(100), s.highestReceivedNode.slot)
require.Equal(t, primitives.Slot(100), f.HighestReceivedBlockSlot())
require.Equal(t, [32]byte{'A'}, f.HighestReceivedBlockRoot())
_, err = s.insert(context.Background(), 1000, [32]byte{'B'}, [32]byte{}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.Equal(t, primitives.Slot(1000), s.highestReceivedNode.slot)
require.Equal(t, primitives.Slot(1000), f.HighestReceivedBlockSlot())
require.Equal(t, [32]byte{'B'}, f.HighestReceivedBlockRoot())
_, err = s.insert(context.Background(), 500, [32]byte{'C'}, [32]byte{}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.Equal(t, primitives.Slot(1000), s.highestReceivedNode.slot)
require.Equal(t, primitives.Slot(1000), f.HighestReceivedBlockSlot())
require.Equal(t, [32]byte{'B'}, f.HighestReceivedBlockRoot())
}
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
f := setup(1, 1)
s := f.store

View File

@@ -11,12 +11,12 @@ import (
// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
sync.RWMutex
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
votes []Vote // tracks individual validator's last vote.
balances []uint64 // tracks individual validator's balances last accounted in votes.
justifiedBalances []uint64 // tracks individual validator's last justified balances.
numActiveValidators uint64 // tracks the total number of active validators. Requires a checkpoints lock to read/write
numActiveValidators uint64 // tracks the total number of active validators.
balancesByRoot forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
}
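Embedding sync.RWMutex directly in ForkChoice replaces the deleted votesLock: the promoted Lock/Unlock/RLock/RUnlock methods are exactly the four the extended ForkChoicer interface demands. A one-line compile-time check in the usual Go idiom (a sketch, not necessarily present in this file):

var _ forkchoice.ForkChoicer = &ForkChoice{}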
@@ -31,16 +31,13 @@ type Store struct {
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch. Requires a checkpoints lock to read/write
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
checkpointsLock sync.RWMutex
genesisTime uint64
highestReceivedNode *Node // The highest slot node.
receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.

View File

@@ -16,9 +16,6 @@ import (
)
func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoch) error {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized justified epoch")
@@ -31,9 +28,6 @@ func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoc
}
func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoch) error {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized finalized epoch")
@@ -48,10 +42,6 @@ func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoc
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
// epochs stored within nodes. It should be called at the beginning of each epoch.
func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
for _, node := range f.store.nodeByRoot {
node.justifiedEpoch = node.unrealizedJustifiedEpoch
node.finalizedEpoch = node.unrealizedFinalizedEpoch
@@ -81,16 +71,9 @@ func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
}
func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Checkpoint) (*ethpb.Checkpoint, *ethpb.Checkpoint) {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
if node.parent == nil { // Nothing to do if the parent is nil.
return jc, fc
}
s.checkpointsLock.Lock()
defer s.checkpointsLock.Unlock()
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
stateSlot := state.Slot()
stateEpoch := slots.ToEpoch(stateSlot)

View File

@@ -23,16 +23,12 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.store.nodesLock.RLock()
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
f.store.nodesLock.RUnlock()
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 2))
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 2))
f.store.nodesLock.RLock()
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
f.store.nodesLock.RUnlock()
require.ErrorIs(t, errInvalidUnrealizedJustifiedEpoch, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 0))
require.ErrorIs(t, errInvalidUnrealizedFinalizedEpoch, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 0))

View File

@@ -124,7 +124,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// /
// 5 <- head, justified epoch = 2
//
// We set this node's slot to be 64 so that when prunning below we do not prune its child
// We set this node's slot to be 64 so that when pruning below we do not prune its child
state, blkRoot, err = prepareForkchoiceState(context.Background(), 2*params.BeaconConfig().SlotsPerEpoch, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

View File

@@ -16,6 +16,10 @@ type BalancesByRooter func(context.Context, [32]byte) ([]uint64, error)
// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
type ForkChoicer interface {
Lock()
Unlock()
RLock()
RUnlock()
HeadRetriever // to compute head.
BlockProcessor // to track new block for fork choice.
AttestationProcessor // to track new attestation for fork choice.
@@ -26,9 +30,8 @@ type ForkChoicer interface {
// HeadRetriever retrieves head root and optimistic info of the current chain.
type HeadRetriever interface {
Head(context.Context) ([32]byte, error)
GetProposerHead() [32]byte
CachedHeadRoot() [32]byte
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)
}
// BlockProcessor processes the block that's used for accounting fork choice.
@@ -40,7 +43,6 @@ type BlockProcessor interface {
// AttestationProcessor processes the attestation that's used for accounting fork choice.
type AttestationProcessor interface {
ProcessAttestation(context.Context, []uint64, [32]byte, primitives.Epoch)
InsertSlashedIndex(context.Context, primitives.ValidatorIndex)
}
// Getter returns fork choice related information.
@@ -58,10 +60,12 @@ type Getter interface {
BestJustifiedCheckpoint() *forkchoicetypes.Checkpoint
NodeCount() int
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
ReceivedBlocksLastEpoch() (uint64, error)
ForkChoiceDump(context.Context) (*v1.ForkChoiceDump, error)
Weight(root [32]byte) (uint64, error)
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)
ShouldOverrideFCU() bool
}
// Setter allows to set forkchoice information
@@ -74,4 +78,5 @@ type Setter interface {
SetOriginRoot([32]byte)
NewSlot(context.Context, primitives.Slot) error
SetBalancesByRooter(BalancesByRooter)
InsertSlashedIndex(context.Context, primitives.ValidatorIndex)
}
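With Lock/Unlock/RLock/RUnlock on the interface, synchronization becomes the caller's job across the blockchain and sync packages. A read-path sketch complementing the write-path example earlier (isOptimistic is a hypothetical helper):

func isOptimistic(fc forkchoice.ForkChoicer, root [32]byte) (bool, error) {
	fc.RLock() // read-only getters share the lock
	defer fc.RUnlock()
	return fc.IsOptimistic(root)
}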

View File

@@ -7,16 +7,8 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)
// ProposerBoostRootArgs to call the BoostProposerRoot function.
type ProposerBoostRootArgs struct {
BlockRoot [32]byte
BlockSlot primitives.Slot
CurrentSlot primitives.Slot
SecondsIntoSlot uint64
}
// Checkpoint is an array version of ethpb.Checkpoint. It is used internally in
// forkchoice, while the slice version is used in the interface to legagy code
// forkchoice, while the slice version is used in the interface to legacy code
// in other packages
type Checkpoint struct {
Epoch primitives.Epoch

View File

@@ -129,12 +129,11 @@ func configureInteropConfig(cliCtx *cli.Context) error {
if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return nil
}
genStateIsSet := cliCtx.IsSet(flags.InteropGenesisStateFlag.Name)
genTimeIsSet := cliCtx.IsSet(flags.InteropGenesisTimeFlag.Name)
numValsIsSet := cliCtx.IsSet(flags.InteropNumValidatorsFlag.Name)
votesIsSet := cliCtx.IsSet(flags.InteropMockEth1DataVotesFlag.Name)
if genStateIsSet || genTimeIsSet || numValsIsSet || votesIsSet {
if genTimeIsSet || numValsIsSet || votesIsSet {
if err := params.SetActive(params.InteropConfig().Copy()); err != nil {
return err
}

View File

@@ -219,17 +219,6 @@ func TestConfigureInterop(t *testing.T) {
},
"interop",
},
{
"genesis state set",
func() *cli.Context {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.String(flags.InteropGenesisStateFlag.Name, "", "")
assert.NoError(t, set.Set(flags.InteropGenesisStateFlag.Name, "/path/"))
return cli.NewContext(&app, set, nil)
},
"interop",
},
}
for _, tt := range tests {

View File

@@ -7,7 +7,6 @@ import (
"bytes"
"context"
"fmt"
"math"
"os"
"os/signal"
"path/filepath"
@@ -67,9 +66,6 @@ import (
const testSkipPowFlag = "test-skip-pow"
// 128MB max message size when enabling debug endpoints.
const debugGrpcMaxMsgSize = 1 << 27
// Used as a struct to keep cli flag options for configuring services
// for the beacon node. We keep this as a separate struct to not pollute the actual BeaconNode
// struct, as it is merely used to pass down configuration options into the appropriate services.
@@ -762,10 +758,9 @@ func (b *BeaconNode) registerRPCService() error {
}
genesisValidators := b.cliCtx.Uint64(flags.InteropNumValidatorsFlag.Name)
genesisStatePath := b.cliCtx.String(flags.InteropGenesisStateFlag.Name)
var depositFetcher depositcache.DepositFetcher
var chainStartFetcher execution.ChainStartFetcher
if genesisValidators > 0 || genesisStatePath != "" {
if genesisValidators > 0 {
var interopService *interopcoldstart.Service
if err := b.services.FetchService(&interopService); err != nil {
return err
@@ -787,9 +782,6 @@ func (b *BeaconNode) registerRPCService() error {
maxMsgSize := b.cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
if enableDebugRPCEndpoints {
maxMsgSize = int(math.Max(float64(maxMsgSize), debugGrpcMaxMsgSize))
}
p2pService := b.fetchP2P()
rpcService := rpc.NewService(b.ctx, &rpc.Config{
@@ -881,9 +873,6 @@ func (b *BeaconNode) registerGRPCGateway() error {
maxCallSize := b.cliCtx.Uint64(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
httpModules := b.cliCtx.String(flags.HTTPModules.Name)
timeout := b.cliCtx.Int(cmd.ApiTimeoutFlag.Name)
if enableDebugRPCEndpoints {
maxCallSize = uint64(math.Max(float64(maxCallSize), debugGrpcMaxMsgSize))
}
gatewayConfig := gateway.DefaultConfig(enableDebugRPCEndpoints, httpModules)
muxs := make([]*apigateway.PbMux, 0)
@@ -917,15 +906,13 @@ func (b *BeaconNode) registerGRPCGateway() error {
func (b *BeaconNode) registerDeterminsticGenesisService() error {
genesisTime := b.cliCtx.Uint64(flags.InteropGenesisTimeFlag.Name)
genesisValidators := b.cliCtx.Uint64(flags.InteropNumValidatorsFlag.Name)
genesisStatePath := b.cliCtx.String(flags.InteropGenesisStateFlag.Name)
if genesisValidators > 0 || genesisStatePath != "" {
if genesisValidators > 0 {
svc := interopcoldstart.NewService(b.ctx, &interopcoldstart.Config{
GenesisTime: genesisTime,
NumValidators: genesisValidators,
BeaconDB: b.db,
DepositCache: b.depositCache,
GenesisPath: genesisStatePath,
})
svc.Start()

View File

@@ -114,7 +114,7 @@ func TestNodeStart_Ok_registerDeterministicGenesisService(t *testing.T) {
genesisBytes, err := genesisState.MarshalSSZ()
require.NoError(t, err)
require.NoError(t, os.WriteFile("genesis_ssz.json", genesisBytes, 0666))
set.String(flags.InteropGenesisStateFlag.Name, "genesis_ssz.json", "")
set.String("genesis-state", "genesis_ssz.json", "")
ctx := cli.NewContext(&app, set, nil)
node, err := New(ctx, WithBlockchainFlagOptions([]blockchain.Option{}),
WithBuilderFlagOptions([]builder.Option{}),

View File

@@ -157,6 +157,16 @@ func TestBLSToExecChangesForInclusion(t *testing.T) {
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
assert.Equal(t, primitives.ValidatorIndex(30), changes[1].Message.ValidatorIndex)
})
t.Run("invalid change not returned", func(t *testing.T) {
pool := NewPool()
saveByte := signedChanges[1].Message.FromBlsPubkey[5]
signedChanges[1].Message.FromBlsPubkey[5] = 0xff
pool.InsertBLSToExecChange(signedChanges[1])
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
assert.Equal(t, 0, len(changes))
signedChanges[1].Message.FromBlsPubkey[5] = saveByte
})
}
func TestInsertBLSToExecChange(t *testing.T) {

View File

@@ -4,33 +4,40 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"service.go",
"pool.go",
],
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits",
visibility = [
"//beacon-chain:__subpackages__",
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/doubly-linked-list:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
size = "small",
srcs = ["service_test.go"],
srcs = ["pool_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"//time/slots:go_default_library",
],
)

View File

@@ -1,8 +1,6 @@
package mock
import (
"context"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -14,12 +12,17 @@ type PoolMock struct {
}
// PendingExits --
func (m *PoolMock) PendingExits(_ state.ReadOnlyBeaconState, _ primitives.Slot, _ bool) []*eth.SignedVoluntaryExit {
return m.Exits
func (m *PoolMock) PendingExits() ([]*eth.SignedVoluntaryExit, error) {
return m.Exits, nil
}
// ExitsForInclusion --
func (m *PoolMock) ExitsForInclusion(_ state.ReadOnlyBeaconState, _ primitives.Slot) ([]*eth.SignedVoluntaryExit, error) {
return m.Exits, nil
}
// InsertVoluntaryExit --
func (m *PoolMock) InsertVoluntaryExit(_ context.Context, _ state.ReadOnlyBeaconState, exit *eth.SignedVoluntaryExit) {
func (m *PoolMock) InsertVoluntaryExit(exit *eth.SignedVoluntaryExit) {
m.Exits = append(m.Exits, exit)
}

View File

@@ -0,0 +1,139 @@
package voluntaryexits
import (
"math"
"sync"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
doublylinkedlist "github.com/prysmaticlabs/prysm/v3/container/doubly-linked-list"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
)
// PoolManager maintains pending and seen voluntary exits.
// This pool is used by proposers to insert voluntary exits into new blocks.
type PoolManager interface {
PendingExits() ([]*ethpb.SignedVoluntaryExit, error)
ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slot) ([]*ethpb.SignedVoluntaryExit, error)
InsertVoluntaryExit(exit *ethpb.SignedVoluntaryExit)
MarkIncluded(exit *ethpb.SignedVoluntaryExit)
}
// Pool is a concrete implementation of PoolManager.
type Pool struct {
lock sync.RWMutex
pending doublylinkedlist.List[*ethpb.SignedVoluntaryExit]
m map[types.ValidatorIndex]*doublylinkedlist.Node[*ethpb.SignedVoluntaryExit]
}
// NewPool returns an initialized pool.
func NewPool() *Pool {
return &Pool{
pending: doublylinkedlist.List[*ethpb.SignedVoluntaryExit]{},
m: make(map[types.ValidatorIndex]*doublylinkedlist.Node[*ethpb.SignedVoluntaryExit]),
}
}
// PendingExits returns all objects from the pool.
func (p *Pool) PendingExits() ([]*ethpb.SignedVoluntaryExit, error) {
p.lock.RLock()
defer p.lock.RUnlock()
result := make([]*ethpb.SignedVoluntaryExit, p.pending.Len())
node := p.pending.First()
var err error
for i := 0; node != nil; i++ {
result[i], err = node.Value()
if err != nil {
return nil, err
}
node, err = node.Next()
if err != nil {
return nil, err
}
}
return result, nil
}
// ExitsForInclusion returns exits that are ready for inclusion at the given slot. This method will not
// return more than the block-enforced MaxVoluntaryExits.
func (p *Pool) ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slot) ([]*ethpb.SignedVoluntaryExit, error) {
p.lock.RLock()
length := int(math.Min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
result := make([]*ethpb.SignedVoluntaryExit, 0, length)
node := p.pending.First()
for node != nil && len(result) < length {
exit, err := node.Value()
if err != nil {
p.lock.RUnlock()
return nil, err
}
if exit.Exit.Epoch > slots.ToEpoch(slot) {
node, err = node.Next()
if err != nil {
p.lock.RUnlock()
return nil, err
}
continue
}
validator, err := state.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
logrus.WithError(err).Warningf("could not get validator at index %d", exit.Exit.ValidatorIndex)
node, err = node.Next()
if err != nil {
p.lock.RUnlock()
return nil, err
}
continue
}
if err = blocks.VerifyExitAndSignature(validator, state.Slot(), state.Fork(), exit, state.GenesisValidatorsRoot()); err != nil {
logrus.WithError(err).Warning("removing invalid exit from pool")
p.lock.RUnlock()
// MarkIncluded removes the invalid exit from the pool
p.MarkIncluded(exit)
p.lock.RLock()
} else {
result = append(result, exit)
}
node, err = node.Next()
if err != nil {
p.lock.RUnlock()
return nil, err
}
}
p.lock.RUnlock()
return result, nil
}
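The RUnlock/MarkIncluded/RLock sequence above exists because Go's sync.RWMutex cannot be upgraded in place: MarkIncluded takes the write lock, so calling it while still holding the read lock would block forever. The failure mode it avoids, in three lines:

var mu sync.RWMutex
mu.RLock()
mu.Lock() // self-deadlock: a reader is still active, so the writer never proceeds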
// InsertVoluntaryExit into the pool.
func (p *Pool) InsertVoluntaryExit(exit *ethpb.SignedVoluntaryExit) {
p.lock.Lock()
defer p.lock.Unlock()
_, exists := p.m[exit.Exit.ValidatorIndex]
if exists {
return
}
p.pending.Append(doublylinkedlist.NewNode(exit))
p.m[exit.Exit.ValidatorIndex] = p.pending.Last()
}
// MarkIncluded is used when an exit has been included in a beacon block. It should be called for
// every exit in each block seen by this node, and it removes the exit from the pool.
func (p *Pool) MarkIncluded(exit *ethpb.SignedVoluntaryExit) {
p.lock.Lock()
defer p.lock.Unlock()
node := p.m[exit.Exit.ValidatorIndex]
if node == nil {
return
}
delete(p.m, exit.Exit.ValidatorIndex)
p.pending.Remove(node)
}
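A minimal end-to-end sketch of the new pool, with signedExit, st, and slot standing in for real values (all hypothetical):

pool := NewPool()
pool.InsertVoluntaryExit(signedExit)           // queued once per validator index
exits, err := pool.ExitsForInclusion(st, slot) // at most MaxVoluntaryExits, signature-verified
if err != nil {
	return err
}
for _, e := range exits {
	pool.MarkIncluded(e) // call after the block containing e is processed
}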

View File

@@ -0,0 +1,327 @@
package voluntaryexits
import (
"testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/crypto/bls/common"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
func TestPendingExits(t *testing.T) {
t.Run("empty pool", func(t *testing.T) {
pool := NewPool()
changes, err := pool.PendingExits()
require.NoError(t, err)
assert.Equal(t, 0, len(changes))
})
t.Run("non-empty pool", func(t *testing.T) {
pool := NewPool()
pool.InsertVoluntaryExit(&ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 0,
},
})
pool.InsertVoluntaryExit(&ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 1,
},
})
changes, err := pool.PendingExits()
require.NoError(t, err)
assert.Equal(t, 2, len(changes))
})
}
func TestExitsForInclusion(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
}
stateSlot := types.Slot(uint64(params.BeaconConfig().ShardCommitteePeriod) * uint64(params.BeaconConfig().SlotsPerEpoch))
spb.Slot = stateSlot
numValidators := 2 * params.BeaconConfig().MaxVoluntaryExits
validators := make([]*ethpb.Validator, numValidators)
exits := make([]*ethpb.VoluntaryExit, numValidators)
privKeys := make([]common.SecretKey, numValidators)
for i := range validators {
v := &ethpb.Validator{}
if i == len(validators)-2 {
// exit for this validator is invalid
v.ExitEpoch = 0
} else {
v.ExitEpoch = params.BeaconConfig().FarFutureEpoch
}
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()
v.PublicKey = pubkey
message := &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(i),
}
// exit for future slot
if i == len(validators)-1 {
message.Epoch = slots.ToEpoch(stateSlot) + 1
}
validators[i] = v
exits[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)
signedExits := make([]*ethpb.SignedVoluntaryExit, numValidators)
for i, message := range exits {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainVoluntaryExit, privKeys[i])
require.NoError(t, err)
signed := &ethpb.SignedVoluntaryExit{
Exit: message,
Signature: signature,
}
signedExits[i] = signed
}
t.Run("empty pool", func(t *testing.T) {
pool := NewPool()
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, 0, len(exits))
})
t.Run("less than MaxVoluntaryExits in pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < params.BeaconConfig().MaxVoluntaryExits-1; i++ {
pool.InsertVoluntaryExit(signedExits[i])
}
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxVoluntaryExits)-1, len(exits))
})
t.Run("MaxVoluntaryExits in pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < params.BeaconConfig().MaxVoluntaryExits; i++ {
pool.InsertVoluntaryExit(signedExits[i])
}
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxVoluntaryExits), len(exits))
})
t.Run("more than MaxVoluntaryExits in pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < numValidators; i++ {
pool.InsertVoluntaryExit(signedExits[i])
}
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxVoluntaryExits), len(exits))
for _, ch := range exits {
assert.NotEqual(t, types.ValidatorIndex(params.BeaconConfig().MaxVoluntaryExits), ch.Exit.ValidatorIndex)
}
})
t.Run("exit for future epoch not returned", func(t *testing.T) {
pool := NewPool()
pool.InsertVoluntaryExit(signedExits[len(signedExits)-1])
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, 0, len(exits))
})
t.Run("invalid exit not returned", func(t *testing.T) {
pool := NewPool()
pool.InsertVoluntaryExit(signedExits[len(signedExits)-2])
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, 0, len(exits))
})
}
func TestInsertExit(t *testing.T) {
t.Run("empty pool", func(t *testing.T) {
pool := NewPool()
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
},
}
pool.InsertVoluntaryExit(exit)
require.Equal(t, 1, pool.pending.Len())
require.Equal(t, 1, len(pool.m))
n, ok := pool.m[0]
require.Equal(t, true, ok)
v, err := n.Value()
require.NoError(t, err)
assert.DeepEqual(t, exit, v)
})
t.Run("item in pool", func(t *testing.T) {
pool := NewPool()
old := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
},
}
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
},
}
pool.InsertVoluntaryExit(old)
pool.InsertVoluntaryExit(exit)
require.Equal(t, 2, pool.pending.Len())
require.Equal(t, 2, len(pool.m))
n, ok := pool.m[0]
require.Equal(t, true, ok)
v, err := n.Value()
require.NoError(t, err)
assert.DeepEqual(t, old, v)
n, ok = pool.m[1]
require.Equal(t, true, ok)
v, err = n.Value()
require.NoError(t, err)
assert.DeepEqual(t, exit, v)
})
t.Run("validator index already exists", func(t *testing.T) {
pool := NewPool()
old := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
},
Signature: []byte("old"),
}
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
},
Signature: []byte("exit"),
}
pool.InsertVoluntaryExit(old)
pool.InsertVoluntaryExit(exit)
assert.Equal(t, 1, pool.pending.Len())
require.Equal(t, 1, len(pool.m))
n, ok := pool.m[0]
require.Equal(t, true, ok)
v, err := n.Value()
require.NoError(t, err)
assert.DeepEqual(t, old, v)
})
}
func TestMarkIncluded(t *testing.T) {
t.Run("one element in pool", func(t *testing.T) {
pool := NewPool()
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
pool.InsertVoluntaryExit(exit)
pool.MarkIncluded(exit)
assert.Equal(t, 0, pool.pending.Len())
_, ok := pool.m[0]
assert.Equal(t, false, ok)
})
t.Run("first of multiple elements", func(t *testing.T) {
pool := NewPool()
first := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
second := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
}}
third := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(2),
}}
pool.InsertVoluntaryExit(first)
pool.InsertVoluntaryExit(second)
pool.InsertVoluntaryExit(third)
pool.MarkIncluded(first)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[0]
assert.Equal(t, false, ok)
})
t.Run("last of multiple elements", func(t *testing.T) {
pool := NewPool()
first := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
second := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
}}
third := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(2),
}}
pool.InsertVoluntaryExit(first)
pool.InsertVoluntaryExit(second)
pool.InsertVoluntaryExit(third)
pool.MarkIncluded(third)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[2]
assert.Equal(t, false, ok)
})
t.Run("in the middle of multiple elements", func(t *testing.T) {
pool := NewPool()
first := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
second := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
}}
third := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(2),
}}
pool.InsertVoluntaryExit(first)
pool.InsertVoluntaryExit(second)
pool.InsertVoluntaryExit(third)
pool.MarkIncluded(second)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[1]
assert.Equal(t, false, ok)
})
t.Run("not in pool", func(t *testing.T) {
pool := NewPool()
first := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
second := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
}}
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(2),
}}
pool.InsertVoluntaryExit(first)
pool.InsertVoluntaryExit(second)
pool.MarkIncluded(exit)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[0]
require.Equal(t, true, ok)
assert.NotNil(t, pool.m[0])
_, ok = pool.m[1]
require.Equal(t, true, ok)
assert.NotNil(t, pool.m[1])
})
}

View File

@@ -1,125 +0,0 @@
package voluntaryexits
import (
"context"
"sort"
"sync"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)
// PoolManager maintains pending and seen voluntary exits.
// This pool is used by proposers to insert voluntary exits into new blocks.
type PoolManager interface {
PendingExits(state state.ReadOnlyBeaconState, slot primitives.Slot, noLimit bool) []*ethpb.SignedVoluntaryExit
InsertVoluntaryExit(ctx context.Context, state state.ReadOnlyBeaconState, exit *ethpb.SignedVoluntaryExit)
MarkIncluded(exit *ethpb.SignedVoluntaryExit)
}
// Pool is a concrete implementation of PoolManager.
type Pool struct {
lock sync.RWMutex
pending []*ethpb.SignedVoluntaryExit
}
// NewPool accepts a head fetcher (for reading the validator set) and returns an initialized
// voluntary exit pool.
func NewPool() *Pool {
return &Pool{
pending: make([]*ethpb.SignedVoluntaryExit, 0),
}
}
// PendingExits returns exits that are ready for inclusion at the given slot. This method will not
// return more than the block enforced MaxVoluntaryExits.
func (p *Pool) PendingExits(state state.ReadOnlyBeaconState, slot primitives.Slot, noLimit bool) []*ethpb.SignedVoluntaryExit {
p.lock.RLock()
defer p.lock.RUnlock()
// Allocate pending slice with a capacity of min(len(p.pending), maxVoluntaryExits) since the
// array cannot exceed the max and is typically less than the max value.
maxExits := params.BeaconConfig().MaxVoluntaryExits
if noLimit {
maxExits = uint64(len(p.pending))
}
pending := make([]*ethpb.SignedVoluntaryExit, 0, maxExits)
for _, e := range p.pending {
if e.Exit.Epoch > slots.ToEpoch(slot) {
continue
}
if v, err := state.ValidatorAtIndexReadOnly(e.Exit.ValidatorIndex); err == nil &&
v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch {
pending = append(pending, e)
if uint64(len(pending)) == maxExits {
break
}
}
}
return pending
}
// InsertVoluntaryExit into the pool. This method is a no-op if the pending exit already exists,
// or the validator is already exited.
func (p *Pool) InsertVoluntaryExit(ctx context.Context, state state.ReadOnlyBeaconState, exit *ethpb.SignedVoluntaryExit) {
ctx, span := trace.StartSpan(ctx, "exitPool.InsertVoluntaryExit")
defer span.End()
p.lock.Lock()
defer p.lock.Unlock()
// Prevent malformed messages from being inserted.
if exit == nil || exit.Exit == nil {
return
}
existsInPending, index := existsInList(p.pending, exit.Exit.ValidatorIndex)
// If the item exists in the pending list and includes a more favorable, earlier
// exit epoch, we replace it in the pending list. If it exists but the prior condition is false,
// we simply return.
if existsInPending {
if exit.Exit.Epoch < p.pending[index].Exit.Epoch {
p.pending[index] = exit
}
return
}
// Has the validator been exited already?
if v, err := state.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex); err != nil ||
v.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
return
}
// Insert into pending list and sort.
p.pending = append(p.pending, exit)
sort.Slice(p.pending, func(i, j int) bool {
return p.pending[i].Exit.ValidatorIndex < p.pending[j].Exit.ValidatorIndex
})
}
// MarkIncluded is used when an exit has been included in a beacon block. Every block seen by this
// node should call this method to include the exit. This will remove the exit from
// the pending exits slice.
func (p *Pool) MarkIncluded(exit *ethpb.SignedVoluntaryExit) {
p.lock.Lock()
defer p.lock.Unlock()
exists, index := existsInList(p.pending, exit.Exit.ValidatorIndex)
if exists {
// Exit we want is present at p.pending[index], so we remove it.
p.pending = append(p.pending[:index], p.pending[index+1:]...)
}
}
// Binary search to check if the index exists in the list of pending exits.
func existsInList(pending []*ethpb.SignedVoluntaryExit, searchingFor primitives.ValidatorIndex) (bool, int) {
i := sort.Search(len(pending), func(j int) bool {
return pending[j].Exit.ValidatorIndex >= searchingFor
})
if i < len(pending) && pending[i].Exit.ValidatorIndex == searchingFor {
return true, i
}
return false, -1
}

View File

@@ -1,530 +0,0 @@
package voluntaryexits
import (
"context"
"reflect"
"testing"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"google.golang.org/protobuf/proto"
)
func TestPool_InsertVoluntaryExit(t *testing.T) {
type fields struct {
pending []*ethpb.SignedVoluntaryExit
}
type args struct {
exit *ethpb.SignedVoluntaryExit
}
tests := []struct {
name string
fields fields
args args
want []*ethpb.SignedVoluntaryExit
}{
{
name: "Prevent inserting nil exit",
fields: fields{
pending: make([]*ethpb.SignedVoluntaryExit, 0),
},
args: args{
exit: nil,
},
want: []*ethpb.SignedVoluntaryExit{},
},
{
name: "Prevent inserting malformed exit",
fields: fields{
pending: make([]*ethpb.SignedVoluntaryExit, 0),
},
args: args{
exit: &ethpb.SignedVoluntaryExit{
Exit: nil,
},
},
want: []*ethpb.SignedVoluntaryExit{},
},
{
name: "Empty list",
fields: fields{
pending: make([]*ethpb.SignedVoluntaryExit, 0),
},
args: args{
exit: &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
want: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
},
{
name: "Duplicate identical exit",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
},
args: args{
exit: &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
want: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
},
{
name: "Duplicate exit in pending list",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
},
args: args{
exit: &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
want: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
},
{
name: "Duplicate validator index",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
},
args: args{
exit: &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 20,
ValidatorIndex: 1,
},
},
},
want: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
},
{
name: "Duplicate received with more favorable exit epoch",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 1,
},
},
},
},
args: args{
exit: &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 4,
ValidatorIndex: 1,
},
},
},
want: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 4,
ValidatorIndex: 1,
},
},
},
},
{
name: "Exit for already exited validator",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{},
},
args: args{
exit: &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 2,
},
},
},
want: []*ethpb.SignedVoluntaryExit{},
},
{
name: "Maintains sorted order",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 0,
},
},
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 2,
},
},
},
},
args: args{
exit: &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 10,
ValidatorIndex: 1,
},
},
},
want: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 0,
},
},
{
Exit: &ethpb.VoluntaryExit{
Epoch: 10,
ValidatorIndex: 1,
},
},
{
Exit: &ethpb.VoluntaryExit{
Epoch: 12,
ValidatorIndex: 2,
},
},
},
},
}
ctx := context.Background()
validators := []*ethpb.Validator{
{ // 0
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 1
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 2 - Already exited.
ExitEpoch: 15,
},
{ // 3
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &Pool{
pending: tt.fields.pending,
}
s, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
p.InsertVoluntaryExit(ctx, s, tt.args.exit)
if len(p.pending) != len(tt.want) {
t.Fatalf("Mismatched lengths of pending list. Got %d, wanted %d.", len(p.pending), len(tt.want))
}
for i := range p.pending {
if !proto.Equal(p.pending[i], tt.want[i]) {
t.Errorf("Pending exit at index %d does not match expected. Got=%v wanted=%v", i, p.pending[i], tt.want[i])
}
}
})
}
}
func TestPool_MarkIncluded(t *testing.T) {
type fields struct {
pending []*ethpb.SignedVoluntaryExit
}
type args struct {
exit *ethpb.SignedVoluntaryExit
}
tests := []struct {
name string
fields fields
args args
want fields
}{
{
name: "Removes from pending list",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{ValidatorIndex: 1},
},
{
Exit: &ethpb.VoluntaryExit{ValidatorIndex: 2},
},
{
Exit: &ethpb.VoluntaryExit{ValidatorIndex: 3},
},
},
},
args: args{
exit: &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{ValidatorIndex: 2},
},
},
want: fields{
pending: []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{ValidatorIndex: 1},
},
{
Exit: &ethpb.VoluntaryExit{ValidatorIndex: 3},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &Pool{
pending: tt.fields.pending,
}
p.MarkIncluded(tt.args.exit)
if len(p.pending) != len(tt.want.pending) {
t.Fatalf("Mismatched lengths of pending list. Got %d, wanted %d.", len(p.pending), len(tt.want.pending))
}
for i := range p.pending {
if !proto.Equal(p.pending[i], tt.want.pending[i]) {
t.Errorf("Pending exit at index %d does not match expected. Got=%v wanted=%v", i, p.pending[i], tt.want.pending[i])
}
}
})
}
}
func TestPool_PendingExits(t *testing.T) {
type fields struct {
pending []*ethpb.SignedVoluntaryExit
noLimit bool
}
type args struct {
slot primitives.Slot
}
tests := []struct {
name string
fields fields
args args
want []*ethpb.SignedVoluntaryExit
}{
{
name: "Empty list",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{},
},
args: args{
slot: 100000,
},
want: []*ethpb.SignedVoluntaryExit{},
},
{
name: "All eligible",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{
{Exit: &ethpb.VoluntaryExit{Epoch: 0}},
{Exit: &ethpb.VoluntaryExit{Epoch: 1}},
{Exit: &ethpb.VoluntaryExit{Epoch: 2}},
{Exit: &ethpb.VoluntaryExit{Epoch: 3}},
{Exit: &ethpb.VoluntaryExit{Epoch: 4}},
},
},
args: args{
slot: 1000000,
},
want: []*ethpb.SignedVoluntaryExit{
{Exit: &ethpb.VoluntaryExit{Epoch: 0}},
{Exit: &ethpb.VoluntaryExit{Epoch: 1}},
{Exit: &ethpb.VoluntaryExit{Epoch: 2}},
{Exit: &ethpb.VoluntaryExit{Epoch: 3}},
{Exit: &ethpb.VoluntaryExit{Epoch: 4}},
},
},
{
name: "All eligible, above max",
fields: fields{
noLimit: true,
pending: []*ethpb.SignedVoluntaryExit{
{Exit: &ethpb.VoluntaryExit{Epoch: 0}},
{Exit: &ethpb.VoluntaryExit{Epoch: 1}},
{Exit: &ethpb.VoluntaryExit{Epoch: 2}},
{Exit: &ethpb.VoluntaryExit{Epoch: 3}},
{Exit: &ethpb.VoluntaryExit{Epoch: 4}},
{Exit: &ethpb.VoluntaryExit{Epoch: 5}},
{Exit: &ethpb.VoluntaryExit{Epoch: 6}},
{Exit: &ethpb.VoluntaryExit{Epoch: 7}},
{Exit: &ethpb.VoluntaryExit{Epoch: 8}},
{Exit: &ethpb.VoluntaryExit{Epoch: 9}},
{Exit: &ethpb.VoluntaryExit{Epoch: 10}},
{Exit: &ethpb.VoluntaryExit{Epoch: 11}},
{Exit: &ethpb.VoluntaryExit{Epoch: 12}},
{Exit: &ethpb.VoluntaryExit{Epoch: 13}},
{Exit: &ethpb.VoluntaryExit{Epoch: 14}},
{Exit: &ethpb.VoluntaryExit{Epoch: 15}},
{Exit: &ethpb.VoluntaryExit{Epoch: 16}},
{Exit: &ethpb.VoluntaryExit{Epoch: 17}},
{Exit: &ethpb.VoluntaryExit{Epoch: 18}},
{Exit: &ethpb.VoluntaryExit{Epoch: 19}},
},
},
args: args{
slot: 1000000,
},
want: []*ethpb.SignedVoluntaryExit{
{Exit: &ethpb.VoluntaryExit{Epoch: 0}},
{Exit: &ethpb.VoluntaryExit{Epoch: 1}},
{Exit: &ethpb.VoluntaryExit{Epoch: 2}},
{Exit: &ethpb.VoluntaryExit{Epoch: 3}},
{Exit: &ethpb.VoluntaryExit{Epoch: 4}},
{Exit: &ethpb.VoluntaryExit{Epoch: 5}},
{Exit: &ethpb.VoluntaryExit{Epoch: 6}},
{Exit: &ethpb.VoluntaryExit{Epoch: 7}},
{Exit: &ethpb.VoluntaryExit{Epoch: 8}},
{Exit: &ethpb.VoluntaryExit{Epoch: 9}},
{Exit: &ethpb.VoluntaryExit{Epoch: 10}},
{Exit: &ethpb.VoluntaryExit{Epoch: 11}},
{Exit: &ethpb.VoluntaryExit{Epoch: 12}},
{Exit: &ethpb.VoluntaryExit{Epoch: 13}},
{Exit: &ethpb.VoluntaryExit{Epoch: 14}},
{Exit: &ethpb.VoluntaryExit{Epoch: 15}},
{Exit: &ethpb.VoluntaryExit{Epoch: 16}},
{Exit: &ethpb.VoluntaryExit{Epoch: 17}},
{Exit: &ethpb.VoluntaryExit{Epoch: 18}},
{Exit: &ethpb.VoluntaryExit{Epoch: 19}},
},
},
{
name: "All eligible, block max",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{
{Exit: &ethpb.VoluntaryExit{Epoch: 0}},
{Exit: &ethpb.VoluntaryExit{Epoch: 1}},
{Exit: &ethpb.VoluntaryExit{Epoch: 2}},
{Exit: &ethpb.VoluntaryExit{Epoch: 3}},
{Exit: &ethpb.VoluntaryExit{Epoch: 4}},
{Exit: &ethpb.VoluntaryExit{Epoch: 5}},
{Exit: &ethpb.VoluntaryExit{Epoch: 6}},
{Exit: &ethpb.VoluntaryExit{Epoch: 7}},
{Exit: &ethpb.VoluntaryExit{Epoch: 8}},
{Exit: &ethpb.VoluntaryExit{Epoch: 9}},
{Exit: &ethpb.VoluntaryExit{Epoch: 10}},
{Exit: &ethpb.VoluntaryExit{Epoch: 11}},
{Exit: &ethpb.VoluntaryExit{Epoch: 12}},
{Exit: &ethpb.VoluntaryExit{Epoch: 13}},
{Exit: &ethpb.VoluntaryExit{Epoch: 14}},
{Exit: &ethpb.VoluntaryExit{Epoch: 15}},
{Exit: &ethpb.VoluntaryExit{Epoch: 16}},
{Exit: &ethpb.VoluntaryExit{Epoch: 17}},
{Exit: &ethpb.VoluntaryExit{Epoch: 18}},
{Exit: &ethpb.VoluntaryExit{Epoch: 19}},
},
},
args: args{
slot: 1000000,
},
want: []*ethpb.SignedVoluntaryExit{
{Exit: &ethpb.VoluntaryExit{Epoch: 0}},
{Exit: &ethpb.VoluntaryExit{Epoch: 1}},
{Exit: &ethpb.VoluntaryExit{Epoch: 2}},
{Exit: &ethpb.VoluntaryExit{Epoch: 3}},
{Exit: &ethpb.VoluntaryExit{Epoch: 4}},
{Exit: &ethpb.VoluntaryExit{Epoch: 5}},
{Exit: &ethpb.VoluntaryExit{Epoch: 6}},
{Exit: &ethpb.VoluntaryExit{Epoch: 7}},
{Exit: &ethpb.VoluntaryExit{Epoch: 8}},
{Exit: &ethpb.VoluntaryExit{Epoch: 9}},
{Exit: &ethpb.VoluntaryExit{Epoch: 10}},
{Exit: &ethpb.VoluntaryExit{Epoch: 11}},
{Exit: &ethpb.VoluntaryExit{Epoch: 12}},
{Exit: &ethpb.VoluntaryExit{Epoch: 13}},
{Exit: &ethpb.VoluntaryExit{Epoch: 14}},
{Exit: &ethpb.VoluntaryExit{Epoch: 15}},
},
},
{
name: "Some eligible",
fields: fields{
pending: []*ethpb.SignedVoluntaryExit{
{Exit: &ethpb.VoluntaryExit{Epoch: 0}},
{Exit: &ethpb.VoluntaryExit{Epoch: 3}},
{Exit: &ethpb.VoluntaryExit{Epoch: 4}},
{Exit: &ethpb.VoluntaryExit{Epoch: 2}},
{Exit: &ethpb.VoluntaryExit{Epoch: 1}},
},
},
args: args{
slot: 2 * params.BeaconConfig().SlotsPerEpoch,
},
want: []*ethpb.SignedVoluntaryExit{
{Exit: &ethpb.VoluntaryExit{Epoch: 0}},
{Exit: &ethpb.VoluntaryExit{Epoch: 2}},
{Exit: &ethpb.VoluntaryExit{Epoch: 1}},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &Pool{
pending: tt.fields.pending,
}
s, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{{ExitEpoch: params.BeaconConfig().FarFutureEpoch}}})
require.NoError(t, err)
if got := p.PendingExits(s, tt.args.slot, tt.fields.noLimit); !reflect.DeepEqual(got, tt.want) {
t.Errorf("PendingExits() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -86,7 +86,6 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/muxer/mplex:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/protocol/identify:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",

View File

@@ -17,7 +17,6 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/p2p/protocol/identify"
"github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/async"
@@ -133,7 +132,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
}
s.host = h
s.host.RemoveStreamHandler(identify.IDDelta)
// Gossipsub registration is done before we add in any new peers
// due to libp2p's gossipsub implementation not taking into
// account previously added peers when creating the gossipsub

View File

@@ -165,7 +165,7 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
require.NoError(t, err)
s.stateNotifier = &mock.MockStateNotifier{}
// required params to addForkEntry in s.forkWatcher
s.genesisTime = time.Now()
beaconCfg := params.BeaconConfig().Copy()

View File

@@ -51,7 +51,8 @@ func (_ *MockHost) Connect(_ context.Context, _ peer.AddrInfo) error {
func (_ *MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {}
// SetStreamHandlerMatch --
func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(string) bool, network.StreamHandler) {}
func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(protocol.ID) bool, network.StreamHandler) {
}
// RemoveStreamHandler --
func (_ *MockHost) RemoveStreamHandler(_ protocol.ID) {}
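Editor's note: the two SetStreamHandlerMatch signatures above show the matcher parameter changing from func(string) bool to func(protocol.ID) bool, tracking the libp2p host.Host interface update in this PR. A hedged sketch of a matcher under the new signature (the protocol prefix is an example value, not taken from this diff):

// --- Editor's sketch (illustrative, not part of this diff) ---
import (
	"strings"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/protocol"
)

// registerPrefixHandler routes every stream whose protocol ID shares a
// prefix to a single handler, using the new matcher signature.
func registerPrefixHandler(h host.Host) {
	h.SetStreamHandlerMatch(
		protocol.ID("/eth2/beacon_chain/req"), // base ID; example value
		func(id protocol.ID) bool {
			return strings.HasPrefix(string(id), "/eth2/beacon_chain/req")
		},
		func(s network.Stream) {
			defer func() { _ = s.Close() }()
			// handle the matched stream here
		},
	)
}
// --- end sketch ---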

View File

@@ -106,6 +106,7 @@ go_test(
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

View File

@@ -56,6 +56,7 @@ func (e *blockIdParseError) Error() string {
// GetWeakSubjectivity computes the starting epoch of the current weak subjectivity period and
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
// DEPRECATED: The GetWeakSubjectivity endpoint will no longer be supported.
func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*ethpbv1.WeakSubjectivityResponse, error) {
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
// This is already a grpc error, so we can't wrap it any further
@@ -284,6 +285,7 @@ func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer)
}
// GetBlock retrieves block details for given block ID.
// DEPRECATED: please use GetBlockV2 instead
func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlock")
defer span.End()
@@ -307,6 +309,7 @@ func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*eth
}
// GetBlockSSZ returns the SSZ-serialized version of the beacon block for the given block ID.
// DEPRECATED: please use GetBlockV2SSZ instead
func (bs *Server) GetBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockSSZResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockSSZ")
defer span.End()

View File

@@ -12,6 +12,7 @@ import (
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
executionTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
mockp2p "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -1397,6 +1398,10 @@ func TestServer_GetBlockV2(t *testing.T) {
})
t.Run("Bellatrix", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
@@ -1519,6 +1524,10 @@ func TestServer_GetBlockV2(t *testing.T) {
})
t.Run("Capella", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
@@ -1825,6 +1834,10 @@ func TestServer_GetBlockSSZV2(t *testing.T) {
})
t.Run("Bellatrix", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
@@ -1860,6 +1873,10 @@ func TestServer_GetBlockSSZV2(t *testing.T) {
})
t.Run("Capella", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

View File

@@ -253,16 +253,13 @@ func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpbv1.Propo
// ListPoolVoluntaryExits retrieves voluntary exits known by the node but
// not necessarily incorporated into any block.
func (bs *Server) ListPoolVoluntaryExits(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.VoluntaryExitsPoolResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolVoluntaryExits")
_, span := trace.StartSpan(ctx, "beacon.ListPoolVoluntaryExits")
defer span.End()
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
sourceExits, err := bs.VoluntaryExitsPool.PendingExits()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
return nil, status.Error(codes.Internal, "Could not get exits from the pool")
}
sourceExits := bs.VoluntaryExitsPool.PendingExits(headState, headState.Slot(), true /* return unlimited exits */)
exits := make([]*ethpbv1.SignedVoluntaryExit, len(sourceExits))
for i, s := range sourceExits {
exits[i] = migration.V1Alpha1ExitToV1(s)
@@ -302,7 +299,7 @@ func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpbv1.SignedVo
return nil, status.Errorf(codes.InvalidArgument, "Invalid voluntary exit: %v", err)
}
bs.VoluntaryExitsPool.InsertVoluntaryExit(ctx, headState, alphaExit)
bs.VoluntaryExitsPool.InsertVoluntaryExit(alphaExit)
if err := bs.Broadcaster.Broadcast(ctx, alphaExit); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast voluntary exit object: %v", err)
}
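Editor's note: taken together, the call sites in this file imply a slimmer pool surface: PendingExits no longer needs a head state, and InsertVoluntaryExit no longer takes a context or state. A sketch of the interface as inferred from the calls in this diff (not copied from the Prysm source):

// --- Editor's sketch (inferred from call sites, not part of this diff) ---
import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

type VoluntaryExitsPool interface {
	// PendingExits returns the pool contents; per-exit validation now
	// happens pool-side rather than at the call site.
	PendingExits() ([]*eth.SignedVoluntaryExit, error)
	// ExitsForInclusion returns exits valid for inclusion in a block built
	// on the given head state at the given slot (see the getExits change
	// later in this diff).
	ExitsForInclusion(head state.ReadOnlyBeaconState, slot primitives.Slot) ([]*eth.SignedVoluntaryExit, error)
	// InsertVoluntaryExit adds a verified exit to the pool.
	InsertVoluntaryExit(exit *eth.SignedVoluntaryExit)
	// MarkIncluded removes an exit that has made it into a block.
	MarkIncluded(exit *eth.SignedVoluntaryExit)
}
// --- end sketch ---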

View File

@@ -770,7 +770,8 @@ func TestSubmitVoluntaryExit_Ok(t *testing.T) {
_, err = s.SubmitVoluntaryExit(ctx, exit)
require.NoError(t, err)
pendingExits := s.VoluntaryExitsPool.PendingExits(bs, bs.Slot(), true)
pendingExits, err := s.VoluntaryExitsPool.PendingExits()
require.NoError(t, err)
require.Equal(t, 1, len(pendingExits))
assert.DeepEqual(t, migration.V1ExitToV1Alpha1(exit), pendingExits[0])
assert.Equal(t, true, broadcaster.BroadcastCalled)

View File

@@ -166,5 +166,7 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
// GetForkChoice returns a dump fork choice store.
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceDump, error) {
ds.ForkFetcher.ForkChoicer().RLock()
defer ds.ForkFetcher.ForkChoicer().RUnlock()
return ds.ForkFetcher.ForkChoicer().ForkChoiceDump(ctx)
}

View File

@@ -81,6 +81,7 @@ type SyncDetailsJson struct {
SyncDistance string `json:"sync_distance"`
IsSyncing bool `json:"is_syncing"`
IsOptimistic bool `json:"is_optimistic"`
ElOffline bool `json:"el_offline"`
}
// SyncDetailsContainer is a wrapper for SyncDetails.

View File

@@ -39,7 +39,7 @@ func TestValidateSync(t *testing.T) {
require.Equal(t, true, ok, "could not retrieve custom error metadata value")
assert.DeepEqual(
t,
[]string{"{\"sync_details\":{\"head_slot\":\"50\",\"sync_distance\":\"50\",\"is_syncing\":true,\"is_optimistic\":false}}"},
[]string{`{"sync_details":{"head_slot":"50","sync_distance":"50","is_syncing":true,"is_optimistic":false,"el_offline":false}}`},
v,
)
})

View File

@@ -12,6 +12,7 @@ go_library(
"//api/grpc:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
@@ -44,6 +45,7 @@ go_test(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",

View File

@@ -277,6 +277,7 @@ func (ns *Server) GetSyncStatus(ctx context.Context, _ *emptypb.Empty) (*ethpb.S
SyncDistance: ns.GenesisTimeFetcher.CurrentSlot() - headSlot,
IsSyncing: ns.SyncChecker.Syncing(),
IsOptimistic: isOptimistic,
ElOffline: !ns.ExecutionChainInfoFetcher.ExecutionClientConnected(),
},
}, nil
}

View File

@@ -22,6 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers"
mockp2p "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/testutil"
syncmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/consensus-types/wrapper"
@@ -172,10 +173,11 @@ func TestSyncStatus(t *testing.T) {
syncChecker.IsSyncing = true
s := &Server{
HeadFetcher: chainService,
GenesisTimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: syncChecker,
HeadFetcher: chainService,
GenesisTimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: syncChecker,
ExecutionChainInfoFetcher: &testutil.MockExecutionChainInfoFetcher{},
}
resp, err := s.GetSyncStatus(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
@@ -183,6 +185,7 @@ func TestSyncStatus(t *testing.T) {
assert.Equal(t, primitives.Slot(10), resp.Data.SyncDistance)
assert.Equal(t, true, resp.Data.IsSyncing)
assert.Equal(t, true, resp.Data.IsOptimistic)
assert.Equal(t, false, resp.Data.ElOffline)
}
func TestGetPeer(t *testing.T) {

View File

@@ -6,6 +6,7 @@ package node
import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/sync"
"google.golang.org/grpc"
@@ -15,13 +16,14 @@ import (
// providing RPC endpoints for verifying a beacon node's sync status, genesis and
// version information.
type Server struct {
SyncChecker sync.Checker
OptimisticModeFetcher blockchain.OptimisticModeFetcher
Server *grpc.Server
BeaconDB db.ReadOnlyDatabase
PeersFetcher p2p.PeersProvider
PeerManager p2p.PeerManager
MetadataProvider p2p.MetadataProvider
GenesisTimeFetcher blockchain.TimeFetcher
HeadFetcher blockchain.HeadFetcher
SyncChecker sync.Checker
OptimisticModeFetcher blockchain.OptimisticModeFetcher
Server *grpc.Server
BeaconDB db.ReadOnlyDatabase
PeersFetcher p2p.PeersProvider
PeerManager p2p.PeerManager
MetadataProvider p2p.MetadataProvider
GenesisTimeFetcher blockchain.TimeFetcher
HeadFetcher blockchain.HeadFetcher
ExecutionChainInfoFetcher execution.ChainInfoFetcher
}

View File

@@ -691,13 +691,14 @@ func TestProduceBlockV2(t *testing.T) {
beaconState, parentRoot, privKeys := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 64)
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:]}
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
mockExecutionChain := &mockExecution.Chain{}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
@@ -705,10 +706,11 @@ func TestProduceBlockV2(t *testing.T) {
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db, doublylinkedtree.New()),
StateGen: stategen.New(db, mockChainService.ForkChoiceStore),
TimeFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
require.NoError(t, db.SaveGenesisData(ctx, beaconState))
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
for i := primitives.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
@@ -801,13 +803,14 @@ func TestProduceBlockV2(t *testing.T) {
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
mochChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:]}
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
mockExecutionChain := &mockExecution.Chain{}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: mochChainService,
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mochChainService,
HeadUpdater: mochChainService,
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
@@ -817,8 +820,8 @@ func TestProduceBlockV2(t *testing.T) {
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db, doublylinkedtree.New()),
SyncCommitteePool: synccommittee.NewStore(),
TimeFetcher: mochChainService,
OptimisticModeFetcher: mochChainService,
TimeFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
@@ -993,7 +996,7 @@ func TestProduceBlockV2(t *testing.T) {
var payloadIdBytes enginev1.PayloadIDBytes
copy(payloadIdBytes[:], "payload_id")
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:]}
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
mockExecutionChain := &mockExecution.Chain{}
v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mockExecution.EngineClient{
@@ -1007,6 +1010,7 @@ func TestProduceBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
@@ -1235,7 +1239,7 @@ func TestProduceBlockV2(t *testing.T) {
var payloadIdBytes enginev1.PayloadIDBytes
copy(payloadIdBytes[:], "payload_id")
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:]}
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
mockExecutionChain := &mockExecution.Chain{}
v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mockExecution.EngineClient{
@@ -1249,6 +1253,7 @@ func TestProduceBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
@@ -1380,11 +1385,13 @@ func TestProduceBlockV2SSZ(t *testing.T) {
bs, parentRoot, privKeys := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 2)
mockChainService := &mockChain.ChainService{State: bs, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -1541,11 +1548,13 @@ func TestProduceBlockV2SSZ(t *testing.T) {
require.NoError(t, db.SaveState(ctx, bs, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
mockChainService := &mockChain.ChainService{State: bs, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -1738,6 +1747,7 @@ func TestProduceBlockV2SSZ(t *testing.T) {
require.NoError(t, db.SaveState(ctx, bs, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
mockChainService := &mockChain.ChainService{State: bs, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mockExecution.EngineClient{
ExecutionBlock: &enginev1.ExecutionBlock{
@@ -1745,11 +1755,12 @@ func TestProduceBlockV2SSZ(t *testing.T) {
},
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -1966,6 +1977,7 @@ func TestProduceBlockV2SSZ(t *testing.T) {
prevRandao, err := helpers.RandaoMix(bs, 2)
require.NoError(t, err)
mockChainService := &mockChain.ChainService{State: bs, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mockExecution.EngineClient{
ExecutionBlock: &enginev1.ExecutionBlock{
@@ -1987,11 +1999,12 @@ func TestProduceBlockV2SSZ(t *testing.T) {
},
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2216,11 +2229,13 @@ func TestProduceBlindedBlock(t *testing.T) {
beaconState, parentRoot, privKeys := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 64)
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2320,11 +2335,13 @@ func TestProduceBlindedBlock(t *testing.T) {
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2453,6 +2470,11 @@ func TestProduceBlindedBlock(t *testing.T) {
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, beaconState.SetNextSyncCommittee(syncCommittee))
ts := time.Now()
ti := ts.Add(-time.Duration(uint64(params.BeaconConfig().SlotsPerEpoch+1)*params.BeaconConfig().SecondsPerSlot) * time.Second)
require.NoError(t, beaconState.SetGenesisTime(uint64(ti.Unix())))
random, err := helpers.RandaoMix(beaconState, coreTime.CurrentEpoch(beaconState))
require.NoError(t, err)
stateRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
genesisBlock := util.NewBeaconBlockBellatrix()
@@ -2461,9 +2483,6 @@ func TestProduceBlindedBlock(t *testing.T) {
parentRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
fb := util.HydrateSignedBeaconBlockBellatrix(&ethpbalpha.SignedBeaconBlockBellatrix{})
fb.Block.Body.ExecutionPayload.GasLimit = 123
wfb, err := blocks.NewSignedBeaconBlock(fb)
@@ -2474,12 +2493,10 @@ func TestProduceBlindedBlock(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
ti := time.Unix(0, 0)
ts, err := slots.ToTime(uint64(ti.Unix()), params.BeaconConfig().SlotsPerEpoch+1)
require.NoError(t, err)
require.NoError(t, beaconState.SetGenesisTime(uint64(ti.Unix())))
random, err := helpers.RandaoMix(beaconState, coreTime.CurrentEpoch(beaconState))
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
bid := &ethpbalpha.BuilderBid{
Header: &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
@@ -2507,18 +2524,20 @@ func TestProduceBlindedBlock(t *testing.T) {
Signature: sk.Sign(sr[:]).Marshal(),
}
fcs := doublylinkedtree.New()
fcs.SetGenesisTime(uint64(ti.Unix()))
chainSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 1)
mockChainService := &mockChain.ChainService{Slot: &chainSlot, Genesis: ti, State: beaconState, Root: parentRoot[:], ForkChoiceStore: fcs, Block: wfb}
v1Alpha1Server := &v1alpha1validator.Server{
BeaconDB: db,
ForkFetcher: &mockChain.ChainService{ForkChoiceStore: doublylinkedtree.New()},
TimeFetcher: &mockChain.ChainService{
Genesis: ti,
},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:], Block: wfb},
OptimisticModeFetcher: &mockChain.ChainService{},
BeaconDB: db,
ForkFetcher: mockChainService,
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
ChainStartFetcher: &mockExecution.Chain{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ChainStartFetcher: &mockExecution.Chain{GenesisState: beaconState},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
MockEth1Votes: true,
@@ -2623,7 +2642,7 @@ func TestProduceBlindedBlock(t *testing.T) {
containerBlock, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock)
require.Equal(t, true, ok)
blk := containerBlock.BellatrixBlock
assert.Equal(t, req.Slot, blk.Slot, "Expected block to have slot of 1")
assert.Equal(t, req.Slot, blk.Slot, "Expected block to have slot of 33")
assert.DeepEqual(t, parentRoot[:], blk.ParentRoot, "Expected block to have correct parent root")
assert.DeepEqual(t, randaoReveal, blk.Body.RandaoReveal, "Expected block to have correct randao reveal")
assert.DeepEqual(t, req.Graffiti, blk.Body.Graffiti, "Expected block to have correct graffiti")
@@ -2667,6 +2686,10 @@ func TestProduceBlindedBlock(t *testing.T) {
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, beaconState.SetNextSyncCommittee(syncCommittee))
ts := time.Now()
ti := ts.Add(-time.Duration(uint64(2*params.BeaconConfig().SlotsPerEpoch+1)*params.BeaconConfig().SecondsPerSlot) * time.Second)
require.NoError(t, beaconState.SetGenesisTime(uint64(ti.Unix())))
stateRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
genesisBlock := util.NewBeaconBlockCapella()
@@ -2688,10 +2711,6 @@ func TestProduceBlindedBlock(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
ti := time.Unix(0, 0)
ts, err := slots.ToTime(uint64(ti.Unix()), params.BeaconConfig().SlotsPerEpoch*2+1)
require.NoError(t, err)
require.NoError(t, beaconState.SetGenesisTime(uint64(ti.Unix())))
random, err := helpers.RandaoMix(beaconState, coreTime.CurrentEpoch(beaconState))
require.NoError(t, err)
wds, err := beaconState.ExpectedWithdrawals()
@@ -2726,18 +2745,21 @@ func TestProduceBlindedBlock(t *testing.T) {
Signature: sk.Sign(sr[:]).Marshal(),
}
id := &enginev1.PayloadIDBytes{0x1}
chainSlot := primitives.Slot(2*params.BeaconConfig().SlotsPerEpoch + 1)
fcs := doublylinkedtree.New()
fcs.SetGenesisTime(uint64(ti.Unix()))
mockChainService := &mockChain.ChainService{Slot: &chainSlot, Genesis: ti, State: beaconState, Root: parentRoot[:], ForkChoiceStore: fcs, Block: wfb}
v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mockExecution.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &enginev1.ExecutionPayloadCapella{BlockNumber: 1, Withdrawals: wds}, BlockValue: big.NewInt(0)},
BeaconDB: db,
ForkFetcher: &mockChain.ChainService{ForkChoiceStore: doublylinkedtree.New()},
TimeFetcher: &mockChain.ChainService{
Genesis: ti,
},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:], Block: wfb},
OptimisticModeFetcher: &mockChain.ChainService{},
ExecutionEngineCaller: &mockExecution.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &enginev1.ExecutionPayloadCapella{BlockNumber: 1, Withdrawals: wds}, BlockValue: big.NewInt(0)},
BeaconDB: db,
ForkFetcher: mockChainService,
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2746,7 +2768,7 @@ func TestProduceBlindedBlock(t *testing.T) {
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
BLSChangesPool: blstoexec.NewPool(),
StateGen: stategen.New(db, doublylinkedtree.New()),
StateGen: stategen.New(db, fcs),
SyncCommitteePool: synccommittee.NewStore(),
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
BlockBuilder: &builderTest.MockBuilderService{
@@ -2877,11 +2899,13 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
bs, parentRoot, privKeys := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 2)
mockChainService := &mockChain.ChainService{State: bs, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -3038,11 +3062,13 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
require.NoError(t, db.SaveState(ctx, bs, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
mockChainService := &mockChain.ChainService{State: bs, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -3235,18 +3261,20 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
require.NoError(t, db.SaveState(ctx, bs, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
mockChainService := &mockChain.ChainService{State: bs, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mockExecution.EngineClient{
ExecutionBlock: &enginev1.ExecutionBlock{
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -3457,6 +3485,7 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
prevRandao, err := helpers.RandaoMix(bs, 2)
require.NoError(t, err)
mockChainService := &mockChain.ChainService{State: bs, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mockExecution.EngineClient{
ExecutionBlock: &enginev1.ExecutionBlock{
@@ -3478,11 +3507,12 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
},
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
HeadFetcher: mockChainService,
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},

View File

@@ -201,6 +201,7 @@ func (bs *Server) ListIndexedAttestations(
// StreamAttestations to clients at the end of every slot. This method retrieves the
// aggregated attestations currently in the pool at the start of a slot and sends
// them over a gRPC stream.
// DEPRECATED: This endpoint is superseded by the /eth/v1/events Beacon API endpoint
func (bs *Server) StreamAttestations(
_ *emptypb.Empty, stream ethpb.BeaconChain_StreamAttestationsServer,
) error {
@@ -235,6 +236,7 @@ func (bs *Server) StreamAttestations(
// StreamIndexedAttestations to clients at the end of every slot. This method retrieves the
// aggregated attestations currently in the pool, converts them into indexed form, and
// sends them over a gRPC stream.
// DEPRECATED: This endpoint is superseded by the /eth/v1/events Beacon API endpoint
func (bs *Server) StreamIndexedAttestations(
_ *emptypb.Empty, stream ethpb.BeaconChain_StreamIndexedAttestationsServer,
) error {

View File

@@ -271,6 +271,7 @@ func (bs *Server) GetChainHead(ctx context.Context, _ *emptypb.Empty) (*ethpb.Ch
}
// StreamBlocks to clients every single time a block is received by the beacon node.
// DEPRECATED: This endpoint is superseded by the /eth/v1/events Beacon API endpoint
func (bs *Server) StreamBlocks(req *ethpb.StreamBlocksRequest, stream ethpb.BeaconChain_StreamBlocksServer) error {
blocksChannel := make(chan *feed.Event, 1)
var blockSub event.Subscription
@@ -342,6 +343,7 @@ func (bs *Server) StreamBlocks(req *ethpb.StreamBlocksRequest, stream ethpb.Beac
}
// StreamChainHead to clients every single time the head block and state of the chain change.
// DEPRECATED: This endpoint is superseded by the /eth/v1/events Beacon API endpoint
func (bs *Server) StreamChainHead(_ *emptypb.Empty, stream ethpb.BeaconChain_StreamChainHeadServer) error {
stateChannel := make(chan *feed.Event, 1)
stateSub := bs.StateNotifier.StateFeed().Subscribe(stateChannel)

View File

@@ -558,10 +558,6 @@ func TestServer_ListBeaconBlocks_Genesis(t *testing.T) {
}
func runListBlocksGenesis(t *testing.T, blk interfaces.ReadOnlySignedBeaconBlock, blkContainer *ethpb.BeaconBlockContainer) {
resetFn := features.InitWithReset(&features.Flags{
EnableOnlyBlindedBeaconBlocks: true,
})
defer resetFn()
db := dbTest.SetupDB(t)
ctx := context.Background()
@@ -745,6 +741,10 @@ func TestServer_ListBeaconBlocks_Pagination(t *testing.T) {
runListBeaconBlocksPagination(t, orphanedB, blockCreator, containerCreator)
})
t.Run("bellatrix block", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 300
blockCreator := func(i primitives.Slot) interfaces.ReadOnlySignedBeaconBlock {
@@ -769,6 +769,10 @@ func TestServer_ListBeaconBlocks_Pagination(t *testing.T) {
runListBeaconBlocksPagination(t, orphanedB, blockCreator, containerCreator)
})
t.Run("capella block", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
blk := util.NewBeaconBlockCapella()
blk.Block.Slot = 300
blockCreator := func(i primitives.Slot) interfaces.ReadOnlySignedBeaconBlock {

View File

@@ -92,6 +92,8 @@ var (
// Note that this will stream information whilst syncing; this is intended to allow complete validator state capture
// over time. If this is not required, the client can either wait until the beacon node is synced or filter results
// based on the epoch value in the returned validator info.
// DEPRECATED: Streaming Validator Info is no longer supported.
// The Beacon API /eth/v1/beacon/states/{state_id}/validators will provide validator info in unstreamed format.
func (bs *Server) StreamValidatorsInfo(stream ethpb.BeaconChain_StreamValidatorsInfoServer) error {
stateChannel := make(chan *feed.Event, params.BeaconConfig().SlotsPerEpoch)
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second

View File

@@ -27,6 +27,7 @@ go_library(
"@com_github_ipfs_go_log_v2//:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@org_golang_google_grpc//codes:go_default_library",

View File

@@ -6,6 +6,7 @@ import (
"github.com/golang/protobuf/ptypes/empty"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"google.golang.org/grpc/codes"
@@ -92,7 +93,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
aVersion = ""
}
peerInfo := &ethpb.DebugPeerResponse_PeerInfo{
Protocols: protocols,
Protocols: protocol.ConvertToStrings(protocols),
FaultCount: uint64(resp),
ProtocolVersion: pVersion,
AgentVersion: aVersion,

View File

@@ -236,6 +236,7 @@ func (ns *Server) GetETH1ConnectionStatus(_ context.Context, _ *empty.Empty) (*e
}
// StreamBeaconLogs from the beacon node via a gRPC server-side stream.
// DEPRECATED: This endpoint does not appear to be used and has been marked for deprecation.
func (ns *Server) StreamBeaconLogs(_ *empty.Empty, stream ethpb.Health_StreamBeaconLogsServer) error {
ch := make(chan []byte, ns.StreamLogsBufferSize)
sub := ns.LogsStreamer.LogsFeed().Subscribe(ch)

View File

@@ -45,7 +45,7 @@ func (vs *Server) ProposeExit(ctx context.Context, req *ethpb.SignedVoluntaryExi
},
})
vs.ExitPool.InsertVoluntaryExit(ctx, s, req)
vs.ExitPool.InsertVoluntaryExit(req)
r, err := req.Exit.HashTreeRoot()
if err != nil {

View File

@@ -47,9 +47,11 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
if err := vs.HeadUpdater.UpdateHead(ctx); err != nil {
log.WithError(err).Error("Could not process attestations and update head")
}
// process attestations and update head in forkchoice
vs.ForkFetcher.ForkChoicer().Lock()
vs.HeadUpdater.UpdateHead(ctx, vs.ForkFetcher.CurrentSlot())
parentRoot := vs.ForkFetcher.ForkChoicer().GetProposerHead()
vs.ForkFetcher.ForkChoicer().Unlock()
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
@@ -62,15 +64,11 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
}
parentRoot, err := vs.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head root: %v", err)
}
head, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
head, err = transition.ProcessSlotsUsingNextSlotCache(ctx, head, parentRoot, req.Slot)
head, err = transition.ProcessSlotsUsingNextSlotCache(ctx, head, parentRoot[:], req.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", req.Slot, err)
}
@@ -79,7 +77,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
sBlk.SetSlot(req.Slot)
sBlk.SetGraffiti(req.Graffiti)
sBlk.SetRandaoReveal(req.RandaoReveal)
sBlk.SetParentRoot(parentRoot)
sBlk.SetParentRoot(parentRoot[:])
// Set eth1 data.
eth1Data, err := vs.eth1DataMajorityVote(ctx, head)

View File

@@ -45,6 +45,9 @@ func (vs *Server) circuitBreakBuilder(s primitives.Slot) (bool, error) {
return true, errors.New("no fork choicer configured")
}
vs.ForkFetcher.ForkChoicer().RLock()
defer vs.ForkFetcher.ForkChoicer().RUnlock()
// The circuit breaker is active if the number of consecutive missed slots is greater than or equal to `MaxBuilderConsecutiveMissedSlots`.
highestReceivedSlot := vs.ForkFetcher.ForkChoicer().HighestReceivedBlockSlot()
maxConsecutiveSkipSlotsAllowed := params.BeaconConfig().MaxBuilderConsecutiveMissedSlots
@@ -53,7 +56,7 @@ func (vs *Server) circuitBreakBuilder(s primitives.Slot) (bool, error) {
return true, err
}
if diff > maxConsecutiveSkipSlotsAllowed {
if diff >= maxConsecutiveSkipSlotsAllowed {
log.WithFields(logrus.Fields{
"currentSlot": s,
"highestReceivedSlot": highestReceivedSlot,
@@ -77,7 +80,7 @@ func (vs *Server) circuitBreakBuilder(s primitives.Slot) (bool, error) {
if err != nil {
return true, err
}
if diff > maxEpochSkipSlotsAllowed {
if diff >= maxEpochSkipSlotsAllowed {
log.WithFields(logrus.Fields{
"totalMissed": diff,
"maxEpochSkipSlotsAllowed": maxEpochSkipSlotsAllowed,

View File

@@ -42,7 +42,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
st, blkRoot, err := createState(1, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, s.ForkFetcher.ForkChoicer().InsertNode(ctx, st, blkRoot))
b, err = s.circuitBreakBuilder(params.BeaconConfig().MaxBuilderConsecutiveMissedSlots + 1)
b, err = s.circuitBreakBuilder(params.BeaconConfig().MaxBuilderConsecutiveMissedSlots)
require.NoError(t, err)
require.Equal(t, false, b)
@@ -119,7 +119,7 @@ func TestServer_canUseBuilder(t *testing.T) {
require.NoError(t, proposerServer.BeaconDB.SaveRegistrationsByValidatorIDs(ctx, []primitives.ValidatorIndex{0},
[]*ethpb.ValidatorRegistrationV1{{FeeRecipient: f, Pubkey: p}}))
reg, err = proposerServer.canUseBuilder(ctx, params.BeaconConfig().MaxBuilderConsecutiveMissedSlots, 0)
reg, err = proposerServer.canUseBuilder(ctx, params.BeaconConfig().MaxBuilderConsecutiveMissedSlots-1, 0)
require.NoError(t, err)
require.Equal(t, true, reg)
}

View File

@@ -1,28 +1,16 @@
package validator
import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)
func (vs *Server) getExits(head state.BeaconState, slot primitives.Slot) []*ethpb.SignedVoluntaryExit {
exits := vs.ExitPool.PendingExits(head, slot, false /*noLimit*/)
validExits := make([]*ethpb.SignedVoluntaryExit, 0, len(exits))
for _, exit := range exits {
val, err := head.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
log.WithError(err).Warn("Could not retrieve validator index")
continue
}
if err := blocks.VerifyExitAndSignature(val, head.Slot(), head.Fork(), exit, head.GenesisValidatorsRoot()); err != nil {
log.WithError(err).Warn("Could not verify exit for block inclusion")
continue
}
validExits = append(validExits, exit)
exits, err := vs.ExitPool.ExitsForInclusion(head, slot)
if err != nil {
log.WithError(err).Error("Could not get exits")
return []*ethpb.SignedVoluntaryExit{}
}
return validExits
return exits
}

View File

@@ -1,7 +1,6 @@
package validator
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
@@ -28,7 +27,7 @@ func TestServer_getExits(t *testing.T) {
for i := primitives.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxVoluntaryExits; i++ {
exit, err := util.GenerateVoluntaryExits(beaconState, privKeys[i], i)
require.NoError(t, err)
proposerServer.ExitPool.InsertVoluntaryExit(context.Background(), beaconState, exit)
proposerServer.ExitPool.InsertVoluntaryExit(exit)
exits[i] = exit
}

View File

@@ -439,10 +439,12 @@ func TestServer_GetBeaconBlock_Optimistic(t *testing.T) {
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
mockChainService := &mock.ChainService{ForkChoiceStore: doublylinkedtree.New()}
proposerServer := &Server{
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
SyncChecker: &mockSync.Sync{},
HeadUpdater: &mock.ChainService{},
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
TimeFetcher: &mock.ChainService{}}
req := &ethpb.BlockRequest{
Slot: bellatrixSlot + 1,
@@ -455,14 +457,16 @@ func TestServer_GetBeaconBlock_Optimistic(t *testing.T) {
}
func getProposerServer(db db.HeadAccessDatabase, headState state.BeaconState, headRoot []byte) *Server {
mockChainService := &mock.ChainService{State: headState, Root: headRoot, ForkChoiceStore: doublylinkedtree.New()}
return &Server{
HeadFetcher: &mock.ChainService{State: headState, Root: headRoot},
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
ForkFetcher: mockChainService,
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),

View File

@@ -261,15 +261,16 @@ func (s *Service) Start() {
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
}
nodeServerV1 := &node.Server{
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
}
beaconChainServer := &beaconv1alpha1.Server{

View File

@@ -6,6 +6,7 @@ import (
"context"
"flag"
"fmt"
"math"
"net/http"
"strings"
@@ -26,7 +27,7 @@ var (
debug = flag.Bool("debug", false, "Enable debug logging")
allowedOrigins = flag.String("corsdomain", "localhost:4242", "A comma separated list of CORS domains to allow")
enableDebugRPCEndpoints = flag.Bool("enable-debug-rpc-endpoints", false, "Enable debug rpc endpoints such as /eth/v1alpha1/beacon/state")
grpcMaxMsgSize = flag.Int("grpc-max-msg-size", 1<<22, "Integer to define max receive message call size")
grpcMaxMsgSize = flag.Int("grpc-max-msg-size", math.MaxInt32, "Integer to define max receive message call size")
httpModules = flag.String(
"http-modules",
strings.Join([]string{flags.PrysmAPIModule, flags.EthAPIModule}, ","),
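Editor's note: the old default of 1<<22 is 4 MiB, while math.MaxInt32 (2147483647 bytes, roughly 2 GiB) effectively removes the cap, matching the "raise the max grpc message size" commit. A hedged sketch of how such a value typically feeds a grpc-go dial (the endpoint is a placeholder; option names are from the standard grpc-go API):

// --- Editor's sketch (illustrative, not part of this diff) ---
package main

import (
	"math"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial(
		"localhost:4000", // placeholder endpoint
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)),
	)
	if err != nil {
		panic(err)
	}
	defer func() { _ = conn.Close() }()
}
// --- end sketch ---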

View File

@@ -121,8 +121,7 @@ func convertRandaoMixes(indices []uint64, elements interface{}, convertAll bool)
func convertEth1DataVotes(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]*ethpb.Eth1Data)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Eth1Data{}).Name(), reflect.TypeOf(elements).Name())
return nil, errors.Errorf("Wanted type of %T but got %T", []*ethpb.Eth1Data{}, elements)
}
return handleEth1DataSlice(val, indices, convertAll)
}
@@ -130,8 +129,7 @@ func convertEth1DataVotes(indices []uint64, elements interface{}, convertAll boo
func convertValidators(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]*ethpb.Validator)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Validator{}).Name(), reflect.TypeOf(elements).Name())
return nil, errors.Errorf("Wanted type of %T but got %T", []*ethpb.Validator{}, elements)
}
return handleValidatorSlice(val, indices, convertAll)
}
@@ -139,8 +137,7 @@ func convertValidators(indices []uint64, elements interface{}, convertAll bool)
func convertAttestations(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]*ethpb.PendingAttestation)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.PendingAttestation{}).Name(), reflect.TypeOf(elements).Name())
return nil, errors.Errorf("Wanted type of %T but got %T", []*ethpb.PendingAttestation{}, elements)
}
return handlePendingAttestationSlice(val, indices, convertAll)
}
@@ -148,8 +145,7 @@ func convertAttestations(indices []uint64, elements interface{}, convertAll bool
func convertBalances(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]uint64)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]uint64{}).Name(), reflect.TypeOf(elements).Name())
return nil, errors.Errorf("Wanted type of %T but got %T", []uint64{}, elements)
}
return handleBalanceSlice(val, indices, convertAll)
}
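Editor's note: the switch from reflect.TypeOf(x).Name() to %T fixes an empty-string problem: Name() returns "" for unnamed composite types such as slices, so the old error messages printed blanks. A small demonstration:

// --- Editor's sketch (illustrative, not part of this diff) ---
package main

import (
	"fmt"
	"reflect"
)

func main() {
	vals := []uint64{1, 2, 3}
	// Slices are unnamed types, so Name() yields the empty string.
	fmt.Printf("Name(): %q\n", reflect.TypeOf(vals).Name()) // Name(): ""
	// %T prints the full type, which is what the error message needs.
	fmt.Printf("T: %T\n", vals) // T: []uint64
}
// --- end sketch ---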

View File

@@ -3,7 +3,6 @@ package fieldtrie
import (
"encoding/binary"
"fmt"
"reflect"
"sync"
"testing"
@@ -270,7 +269,7 @@ func TestFieldTrie_NativeState_fieldConvertersNative(t *testing.T) {
convertAll: true,
},
wantHex: nil,
errMsg: fmt.Sprintf("Wanted type of %v", reflect.TypeOf([]*ethpb.Eth1Data{}).Name()),
errMsg: fmt.Sprintf("Wanted type of %T", []*ethpb.Eth1Data{}),
},
{
name: "Balance",
@@ -305,7 +304,7 @@ func TestFieldTrie_NativeState_fieldConvertersNative(t *testing.T) {
convertAll: true,
},
wantHex: nil,
errMsg: fmt.Sprintf("Wanted type of %v", reflect.TypeOf([]*ethpb.Validator{}).Name()),
errMsg: fmt.Sprintf("Wanted type of %T", []*ethpb.Validator{}),
},
{
name: "Attestations",

Some files were not shown because too many files have changed in this diff.