Mirror of https://github.com/OffchainLabs/prysm.git
Synced 2026-01-10 05:47:59 -05:00

Compare commits: blob-by-ro...ensure-e2e (78 commits)

| SHA1 |
|---|
| 94372214f7 |
| 5ea6e32d6c |
| ab407809f0 |
| 4d28d69fd9 |
| 3802761088 |
| 2669c93375 |
| ce32453c7b |
| ee8cb8bd6f |
| be4ef54482 |
| 0c025ab719 |
| b180a7de81 |
| 5c234c8c68 |
| f92d492e33 |
| a926028e45 |
| 49f0c44dfe |
| 22172b79cb |
| 8aec170f9b |
| cc764c346b |
| 7d5d30ac94 |
| f6eb42b761 |
| 39fe29d8f4 |
| 81fbfceea8 |
| e258f256c6 |
| a0ff5ff792 |
| 4528ea8d0d |
| 00b9e484e5 |
| 5eaa152589 |
| 6d3ff65635 |
| 83a294c1a5 |
| 753e285fb6 |
| 525d3b05a6 |
| 4f38ba38b7 |
| eb0b5a6146 |
| 4356cbc352 |
| 0893821e35 |
| c3346fefa7 |
| d639a26bbe |
| bb95d951cc |
| 78d49fda13 |
| 3aaba7c065 |
| b8a1bcdfe3 |
| 93514de00f |
| 86a883aa19 |
| c000e8fde5 |
| 75e5887f07 |
| 4ca3c5b058 |
| 25d06d41be |
| 0a87210514 |
| 196798eacc |
| 17fe935343 |
| ac4483417d |
| 0d3fb0a32b |
| 3d337b07e1 |
| 11b90e1f63 |
| 3c73bac798 |
| 91fee5db17 |
| 155b0c161e |
| a7010d817d |
| c0dd233a1c |
| c391fad258 |
| e92b546a36 |
| 765345ac3a |
| ec13d52f03 |
| 08ebc99bc3 |
| 8918e8c274 |
| 0e4185b40c |
| 383edb3553 |
| 40589905bc |
| ae76240f83 |
| 096cf7b8c1 |
| 7c3027801b |
| 8f6d5ff075 |
| c379c9ea47 |
| 75e8f85ba5 |
| 8d82ca08ab |
| 29f645f0cc |
| 15016555f5 |
| 98d2d5f324 |
.bazelrc (178 changed lines)
@@ -1,9 +1,9 @@
 # Print warnings for tests with inappropriate test size or timeout.
 test --test_verbose_timeout_warnings

 # Only build test targets when running bazel test //...
 test --build_tests_only
 test --test_output=errors
+# Import bazelrc presets
+import %workspace%/build/bazelrc/convenience.bazelrc
+import %workspace%/build/bazelrc/correctness.bazelrc
+import %workspace%/build/bazelrc/cross.bazelrc
+import %workspace%/build/bazelrc/debug.bazelrc
+import %workspace%/build/bazelrc/performance.bazelrc

 # E2E run with debug gotag
 test:e2e --define gotags=debug
@@ -11,21 +11,9 @@ test:e2e --define gotags=debug
 # Clearly indicate that coverage is enabled to disable certain nogo checks.
 coverage --define=coverage_enabled=1

 # Fix for rules_docker. See: https://github.com/bazelbuild/rules_docker/issues/842
 build --host_force_python=PY2
 run --host_force_python=PY2

 # Networking is blocked for tests by default, add "requires-network" tag to your test if networking
 # is required within the sandbox. Network sandboxing only works on linux.
 build --sandbox_default_allow_network=false

 # Stamp binaries with git information
 build --workspace_status_command=./hack/workspace_status.sh

 # Prevent PATH changes from rebuilding when switching from IDE to command line.
 build --incompatible_strict_action_env
 run --incompatible_strict_action_env

 build --define blst_disabled=false
 run --define blst_disabled=false

@@ -68,42 +56,6 @@ build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
 build:cgo_symbolizer -c dbg
 build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled

-# multi-arch cross-compiling toolchain configs:
-#-----------------------------------------------
-build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain
-build:cross --host_platform=@io_bazel_rules_go//go/toolchain:linux_amd64
-build:cross --host_crosstool_top=@prysm_toolchains//:hostonly_toolchain
-
-# linux_amd64 config for cross compiler toolchain, not strictly necessary since host/exec env is amd64
-build:linux_amd64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64_cgo
-
-# osx_amd64 config for cross compiler toolchain
-build:osx_amd64 --config=cross
-build:osx_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64_cgo
-build:osx_amd64 --compiler=osxcross
-
-# osx_arm64 config for cross compiler toolchain
-build:osx_arm64 --config=cross
-build:osx_arm64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64_cgo
-build:osx_arm64 --compiler=osxcross
-build:osx_arm64 --cpu=aarch64
-
-# windows
-build:windows_amd64 --config=cross
-build:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64_cgo
-build:windows_amd64 --compiler=mingw-w64
-
-# linux_arm64 conifg for cross compiler toolchain
-build:linux_arm64 --config=cross
-build:linux_arm64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo
-build:linux_arm64 --copt=-funsafe-math-optimizations
-build:linux_arm64 --copt=-ftree-vectorize
-build:linux_arm64 --copt=-fomit-frame-pointer
-build:linux_arm64 --cpu=aarch64
-build:linux_arm64 --compiler=clang
-build:linux_arm64 --copt=-march=armv8-a
-
-
 # toolchain build debug configs
 #------------------------------
 build:debug --sandbox_debug
@@ -111,123 +63,5 @@ build:debug --toolchain_resolution_debug
 build:debug --verbose_failures
 build:debug -s

-# windows debug
-build:windows_amd64_debug --config=windows_amd64
-build:windows_amd64_debug --config=debug
-
-# osx_amd64 debug config
-build:osx_amd64_debug --config=debug
-build:osx_amd64_debug --config=osx_amd64
-
-# osx_arm64 debug config
-build:osx_arm64_debug --config=debug
-build:osx_arm64_debug --config=osx_arm64
-
-# linux_arm64_debug
-build:linux_arm64_debug --config=linux_arm64
-build:linux_arm64_debug --config=debug
-
-# linux_amd64_debug
-build:linux_amd64_debug --config=linux_amd64
-build:linux_amd64_debug --config=debug
-
-
-# Docker Sandbox Configs
-#-----------------------
-# Note all docker sandbox configs must run from a linux x86_64 host
-# build:docker-sandbox --experimental_docker_image=gcr.io/prysmaticlabs/rbe-worker:latest
-build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
-build:docker-sandbox --define=EXECUTOR=remote
-build:docker-sandbox --experimental_docker_verbose
-build:docker-sandbox --experimental_enable_docker_sandbox
-build:docker-sandbox --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
-build:docker-sandbox --host_javabase=@rbe_ubuntu_clang//java:jdk
-build:docker-sandbox --javabase=@rbe_ubuntu_clang//java:jdk
-build:docker-sandbox --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
-build:docker-sandbox --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
-build:docker-sandbox --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
-build:docker-sandbox --host_platform=@rbe_ubuntu_clang//config:platform
-build:docker-sandbox --platforms=@rbe_ubuntu_clang//config:platform
-build:docker-sandbox --extra_toolchains=@prysm_toolchains//:cc-toolchain-multiarch
-
-# windows_amd64 docker sandbox build config
-build:windows_amd64_docker --config=docker-sandbox --config=windows_amd64
-build:windows_amd64_docker_debug --config=windows_amd64_docker --config=debug
-
-# osx_amd64 docker sandbox build config
-build:osx_amd64_docker --config=docker-sandbox --config=osx_amd64
-build:osx_amd64_docker_debug --config=osx_amd64_docker --config=debug
-
-# osx_arm64 docker sandbox build config
-build:osx_arm64_docker --config=docker-sandbox --config=osx_arm64
-build:osx_arm64_docker_debug --config=osx_arm64_docker --config=debug
-
-# linux_arm64 docker sandbox build config
-build:linux_arm64_docker --config=docker-sandbox --config=linux_arm64
-build:linux_arm64_docker_debug --config=linux_arm64_docker --config=debug
-
-# linux_amd64 docker sandbox build config
-build:linux_amd64_docker --config=docker-sandbox --config=linux_amd64
-build:linux_amd64_docker_debug --config=linux_amd64_docker --config=debug
-
-
-# Remote Build Execution
-#-----------------------
-# Originally from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/bazel-2.0.0.bazelrc
-#
-# Depending on how many machines are in the remote execution instance, setting
-# this higher can make builds faster by allowing more jobs to run in parallel.
-# Setting it too high can result in jobs that timeout, however, while waiting
-# for a remote machine to execute them.
-build:remote --jobs=50
-
-# Set several flags related to specifying the platform, toolchain and java
-# properties.
-# These flags should only be used as is for the rbe-ubuntu16-04 container
-# and need to be adapted to work with other toolchain containers.
-build:remote --host_javabase=@rbe_ubuntu_clang//java:jdk
-build:remote --javabase=@rbe_ubuntu_clang//java:jdk
-build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
-build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
-build:remote --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
-build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
-# Platform flags:
-# The toolchain container used for execution is defined in the target indicated
-# by "extra_execution_platforms", "host_platform" and "platforms".
-# More about platforms: https://docs.bazel.build/versions/master/platforms.html
-build:remote --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
-build:remote --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
-build:remote --host_platform=@rbe_ubuntu_clang//config:platform
-build:remote --platforms=@rbe_ubuntu_clang//config:platform
-
-# Starting with Bazel 0.27.0 strategies do not need to be explicitly
-# defined. See https://github.com/bazelbuild/bazel/issues/7480
-build:remote --define=EXECUTOR=remote
-
-# Enable remote execution so actions are performed on the remote systems.
-# build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
-
-# Enforce stricter environment rules, which eliminates some non-hermetic
-# behavior and therefore improves both the remote cache hit rate and the
-# correctness and repeatability of the build.
-build:remote --incompatible_strict_action_env=true
-
-# Set a higher timeout value, just in case.
-build:remote --remote_timeout=3600
-
-# Enable authentication. This will pick up application default credentials by
-# default. You can use --google_credentials=some_file.json to use a service
-# account credential instead.
-# build:remote --google_default_credentials=true
-
-# Enable build without the bytes
-# See: https://github.com/bazelbuild/bazel/issues/6862
-build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
-
-build:remote --remote_local_fallback

 # Ignore GoStdLib with remote caching
 build --modify_execution_info='GoStdlib.*=+no-remote-cache'

 # Set bazel gotag
 build --define gotags=bazel

@@ -1 +1 @@
-5.3.0
+6.1.0
WORKSPACE (83 changed lines)
@@ -4,15 +4,18 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

 http_archive(
-    name = "bazel_toolchains",
-    sha256 = "8e0633dfb59f704594f19ae996a35650747adc621ada5e8b9fb588f808c89cb0",
-    strip_prefix = "bazel-toolchains-3.7.0",
+    name = "rules_pkg",
+    sha256 = "8c20f74bca25d2d442b327ae26768c02cf3c99e93fad0381f32be9aab1967675",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
-        "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
+        "https://github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
     ],
 )

+load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
+
+rules_pkg_dependencies()
+
 http_archive(
     name = "com_grail_bazel_toolchain",
     sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
@@ -39,10 +42,6 @@ load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_tool

 configure_prysm_toolchains()

-load("@prysm//tools/cross-toolchain:rbe_toolchains_config.bzl", "rbe_toolchains_config")
-
-rbe_toolchains_config()
-
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

 http_archive(
@@ -76,9 +75,8 @@ http_archive(

 http_archive(
     name = "io_bazel_rules_docker",
-    sha256 = "1f4e59843b61981a96835dc4ac377ad4da9f8c334ebe5e0bb3f58f80c09735f4",
-    strip_prefix = "rules_docker-0.19.0",
-    urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.19.0/rules_docker-v0.19.0.tar.gz"],
+    sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf",
+    urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
 )

 http_archive(
@@ -88,10 +86,10 @@ http_archive(
         # Expose internals of go_test for custom build transitions.
         "//third_party:io_bazel_rules_go_test.patch",
     ],
-    sha256 = "ae013bf35bd23234d1dea46b079f1e05ba74ac0321423830119d3e787ec73483",
+    sha256 = "dd926a88a564a9246713a9c00b35315f54cbd46b31a26d5d8fb264c07045f05d",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
+        "https://github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
     ],
 )

@@ -122,32 +120,36 @@ load(
     "container_pull",
 )

+# Pulled gcr.io/distroless/cc-debian11:latest on 2022-02-23
 container_pull(
-    name = "cc_image_base",
-    digest = "sha256:41036fc7ed8df0f6addc18484cef0c94a85867508967789f947e11ffd5ff0cc8",
+    name = "cc_image_base_amd64",
+    digest = "sha256:2a0daf90a7deb78465bfca3ef2eee6e91ce0a5706059f05d79d799a51d339523",
     registry = "gcr.io",
-    repository = "distroless/cc",
+    repository = "distroless/cc-debian11",
 )

+# Pulled gcr.io/distroless/cc-debian11:debug on 2022-02-23
 container_pull(
-    name = "cc_debug_image_base",
-    digest = "sha256:6865ad48467c89c3c3524d4c426f52ad12d9ab7dec31fad31fae69da40eb6445",
+    name = "cc_debug_image_base_amd64",
+    digest = "sha256:7bd596f5f200588f13a69c268eea6ce428b222b67cd7428d6a7fef95e75c052a",
     registry = "gcr.io",
-    repository = "distroless/cc",
+    repository = "distroless/cc-debian11",
 )

+# Pulled from gcr.io/distroless/base-debian11:latest on 2022-02-23
 container_pull(
-    name = "go_image_base",
-    digest = "sha256:b9b124f955961599e72630654107a0cf04e08e6fa777fa250b8f840728abd770",
+    name = "go_image_base_amd64",
+    digest = "sha256:34e682800774ecbd0954b1663d90238505f1ba5543692dbc75feef7dd4839e90",
     registry = "gcr.io",
-    repository = "distroless/base",
+    repository = "distroless/base-debian11",
 )

+# Pulled from gcr.io/distroless/base-debian11:debug on 2022-02-23
 container_pull(
-    name = "go_debug_image_base",
-    digest = "sha256:65668d2b78d25df3d8ccf5a778d017fcaba513b52078c700083eaeef212b9979",
+    name = "go_debug_image_base_amd64",
+    digest = "sha256:0f503c6bfd207793bc416f20a35bf6b75d769a903c48f180ad73f60f7b60d7bd",
     registry = "gcr.io",
-    repository = "distroless/base",
+    repository = "distroless/base-debian11",
 )

 container_pull(
@@ -162,7 +164,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()

 go_register_toolchains(
-    go_version = "1.19.4",
+    go_version = "1.19.7",
     nogo = "@//:nogo",
 )

@@ -188,6 +190,21 @@ filegroup(
     url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
 )

+http_archive(
+    name = "eip4881_spec_tests",
+    build_file_content = """
+filegroup(
+    name = "test_data",
+    srcs = glob([
+        "**/*.yaml",
+    ]),
+    visibility = ["//visibility:public"],
+)
+""",
+    sha256 = "89cb659498c0d196fc9f957f8b849b2e1a5c041c3b2b3ae5432ac5c26944297e",
+    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
+)
+
 consensus_spec_version = "v1.3.0-rc.3"

 bls_test_version = "v0.1.1"
@@ -282,9 +299,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "82b01a48b143fe0f2fb7fb5f5dd385c1f934335a12d7954f08b1d45d77427b5e",
-    strip_prefix = "eth2-networks-674f7a1d01d9c18345456eab76e3871b3df2126b",
-    url = "https://github.com/eth-clients/eth2-networks/archive/674f7a1d01d9c18345456eab76e3871b3df2126b.tar.gz",
+    sha256 = "2701e1e1a3ec10c673fe7dbdbbe6f02c8ae8c922aebbf6e720d8c72d5458aafe",
+    strip_prefix = "eth2-networks-7b4897888cebef23801540236f73123e21774954",
+    url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
 )

 http_archive(
@@ -315,9 +332,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "b2226874526805d64c29e5053fa28e511b57c0860585d6d59777ee81ff4859ca",
+    sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
     urls = [
-        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.2/prysm-web-ui.tar.gz",
+        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
     ],
 )
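Every http_archive above is pinned by a SHA-256 checksum, so each version bump (rules_pkg, rules_docker, rules_go, eth2-networks, prysm-web-ui) comes with a recomputed digest. A minimal sketch in Go of how such a checksum can be recomputed from a release artifact; the helper is illustrative and not part of the repo:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"net/http"
)

// sha256OfURL streams a release artifact and returns its hex-encoded
// SHA-256 digest, the value WORKSPACE expects in the sha256 attribute.
func sha256OfURL(url string) (string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	h := sha256.New()
	if _, err := io.Copy(h, resp.Body); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}

func main() {
	// One of the URLs pinned in the WORKSPACE diff above.
	sum, err := sha256OfURL("https://github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum) // should match the dd926a88... value above
}
```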
@@ -20,6 +20,7 @@ go_library(
     "//encoding/ssz/detect:go_default_library",
     "//io/file:go_default_library",
     "//network/forks:go_default_library",
+    "//proto/eth/v1:go_default_library",
     "//proto/prysm/v1alpha1:go_default_library",
     "//runtime/version:go_default_library",
     "//time/slots:go_default_library",
@@ -103,8 +103,8 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
 }

 log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
-log.Printf("BeaconState htr=%#xd, Block state_root=%#x", sr, b.Block().StateRoot())
-log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", br, realBlockRoot)
+log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
+log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
 return &OriginData{
     st: s,
     b: b,
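The fix drops a stray d that had been appended to the %#x format verbs: %#x already prints a 0x-prefixed hex encoding of a byte slice, so the extra character was emitted literally in the logs. A standalone illustration:

```go
package main

import "fmt"

func main() {
	root := [32]byte{0xab, 0xcd}
	// Buggy verb: the trailing "d" is not part of the verb and prints literally.
	fmt.Printf("htr=%#xd\n", root[:]) // htr=0xabcd00...00d
	// Fixed verb: a clean 0x-prefixed hex encoding.
	fmt.Printf("htr=%#x\n", root[:]) // htr=0xabcd00...00
}
```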
@@ -398,11 +398,7 @@ func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, val
 if err := st.SetValidators(validators); err != nil {
     return err
 }
-if err := st.SetBalances(balances); err != nil {
-    return err
-}
-
-return nil
+return st.SetBalances(balances)
 }

 func TestDownloadFinalizedData(t *testing.T) {
@@ -17,6 +17,7 @@ import (
     "time"

     "github.com/prysmaticlabs/prysm/v3/network/forks"
+    v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"

     "github.com/ethereum/go-ethereum/common/hexutil"
     "github.com/pkg/errors"
@@ -33,6 +34,7 @@ const (
     getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
     getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
     getForkSchedulePath = "/eth/v1/config/fork_schedule"
+    getConfigSpecPath = "/eth/v1/config/spec"
     getStatePath = "/eth/v2/debug/beacon/states"
     getNodeVersionPath = "/eth/v1/node/version"
     changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
@@ -252,6 +254,20 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
     return ofs, nil
 }

+// GetConfigSpec retrieve the current configs of the network used by the beacon node.
+func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
+    body, err := c.get(ctx, getConfigSpecPath)
+    if err != nil {
+        return nil, errors.Wrap(err, "error requesting configSpecPath")
+    }
+    fsr := &v1.SpecResponse{}
+    err = json.Unmarshal(body, fsr)
+    if err != nil {
+        return nil, err
+    }
+    return fsr, nil
+}
+
 type NodeVersion struct {
     implementation string
     semver string
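A sketch of how a caller might use the new GetConfigSpec. This assumes the client package's NewClient constructor at its v3 import path; the host address is a made-up example:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v3/api/client/beacon"
)

func main() {
	// Host address is illustrative; point it at a running beacon node's REST API.
	client, err := beacon.NewClient("http://localhost:3500")
	if err != nil {
		log.Fatal(err)
	}
	// SpecResponse wraps the /eth/v1/config/spec payload returned by the node.
	spec, err := client.GetConfigSpec(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(spec)
}
```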
@@ -21,7 +21,6 @@ import (
     "github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
     "github.com/prysmaticlabs/prysm/v3/network"
     "github.com/prysmaticlabs/prysm/v3/network/authorization"
-    v1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
     ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
     "github.com/prysmaticlabs/prysm/v3/runtime/version"
     log "github.com/sirupsen/logrus"
@@ -296,7 +295,10 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS

     ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
     defer cancel()
-    rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
+    versionOpt := func(r *http.Request) {
+        r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
+    }
+    rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)

     if err != nil {
         return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
@@ -323,7 +325,10 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS

     ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
     defer cancel()
-    rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
+    versionOpt := func(r *http.Request) {
+        r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
+    }
+    rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)

     if err != nil {
         return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
@@ -342,29 +347,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
     }
 }

-// SubmitBlindedBlockCapella calls the builder API endpoint that binds the validator to the builder and submits the block.
-// The response is the full ExecutionPayloadCapella used to create the blinded block.
-func (c *Client) SubmitBlindedBlockCapella(ctx context.Context, sb *ethpb.SignedBlindedBeaconBlockCapella) (*v1.ExecutionPayloadCapella, error) {
-    v := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: sb}
-    body, err := json.Marshal(v)
-    if err != nil {
-        return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
-    }
-
-    ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
-    defer cancel()
-    rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
-
-    if err != nil {
-        return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
-    }
-    ep := &ExecPayloadResponseCapella{}
-    if err := json.Unmarshal(rb, ep); err != nil {
-        return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
-    }
-    return ep.ToProto()
-}
-
 // Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy
 // response, and an error response may have an error message. This method will return a nil value for error in the
 // happy path, and an error with information about the server response body for a non-200 response.
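The versionOpt closure passed as the extra argument to c.do is a functional request option: a function that mutates the *http.Request before it is sent, here stamping the Eth-Consensus-Version header the builder API expects. A self-contained sketch of the same pattern; the option type and doRequest helper are illustrative, not the package's internal API:

```go
package main

import (
	"fmt"
	"net/http"
)

// reqOpt mutates an *http.Request before it is sent; this mirrors the
// shape of the versionOpt closure in the diff above.
type reqOpt func(*http.Request)

// withConsensusVersion returns an option that stamps the request with
// the Eth-Consensus-Version header.
func withConsensusVersion(v string) reqOpt {
	return func(r *http.Request) {
		r.Header.Add("Eth-Consensus-Version", v)
	}
}

// doRequest is an illustrative stand-in for the client's internal do method.
func doRequest(method, url string, opts ...reqOpt) (*http.Response, error) {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	for _, o := range opts {
		o(req)
	}
	return http.DefaultClient.Do(req)
}

func main() {
	resp, err := doRequest(http.MethodPost,
		"http://localhost:18550/eth/v1/builder/blinded_blocks",
		withConsensusVersion("capella"))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```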
@@ -263,6 +263,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
 hc := &http.Client{
     Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
         require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+        require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
         return &http.Response{
             StatusCode: http.StatusOK,
             Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -288,6 +289,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
 hc := &http.Client{
     Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
         require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+        require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
         return &http.Response{
             StatusCode: http.StatusOK,
             Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -33,7 +33,7 @@ type ChainInfoFetcher interface {

 // HeadUpdater defines a common interface for methods in blockchain service
 // which allow to update the head info
 type HeadUpdater interface {
-    UpdateHead(context.Context) error
+    UpdateHead(context.Context, primitives.Slot)
 }

 // TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -84,7 +84,7 @@ type FinalizationFetcher interface {
     FinalizedCheckpt() *ethpb.Checkpoint
     CurrentJustifiedCheckpt() *ethpb.Checkpoint
     PreviousJustifiedCheckpt() *ethpb.Checkpoint
-    VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
+    InForkchoice([32]byte) bool
     IsFinalized(ctx context.Context, blockRoot [32]byte) bool
 }
@@ -96,24 +96,32 @@ type OptimisticModeFetcher interface {

 // FinalizedCheckpt returns the latest finalized checkpoint from chain store.
 func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
+    s.ForkChoicer().RLock()
+    defer s.ForkChoicer().RUnlock()
     cp := s.ForkChoicer().FinalizedCheckpoint()
     return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
 }

 // PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
 func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
+    s.ForkChoicer().RLock()
+    defer s.ForkChoicer().RUnlock()
     cp := s.ForkChoicer().PreviousJustifiedCheckpoint()
     return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
 }

 // CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
 func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
+    s.ForkChoicer().RLock()
+    defer s.ForkChoicer().RUnlock()
     cp := s.ForkChoicer().JustifiedCheckpoint()
     return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
 }

 // BestJustifiedCheckpt returns the best justified checkpoint from store.
 func (s *Service) BestJustifiedCheckpt() *ethpb.Checkpoint {
+    s.ForkChoicer().RLock()
+    defer s.ForkChoicer().RUnlock()
     cp := s.ForkChoicer().BestJustifiedCheckpoint()
     return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
 }
@@ -277,6 +285,8 @@ func (s *Service) CurrentFork() *ethpb.Fork {

 // IsCanonical returns true if the input block root is part of the canonical chain.
 func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
+    s.ForkChoicer().RLock()
+    defer s.ForkChoicer().RUnlock()
     // If the block has not been finalized, check fork choice store to see if the block is canonical
     if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
         return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
@@ -289,6 +299,8 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
 // ChainHeads returns all possible chain heads (leaves of fork choice tree).
 // Heads roots and heads slots are returned.
 func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
+    s.ForkChoicer().RLock()
+    defer s.ForkChoicer().RUnlock()
     return s.cfg.ForkChoiceStore.Tips()
 }

@@ -330,6 +342,8 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
     headRoot := s.head.root
     s.headLock.RUnlock()

+    s.ForkChoicer().RLock()
+    defer s.ForkChoicer().RUnlock()
     optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
     if err == nil {
         return optimistic, nil
@@ -345,6 +359,8 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
 // IsFinalized returns true if the input root is finalized.
 // It first checks latest finalized root then checks finalized root index in DB.
 func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
+    s.ForkChoicer().RLock()
+    defer s.ForkChoicer().RUnlock()
     if s.ForkChoicer().FinalizedCheckpoint().Root == root {
         return true
     }
@@ -356,10 +372,21 @@ func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
     return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
 }

+// InForkchoice returns true if the given root is found in forkchoice
+// This in particular means that the blockroot is a descendant of the
+// finalized checkpoint
+func (s *Service) InForkchoice(root [32]byte) bool {
+    s.ForkChoicer().RLock()
+    defer s.ForkChoicer().RUnlock()
+    return s.ForkChoicer().HasNode(root)
+}
+
 // IsOptimisticForRoot takes the root as argument instead of the current head
 // and returns true if it is optimistic.
 func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
+    s.ForkChoicer().RLock()
     optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
+    s.ForkChoicer().RUnlock()
     if err == nil {
         return optimistic, nil
     }
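The pattern repeated across these accessors is: take the forkchoice read lock for the duration of the call, then hand back a defensive copy (bytesutil.SafeCopyBytes) so callers never hold a reference into the store. A minimal standalone sketch of that idiom with made-up types:

```go
package main

import (
	"fmt"
	"sync"
)

// store is a made-up stand-in for the forkchoice store guarded by an RWMutex.
type store struct {
	mu   sync.RWMutex
	root [32]byte
}

// FinalizedRoot takes the read lock and returns a copy, so callers can
// never observe or mutate the store's internal buffer.
func (s *store) FinalizedRoot() []byte {
	s.mu.RLock()
	defer s.mu.RUnlock()
	cp := make([]byte, len(s.root))
	copy(cp, s.root[:])
	return cp
}

func main() {
	s := &store{root: [32]byte{1}}
	r := s.FinalizedRoot()
	r[0] = 0xff // mutating the copy leaves the store untouched
	fmt.Println(s.root[0], r[0]) // 1 255
}
```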
@@ -26,7 +26,7 @@ var (
 // errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
 errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
 // errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
-errNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
+ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
 )

 // An invalid block is the block that fails state transition based on the core protocol rules.
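Exporting the sentinel (errNotDescendantOfFinalized becomes ErrNotDescendantOfFinalized) lets code outside the package match it with errors.Is; onBlock returns it directly in the fork-choice diff further below. A minimal self-contained illustration of the exported-sentinel idiom with an analogous error value:

```go
package main

import (
	"errors"
	"fmt"
)

// Exported sentinel, analogous to ErrNotDescendantOfFinalized above.
var ErrNotDescendant = errors.New("not descendant of finalized checkpoint")

// processBlock is a made-up stand-in that fails with the sentinel.
func processBlock(extendsFinalized bool) error {
	if !extendsFinalized {
		return ErrNotDescendant
	}
	return nil
}

func main() {
	// Callers in other packages can now branch on the exported value.
	if err := processBlock(false); errors.Is(err, ErrNotDescendant) {
		fmt.Println("dropping block: does not extend finalized checkpoint")
	}
}
```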
@@ -2,14 +2,22 @@ package blockchain

 import (
     "context"
+    "fmt"
+    "time"

     "github.com/pkg/errors"
+    doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
     "github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
+    "github.com/prysmaticlabs/prysm/v3/config/features"
     "github.com/prysmaticlabs/prysm/v3/config/params"
     "github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
+    "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
+    "github.com/prysmaticlabs/prysm/v3/time/slots"
+    "github.com/sirupsen/logrus"
 )

-func (s *Service) isNewProposer() bool {
-    _, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* root */)
+func (s *Service) isNewProposer(slot primitives.Slot) bool {
+    _, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
     return ok
 }

@@ -40,12 +48,18 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
     return headState, newHeadBlock, nil
 }

-func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte) error {
+// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
+func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) error {
     isNewHead := s.isNewHead(newHeadRoot)
-    if !isNewHead && !s.isNewProposer() {
+    if !isNewHead {
         return nil
     }
+
+    isNewProposer := s.isNewProposer(proposingSlot)
+    if isNewProposer && !features.Get().DisableReorgLateBlocks {
+        if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
+            return nil
+        }
+    }
     headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
     if err != nil {
         log.WithError(err).Error("Could not get forkchoice update argument")
@@ -58,19 +72,56 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
         headBlock: headBlock.Block(),
     })
     if err != nil {
-        return err
+        return errors.Wrap(err, "could not notify forkchoice update")
     }

-    if isNewHead {
-        if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
-            log.WithError(err).Error("could not save head")
-        }
-
-        // Only need to prune attestations from pool if the head has changed.
-        if err := s.pruneAttsFromPool(headBlock); err != nil {
-            return err
-        }
+    if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
+        log.WithError(err).Error("could not save head")
+    }
+
+    // Only need to prune attestations from pool if the head has changed.
+    if err := s.pruneAttsFromPool(headBlock); err != nil {
+        log.WithError(err).Error("could not prune attestations from pool")
     }
+    return nil
 }

+// shouldOverrideFCU checks whether the incoming block is still subject to being
+// reorged or not by the next proposer.
+func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
+    headWeight, err := s.ForkChoicer().Weight(newHeadRoot)
+    if err != nil {
+        log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
+    }
+    currentSlot := s.CurrentSlot()
+    if proposingSlot == currentSlot {
+        proposerHead := s.ForkChoicer().GetProposerHead()
+        if proposerHead != newHeadRoot {
+            return true
+        }
+        log.WithFields(logrus.Fields{
+            "root": fmt.Sprintf("%#x", newHeadRoot),
+            "weight": headWeight,
+        }).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
+            params.BeaconConfig().SecondsPerSlot)
+        lateBlockFailedAttemptSecondThreshold.Inc()
+    } else {
+        if s.ForkChoicer().ShouldOverrideFCU() {
+            return true
+        }
+        secs, err := slots.SecondsSinceSlotStart(currentSlot,
+            uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
+        if err != nil {
+            log.WithError(err).Error("could not compute seconds since slot start")
+        }
+        if secs >= doublylinkedtree.ProcessAttestationsThreshold {
+            log.WithFields(logrus.Fields{
+                "root": fmt.Sprintf("%#x", newHeadRoot),
+                "weight": headWeight,
+            }).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
+                doublylinkedtree.ProcessAttestationsThreshold)
+            lateBlockFailedAttemptFirstThreshold.Inc()
+        }
+    }
+    return false
+}
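Stripped of logging and metrics, the new gating asks: is there a new head at all, and, if this validator proposes next, should the FCU be withheld so the proposer can attempt a late-block reorg? A simplified sketch of that decision; all inputs are illustrative booleans, not the service's real API:

```go
// shouldNotifyFCU condenses the flow of forkchoiceUpdateWithExecution /
// shouldOverrideFCU above into a pure decision function (an illustration only).
func shouldNotifyFCU(isNewHead, isNewProposer, reorgLateBlocksEnabled, proposerWantsOverride bool) bool {
	if !isNewHead {
		return false // head unchanged: nothing to notify
	}
	if isNewProposer && reorgLateBlocksEnabled && proposerWantsOverride {
		// The next proposer may still reorg this late block, so hold the
		// FCU back instead of cementing the new head in the engine.
		return false
	}
	return true
}
```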
@@ -3,6 +3,7 @@ package blockchain

 import (
     "context"
     "testing"
+    "time"

     "github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
     testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
@@ -21,10 +22,10 @@ import (
 func TestService_isNewProposer(t *testing.T) {
     beaconDB := testDB.SetupDB(t)
     service := setupBeaconChain(t, beaconDB)
-    require.Equal(t, false, service.isNewProposer())
+    require.Equal(t, false, service.isNewProposer(service.CurrentSlot()+1))

     service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
-    require.Equal(t, true, service.isNewProposer())
+    require.Equal(t, true, service.isNewProposer(service.CurrentSlot()+1))
 }

 func TestService_isNewHead(t *testing.T) {
@@ -75,7 +76,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
     service, err := NewService(ctx, opts...)
     require.NoError(t, err)
     service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
-    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, service.headRoot()))
+    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1))
     hookErr := "could not notify forkchoice update"
     invalidStateErr := "could not get state summary: could not find block in DB"
     require.LogsDoNotContain(t, hook, invalidStateErr)
@@ -83,7 +84,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
     gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
     require.NoError(t, err)
     require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
-    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}))
+    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1))
     require.LogsContain(t, hook, invalidStateErr)

     hook.Reset()
@@ -107,7 +108,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
         state: st,
     }
     service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
-    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1))
+    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()))
     require.LogsDoNotContain(t, hook, invalidStateErr)
     require.LogsDoNotContain(t, hook, hookErr)

@@ -124,7 +125,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
         state: st,
     }
     service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
-    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1))
+    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1))
     require.LogsDoNotContain(t, hook, invalidStateErr)
     require.LogsDoNotContain(t, hook, hookErr)
     vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
@@ -134,7 +135,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {

     // Test zero headRoot returns immediately.
     headRoot := service.headRoot()
-    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{}))
+    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1))
     require.Equal(t, service.headRoot(), headRoot)
 }

@@ -184,7 +185,52 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin

     // Set head to be the same but proposing next slot
     service.head.root = r
     service.head.block = sb
     service.head.state = st
     service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
-    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r))
+    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1))
 }

+func TestShouldOverrideFCU(t *testing.T) {
+    hook := logTest.NewGlobal()
+    ctx := context.Background()
+    beaconDB := testDB.SetupDB(t)
+    fcs := doublylinkedtree.New()
+    opts := []Option{
+        WithDatabase(beaconDB),
+        WithStateGen(stategen.New(beaconDB, fcs)),
+        WithForkChoiceStore(fcs),
+        WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
+    }
+    service, err := NewService(ctx, opts...)
+    service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
+    require.NoError(t, err)
+    headRoot := [32]byte{'b'}
+    parentRoot := [32]byte{'a'}
+    ojc := &ethpb.Checkpoint{}
+    st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
+    require.NoError(t, err)
+    require.NoError(t, fcs.InsertNode(ctx, st, root))
+    st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, ojc, ojc)
+    require.NoError(t, err)
+    require.NoError(t, fcs.InsertNode(ctx, st, root))
+
+    require.Equal(t, primitives.Slot(2), service.CurrentSlot())
+    require.Equal(t, true, service.shouldOverrideFCU(headRoot, 2))
+    require.LogsDoNotContain(t, hook, "12 seconds")
+    require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 2))
+    require.LogsContain(t, hook, "12 seconds")
+
+    head, err := fcs.Head(ctx)
+    require.NoError(t, err)
+    require.Equal(t, headRoot, head)
+
+    fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
+    require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
+    require.LogsDoNotContain(t, hook, "10 seconds")
+    fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
+    service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
+    require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
+    require.LogsContain(t, hook, "10 seconds")
+}
@@ -52,6 +52,7 @@ type head struct {

 // This saves head info to the local service cache, it also saves the
 // new head root to the DB.
+// Caller of the method MUST aqcuire a lock on forkchoice.
 func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.ReadOnlySignedBeaconBlock, headState state.BeaconState) error {
     ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
     defer span.End()
@@ -122,7 +123,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
     reorgDistance.Observe(float64(dis))
     reorgDepth.Observe(float64(dep))

-    isOptimistic, err := s.IsOptimistic(ctx)
+    isOptimistic, err := s.ForkChoicer().IsOptimistic(newHeadRoot)
     if err != nil {
         return errors.Wrap(err, "could not check if node is optimistically synced")
     }
@@ -111,6 +111,18 @@ var (
     Name: "beacon_reorgs_total",
     Help: "Count the number of times beacon chain has a reorg",
 })
+LateBlockAttemptedReorgCount = promauto.NewCounter(prometheus.CounterOpts{
+    Name: "beacon_late_block_attempted_reorgs",
+    Help: "Count the number of times a proposer served by this beacon has attempted a late block reorg",
+})
+lateBlockFailedAttemptFirstThreshold = promauto.NewCounter(prometheus.CounterOpts{
+    Name: "beacon_failed_reorg_attempts_first_threshold",
+    Help: "Count the number of times a proposer served by this beacon attempted a late block reorg but desisted in the first threshold",
+})
+lateBlockFailedAttemptSecondThreshold = promauto.NewCounter(prometheus.CounterOpts{
+    Name: "beacon_failed_reorg_attempts_second_threshold",
+    Help: "Count the number of times a proposer served by this beacon attempted a late block reorg but desisted in the second threshold",
+})
 saveOrphanedAttCount = promauto.NewCounter(prometheus.CounterOpts{
     Name: "saved_orphaned_att_total",
     Help: "Count the number of times an orphaned attestation is saved",
@@ -158,10 +170,6 @@ var (
     Name: "txs_per_slot_count",
     Help: "Count the number of txs per slot",
 })
-missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
-    Name: "missed_payload_id_filled_count",
-    Help: "",
-})
 onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
     Name: "on_block_processing_milliseconds",
     Help: "Total time in milliseconds to complete a call to onBlock()",
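Counters built with promauto, like the three added above, register themselves with the default Prometheus registry at construction and only need an Inc() at the call site. A minimal self-contained sketch; the metric name here is illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// exampleReorgCounter mirrors the shape of the counters added above;
// promauto.NewCounter registers it with the default registry on creation.
var exampleReorgCounter = promauto.NewCounter(prometheus.CounterOpts{
	Name: "example_attempted_reorgs_total", // illustrative name
	Help: "Counts attempted late block reorgs in this sketch",
})

func main() {
	exampleReorgCounter.Inc()
	fmt.Println(testutil.ToFloat64(exampleReorgCounter)) // 1
}
```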
@@ -6,7 +6,6 @@ import (

 "github.com/pkg/errors"
 "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
-"github.com/prysmaticlabs/prysm/v3/config/params"
 "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
 ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
 "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/attestation"
@@ -37,7 +36,7 @@ import (
 //
 // # Update latest messages for attesting indices
 // update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
-func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
+func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, disparity time.Duration) error {
     ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
     defer span.End()

@@ -63,7 +62,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error
     genesisTime := uint64(s.genesisTime.Unix())

     // Verify attestation target is from current epoch or previous epoch.
-    if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
+    if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Add(disparity).Unix()), tgt); err != nil {
         return err
     }

@@ -72,11 +71,11 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error
         return errors.Wrap(err, "could not verify attestation beacon block")
     }

-    // Note that LMG GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
+    // Note that LMD GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
     // validate_aggregate_proof.go and validate_beacon_attestation.go

     // Verify attestations can only affect the fork choice of subsequent slots.
-    if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
+    if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, disparity); err != nil {
         return err
     }
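Passing disparity as a parameter lets each caller choose how much clock skew to tolerate, instead of always reading params.BeaconNetworkConfig().MaximumGossipClockDisparity inside the function. A standalone sketch of a disparity-tolerant timeliness check; the helper name is illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// withinTolerance reports whether a message timestamped at msgTime is
// acceptable at local time now, given an allowed clock disparity. This
// mirrors the idea behind threading `disparity` through OnAttestation.
func withinTolerance(now, msgTime time.Time, disparity time.Duration) bool {
	return !msgTime.After(now.Add(disparity))
}

func main() {
	now := time.Now()
	msg := now.Add(300 * time.Millisecond) // arrived slightly "from the future"
	fmt.Println(withinTolerance(now, msg, 0))                    // false: no tolerance
	fmt.Println(withinTolerance(now, msg, 500*time.Millisecond)) // true: within skew budget
}
```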
@@ -8,7 +8,6 @@ import (
 "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
 testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
 doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
-forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
 fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
 "github.com/prysmaticlabs/prysm/v3/config/params"
@@ -118,7 +117,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {

 for _, tt := range tests {
     t.Run(tt.name, func(t *testing.T) {
-        err := service.OnAttestation(ctx, tt.a)
+        err := service.OnAttestation(ctx, tt.a, 0)
         if tt.wantedErr != "" {
             assert.ErrorContains(t, tt.wantedErr, err)
         } else {
@@ -155,7 +154,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
 state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
 require.NoError(t, err)
 require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-require.NoError(t, service.OnAttestation(ctx, att[0]))
+require.NoError(t, service.OnAttestation(ctx, att[0], 0))
 }

 func TestStore_SaveCheckpointState(t *testing.T) {
@@ -324,96 +323,3 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {

 assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
 }
-
-func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing.T) {
-    ctx := context.Background()
-    beaconDB := testDB.SetupDB(t)
-
-    fcs := doublylinkedtree.New()
-    opts := []Option{
-        WithDatabase(beaconDB),
-        WithStateGen(stategen.New(beaconDB, fcs)),
-        WithForkChoiceStore(fcs),
-    }
-    service, err := NewService(ctx, opts...)
-    require.NoError(t, err)
-
-    b32 := util.NewBeaconBlock()
-    b32.Block.Slot = 32
-    util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
-    r32, err := b32.Block.HashTreeRoot()
-    require.NoError(t, err)
-
-    require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
-    b33 := util.NewBeaconBlock()
-    b33.Block.Slot = 33
-    b33.Block.ParentRoot = r32[:]
-    util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
-    r33, err := b33.Block.HashTreeRoot()
-    require.NoError(t, err)
-
-    err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
-    require.ErrorContains(t, "Root and finalized store are not consistent", err)
-}
-
-func TestVerifyFinalizedConsistency_OK(t *testing.T) {
-    ctx := context.Background()
-
-    opts := testServiceOptsWithDB(t)
-    service, err := NewService(ctx, opts...)
-    require.NoError(t, err)
-
-    b32 := util.NewBeaconBlock()
-    b32.Block.Slot = 32
-    util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
-    r32, err := b32.Block.HashTreeRoot()
-    require.NoError(t, err)
-
-    require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: r32}))
-    b33 := util.NewBeaconBlock()
-    b33.Block.Slot = 33
-    b33.Block.ParentRoot = r32[:]
-    util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
-    r33, err := b33.Block.HashTreeRoot()
-    require.NoError(t, err)
-
-    err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
-    require.NoError(t, err)
-}
-
-func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
-    ctx := context.Background()
-
-    opts := testServiceOptsWithDB(t)
-    service, err := NewService(ctx, opts...)
-    require.NoError(t, err)
-
-    b32 := util.NewBeaconBlock()
-    b32.Block.Slot = 32
-    r32, err := b32.Block.HashTreeRoot()
-    require.NoError(t, err)
-
-    b33 := util.NewBeaconBlock()
-    b33.Block.Slot = 33
-    b33.Block.ParentRoot = r32[:]
-    r33, err := b33.Block.HashTreeRoot()
-    require.NoError(t, err)
-
-    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
-    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
-    state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
-    require.NoError(t, err)
-    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-    state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, ojc, ofc)
-    require.NoError(t, err)
-    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-
-    jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r32}
-    bState, _ := util.DeterministicGenesisState(t, 10)
-    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, bState, r32))
-    require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, jc))
-    _, err = service.cfg.ForkChoiceStore.Head(ctx)
-    require.NoError(t, err)
-    err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
-    require.NoError(t, err)
-}
@@ -107,6 +107,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify that the parent block is in forkchoice
|
||||
if !s.ForkChoicer().HasNode(b.ParentRoot()) {
|
||||
return ErrNotDescendantOfFinalized
|
||||
}
|
||||
|
||||
// Save current justified and finalized epochs for future use.
|
||||
currStoreJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
|
||||
currStoreFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
|
||||
@@ -147,9 +152,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
|
||||
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
|
||||
return errors.Wrap(err, "could not handle block's attestations")
|
||||
}
|
||||
if err := s.handleBlockBLSToExecChanges(signed.Block()); err != nil {
|
||||
return errors.Wrap(err, "could not handle block's BLSToExecutionChanges")
|
||||
}
|
||||
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
|
||||
if isValidPayload {
|
||||
@@ -209,7 +211,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
|
||||
}
|
||||
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
|
||||
if err := s.forkchoiceUpdateWithExecution(ctx, headRoot); err != nil {
|
||||
// verify conditions for FCU, notifies FCU, and saves the new head.
|
||||
// This function also prunes attestations, other similar operations happen in prunePostBlockOperationPools.
|
||||
if err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -442,12 +446,22 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
			}
		}
	}
	// Save boundary states that will be useful for forkchoice
	for r, st := range boundaries {
		if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
			return err
		}
	}
	// Also save the last post state, which will be used as the pre state for the next batch.
	lastBR := blockRoots[len(blks)-1]
	if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
		return err
	}
	// Insert all nodes but the last one to forkchoice
	if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
		return errors.Wrap(err, "could not insert batch to forkchoice")
	}
	// Insert the last block to forkchoice
	lastBR := blockRoots[len(blks)-1]
	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
		return errors.Wrap(err, "could not insert last block in batch to forkchoice")
	}
@@ -457,17 +471,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
			return errors.Wrap(err, "could not set optimistic block to valid")
		}
	}

	for r, st := range boundaries {
		if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
			return err
		}
	}
	// Also save the last post state, which will be used as the pre state for the next batch.
	lastB := blks[len(blks)-1]
	if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
		return err
	}
	arg := &notifyForkchoiceUpdateArg{
		headState: preState,
		headRoot:  lastBR,
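
The reordered onBlockBatch hunk above persists the boundary states and the final post state before forkchoice is mutated, then inserts every node except the last with InsertChain and the last one separately with InsertNode. A minimal standalone sketch of that ordering, using hypothetical saver/inserter interfaces rather than Prysm's real StateGen and ForkChoiceStore types:

package sketch

import (
	"context"
	"fmt"
)

// stateSaver and chainInserter are hypothetical stand-ins for
// s.cfg.StateGen and s.cfg.ForkChoiceStore in the hunk above.
type stateSaver interface {
	SaveState(ctx context.Context, root [32]byte, st any) error
}

type chainInserter interface {
	InsertChain(ctx context.Context, pending []any) error
	InsertNode(ctx context.Context, st any, root [32]byte) error
}

// saveThenInsert mirrors the batch ordering: persist states first so a
// failure surfaces before forkchoice changes, and insert the last block last.
func saveThenInsert(
	ctx context.Context,
	sg stateSaver,
	fc chainInserter,
	boundaries map[[32]byte]any,
	pending []any,
	lastRoot [32]byte,
	lastPostState any,
) error {
	for r, st := range boundaries {
		if err := sg.SaveState(ctx, r, st); err != nil {
			return err
		}
	}
	// The last post state doubles as the pre state of the next batch.
	if err := sg.SaveState(ctx, lastRoot, lastPostState); err != nil {
		return err
	}
	if err := fc.InsertChain(ctx, pending); err != nil {
		return fmt.Errorf("could not insert batch to forkchoice: %w", err)
	}
	return fc.InsertNode(ctx, lastPostState, lastRoot)
}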
@@ -567,6 +571,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re

// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's the caller's responsibility to ensure the slashing object is valid.
// This function requires a write lock on forkchoice.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
	for _, slashing := range slashings {
		indices := blocks.SlashableAttesterIndices(slashing)
@@ -591,8 +596,8 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
}

// This removes the attestations in block `b` from the attestation mem pool.
func (s *Service) pruneAttsFromPool(b interfaces.ReadOnlySignedBeaconBlock) error {
	atts := b.Block().Body().Attestations()
func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlock) error {
	atts := headBlock.Block().Body().Attestations()
	for _, att := range atts {
		if helpers.IsAggregated(att) {
			if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
@@ -661,15 +666,15 @@ func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *ev
			break
		}

		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		attThreshold := params.BeaconConfig().SecondsPerSlot / 3
		ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
		for {
			select {
			case ti := <-ticker.C:
				if err := s.fillMissingBlockPayloadId(ctx, ti); err != nil {
			case <-ticker.C():
				if err := s.fillMissingBlockPayloadId(ctx); err != nil {
					log.WithError(err).Error("Could not fill missing payload ID")
				}
			case <-s.ctx.Done():
			case <-ctx.Done():
				log.Debug("Context closed, exiting routine")
				return
			}
@@ -677,36 +682,34 @@ func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *ev
		}
	}()
}

// Returns true if time `t` is halfway through the slot in sec.
func atHalfSlot(t time.Time) bool {
	s := params.BeaconConfig().SecondsPerSlot
	return uint64(t.Second())%s == s/2
}

func (s *Service) fillMissingBlockPayloadId(ctx context.Context, ti time.Time) error {
	if !atHalfSlot(ti) {
		return nil
	}
	if s.CurrentSlot() == s.cfg.ForkChoiceStore.HighestReceivedBlockSlot() {
// fillMissingBlockPayloadId is called 4 seconds into the slot and calls FCU if we are proposing the next slot
// and the cache has been missed.
func (s *Service) fillMissingBlockPayloadId(ctx context.Context) error {
	s.ForkChoicer().RLock()
	highestReceivedSlot := s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
	s.ForkChoicer().RUnlock()
	if s.CurrentSlot() == highestReceivedSlot {
		return nil
	}
	// Head root should be empty when retrieving proposer index for the next slot.
	_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
	// There exists a proposer for the next slot, but we haven't called FCU with a payload attribute yet.
	if has && id == [8]byte{} {
		missedPayloadIDFilledCount.Inc()
		headBlock, err := s.headBlock()
		if err != nil {
			return err
		} else {
			if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
				headState: s.headState(ctx),
				headRoot:  s.headRoot(),
				headBlock: headBlock.Block(),
			}); err != nil {
				return err
			}
		}
	}
	if !has || id != [8]byte{} {
		return nil
	}
	return nil
	s.headLock.RLock()
	headBlock, err := s.headBlock()
	if err != nil {
		s.headLock.RUnlock()
		return err
	}
	headState := s.headState(ctx)
	headRoot := s.headRoot()
	s.headLock.RUnlock()
	_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
		headState: headState,
		headRoot:  headRoot,
		headBlock: headBlock.Block(),
	})
	return err
}

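The routine above swaps a once-per-second wall-clock ticker for a slot ticker offset to one third of the slot (4 seconds on mainnet), so the fill check fires exactly once per slot at a fixed intra-slot time. A rough standalone sketch of that pattern with a hand-rolled timer instead of Prysm's slots.NewSlotTickerWithOffset, whose exact semantics are assumed here:

package sketch

import (
	"context"
	"time"
)

// tickAtSlotOffset calls fn once per slot at genesis + n*slotDur + offset,
// approximating a slot ticker with a fixed intra-slot offset.
func tickAtSlotOffset(ctx context.Context, genesis time.Time, slotDur, offset time.Duration, fn func()) {
	for {
		now := time.Now()
		// Next slot boundary strictly after now, then shift by the offset.
		n := int64(now.Sub(genesis)/slotDur) + 1
		next := genesis.Add(time.Duration(n)*slotDur + offset)
		if !next.After(now) {
			// A negative offset can land the tick in the past; skip to the next slot.
			next = next.Add(slotDur)
		}
		select {
		case <-time.After(time.Until(next)):
			fn()
		case <-ctx.Done():
			return
		}
	}
}
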
@@ -1,7 +1,6 @@
package blockchain

import (
	"bytes"
	"context"
	"fmt"

@@ -14,7 +13,6 @@ import (
	"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	mathutil "github.com/prysmaticlabs/prysm/v3/math"
	"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
	"go.opencensus.io/trace"
@@ -71,10 +69,6 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
		return errors.New("could not reconstruct parent state")
	}

	if err := s.VerifyFinalizedBlkDescendant(ctx, parentRoot); err != nil {
		return err
	}

	has, err := s.cfg.StateGen.HasState(ctx, parentRoot)
	if err != nil {
		return err
@@ -88,35 +82,6 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
	return nil
}

// VerifyFinalizedBlkDescendant validates if input block root is a descendant of the
// current finalized block root.
func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.VerifyFinalizedBlkDescendant")
	defer span.End()
	finalized := s.ForkChoicer().FinalizedCheckpoint()
	fRoot := s.ensureRootNotZeros(finalized.Root)
	fSlot, err := slots.EpochStart(finalized.Epoch)
	if err != nil {
		return err
	}
	bFinalizedRoot, err := s.ancestor(ctx, root[:], fSlot)
	if err != nil {
		return errors.Wrap(err, "could not get finalized block root")
	}
	if bFinalizedRoot == nil {
		return fmt.Errorf("no finalized block known for block %#x", bytesutil.Trunc(root[:]))
	}

	if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
		err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
			bytesutil.Trunc(root[:]), fSlot, bytesutil.Trunc(bFinalizedRoot),
			bytesutil.Trunc(fRoot[:]))
		tracing.AnnotateError(span, err)
		return invalidBlock{error: err}
	}
	return nil
}

// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b interfaces.ReadOnlyBeaconBlock) error {
@@ -272,7 +237,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
		return nil
	}
	if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
		return errNotDescendantOfFinalized
		return ErrNotDescendantOfFinalized
	}
	return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
}

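The deleted VerifyFinalizedBlkDescendant boiled down to: find the block's ancestor at the start slot of the finalized epoch and compare it with the finalized root. A compact sketch of that check over a toy parent map (the node type and ancestor walk are simplified assumptions, not Prysm's real forkchoice structures):

package sketch

import "errors"

type node struct {
	parent [32]byte
	slot   uint64
}

// isDescendantOfFinalized walks root's ancestry back to finalizedSlot and
// compares the ancestor found there against the finalized root.
func isDescendantOfFinalized(nodes map[[32]byte]node, root, finalizedRoot [32]byte, finalizedSlot uint64) (bool, error) {
	cur := root
	for {
		n, ok := nodes[cur]
		if !ok {
			return false, errors.New("unknown block root")
		}
		if n.slot <= finalizedSlot {
			return cur == finalizedRoot, nil
		}
		cur = n.parent
	}
}

The new code instead relies on the invariant that everything inside forkchoice already descends from finalization, so a HasNode check suffices.
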
@@ -65,6 +65,10 @@ func TestStore_OnBlock(t *testing.T) {
	st, err := util.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
	ojc := &ethpb.Checkpoint{}
	stfcs, root, err := prepareForkchoiceState(ctx, 0, validGenesisRoot, [32]byte{}, [32]byte{}, ojc, ojc)
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
	roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
	require.NoError(t, err)
	random := util.NewBeaconBlock()
@@ -73,11 +77,16 @@ func TestStore_OnBlock(t *testing.T) {
	util.SaveBlock(t, ctx, beaconDB, random)
	randomParentRoot, err := random.Block.HashTreeRoot()
	assert.NoError(t, err)

	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
	randomParentRoot2 := roots[1]
	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
	stfcs, root, err = prepareForkchoiceState(ctx, 2, bytesutil.ToBytes32(randomParentRoot2),
		validGenesisRoot, [32]byte{'r'}, ojc, ojc)
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, stfcs, root))

	tests := []struct {
		name string
@@ -108,10 +117,11 @@ func TestStore_OnBlock(t *testing.T) {
		blk: func() *ethpb.SignedBeaconBlock {
			b := util.NewBeaconBlock()
			b.Block.ParentRoot = randomParentRoot[:]
			b.Block.Slot = 2
			return b
		}(),
		s: st.Copy(),
		wantErrString: "is not a descendant of the current finalized block",
		wantErrString: "not descendant of finalized checkpoint",
	},
	{
		name: "same slot as finalized block",
@@ -441,7 +451,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {

	err = service.fillInForkChoiceMissingBlocks(
		context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
	require.Equal(t, errNotDescendantOfFinalized.Error(), err.Error())
	require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}

// blockTree1 constructs the following tree:
@@ -707,87 +717,6 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
	assert.Equal(t, root, r, "Did not get wanted justified root")
}

func TestVerifyBlkDescendant(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := context.Background()

	fcs := doublylinkedtree.New()
	opts := []Option{
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB, fcs)),
		WithForkChoiceStore(fcs),
	}
	b := util.NewBeaconBlock()
	b.Block.Slot = 32
	r, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
	util.SaveBlock(t, ctx, beaconDB, b)

	b1 := util.NewBeaconBlock()
	b1.Block.Slot = 32
	b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
	r1, err := b1.Block.HashTreeRoot()
	require.NoError(t, err)
	util.SaveBlock(t, ctx, beaconDB, b1)

	type args struct {
		parentRoot    [32]byte
		finalizedRoot [32]byte
	}
	tests := []struct {
		name             string
		args             args
		wantedErr        string
		invalidBlockRoot bool
	}{
		{
			name: "could not get finalized block in block service cache",
			args: args{
				finalizedRoot: [32]byte{'a'},
			},
			wantedErr: "block not found in cache or db",
		},
		{
			name: "could not get finalized block root in DB",
			args: args{
				finalizedRoot: r,
				parentRoot:    [32]byte{'a'},
			},
			wantedErr: "could not get finalized block root",
		},
		{
			name: "is not descendant",
			args: args{
				finalizedRoot: r1,
				parentRoot:    r,
			},
			wantedErr:        "is not a descendant of the current finalized block slot",
			invalidBlockRoot: true,
		},
		{
			name: "is descendant",
			args: args{
				finalizedRoot: r,
				parentRoot:    r,
			},
		},
	}
	for _, tt := range tests {
		service, err := NewService(ctx, opts...)
		require.NoError(t, err)
		require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: tt.args.finalizedRoot, Epoch: 1}))
		err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
		if tt.wantedErr != "" {
			assert.ErrorContains(t, tt.wantedErr, err)
			if tt.invalidBlockRoot {
				require.Equal(t, true, IsInvalidBlock(err))
			}
		} else if err != nil {
			assert.NoError(t, err)
		}
	}
}

func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
	ctx := context.Background()
	opts := testServiceOptsNoDB()
@@ -2140,63 +2069,14 @@ func TestNoViableHead_Reboot(t *testing.T) {
	require.NoError(t, err)
	root, err = b.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, service.onBlock(ctx, wsb, root))
	// Check that the head is still INVALID and the node is optimistic
	// We use onBlockBatch here because the valid chain is missing in forkchoice
	require.NoError(t, service.onBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wsb}, [][32]byte{root}))
	// Check that the head is now VALID and the node is not optimistic
	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
	headRoot, err = service.HeadRoot(ctx)
	require.NoError(t, err)
	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))

	optimistic, err = service.IsOptimistic(ctx)
	require.NoError(t, err)
	require.Equal(t, true, optimistic)
	st, err = service.cfg.StateGen.StateByRoot(ctx, root)
	require.NoError(t, err)

	// Import blocks 21--23
	for i := 21; i < 24; i++ {
		driftGenesisTime(service, int64(i), 0)
		require.NoError(t, err)
		b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
		require.NoError(t, err)
		wsb, err := consensusblocks.NewSignedBeaconBlock(b)
		require.NoError(t, err)
		root, err = b.Block.HashTreeRoot()
		require.NoError(t, err)
		err = service.onBlock(ctx, wsb, root)
		require.NoError(t, err)
		st, err = service.cfg.StateGen.StateByRoot(ctx, root)
		require.NoError(t, err)
	}
	// Head should still be INVALID and the node is optimistic
	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
	headRoot, err = service.HeadRoot(ctx)
	require.NoError(t, err)
	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))

	optimistic, err = service.IsOptimistic(ctx)
	require.NoError(t, err)
	require.Equal(t, true, optimistic)

	// Import block 24, it should justify Epoch 3 and become HEAD, the node
	// recovers
	driftGenesisTime(service, 24, 0)
	b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 24)
	require.NoError(t, err)
	wsb, err = consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	root, err = b.Block.HashTreeRoot()
	require.NoError(t, err)
	service.ForkChoicer().SetBalancesByRooter(service.cfg.StateGen.ActiveNonSlashedBalancesByRoot)
	err = service.onBlock(ctx, wsb, root)
	require.NoError(t, err)
	require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
	headRoot, err = service.HeadRoot(ctx)
	require.NoError(t, err)
	require.Equal(t, root, bytesutil.ToBytes32(headRoot))

	sjc = service.CurrentJustifiedCheckpt()
	require.Equal(t, primitives.Epoch(3), sjc.Epoch)
	optimistic, err = service.IsOptimistic(ctx)
	require.NoError(t, err)
	require.Equal(t, false, optimistic)
@@ -2284,7 +2164,7 @@ func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {

	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	require.NoError(t, service.fillMissingBlockPayloadId(ctx, time.Unix(int64(params.BeaconConfig().SecondsPerSlot/2), 0)))
	require.NoError(t, service.fillMissingBlockPayloadId(ctx), 0)
}

// Helper function to simulate the block being on time or delayed for proposer

@@ -11,7 +11,9 @@ import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v3/config/features"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -19,6 +21,10
	"go.opencensus.io/trace"
)

// reorgLateBlockCountAttestations is the time until the end of the slot in which we count
// attestations to see if we will reorg the incoming block.
const reorgLateBlockCountAttestations = 2 * time.Second

// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
@@ -29,7 +35,7 @@ type AttestationStateFetcher interface {
type AttestationReceiver interface {
	AttestationStateFetcher
	VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
	VerifyFinalizedConsistency(ctx context.Context, root []byte) error
	InForkchoice([32]byte) bool
}

// AttestationTargetState returns the pre state of attestation.
@@ -60,33 +66,6 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
	return nil
}

// VerifyFinalizedConsistency verifies the input root is consistent with the finalized store.
// When the input root is not consistent with the finalized store, we know it is not
// on the finalized checkpoint that leads to the current canonical chain, and it should be rejected accordingly.
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
	// A canonical root implies the root has an ancestor that aligns with the finalized checkpoint.
	// In this case, we could exit early to save on additional computation.
	blockRoot := bytesutil.ToBytes32(root)
	if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
		return nil
	}

	f := s.FinalizedCheckpt()
	ss, err := slots.EpochStart(f.Epoch)
	if err != nil {
		return err
	}
	r, err := s.ancestor(ctx, root, ss)
	if err != nil {
		return err
	}
	if !bytes.Equal(f.Root, r) {
		return errors.New("Root and finalized store are not consistent")
	}

	return nil
}

// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
	// Wait for state to be initialized.
@@ -115,32 +94,40 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
		}

		st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
		pat := slots.NewSlotTickerWithOffset(s.genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
		for {
			select {
			case <-s.ctx.Done():
				return
			case <-pat.C():
				s.ForkChoicer().Lock()
				s.UpdateHead(s.ctx, s.CurrentSlot()+1)
				s.ForkChoicer().Unlock()
			case <-st.C():
				s.ForkChoicer().Lock()
				if err := s.ForkChoicer().NewSlot(s.ctx, s.CurrentSlot()); err != nil {
					log.WithError(err).Error("Could not process new slot")
					log.WithError(err).Error("could not process new slot")
				}

				if err := s.UpdateHead(s.ctx); err != nil {
					log.WithError(err).Error("Could not process attestations and update head")
				}
				s.UpdateHead(s.ctx, s.CurrentSlot())
				s.ForkChoicer().Unlock()
			}
		}
	}()
}

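Note the second ticker: pat uses a negative offset of reorgLateBlockCountAttestations, which, assuming NewSlotTickerWithOffset interprets a negative offset as firing before the slot boundary, means it ticks two seconds before each slot starts, while st ticks on the boundary itself. Under that assumption, the two tick instants for slot n work out as in this small sketch:

package sketch

import "time"

// tickTimes returns the assumed tick instants around slot n: the boundary
// ticker fires at the start of slot n+1, and the offset ticker fires two
// seconds earlier, late enough in slot n to count its attestations for a
// potential reorg of a late block.
func tickTimes(genesis time.Time, slotDur time.Duration, n uint64) (boundary, preBoundary time.Time) {
	const reorgLateBlockCountAttestations = 2 * time.Second
	boundary = genesis.Add(time.Duration(n+1) * slotDur)
	preBoundary = boundary.Add(-reorgLateBlockCountAttestations)
	return boundary, preBoundary
}
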
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// It requires no external inputs.
func (s *Service) UpdateHead(ctx context.Context) error {
	// Only one process can process attestations and update head at a time.
	s.processAttestationsLock.Lock()
	defer s.processAttestationsLock.Unlock()

// The caller of this function MUST hold a lock in forkchoice.
func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) {
	start := time.Now()
	s.processAttestations(ctx)

	// This function is only called at 10 seconds or 0 seconds into the slot.
	disparity := params.BeaconNetworkConfig().MaximumGossipClockDisparity
	if !features.Get().DisableReorgLateBlocks {
		disparity += reorgLateBlockCountAttestations
	}
	s.processAttestations(ctx, disparity)

	processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

	start = time.Now()
@@ -158,21 +145,20 @@ func (s *Service) UpdateHead(ctx context.Context) error {
		}).Debug("Head changed due to attestations")
	}
	s.headLock.RUnlock()
	if err := s.forkchoiceUpdateWithExecution(ctx, newHeadRoot); err != nil {
		return err
	if err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot); err != nil {
		log.WithError(err).Error("could not update forkchoice")
	}
	return nil
}

// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context) {
func (s *Service) processAttestations(ctx context.Context, disparity time.Duration) {
	atts := s.cfg.AttPool.ForkchoiceAttestations()
	for _, a := range atts {
		// Based on the spec, don't process the attestation until the subsequent slot.
		// This delays consideration in the fork choice until their slot is in the past.
		// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
		nextSlot := a.Data.Slot + 1
		if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
		if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
			continue
		}

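The VerifyTime guard above implements the spec rule that an attestation from slot s only counts from slot s+1 onward: the wall clock, allowing for clock disparity, must have reached the start of the next slot. A hedged sketch of that predicate (the exact contract of slots.VerifyTime is assumed, not quoted):

package sketch

import "time"

// attSlotIsInPast reports whether an attestation from attSlot may enter
// fork choice: the current time, padded by the allowed clock disparity,
// must have reached the start of attSlot+1.
func attSlotIsInPast(genesisUnix uint64, slotDur time.Duration, attSlot uint64, disparity time.Duration) bool {
	nextSlotStart := time.Unix(int64(genesisUnix), 0).Add(time.Duration(attSlot+1) * slotDur)
	return !time.Now().Add(disparity).Before(nextSlotStart)
}

Widening disparity by reorgLateBlockCountAttestations, as UpdateHead does when it runs two seconds before the boundary, lets attestations from the still-current slot through at that early call.
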
@@ -190,7 +176,7 @@ func (s *Service) processAttestations(ctx context.Context) {
			continue
		}

		if err := s.receiveAttestationNoPubsub(ctx, a); err != nil {
		if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
			log.WithFields(logrus.Fields{
				"slot":           a.Data.Slot,
				"committeeIndex": a.Data.CommitteeIndex,
@@ -207,11 +193,11 @@ func (s *Service) processAttestations(ctx context.Context) {
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save latest head info
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error {
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation, disparity time.Duration) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
	defer span.End()

	if err := s.OnAttestation(ctx, att); err != nil {
	if err := s.OnAttestation(ctx, att, disparity); err != nil {
		return errors.Wrap(err, "could not process attestation")
	}

@@ -120,7 +120,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
	require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
	service.processAttestations(ctx)
	service.processAttestations(ctx, 0)
	require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
	require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
}
@@ -183,10 +183,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
	service.head.root = r // Old head

	require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
	require.NoError(t, err, service.UpdateHead(ctx))

	service.UpdateHead(ctx, 0)
	require.Equal(t, tRoot, service.headRoot())
	require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
	require.Equal(t, tRoot, service.head.root)                             // Validate head is the new one
}

func TestService_UpdateHead_NoAtts(t *testing.T) {
@@ -236,9 +235,8 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
	require.Equal(t, 3, fcs.NodeCount())

	require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
	require.NoError(t, err, service.UpdateHead(ctx))
	service.UpdateHead(ctx, 0)
	require.Equal(t, r, service.headRoot())

	require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
	require.Equal(t, r, service.head.root)                                 // Validate head is the new one

}

@@ -1,6 +1,7 @@
package blockchain

import (
	"bytes"
	"context"

	"github.com/pkg/errors"
@@ -8,6 +9,7 @@
	statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/runtime/version"
@@ -45,6 +47,8 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
		return err
	}

	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	// Apply state transition on the new block.
	if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
		err := errors.Wrap(err, "could not process block")
@@ -52,9 +56,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
		return err
	}

	// Handle post block operations such as attestations and exits.
	if err := s.handlePostBlockOperations(blockCopy.Block()); err != nil {
		return err
	// Handle post block operations such as pruning exits and BLS messages if the incoming block is the head.
	if err := s.prunePostBlockOperationPools(ctx, blockCopy, blockRoot); err != nil {
		log.WithError(err).Error("Could not prune canonical objects from pool ")
	}

	// Have we been finalizing? Should we start saving hot states to db?
@@ -63,11 +67,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	}

	// Reports on block and fork choice metrics.
	finalized := s.FinalizedCheckpt()
	cp := s.ForkChoicer().FinalizedCheckpoint()
	finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
	reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)

	// Log block sync status.
	justified := s.CurrentJustifiedCheckpt()
	cp = s.ForkChoicer().JustifiedCheckpoint()
	justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
	if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
		log.WithError(err).Error("Unable to log block sync status")
	}
@@ -90,6 +96,9 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
	ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
	defer span.End()

	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()

	// Apply state transition on the incoming newly received block batches, one by one.
	if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
		err := errors.Wrap(err, "could not process block in batch")
@@ -114,14 +123,15 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
	})

	// Reports on blockCopy and fork choice metrics.
	finalized := s.FinalizedCheckpt()
	cp := s.ForkChoicer().FinalizedCheckpoint()
	finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
	reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
}

if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
	return err
}
finalized := s.FinalizedCheckpt()
finalized := s.ForkChoicer().FinalizedCheckpoint()
if finalized == nil {
	return errNilFinalizedInStore
}
@@ -142,32 +152,45 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {

// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
	s.ForkChoicer().Lock()
	defer s.ForkChoicer().Unlock()
	s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
}

func (s *Service) handlePostBlockOperations(b interfaces.ReadOnlyBeaconBlock) error {
// prunePostBlockOperationPools only runs when the incoming block is the new head; otherwise it returns nil.
func (s *Service) prunePostBlockOperationPools(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock, root [32]byte) error {
	headRoot, err := s.HeadRoot(ctx)
	if err != nil {
		return err
	}
	// By comparing against the current head root, which has already gone through forkchoice,
	// we can assume the incoming block root is canonical when the two are equal.
	if !bytes.Equal(headRoot, root[:]) {
		return nil
	}

	// Mark block exits as seen so we don't include same ones in future blocks.
	for _, e := range b.Body().VoluntaryExits() {
	for _, e := range blk.Block().Body().VoluntaryExits() {
		s.cfg.ExitPool.MarkIncluded(e)
	}

	// Mark block BLS changes as seen so we don't include same ones in future blocks.
	if err := s.handleBlockBLSToExecChanges(b); err != nil {
	if err := s.markIncludedBlockBLSToExecChanges(blk.Block()); err != nil {
		return errors.Wrap(err, "could not process BLSToExecutionChanges")
	}

	// Mark attester slashings as seen so we don't include same ones in future blocks.
	for _, as := range b.Body().AttesterSlashings() {
	for _, as := range blk.Block().Body().AttesterSlashings() {
		s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
	}
	return nil
}

func (s *Service) handleBlockBLSToExecChanges(blk interfaces.ReadOnlyBeaconBlock) error {
	if blk.Version() < version.Capella {
func (s *Service) markIncludedBlockBLSToExecChanges(headBlock interfaces.ReadOnlyBeaconBlock) error {
	if headBlock.Version() < version.Capella {
		return nil
	}
	changes, err := blk.Body().BLSToExecutionChanges()
	changes, err := headBlock.Body().BLSToExecutionChanges()
	if err != nil {
		return errors.Wrap(err, "could not get BLSToExecutionChanges")
	}
@@ -179,11 +202,12 @@ func (s *Service) handleBlockBLSToExecChanges(blk interfaces.ReadOnlyBeaconBlock

// This checks whether it's time to start saving hot states to DB.
// It's time when there have been `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
// Requires a read lock on forkchoice.
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
	currentEpoch := slots.ToEpoch(s.CurrentSlot())
	// Prevent `sinceFinality` from underflowing.
	var sinceFinality primitives.Epoch
	finalized := s.FinalizedCheckpt()
	finalized := s.ForkChoicer().FinalizedCheckpoint()
	if finalized == nil {
		return errNilFinalizedInStore
	}

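The `var sinceFinality` declaration above exists because epoch arithmetic is unsigned: naively computing currentEpoch - finalized.Epoch would wrap around whenever finality is not behind the wall clock. A minimal sketch of the guard; the threshold constant's name and value here are assumptions, not Prysm's actual numbers:

package sketch

// epochsSinceFinalitySaveHotStateDB is a hypothetical threshold standing in
// for Prysm's real constant of the same name.
const epochsSinceFinalitySaveHotStateDB = 128

// shouldSaveHotStates guards the unsigned subtraction so sinceFinality
// cannot underflow when finalizedEpoch >= currentEpoch.
func shouldSaveHotStates(currentEpoch, finalizedEpoch uint64) bool {
	var sinceFinality uint64
	if currentEpoch > finalizedEpoch {
		sinceFinality = currentEpoch - finalizedEpoch
	}
	return sinceFinality >= epochsSinceFinalitySaveHotStateDB
}
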
@@ -97,7 +97,8 @@ func TestService_ReceiveBlock(t *testing.T) {
		),
	},
	check: func(t *testing.T, s *Service) {
		pending := s.cfg.ExitPool.PendingExits(genesis, 1, true /* no limit */)
		pending, err := s.cfg.ExitPool.PendingExits()
		require.NoError(t, err)
		if len(pending) != 0 {
			t.Errorf(
				"Did not mark the correct number of exits. Got %d pending but wanted %d",
@@ -356,7 +357,7 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
		}
		blk, err := blocks.NewBeaconBlock(pbb)
		require.NoError(t, err)
		require.NoError(t, service.handleBlockBLSToExecChanges(blk))
		require.NoError(t, service.markIncludedBlockBLSToExecChanges(blk))
	})

	t.Run("Post Capella no changes", func(t *testing.T) {
@@ -366,7 +367,7 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
		}
		blk, err := blocks.NewBeaconBlock(pbb)
		require.NoError(t, err)
		require.NoError(t, service.handleBlockBLSToExecChanges(blk))
		require.NoError(t, service.markIncludedBlockBLSToExecChanges(blk))
	})

	t.Run("Post Capella some changes", func(t *testing.T) {
@@ -388,7 +389,7 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {

		pool.InsertBLSToExecChange(signedChange)
		require.Equal(t, true, pool.ValidatorExists(idx))
		require.NoError(t, service.handleBlockBLSToExecChanges(blk))
		require.NoError(t, service.markIncludedBlockBLSToExecChanges(blk))
		require.Equal(t, false, pool.ValidatorExists(idx))
	})
}

@@ -44,20 +44,19 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
	cfg                     *config
	ctx                     context.Context
	cancel                  context.CancelFunc
	genesisTime             time.Time
	head                    *head
	headLock                sync.RWMutex
	originBlockRoot         [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
	nextEpochBoundarySlot   primitives.Slot
	boundaryRoots           [][32]byte
	checkpointStateCache    *cache.CheckpointStateCache
	initSyncBlocks          map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
	initSyncBlocksLock      sync.RWMutex
	wsVerifier              *WeakSubjectivityVerifier
	processAttestationsLock sync.Mutex
	cfg                   *config
	ctx                   context.Context
	cancel                context.CancelFunc
	genesisTime           time.Time
	head                  *head
	headLock              sync.RWMutex
	originBlockRoot       [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
	nextEpochBoundarySlot primitives.Slot
	boundaryRoots         [][32]byte
	checkpointStateCache  *cache.CheckpointStateCache
	initSyncBlocks        map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
	initSyncBlocksLock    sync.RWMutex
	wsVerifier            *WeakSubjectivityVerifier
}

// config options for the service.
@@ -200,6 +199,8 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
	}

	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
		Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
		return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
@@ -426,6 +427,8 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
	s.originBlockRoot = genesisBlkRoot
	s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)

	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
		log.WithError(err).Fatal("Could not process genesis block for fork choice")
	}
@@ -446,6 +449,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
// 1.) Check fork choice store.
// 2.) Check DB.
// Checking 1.) is ten times faster than checking 2.)
// This function requires a lock on forkchoice.
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
	if s.cfg.ForkChoiceStore.HasNode(root) {
		return true

@@ -20,11 +20,13 @@ go_library(
	"//beacon-chain/db:go_default_library",
	"//beacon-chain/forkchoice:go_default_library",
	"//beacon-chain/state:go_default_library",
	"//beacon-chain/state/state-native:go_default_library",
	"//config/fieldparams:go_default_library",
	"//config/params:go_default_library",
	"//consensus-types/interfaces:go_default_library",
	"//consensus-types/primitives:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//proto/engine/v1:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",
	"@com_github_pkg_errors//:go_default_library",
	"@com_github_sirupsen_logrus//:go_default_library",

@@ -19,11 +19,13 @@ import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
	state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/sirupsen/logrus"
)
@@ -32,6 +34,7 @@ var ErrNilState = errors.New("nil state")

// ChainService defines the mock interface for testing
type ChainService struct {
	NotFinalized     bool
	Optimistic       bool
	ValidAttestation bool
	ValidatorsRoot   [32]byte
@@ -389,11 +392,6 @@ func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
	return [32]byte{}
}

// VerifyFinalizedBlkDescendant mocks VerifyBlkDescendant and always returns nil.
func (s *ChainService) VerifyFinalizedBlkDescendant(_ context.Context, _ [32]byte) error {
	return s.VerifyBlkDescendantErr
}

// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
	if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
@@ -402,14 +400,6 @@ func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attes
	return nil
}

// VerifyFinalizedConsistency mocks VerifyFinalizedConsistency and always returns nil.
func (s *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) error {
	if !bytes.Equal(r, s.FinalizedCheckPoint.Root) {
		return errors.New("Root and finalized store are not consistent")
	}
	return nil
}

// ChainHeads mocks ChainHeads and always return nil.
func (_ *ChainService) ChainHeads() ([][32]byte, []primitives.Slot) {
	return [][32]byte{
@@ -459,6 +449,11 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
	return s.Optimistic, nil
}

// InForkchoice mocks the same method in the chain service.
func (s *ChainService) InForkchoice(_ [32]byte) bool {
	return !s.NotFinalized
}

// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
	s.OptimisticCheckRootReceived = root
@@ -466,7 +461,17 @@ func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bo
}

// UpdateHead mocks the same method in the chain service.
func (s *ChainService) UpdateHead(_ context.Context) error { return nil }
func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
	ojc := &ethpb.Checkpoint{}
	st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
	if err != nil {
		logrus.WithError(err).Error("could not update head")
	}
	err = s.ForkChoicer().InsertNode(ctx, st, root)
	if err != nil {
		logrus.WithError(err).Error("could not insert node to forkchoice")
	}
}

// ReceiveAttesterSlashing mocks the same method in the chain service.
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
@@ -475,3 +480,37 @@ func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterS
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
	return s.FinalizedRoots[blockRoot]
}

// prepareForkchoiceState prepares a beacon state with the given data to mock
// insertion into forkchoice.
func prepareForkchoiceState(
	_ context.Context,
	slot primitives.Slot,
	blockRoot [32]byte,
	parentRoot [32]byte,
	payloadHash [32]byte,
	justified *ethpb.Checkpoint,
	finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
	blockHeader := &ethpb.BeaconBlockHeader{
		ParentRoot: parentRoot[:],
	}

	executionHeader := &enginev1.ExecutionPayloadHeader{
		BlockHash: payloadHash[:],
	}

	base := &ethpb.BeaconStateBellatrix{
		Slot:                         slot,
		RandaoMixes:                  make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		BlockRoots:                   make([][]byte, 1),
		CurrentJustifiedCheckpoint:   justified,
		FinalizedCheckpoint:          finalized,
		LatestExecutionPayloadHeader: executionHeader,
		LatestBlockHeader:            blockHeader,
	}

	base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
	st, err := state_native.InitializeFromProtoBellatrix(base)
	return st, blockRoot, err
}

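A typical use of the mock helper above, mirroring the updated tests earlier in this diff: build a minimal Bellatrix state for a (slot, root, parent) triple and feed it to the forkchoice store. Sketch only; ctx, fcs, and the test harness around it are assumed:

	// Inside a test, with ctx, a forkchoice store fcs, and ojc := &ethpb.Checkpoint{}:
	st, root, err := prepareForkchoiceState(ctx, 0, genesisRoot, [32]byte{}, [32]byte{}, ojc, ojc)
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, st, root))
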
beacon-chain/cache/depositsnapshot/BUILD.bazel
@@ -1,4 +1,4 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
@@ -19,3 +19,25 @@ go_library(
        "@com_github_pkg_errors//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "deposit_tree_snapshot_test.go",
        "merkle_tree_test.go",
        "spec_test.go",
    ],
    data = [
        "@eip4881_spec_tests//:test_data",
    ],
    embed = [":go_default_library"],
    deps = [
        "//io/file:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@in_gopkg_yaml_v3//:go_default_library",
        "@io_bazel_rules_go//go/tools/bazel:go_default_library",
    ],
)

@@ -23,8 +23,6 @@ var (
	ErrInvalidIndex = errors.New("index should be greater than finalizedDeposits - 1")
	// ErrNoDeposits occurs when the number of deposits is 0.
	ErrNoDeposits = errors.New("number of deposits should be greater than 0")
	// ErrNoFinalizedDeposits occurs when the number of finalized deposits is 0.
	ErrNoFinalizedDeposits = errors.New("number of finalized deposits should be greater than 0")
	// ErrTooManyDeposits occurs when the number of deposits exceeds the capacity of the tree.
	ErrTooManyDeposits = errors.New("number of deposits should not be greater than the capacity of the tree")
)
@@ -62,7 +60,7 @@ func (d *DepositTree) getSnapshot() (DepositTreeSnapshot, error) {
		return DepositTreeSnapshot{}, ErrEmptyExecutionBlock
	}
	var finalized [][32]byte
	depositCount, _ := d.tree.GetFinalized(finalized)
	depositCount, finalized := d.tree.GetFinalized(finalized)
	return fromTreeParts(finalized, depositCount, d.finalizedExecutionBlock)
}

@@ -119,9 +117,6 @@ func (d *DepositTree) getProof(index uint64) ([32]byte, [][32]byte, error) {
		return [32]byte{}, nil, ErrInvalidMixInLength
	}
	finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
	if finalizedDeposits == 0 {
		return [32]byte{}, nil, ErrNoFinalizedDeposits
	}
	if finalizedDeposits != 0 {
		finalizedDeposits = finalizedDeposits - 1
	}

@@ -40,7 +40,7 @@ func (ds *DepositTreeSnapshot) CalculateRoot() ([32]byte, error) {
		}
		size >>= 1
	}
	return sha256.Sum256(append(root[:], bytesutil.Uint64ToBytesLittleEndian(ds.depositCount)...)), nil
	return sha256.Sum256(append(root[:], bytesutil.Uint64ToBytesLittleEndian32(ds.depositCount)...)), nil
}

// fromTreeParts constructs the deposit tree from pre-existing data.

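The CalculateRoot change above swaps an 8-byte little-endian encoding of depositCount for a 32-byte one, which matches the EIP-4881 rule (assumed here) that the deposit count is mixed in as a 256-bit little-endian value, the same shape as SSZ's mix_in_length. A standalone sketch of the whole snapshot-root fold under that assumption; this is an approximation of the algorithm, not Prysm's implementation:

package sketch

import (
	"crypto/sha256"
	"encoding/binary"
)

// snapshotRoot folds the finalized subtree hashes against zero-subtree
// hashes level by level, then mixes in the deposit count as a 32-byte
// little-endian value (EIP-4881 / SSZ mix_in_length style, assumed).
func snapshotRoot(finalized [][32]byte, depositCount uint64, depth int) [32]byte {
	var root [32]byte // zero hash at level 0
	zero := [32]byte{}
	idx := len(finalized) - 1
	size := depositCount
	for level := 0; level < depth; level++ {
		if size&1 == 1 && idx >= 0 {
			// Odd subtree count at this level: a finalized hash sits on the left.
			root = sha256.Sum256(append(append([]byte{}, finalized[idx][:]...), root[:]...))
			idx--
		} else {
			root = sha256.Sum256(append(append([]byte{}, root[:]...), zero[:]...))
		}
		size >>= 1
		zero = sha256.Sum256(append(append([]byte{}, zero[:]...), zero[:]...))
	}
	var countLE [32]byte
	binary.LittleEndian.PutUint64(countLE[:8], depositCount)
	return sha256.Sum256(append(root[:], countLE[:]...))
}
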
beacon-chain/cache/depositsnapshot/deposit_tree_snapshot_test.go
@@ -0,0 +1,54 @@
package depositsnapshot

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/prysmaticlabs/prysm/v3/testing/require"
)

func TestDepositTreeSnapshot_CalculateRoot(t *testing.T) {
	tests := []struct {
		name         string
		finalized    int
		depositCount uint64
		want         [32]byte
	}{
		{
			name:         "empty",
			finalized:    0,
			depositCount: 0,
			want:         [32]byte{215, 10, 35, 71, 49, 40, 92, 104, 4, 194, 164, 245, 103, 17, 221, 184, 200, 44, 153, 116, 15, 32, 120, 84, 137, 16, 40, 175, 52, 226, 126, 94},
		},
		{
			name:         "1 Finalized",
			finalized:    1,
			depositCount: 2,
			want:         [32]byte{36, 118, 154, 57, 217, 109, 145, 116, 238, 1, 207, 59, 187, 28, 69, 187, 70, 55, 153, 180, 15, 150, 37, 72, 140, 36, 109, 154, 212, 202, 47, 59},
		},
		{
			name:         "many finalised",
			finalized:    6,
			depositCount: 20,
			want:         [32]byte{210, 63, 57, 119, 12, 5, 3, 25, 139, 20, 244, 59, 114, 119, 35, 88, 222, 88, 122, 106, 239, 20, 45, 140, 99, 92, 222, 166, 133, 159, 128, 72},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var finalized [][32]byte
			for i := 0; i < tt.finalized; i++ {
				finalized = append(finalized, hexString(t, fmt.Sprintf("%064d", i)))
			}
			ds := &DepositTreeSnapshot{
				finalized:    finalized,
				depositCount: tt.depositCount,
			}
			root, err := ds.CalculateRoot()
			require.NoError(t, err)
			if got := root; !reflect.DeepEqual(got, tt.want) {
				require.DeepEqual(t, tt.want, got)
			}
		})
	}
}
beacon-chain/cache/depositsnapshot/merkle_tree_test.go
@@ -0,0 +1,141 @@
package depositsnapshot

import (
	"encoding/hex"
	"fmt"
	"reflect"
	"testing"

	"github.com/prysmaticlabs/prysm/v3/testing/assert"
	"github.com/prysmaticlabs/prysm/v3/testing/require"
)

func hexString(t *testing.T, hexStr string) [32]byte {
	t.Helper()
	b, err := hex.DecodeString(hexStr)
	require.NoError(t, err)
	if len(b) != 32 {
		assert.Equal(t, 32, len(b), "bad hash length, expected 32")
	}
	x := (*[32]byte)(b)
	return *x
}

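The `(*[32]byte)(b)` conversion in hexString relies on Go 1.17+ slice-to-array-pointer conversion, which panics at runtime when the slice is shorter than 32 bytes; that is why the length assertion sits right before it. Reduced to its essence:

package sketch

// toArray converts a byte slice to a fixed-size array.
// The conversion panics if len(b) < 32, so callers must check lengths first.
func toArray(b []byte) [32]byte {
	return *(*[32]byte)(b)
}
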

func Test_create(t *testing.T) {
	tests := []struct {
		name   string
		leaves [][32]byte
		depth  uint64
		want   MerkleTreeNode
	}{
		{
			name:   "empty tree",
			leaves: nil,
			depth:  0,
			want:   &ZeroNode{},
		},
		{
			name:   "zero depth",
			leaves: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
			depth:  0,
			want:   &LeafNode{},
		},
		{
			name:   "depth of 1",
			leaves: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
			depth:  1,
			want:   &InnerNode{&LeafNode{}, &ZeroNode{}},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := create(tt.leaves, tt.depth); !reflect.DeepEqual(got, tt.want) {
				require.DeepEqual(t, tt.want, got)
			}
		})
	}
}

func Test_fromSnapshotParts(t *testing.T) {
	tests := []struct {
		name      string
		finalized [][32]byte
		deposits  uint64
		level     uint64
		want      MerkleTreeNode
	}{
		{
			name:      "empty",
			finalized: nil,
			deposits:  0,
			level:     0,
			want:      &ZeroNode{},
		},
		{
			name:      "single finalized node",
			finalized: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
			deposits:  1,
			level:     0,
			want: &FinalizedNode{
				depositCount: 1,
				hash:         [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
			},
		},
		{
			name:      "multiple deposits and 1 Finalized",
			finalized: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
			deposits:  2,
			level:     4,
			want: &InnerNode{
				left:  &InnerNode{&InnerNode{&FinalizedNode{depositCount: 2, hash: hexString(t, fmt.Sprintf("%064d", 0))}, &ZeroNode{1}}, &ZeroNode{2}},
				right: &ZeroNode{3},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tree, err := fromSnapshotParts(tt.finalized, tt.deposits, tt.level)
			require.NoError(t, err)
			if got := tree; !reflect.DeepEqual(got, tt.want) {
				require.DeepEqual(t, tt.want, got)
			}
		})
	}
}

func Test_generateProof(t *testing.T) {
	tests := []struct {
		name   string
		leaves uint64
	}{
		{
			name:   "1 leaf",
			leaves: 1,
		},
		{
			name:   "4 leaves",
			leaves: 4,
		},
		{
			name:   "10 leaves",
			leaves: 10,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			testCases, err := readTestCases()
			require.NoError(t, err)
			tree := New()
			for _, c := range testCases[:tt.leaves] {
				err = tree.pushLeaf(c.DepositDataRoot)
				require.NoError(t, err)
			}
			for i := uint64(0); i < tt.leaves; i++ {
				leaf, proof := generateProof(tree.tree, i, DepositContractDepth)
				require.Equal(t, leaf, testCases[i].DepositDataRoot)
				calcRoot := merkleRootFromBranch(leaf, proof, i)
				require.Equal(t, tree.tree.GetRoot(), calcRoot)
			}
		})
	}
}
beacon-chain/cache/depositsnapshot/spec_test.go
@@ -0,0 +1,355 @@
|
||||
package depositsnapshot

import (
    "crypto/sha256"
    "encoding/hex"
    "strconv"
    "strings"
    "testing"

    "github.com/bazelbuild/rules_go/go/tools/bazel"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v3/io/file"
    eth "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
    "github.com/prysmaticlabs/prysm/v3/testing/require"
    "gopkg.in/yaml.v3"
)

type testCase struct {
    DepositData     depositData `yaml:"deposit_data"`
    DepositDataRoot [32]byte    `yaml:"deposit_data_root"`
    Eth1Data        *eth1Data   `yaml:"eth1_data"`
    BlockHeight     uint64      `yaml:"block_height"`
    Snapshot        snapshot    `yaml:"snapshot"`
}

func (tc *testCase) UnmarshalYAML(value *yaml.Node) error {
    raw := struct {
        DepositData     depositData `yaml:"deposit_data"`
        DepositDataRoot string      `yaml:"deposit_data_root"`
        Eth1Data        *eth1Data   `yaml:"eth1_data"`
        BlockHeight     string      `yaml:"block_height"`
        Snapshot        snapshot    `yaml:"snapshot"`
    }{}
    err := value.Decode(&raw)
    if err != nil {
        return err
    }
    tc.DepositDataRoot, err = hexStringToByteArray(raw.DepositDataRoot)
    if err != nil {
        return err
    }
    tc.DepositData = raw.DepositData
    tc.Eth1Data = raw.Eth1Data
    tc.BlockHeight, err = stringToUint64(raw.BlockHeight)
    if err != nil {
        return err
    }
    tc.Snapshot = raw.Snapshot
    return nil
}

type depositData struct {
    Pubkey                []byte `yaml:"pubkey"`
    WithdrawalCredentials []byte `yaml:"withdrawal_credentials"`
    Amount                uint64 `yaml:"amount"`
    Signature             []byte `yaml:"signature"`
}

func (dd *depositData) UnmarshalYAML(value *yaml.Node) error {
    raw := struct {
        Pubkey                string `yaml:"pubkey"`
        WithdrawalCredentials string `yaml:"withdrawal_credentials"`
        Amount                string `yaml:"amount"`
        Signature             string `yaml:"signature"`
    }{}
    err := value.Decode(&raw)
    if err != nil {
        return err
    }
    dd.Pubkey, err = hexStringToBytes(raw.Pubkey)
    if err != nil {
        return err
    }
    dd.WithdrawalCredentials, err = hexStringToBytes(raw.WithdrawalCredentials)
    if err != nil {
        return err
    }
    dd.Amount, err = strconv.ParseUint(raw.Amount, 10, 64)
    if err != nil {
        return err
    }
    dd.Signature, err = hexStringToBytes(raw.Signature)
    if err != nil {
        return err
    }
    return nil
}

type eth1Data struct {
    DepositRoot  [32]byte `yaml:"deposit_root"`
    DepositCount uint64   `yaml:"deposit_count"`
    BlockHash    [32]byte `yaml:"block_hash"`
}

func (ed *eth1Data) UnmarshalYAML(value *yaml.Node) error {
    raw := struct {
        DepositRoot  string `yaml:"deposit_root"`
        DepositCount string `yaml:"deposit_count"`
        BlockHash    string `yaml:"block_hash"`
    }{}
    err := value.Decode(&raw)
    if err != nil {
        return err
    }
    ed.DepositRoot, err = hexStringToByteArray(raw.DepositRoot)
    if err != nil {
        return err
    }
    ed.DepositCount, err = stringToUint64(raw.DepositCount)
    if err != nil {
        return err
    }
    ed.BlockHash, err = hexStringToByteArray(raw.BlockHash)
    if err != nil {
        return err
    }
    return nil
}

type snapshot struct {
    DepositTreeSnapshot
}

func (sd *snapshot) UnmarshalYAML(value *yaml.Node) error {
    raw := struct {
        Finalized            []string `yaml:"finalized"`
        DepositRoot          string   `yaml:"deposit_root"`
        DepositCount         string   `yaml:"deposit_count"`
        ExecutionBlockHash   string   `yaml:"execution_block_hash"`
        ExecutionBlockHeight string   `yaml:"execution_block_height"`
    }{}
    err := value.Decode(&raw)
    if err != nil {
        return err
    }
    sd.finalized = make([][32]byte, len(raw.Finalized))
    for i, finalized := range raw.Finalized {
        sd.finalized[i], err = hexStringToByteArray(finalized)
        if err != nil {
            return err
        }
    }
    sd.depositRoot, err = hexStringToByteArray(raw.DepositRoot)
    if err != nil {
        return err
    }
    sd.depositCount, err = stringToUint64(raw.DepositCount)
    if err != nil {
        return err
    }
    sd.executionBlock.Hash, err = hexStringToByteArray(raw.ExecutionBlockHash)
    if err != nil {
        return err
    }
    sd.executionBlock.Depth, err = stringToUint64(raw.ExecutionBlockHeight)
    if err != nil {
        return err
    }
    return nil
}
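
// For reference, the snapshot YAML shape consumed above (schematic only; the
// field names come from the struct tags, the values here are placeholders):
//
//	finalized: ["0x...", "0x..."]
//	deposit_root: "0x..."
//	deposit_count: "128"
//	execution_block_hash: "0x..."
//	execution_block_height: "14"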

func readTestCases() ([]testCase, error) {
    testFolders, err := bazel.ListRunfiles()
    if err != nil {
        return nil, err
    }
    for _, ff := range testFolders {
        if strings.Contains(ff.ShortPath, "eip4881_spec_tests") &&
            strings.Contains(ff.ShortPath, "eip-4881/test_cases.yaml") {
            enc, err := file.ReadFileAsBytes(ff.Path)
            if err != nil {
                return nil, err
            }
            var testCases []testCase
            err = yaml.Unmarshal(enc, &testCases)
            if err != nil {
                return []testCase{}, err
            }
            return testCases, nil
        }
    }
    return nil, errors.New("spec test file not found")
}

func TestRead(t *testing.T) {
    tcs, err := readTestCases()
    require.NoError(t, err)
    for _, tc := range tcs {
        t.Log(tc)
    }
}

func hexStringToByteArray(s string) (b [32]byte, err error) {
    var raw []byte
    raw, err = hexStringToBytes(s)
    if err != nil {
        return
    }
    if len(raw) != 32 {
        err = errors.New("invalid hex string length")
        return
    }
    copy(b[:], raw[:32])
    return
}

func hexStringToBytes(s string) (b []byte, err error) {
    b, err = hex.DecodeString(strings.TrimPrefix(s, "0x"))
    return
}

func stringToUint64(s string) (uint64, error) {
    // Parse the full 64-bit range; the spec test file encodes uint64 fields
    // as decimal strings.
    value, err := strconv.ParseUint(s, 10, 64)
    if err != nil {
        return 0, err
    }
    return value, nil
}

func merkleRootFromBranch(leaf [32]byte, branch [][32]byte, index uint64) [32]byte {
    root := leaf
    for i, l := range branch {
        ithBit := (index >> i) & 0x1
        if ithBit == 1 {
            root = sha256.Sum256(append(l[:], root[:]...))
        } else {
            root = sha256.Sum256(append(root[:], l[:]...))
        }
    }
    return root
}
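
// Worked example of the branch-walk convention above (added for illustration;
// not part of the EIP-4881 spec vectors): bit i of the index says whether the
// node at level i is a right child, i.e. whether its sibling hashes on the left.
func TestMerkleRootFromBranch_IndexBits(t *testing.T) {
    leaf := sha256.Sum256([]byte("leaf"))
    sib0 := sha256.Sum256([]byte("sibling at level 0"))
    sib1 := sha256.Sum256([]byte("sibling at level 1"))
    // index 2 = 0b10: the level-0 bit is 0 (leaf is a left child), the
    // level-1 bit is 1 (the combined node is a right child).
    lvl1 := sha256.Sum256(append(leaf[:], sib0[:]...))
    want := sha256.Sum256(append(sib1[:], lvl1[:]...))
    require.Equal(t, want, merkleRootFromBranch(leaf, [][32]byte{sib0, sib1}, 2))
}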

func checkProof(t *testing.T, tree *DepositTree, index uint64) {
    leaf, proof, err := tree.getProof(index)
    require.NoError(t, err)
    calcRoot := merkleRootFromBranch(leaf, proof, index)
    require.Equal(t, tree.getRoot(), calcRoot)
}

func compareProof(t *testing.T, tree1, tree2 *DepositTree, index uint64) {
    require.Equal(t, tree1.getRoot(), tree2.getRoot())
    checkProof(t, tree1, index)
    checkProof(t, tree2, index)
}

func cloneFromSnapshot(t *testing.T, snapshot DepositTreeSnapshot, testCases []testCase) *DepositTree {
    cp, err := fromSnapshot(snapshot)
    require.NoError(t, err)
    for _, c := range testCases {
        err = cp.pushLeaf(c.DepositDataRoot)
        require.NoError(t, err)
    }
    return &cp
}

func TestDepositCases(t *testing.T) {
    tree := New()
    testCases, err := readTestCases()
    require.NoError(t, err)
    for _, c := range testCases {
        err = tree.pushLeaf(c.DepositDataRoot)
        require.NoError(t, err)
    }
}

func TestFinalization(t *testing.T) {
    tree := New()
    testCases, err := readTestCases()
    require.NoError(t, err)
    for _, c := range testCases[:128] {
        err = tree.pushLeaf(c.DepositDataRoot)
        require.NoError(t, err)
    }
    originalRoot := tree.getRoot()
    require.DeepEqual(t, testCases[127].Eth1Data.DepositRoot, originalRoot)
    err = tree.finalize(&eth.Eth1Data{
        DepositRoot:  testCases[100].Eth1Data.DepositRoot[:],
        DepositCount: testCases[100].Eth1Data.DepositCount,
        BlockHash:    testCases[100].Eth1Data.BlockHash[:],
    }, testCases[100].BlockHeight)
    require.NoError(t, err)
    // ensure finalization doesn't change root
    require.Equal(t, tree.getRoot(), originalRoot)
    snapshotData, err := tree.getSnapshot()
    require.NoError(t, err)
    require.DeepEqual(t, testCases[100].Snapshot.DepositTreeSnapshot, snapshotData)
    // create a copy of the tree from a snapshot by replaying
    // the deposits after the finalized deposit
    cp := cloneFromSnapshot(t, snapshotData, testCases[101:128])
    // ensure original and copy have the same root
    require.Equal(t, tree.getRoot(), cp.getRoot())
    // finalize original again to check double finalization
    err = tree.finalize(&eth.Eth1Data{
        DepositRoot:  testCases[105].Eth1Data.DepositRoot[:],
        DepositCount: testCases[105].Eth1Data.DepositCount,
        BlockHash:    testCases[105].Eth1Data.BlockHash[:],
    }, testCases[105].BlockHeight)
    require.NoError(t, err)
    // root should still be the same
    require.Equal(t, originalRoot, tree.getRoot())
    // create a copy of the tree by taking a snapshot again
    snapshotData, err = tree.getSnapshot()
    require.NoError(t, err)
    cp = cloneFromSnapshot(t, snapshotData, testCases[106:128])
    // create a copy of the tree by replaying ALL deposits from nothing
    fullTreeCopy := New()
    for _, c := range testCases[:128] {
        err = fullTreeCopy.pushLeaf(c.DepositDataRoot)
        require.NoError(t, err)
    }
    for i := 106; i < 128; i++ {
        compareProof(t, tree, cp, uint64(i))
        compareProof(t, tree, fullTreeCopy, uint64(i))
    }
}

func TestSnapshotCases(t *testing.T) {
    tree := New()
    testCases, err := readTestCases()
    require.NoError(t, err)
    for _, c := range testCases {
        err = tree.pushLeaf(c.DepositDataRoot)
        require.NoError(t, err)
    }
    for _, c := range testCases {
        err = tree.finalize(&eth.Eth1Data{
            DepositRoot:  c.Eth1Data.DepositRoot[:],
            DepositCount: c.Eth1Data.DepositCount,
            BlockHash:    c.Eth1Data.BlockHash[:],
        }, c.BlockHeight)
        require.NoError(t, err)
        s, err := tree.getSnapshot()
        require.NoError(t, err)
        require.DeepEqual(t, c.Snapshot.DepositTreeSnapshot, s)
    }
}

func TestEmptyTreeSnapshot(t *testing.T) {
    _, err := New().getSnapshot()
    require.ErrorContains(t, "empty execution block", err)
}

func TestInvalidSnapshot(t *testing.T) {
    invalidSnapshot := DepositTreeSnapshot{
        finalized:    nil,
        depositRoot:  Zerohashes[0],
        depositCount: 0,
        executionBlock: executionBlock{
            Hash:  Zerohashes[0],
            Depth: 0,
        },
    }
    _, err := fromSnapshot(invalidSnapshot)
    require.ErrorContains(t, "snapshot root is invalid", err)
}
@@ -44,49 +44,51 @@ import (
//	increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
// else:
//	decrease_balance(state, participant_index, participant_reward)
-func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, error) {
-	s, votedKeys, err := processSyncAggregate(ctx, s, sync)
+func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, uint64, error) {
+	s, votedKeys, reward, err := processSyncAggregate(ctx, s, sync)
	if err != nil {
-		return nil, errors.Wrap(err, "could not filter sync committee votes")
+		return nil, 0, errors.Wrap(err, "could not filter sync committee votes")
	}

	if err := VerifySyncCommitteeSig(s, votedKeys, sync.SyncCommitteeSignature); err != nil {
-		return nil, errors.Wrap(err, "could not verify sync committee signature")
+		return nil, 0, errors.Wrap(err, "could not verify sync committee signature")
	}
-	return s, nil
+	return s, reward, nil
}

// processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except
-// verifying the BLS signatures. It returns the modified beacon state and the list of validators'
-// public keys that voted, for future signature verification.
+// verifying the BLS signatures. It returns the modified beacon state, the list of validators'
+// public keys that voted (for future signature verification) and the proposer reward for including
+// sync aggregate messages.
func processSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (
	state.BeaconState,
	[]bls.PublicKey,
+	uint64,
	error) {
	currentSyncCommittee, err := s.CurrentSyncCommittee()
	if err != nil {
-		return nil, nil, err
+		return nil, nil, 0, err
	}
	if currentSyncCommittee == nil {
-		return nil, nil, errors.New("nil current sync committee in state")
+		return nil, nil, 0, errors.New("nil current sync committee in state")
	}
	committeeKeys := currentSyncCommittee.Pubkeys
	if sync.SyncCommitteeBits.Len() > uint64(len(committeeKeys)) {
-		return nil, nil, errors.New("bits length exceeds committee length")
+		return nil, nil, 0, errors.New("bits length exceeds committee length")
	}
	votedKeys := make([]bls.PublicKey, 0, len(committeeKeys))

	activeBalance, err := helpers.TotalActiveBalance(s)
	if err != nil {
-		return nil, nil, err
+		return nil, nil, 0, err
	}
	proposerReward, participantReward, err := SyncRewards(activeBalance)
	if err != nil {
-		return nil, nil, err
+		return nil, nil, 0, err
	}
	proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
	if err != nil {
-		return nil, nil, err
+		return nil, nil, 0, err
	}

	earnedProposerReward := uint64(0)
@@ -94,29 +96,29 @@ func processSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.
		vIdx, exists := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[i]))
		// Impossible scenario.
		if !exists {
-			return nil, nil, errors.New("validator public key does not exist in state")
+			return nil, nil, 0, errors.New("validator public key does not exist in state")
		}

		if sync.SyncCommitteeBits.BitAt(i) {
			pubKey, err := bls.PublicKeyFromBytes(committeeKeys[i])
			if err != nil {
-				return nil, nil, err
+				return nil, nil, 0, err
			}
			votedKeys = append(votedKeys, pubKey)
			if err := helpers.IncreaseBalance(s, vIdx, participantReward); err != nil {
-				return nil, nil, err
+				return nil, nil, 0, err
			}
			earnedProposerReward += proposerReward
		} else {
			if err := helpers.DecreaseBalance(s, vIdx, participantReward); err != nil {
-				return nil, nil, err
+				return nil, nil, 0, err
			}
		}
	}
	if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
-		return nil, nil, err
+		return nil, nil, 0, err
	}
-	return s, votedKeys, err
+	return s, votedKeys, earnedProposerReward, err
}

// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.
@@ -17,6 +17,7 @@ import (
	"github.com/prysmaticlabs/prysm/v3/crypto/bls"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v3/testing/assert"
	"github.com/prysmaticlabs/prysm/v3/testing/require"
	"github.com/prysmaticlabs/prysm/v3/testing/util"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -53,8 +54,10 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
		SyncCommitteeSignature: aggregatedSig,
	}

-	beaconState, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
+	var reward uint64
+	beaconState, reward, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
	require.NoError(t, err)
+	assert.Equal(t, uint64(72192), reward)
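	// Sanity check on the expected constant (assuming this test's mainnet-style
	// 512-member sync committee): 512 equal proposer-reward increments of
	// 141 Gwei, one per participating member, sum to 72192 Gwei.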

	// Use a non-sync committee index to compare profitability.
	syncCommittee := make(map[primitives.ValidatorIndex]bool)
@@ -127,7 +130,7 @@ func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
		SyncCommitteeSignature: aggregatedSig,
	}

-	_, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
+	_, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
	require.ErrorContains(t, "invalid sync committee signature", err)
}

@@ -164,7 +167,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
		SyncCommitteeSignature: aggregatedSig,
	}

-	_, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
+	_, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
	require.NoError(t, err)
}

@@ -189,7 +192,7 @@ func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
		SyncCommitteeBits: syncBits,
	}
	require.NoError(t, beaconState.UpdateBalancesAtIndex(idx, 0))
-	st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
+	st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
	require.NoError(t, err)
	require.Equal(t, 511, len(votedKeys))
	require.DeepEqual(t, committeeKeys[0], votedKeys[0].Marshal())
@@ -212,7 +215,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
		SyncCommitteeBits: syncBits,
	}

-	st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
+	st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
	require.NoError(t, err)
	votedMap := make(map[[fieldparams.BLSPubkeyLength]byte]bool)
	for _, key := range votedKeys {
@@ -116,7 +116,7 @@ func VerifyExitAndSignature(
	return nil
}

-// verifyExitConditions implements the spec defined validation for voluntary exits(excluding signatures).
+// verifyExitConditions implements the spec defined validation for voluntary exits (excluding signatures).
//
// Spec pseudocode definition:
//
@@ -351,7 +351,7 @@ func ProcessBlockForStateRoot(
	if err != nil {
		return nil, errors.Wrap(err, "could not get sync aggregate from block")
	}
-	state, err = altair.ProcessSyncAggregate(ctx, state, sa)
+	state, _, err = altair.ProcessSyncAggregate(ctx, state, sa)
	if err != nil {
		return nil, errors.Wrap(err, "process_sync_aggregate failed")
	}
@@ -178,7 +178,6 @@ func SlashValidator(
	if err != nil {
		return nil, errors.Wrap(err, "could not get proposer idx")
	}
	// In phase 0, the proposer is the whistleblower.
	whistleBlowerIdx := proposerIdx
	whistleblowerReward := validator.EffectiveBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
	proposerReward := whistleblowerReward / proposerRewardQuotient
@@ -18,7 +18,6 @@ go_library(
        "log.go",
        "migration.go",
        "migration_archived_index.go",
-       "migration_blinded_beacon_blocks.go",
        "migration_block_slot_index.go",
        "migration_state_validators.go",
        "schema.go",
@@ -10,7 +10,6 @@ import (
	"github.com/pkg/errors"
	ssz "github.com/prysmaticlabs/fastssz"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/filters"
-	"github.com/prysmaticlabs/prysm/v3/config/features"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -272,6 +271,22 @@ func (s *Store) SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
	return s.SaveBlocks(ctx, []interfaces.ReadOnlySignedBeaconBlock{signed})
}

+// This function determines whether we should save beacon blocks in the DB in blinded
+// format by checking if a `saveBlindedBeaconBlocks` key exists in the database.
+// Otherwise, we check whether the last block stored is blinded, and write that
+// `saveBlindedBeaconBlocks` key to the DB for future checks.
+func (s *Store) shouldSaveBlinded(ctx context.Context) (bool, error) {
+	var saveBlinded bool
+	if err := s.db.View(func(tx *bolt.Tx) error {
+		metadataBkt := tx.Bucket(chainMetadataBucket)
+		saveBlinded = len(metadataBkt.Get(saveBlindedBeaconBlocksKey)) > 0
+		return nil
+	}); err != nil {
+		return false, err
+	}
+	return saveBlinded, nil
+}
+
// SaveBlocks via bulk updates to the db.
func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlocks")
@@ -287,7 +302,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySigned
		if err != nil {
			return err
		}
-		enc, err := marshalBlock(ctx, blk)
+		enc, err := s.marshalBlock(ctx, blk)
		if err != nil {
			return err
		}
@@ -296,6 +311,10 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySigned
		indicesByBucket := createBlockIndicesFromBlock(ctx, blk.Block())
		indicesForBlocks[i] = indicesByBucket
	}
+	saveBlinded, err := s.shouldSaveBlinded(ctx)
+	if err != nil {
+		return err
+	}
	return s.db.Update(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blocksBucket)
		for i, blk := range blks {
@@ -305,7 +324,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySigned
			if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
				return errors.Wrap(err, "could not update DB indices")
			}
-			if features.Get().EnableOnlyBlindedBeaconBlocks {
+			if saveBlinded {
				blindedBlock, err := blk.ToBlinded()
				if err != nil {
					if !errors.Is(err, blocks.ErrUnsupportedVersion) {
@@ -809,50 +828,71 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
	return blocks.NewSignedBeaconBlock(rawBlock)
}

// marshal versioned beacon block from struct type down to bytes.
-func marshalBlock(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
+func (s *Store) marshalBlock(
+	ctx context.Context,
+	blk interfaces.ReadOnlySignedBeaconBlock,
+) ([]byte, error) {
+	shouldBlind, err := s.shouldSaveBlinded(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if shouldBlind {
+		return marshalBlockBlinded(ctx, blk)
+	}
+	return marshalBlockFull(ctx, blk)
+}
+
+// Encodes a full beacon block to the DB with its associated key.
+func marshalBlockFull(
+	_ context.Context,
+	blk interfaces.ReadOnlySignedBeaconBlock,
+) ([]byte, error) {
	var encodedBlock []byte
	var err error
-	blockToSave := blk
-	if features.Get().EnableOnlyBlindedBeaconBlocks {
-		blindedBlock, err := blk.ToBlinded()
-		switch {
-		case errors.Is(err, blocks.ErrUnsupportedVersion):
-			encodedBlock, err = blk.MarshalSSZ()
-			if err != nil {
-				return nil, errors.Wrap(err, "could not marshal non-blinded block")
-			}
-		case err != nil:
-			return nil, errors.Wrap(err, "could not convert block to blinded format")
-		default:
-			encodedBlock, err = blindedBlock.MarshalSSZ()
-			if err != nil {
-				return nil, errors.Wrap(err, "could not marshal blinded block")
-			}
-			blockToSave = blindedBlock
-		}
-	} else {
-		encodedBlock, err = blk.MarshalSSZ()
-		if err != nil {
-			return nil, err
-		}
+	encodedBlock, err = blk.MarshalSSZ()
+	if err != nil {
+		return nil, err
	}
-	switch blockToSave.Version() {
+	switch blk.Version() {
	case version.Capella:
-		if blockToSave.IsBlinded() {
-			return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
-		}
		return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
	case version.Bellatrix:
-		if blockToSave.IsBlinded() {
-			return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
-		}
		return snappy.Encode(nil, append(bellatrixKey, encodedBlock...)), nil
	case version.Altair:
		return snappy.Encode(nil, append(altairKey, encodedBlock...)), nil
	case version.Phase0:
		return snappy.Encode(nil, encodedBlock), nil
	default:
-		return nil, errors.New("Unknown block version")
+		return nil, errors.New("unknown block version")
	}
}

+// Encodes a blinded beacon block with its associated key.
+// If the block does not support blinding, we then encode it as a full
+// block with its associated key by calling marshalBlockFull.
+func marshalBlockBlinded(
+	ctx context.Context,
+	blk interfaces.ReadOnlySignedBeaconBlock,
+) ([]byte, error) {
+	blindedBlock, err := blk.ToBlinded()
+	if err != nil {
+		switch {
+		case errors.Is(err, blocks.ErrUnsupportedVersion):
+			return marshalBlockFull(ctx, blk)
+		default:
+			return nil, errors.Wrap(err, "could not convert block to blinded format")
+		}
+	}
+	encodedBlock, err := blindedBlock.MarshalSSZ()
+	if err != nil {
+		return nil, errors.Wrap(err, "could not marshal blinded block")
+	}
+	switch blk.Version() {
+	case version.Capella:
+		return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
+	case version.Bellatrix:
+		return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
+	default:
+		return nil, fmt.Errorf("unsupported block version: %v", blk.Version())
+	}
+}
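
// For the reverse direction, a minimal sketch of how the key framing reads back
// (illustrative only; the real decoder is unmarshalBlock above): snappy-decode,
// then let the version-key prefix identify the fork and blindedness, e.g.
//
//	dec, err := snappy.Decode(nil, enc)
//	if err != nil {
//		return nil, err
//	}
//	switch {
//	case bytes.HasPrefix(dec, capellaBlindKey):
//		sszBytes := dec[len(capellaBlindKey):] // blinded Capella block
//	case bytes.HasPrefix(dec, capellaKey):
//		sszBytes := dec[len(capellaKey):] // full Capella block
//	}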

@@ -1,7 +1,6 @@
package kv

import (
-	"github.com/prysmaticlabs/prysm/v3/config/features"
	"github.com/prysmaticlabs/prysm/v3/config/params"
)

@@ -10,7 +9,4 @@ func init() {
	if err := params.SetActive(params.MainnetTestConfig()); err != nil {
		panic(err)
	}
-	features.Init(&features.Flags{
-		EnableOnlyBlindedBeaconBlocks: true,
-	})
}
@@ -17,6 +17,7 @@ import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/iface"
	"github.com/prysmaticlabs/prysm/v3/config/features"
	"github.com/prysmaticlabs/prysm/v3/config/params"
+	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v3/io/file"
	bolt "go.etcd.io/bbolt"
)
@@ -194,7 +195,8 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
	if err = prometheus.Register(createBoltCollector(kv.db)); err != nil {
		return nil, err
	}
-	if err = kv.checkNeedsResync(); err != nil {
+	// Setup the type of block storage used depending on whether or not this is a fresh database.
+	if err := kv.setupBlockStorageType(ctx); err != nil {
		return nil, err
	}
	return kv, nil
@@ -229,21 +231,60 @@ func (s *Store) DatabasePath() string {
	return s.databasePath
}

-func (s *Store) checkNeedsResync() error {
-	return s.db.View(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(migrationsBucket)
-		hasDisabledFeature := !features.Get().EnableOnlyBlindedBeaconBlocks
-		if hasDisabledFeature && bkt.Get(migrationBlindedBeaconBlocksKey) != nil {
-			return fmt.Errorf(
-				"you have disabled the flag %s, and your node must resync to ensure your "+
-					"database is compatible. If you do not want to resync, please re-enable the %s flag",
-				features.EnableOnlyBlindedBeaconBlocks.Name,
-				features.EnableOnlyBlindedBeaconBlocks.Name,
-			)
+func (s *Store) setupBlockStorageType(ctx context.Context) error {
+	// We check whether we want to save blinded beacon blocks via a key in the db;
+	// otherwise, we check the last stored block and set that key in the DB if it is blinded.
+	headBlock, err := s.HeadBlock(ctx)
+	if err != nil {
+		return errors.Wrap(err, "could not get head block when setting up block storage type")
+	}
+	err = blocks.BeaconBlockIsNil(headBlock)
+	isNilBlk := err != nil
+	saveFull := features.Get().SaveFullExecutionPayloads

+	var saveBlinded bool
+	if err := s.db.Update(func(tx *bolt.Tx) error {
+		// If we have a key stating we wish to save blinded beacon blocks, then we set saveBlinded to true.
+		metadataBkt := tx.Bucket(chainMetadataBucket)
+		keyExists := len(metadataBkt.Get(saveBlindedBeaconBlocksKey)) > 0
+		if keyExists {
+			saveBlinded = true
+			return nil
+		}
+		// If the head block exists and is blinded, we update the key in the DB to
+		// say we wish to save all blocks as blinded.
+		if !isNilBlk && headBlock.IsBlinded() {
+			if err := metadataBkt.Put(saveBlindedBeaconBlocksKey, []byte{1}); err != nil {
+				return err
+			}
+			saveBlinded = true
+		}
+		if isNilBlk && !saveFull {
+			if err := metadataBkt.Put(saveBlindedBeaconBlocksKey, []byte{1}); err != nil {
+				return err
+			}
+			saveBlinded = true
		}
		return nil
-	})
+	}); err != nil {
+		return err
+	}
+
+	// If the user wants to save full execution payloads but their database is saving blinded blocks only,
+	// we then throw an error as the node should not start.
+	if saveFull && saveBlinded {
+		return fmt.Errorf(
+			"cannot use the %s flag with this existing database, as it has already been initialized to only store "+
+				"execution payload headers (aka blinded beacon blocks). If you want to use this flag, you must re-sync your node with a fresh "+
+				"database. We recommend using checkpoint sync https://docs.prylabs.network/docs/prysm-usage/checkpoint-sync/",
+			features.SaveFullExecutionPayloads.Name,
+		)
+	}
+	if saveFull {
+		log.Warn("Saving full beacon blocks to the database. For greater disk space savings, we recommend resyncing from an empty database with " +
+			"checkpoint sync to save only blinded beacon blocks by default")
+	}
	return nil
}
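
// Net effect of setupBlockStorageType, summarized from the code above:
//   - the metadata key is already present: keep storing blinded blocks;
//   - fresh database, SaveFullExecutionPayloads unset: write the key, store blinded blocks;
//   - fresh database, SaveFullExecutionPayloads set: store full blocks;
//   - existing blinded database with SaveFullExecutionPayloads set: refuse to start.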

func createBuckets(tx *bolt.Tx, buckets ...[]byte) error {

@@ -2,10 +2,14 @@ package kv

import (
	"context"
+	"fmt"
	"testing"

	"github.com/prysmaticlabs/prysm/v3/config/features"
+	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
+	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/testing/require"
+	"github.com/prysmaticlabs/prysm/v3/testing/util"
	bolt "go.etcd.io/bbolt"
)

@@ -19,16 +23,169 @@ func setupDB(t testing.TB) *Store {
	return db
}

-func Test_checkNeedsResync(t *testing.T) {
-	store := setupDB(t)
-	resetFn := features.InitWithReset(&features.Flags{
-		EnableOnlyBlindedBeaconBlocks: false,
+func Test_setupBlockStorageType(t *testing.T) {
+	ctx := context.Background()
+	t.Run("fresh database with feature enabled to store full blocks should store full blocks", func(t *testing.T) {
+		resetFn := features.InitWithReset(&features.Flags{
+			SaveFullExecutionPayloads: true,
+		})
+		defer resetFn()
+		store := setupDB(t)

+		blk := util.NewBeaconBlockBellatrix()
+		blk.Block.Body.ExecutionPayload.BlockNumber = 1
+		wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
+		require.NoError(t, err)
+		root, err := wrappedBlock.Block().HashTreeRoot()
+		require.NoError(t, err)
+		require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
+		require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
+		require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
+		retrievedBlk, err := store.Block(ctx, root)
+		require.NoError(t, err)
+		require.Equal(t, false, retrievedBlk.IsBlinded())
+		require.DeepEqual(t, wrappedBlock, retrievedBlk)
+	})
+	t.Run("fresh database with default settings should store blinded", func(t *testing.T) {
+		resetFn := features.InitWithReset(&features.Flags{
+			SaveFullExecutionPayloads: false,
+		})
+		defer resetFn()
+		store := setupDB(t)

+		blk := util.NewBeaconBlockBellatrix()
+		blk.Block.Body.ExecutionPayload.BlockNumber = 1
+		wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
+		require.NoError(t, err)
+		root, err := wrappedBlock.Block().HashTreeRoot()
+		require.NoError(t, err)
+		require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
+		require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
+		require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
+		retrievedBlk, err := store.Block(ctx, root)
+		require.NoError(t, err)
+		require.Equal(t, true, retrievedBlk.IsBlinded())

+		wantedBlk, err := wrappedBlock.ToBlinded()
+		require.NoError(t, err)
+		require.DeepEqual(t, wantedBlk, retrievedBlk)
+	})
+	t.Run("existing database with blinded blocks but no key in metadata bucket should continue storing blinded blocks", func(t *testing.T) {
+		store := setupDB(t)
+		require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
+			return tx.Bucket(chainMetadataBucket).Put(saveBlindedBeaconBlocksKey, []byte{1})
+		}))

+		blk := util.NewBlindedBeaconBlockBellatrix()
+		blk.Block.Body.ExecutionPayloadHeader.BlockNumber = 1
+		wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
+		require.NoError(t, err)
+		root, err := wrappedBlock.Block().HashTreeRoot()
+		require.NoError(t, err)
+		require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
+		require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
+		require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
+		retrievedBlk, err := store.Block(ctx, root)
+		require.NoError(t, err)
+		require.Equal(t, true, retrievedBlk.IsBlinded())
+		require.DeepEqual(t, wrappedBlock, retrievedBlk)

+		// We then delete the key from the bucket.
+		require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
+			return tx.Bucket(chainMetadataBucket).Delete(saveBlindedBeaconBlocksKey)
+		}))

+		// Not a fresh database, has blinded blocks already and should continue being that way.
+		err = store.setupBlockStorageType(ctx)
+		require.NoError(t, err)

+		var shouldSaveBlinded bool
+		require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
+			bkt := tx.Bucket(chainMetadataBucket)
+			shouldSaveBlinded = len(bkt.Get(saveBlindedBeaconBlocksKey)) > 0
+			return nil
+		}))

+		// Should have set the chain metadata bucket to save blinded
+		require.Equal(t, true, shouldSaveBlinded)

+		blkFull := util.NewBeaconBlockBellatrix()
+		blkFull.Block.Body.ExecutionPayload.BlockNumber = 2
+		wrappedBlock, err = blocks.NewSignedBeaconBlock(blkFull)
+		require.NoError(t, err)
+		root, err = wrappedBlock.Block().HashTreeRoot()
+		require.NoError(t, err)
+		require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
+		retrievedBlk, err = store.Block(ctx, root)
+		require.NoError(t, err)

+		require.Equal(t, true, retrievedBlk.IsBlinded())
+		wrappedBlinded, err := wrappedBlock.ToBlinded()
+		require.NoError(t, err)
+		require.DeepEqual(t, wrappedBlinded, retrievedBlk)
+	})
+	t.Run("existing database with full blocks type should continue storing full blocks", func(t *testing.T) {
+		store := setupDB(t)
+		require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
+			return tx.Bucket(chainMetadataBucket).Delete(saveBlindedBeaconBlocksKey)
+		}))

+		blk := util.NewBeaconBlockBellatrix()
+		blk.Block.Body.ExecutionPayload.BlockNumber = 1
+		wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
+		require.NoError(t, err)
+		root, err := wrappedBlock.Block().HashTreeRoot()
+		require.NoError(t, err)
+		require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
+		require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
+		require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
+		retrievedBlk, err := store.Block(ctx, root)
+		require.NoError(t, err)
+		require.Equal(t, false, retrievedBlk.IsBlinded())
+		require.DeepEqual(t, wrappedBlock, retrievedBlk)

+		// Not a fresh database, has full blocks already and should continue being that way.
+		err = store.setupBlockStorageType(ctx)
+		require.NoError(t, err)

+		blk = util.NewBeaconBlockBellatrix()
+		blk.Block.Body.ExecutionPayload.BlockNumber = 2
+		wrappedBlock, err = blocks.NewSignedBeaconBlock(blk)
+		require.NoError(t, err)
+		root, err = wrappedBlock.Block().HashTreeRoot()
+		require.NoError(t, err)
+		require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
+		retrievedBlk, err = store.Block(ctx, root)
+		require.NoError(t, err)
+		require.Equal(t, false, retrievedBlk.IsBlinded())
+		require.DeepEqual(t, wrappedBlock, retrievedBlk)
+	})
+	t.Run("existing database with blinded blocks type should error if user enables full blocks feature flag", func(t *testing.T) {
+		store := setupDB(t)

+		blk := util.NewBeaconBlockBellatrix()
+		blk.Block.Body.ExecutionPayload.BlockNumber = 1
+		wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
+		require.NoError(t, err)
+		root, err := wrappedBlock.Block().HashTreeRoot()
+		require.NoError(t, err)
+		require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
+		require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
+		require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
+		retrievedBlk, err := store.Block(ctx, root)
+		require.NoError(t, err)
+		require.Equal(t, true, retrievedBlk.IsBlinded())
+		wantedBlk, err := wrappedBlock.ToBlinded()
+		require.NoError(t, err)
+		require.DeepEqual(t, wantedBlk, retrievedBlk)

+		// Trying to enable full blocks with a database that is already storing blinded blocks should error.
+		resetFn := features.InitWithReset(&features.Flags{
+			SaveFullExecutionPayloads: true,
+		})
+		defer resetFn()
+		err = store.setupBlockStorageType(ctx)
+		errMsg := "cannot use the %s flag with this existing database, as it has already been initialized"
+		require.ErrorContains(t, fmt.Sprintf(errMsg, features.SaveFullExecutionPayloads.Name), err)
+	})
-	defer resetFn()
-	require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(migrationsBucket)
-		return bkt.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
-	}))
-	err := store.checkNeedsResync()
-	require.ErrorContains(t, "your node must resync", err)
}
@@ -14,7 +14,6 @@ var migrations = []migration{
	migrateArchivedIndex,
	migrateBlockSlotIndex,
	migrateStateValidators,
-	migrateBlindedBeaconBlocksEnabled,
}

// RunMigrations defined in the migrations array.
@@ -1,27 +0,0 @@
-package kv
-
-import (
-	"bytes"
-	"context"
-
-	"github.com/prysmaticlabs/prysm/v3/config/features"
-	bolt "go.etcd.io/bbolt"
-)
-
-var migrationBlindedBeaconBlocksKey = []byte("blinded-beacon-blocks-enabled")
-
-func migrateBlindedBeaconBlocksEnabled(ctx context.Context, db *bolt.DB) error {
-	if !features.Get().EnableOnlyBlindedBeaconBlocks {
-		return nil // Only write to the migrations bucket if the feature is enabled.
-	}
-	if updateErr := db.Update(func(tx *bolt.Tx) error {
-		mb := tx.Bucket(migrationsBucket)
-		if b := mb.Get(migrationBlindedBeaconBlocksKey); bytes.Equal(b, migrationCompleted) {
-			return nil // Migration already completed.
-		}
-		return mb.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
-	}); updateErr != nil {
-		return updateErr
-	}
-	return nil
-}
@@ -49,12 +49,12 @@ var (

	// Below keys are used to identify objects that are to be fork compatible.
	// Objects that are only compatible with specific forks should be prefixed with such keys.
-	altairKey         = []byte("altair")
-	bellatrixKey      = []byte("merge")
-	bellatrixBlindKey = []byte("blind-bellatrix")
-	capellaKey        = []byte("capella")
-	capellaBlindKey   = []byte("blind-capella")
+	altairKey                  = []byte("altair")
+	bellatrixKey               = []byte("merge")
+	bellatrixBlindKey          = []byte("blind-bellatrix")
+	capellaKey                 = []byte("capella")
+	capellaBlindKey            = []byte("blind-capella")
+	saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
	// block root included in the beacon state used by weak subjectivity initial sync
	originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
	// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
@@ -74,5 +74,5 @@ func TestStateSummary_CanDelete(t *testing.T) {
	require.Equal(t, true, db.HasStateSummary(ctx, r1), "State summary should be saved")

	require.NoError(t, db.deleteStateSummary(r1))
-	require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
+	require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should be deleted")
}
@@ -37,6 +37,7 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
+       "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -107,6 +108,7 @@ go_test(
        "//beacon-chain/execution/types:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
+       "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -129,9 +131,9 @@ go_test(
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//:go_default_library",
        "@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
+       "@com_github_ethereum_go_ethereum//beacon/engine:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
-       "@com_github_ethereum_go_ethereum//core/beacon:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_ethereum_go_ethereum//rpc:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
@@ -240,7 +240,7 @@ func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {

	ttd := new(uint256.Int)
	reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
-	require.NoError(t, err)
+	require.ErrorContains(t, "missing required field 'parentHash' for Header", err)
	require.Equal(t, false, reached)
}

@@ -15,6 +15,7 @@ import (
	"github.com/holiman/uint256"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/types"
+	"github.com/prysmaticlabs/prysm/v3/config/features"
	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
@@ -48,6 +49,10 @@ const (
	ExecutionBlockByHashMethod = "eth_getBlockByHash"
	// ExecutionBlockByNumberMethod request string for JSON-RPC.
	ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
+	// GetPayloadBodiesByHashV1 v1 request string for JSON-RPC.
+	GetPayloadBodiesByHashV1 = "engine_getPayloadBodiesByHashV1"
+	// GetPayloadBodiesByRangeV1 v1 request string for JSON-RPC.
+	GetPayloadBodiesByRangeV1 = "engine_getPayloadBodiesByRangeV1"
	// Defines the seconds before timing out engine endpoints with non-block execution semantics.
	defaultEngineTimeout = time.Second
)
@@ -437,6 +442,50 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
	return hdr, err
}

+// GetPayloadBodiesByHash returns the relevant payload bodies for the provided block hash.
+func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHashes []common.Hash) ([]*pb.ExecutionPayloadBodyV1, error) {
+	if !features.Get().EnableOptionalEngineMethods {
+		return nil, errors.New("optional engine methods not enabled")
+	}
+	ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByHashV1")
+	defer span.End()
+
+	result := make([]*pb.ExecutionPayloadBodyV1, 0)
+	err := s.rpcClient.CallContext(ctx, &result, GetPayloadBodiesByHashV1, executionBlockHashes)
+
+	for i, item := range result {
+		if item == nil {
+			result[i] = &pb.ExecutionPayloadBodyV1{
+				Transactions: make([][]byte, 0),
+				Withdrawals:  make([]*pb.Withdrawal, 0),
+			}
+		}
+	}
+	return result, handleRPCError(err)
+}
+
+// GetPayloadBodiesByRange returns the relevant payload bodies for the provided range.
+func (s *Service) GetPayloadBodiesByRange(ctx context.Context, start, count uint64) ([]*pb.ExecutionPayloadBodyV1, error) {
+	if !features.Get().EnableOptionalEngineMethods {
+		return nil, errors.New("optional engine methods not enabled")
+	}
+	ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByRangeV1")
+	defer span.End()
+
+	result := make([]*pb.ExecutionPayloadBodyV1, 0)
+	err := s.rpcClient.CallContext(ctx, &result, GetPayloadBodiesByRangeV1, start, count)
+
+	for i, item := range result {
+		if item == nil {
+			result[i] = &pb.ExecutionPayloadBodyV1{
+				Transactions: make([][]byte, 0),
+				Withdrawals:  make([]*pb.Withdrawal, 0),
+			}
+		}
+	}
+	return result, handleRPCError(err)
+}
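
// Usage sketch for the two methods above (illustrative; the
// EnableOptionalEngineMethods feature flag must be set):
//
//	bodies, err := svc.GetPayloadBodiesByRange(ctx, 100, 2)
//	// Every returned entry is non-nil even when the engine answered JSON null
//	// for an unknown block, so callers can range over bodies without nil checks.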

// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -669,6 +718,9 @@ func handleRPCError(err error) error {
	case -38003:
		errInvalidPayloadAttributesCount.Inc()
		return ErrInvalidPayloadAttributes
+	case -38004:
+		errRequestTooLargeCount.Inc()
+		return ErrRequestTooLarge
	case -32000:
		errServerErrorCount.Inc()
		// Only -32000 status codes are data errors in the RPC specification.

@@ -11,9 +11,9 @@ import (
	"reflect"
	"testing"

+	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/ethereum/go-ethereum/core/beacon"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
@@ -23,10 +23,10 @@ import (

func FuzzForkChoiceResponse(f *testing.F) {
	valHash := common.Hash([32]byte{0xFF, 0x01})
-	payloadID := beacon.PayloadID([8]byte{0x01, 0xFF, 0xAA, 0x00, 0xEE, 0xFE, 0x00, 0x00})
+	payloadID := engine.PayloadID([8]byte{0x01, 0xFF, 0xAA, 0x00, 0xEE, 0xFE, 0x00, 0x00})
	valErr := "asjajshjahsaj"
-	seed := &beacon.ForkChoiceResponse{
-		PayloadStatus: beacon.PayloadStatusV1{
+	seed := &engine.ForkChoiceResponse{
+		PayloadStatus: engine.PayloadStatusV1{
			Status:          "INVALID_TERMINAL_BLOCK",
			LatestValidHash: &valHash,
			ValidationError: &valErr,
@@ -37,7 +37,7 @@ func FuzzForkChoiceResponse(f *testing.F) {
	assert.NoError(f, err)
	f.Add(output)
	f.Fuzz(func(t *testing.T, jsonBlob []byte) {
-		gethResp := &beacon.ForkChoiceResponse{}
+		gethResp := &engine.ForkChoiceResponse{}
		prysmResp := &execution.ForkchoiceUpdatedResponse{}
		gethErr := json.Unmarshal(jsonBlob, gethResp)
		prysmErr := json.Unmarshal(jsonBlob, prysmResp)
@@ -49,14 +49,14 @@ func FuzzForkChoiceResponse(f *testing.F) {
		gethBlob, gethErr := json.Marshal(gethResp)
		prysmBlob, prysmErr := json.Marshal(prysmResp)
		assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshaller return inconsistent errors")
-		newGethResp := &beacon.ForkChoiceResponse{}
+		newGethResp := &engine.ForkChoiceResponse{}
		newGethErr := json.Unmarshal(prysmBlob, newGethResp)
		assert.NoError(t, newGethErr)
		if newGethResp.PayloadStatus.Status == "UNKNOWN" {
			return
		}

-		newGethResp2 := &beacon.ForkChoiceResponse{}
+		newGethResp2 := &engine.ForkChoiceResponse{}
		newGethErr = json.Unmarshal(gethBlob, newGethResp2)
		assert.NoError(t, newGethErr)

@@ -75,7 +75,7 @@ func FuzzForkChoiceResponse(f *testing.F) {
func FuzzExchangeTransitionConfiguration(f *testing.F) {
	valHash := common.Hash([32]byte{0xFF, 0x01})
	ttd := hexutil.Big(*big.NewInt(math.MaxInt))
-	seed := &beacon.TransitionConfigurationV1{
+	seed := &engine.TransitionConfigurationV1{
		TerminalTotalDifficulty: &ttd,
		TerminalBlockHash:       valHash,
		TerminalBlockNumber:     hexutil.Uint64(math.MaxUint64),
@@ -85,7 +85,7 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {
	assert.NoError(f, err)
	f.Add(output)
	f.Fuzz(func(t *testing.T, jsonBlob []byte) {
-		gethResp := &beacon.TransitionConfigurationV1{}
+		gethResp := &engine.TransitionConfigurationV1{}
		prysmResp := &pb.TransitionConfiguration{}
		gethErr := json.Unmarshal(jsonBlob, gethResp)
		prysmErr := json.Unmarshal(jsonBlob, prysmResp)
@@ -103,11 +103,11 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {
		if gethErr != nil {
			t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
		}
-		newGethResp := &beacon.TransitionConfigurationV1{}
+		newGethResp := &engine.TransitionConfigurationV1{}
		newGethErr := json.Unmarshal(prysmBlob, newGethResp)
		assert.NoError(t, newGethErr)

-		newGethResp2 := &beacon.TransitionConfigurationV1{}
+		newGethResp2 := &engine.TransitionConfigurationV1{}
		newGethErr = json.Unmarshal(gethBlob, newGethResp2)
		assert.NoError(t, newGethErr)
	})
@@ -115,7 +115,7 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {

func FuzzExecutionPayload(f *testing.F) {
	logsBloom := [256]byte{'j', 'u', 'n', 'k'}
-	execData := &beacon.ExecutableData{
+	execData := &engine.ExecutableData{
		ParentHash:   common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
		FeeRecipient: common.Address([20]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}),
		StateRoot:    common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
@@ -135,7 +135,7 @@ func FuzzExecutionPayload(f *testing.F) {
	assert.NoError(f, err)
	f.Add(output)
	f.Fuzz(func(t *testing.T, jsonBlob []byte) {
-		gethResp := &beacon.ExecutableData{}
+		gethResp := &engine.ExecutableData{}
		prysmResp := &pb.ExecutionPayload{}
		gethErr := json.Unmarshal(jsonBlob, gethResp)
		prysmErr := json.Unmarshal(jsonBlob, prysmResp)
@@ -147,10 +147,10 @@ func FuzzExecutionPayload(f *testing.F) {
		gethBlob, gethErr := json.Marshal(gethResp)
		prysmBlob, prysmErr := json.Marshal(prysmResp)
		assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshaller return inconsistent errors")
-		newGethResp := &beacon.ExecutableData{}
+		newGethResp := &engine.ExecutableData{}
		newGethErr := json.Unmarshal(prysmBlob, newGethResp)
		assert.NoError(t, newGethErr)
-		newGethResp2 := &beacon.ExecutableData{}
+		newGethResp2 := &engine.ExecutableData{}
		newGethErr = json.Unmarshal(gethBlob, newGethResp2)
		assert.NoError(t, newGethErr)

@@ -20,6 +20,7 @@ import (
	"github.com/holiman/uint256"
	"github.com/pkg/errors"
	mocks "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
+	"github.com/prysmaticlabs/prysm/v3/config/features"
	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
@@ -1314,7 +1315,7 @@ func fixtures() map[string]interface{} {
		ReceiptsRoot:  &common.Hash{'d'},
		LogsBloom:     &hexutil.Bytes{'e'},
		PrevRandao:    &common.Hash{'f'},
-		BaseFeePerGas: fmt.Sprintf("%s", "0x123"),
+		BaseFeePerGas: "0x123",
		BlockHash:     &common.Hash{'g'},
		Transactions:  []hexutil.Bytes{{'h'}},
		Withdrawals:   []*pb.Withdrawal{},
@@ -1323,7 +1324,7 @@ func fixtures() map[string]interface{} {
			GasUsed:   &hexUint,
			Timestamp: &hexUint,
		},
-		BlockValue: fmt.Sprintf("%s", "0x11ff"),
+		BlockValue: "0x11ff",
	}
	parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
	sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
@@ -1817,3 +1818,505 @@ func newPayloadV2Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
	service.rpcClient = rpcClient
	return service
}

+func TestCapella_PayloadBodiesByHash(t *testing.T) {
+	resetFn := features.InitWithReset(&features.Flags{
+		EnableOptionalEngineMethods: true,
+	})
+	defer resetFn()
+	t.Run("empty response works", func(t *testing.T) {
+		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("Content-Type", "application/json")
+			defer func() {
+				require.NoError(t, r.Body.Close())
+			}()
+			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 0)
+			resp := map[string]interface{}{
+				"jsonrpc": "2.0",
+				"id":      1,
+				"result":  executionPayloadBodies,
+			}
+			err := json.NewEncoder(w).Encode(resp)
+			require.NoError(t, err)
+		}))
+		ctx := context.Background()

+		rpcClient, err := rpc.DialHTTP(srv.URL)
+		require.NoError(t, err)

+		service := &Service{}
+		service.rpcClient = rpcClient

+		results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
+		require.NoError(t, err)
+		require.Equal(t, 0, len(results))

+		for _, item := range results {
+			require.NotNil(t, item)
+		}
+	})
+	t.Run("single element response null works", func(t *testing.T) {
+		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("Content-Type", "application/json")
+			defer func() {
+				require.NoError(t, r.Body.Close())
+			}()
+			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 1)
+			executionPayloadBodies[0] = nil

+			resp := map[string]interface{}{
+				"jsonrpc": "2.0",
+				"id":      1,
+				"result":  executionPayloadBodies,
+			}
+			err := json.NewEncoder(w).Encode(resp)
+			require.NoError(t, err)
+		}))
+		ctx := context.Background()

+		rpcClient, err := rpc.DialHTTP(srv.URL)
+		require.NoError(t, err)

+		service := &Service{}
+		service.rpcClient = rpcClient

+		results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
+		require.NoError(t, err)
+		require.Equal(t, 1, len(results))

+		for _, item := range results {
+			require.NotNil(t, item)
+		}
+	})
+	t.Run("empty, null, full works", func(t *testing.T) {
+		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("Content-Type", "application/json")
+			defer func() {
+				require.NoError(t, r.Body.Close())
+			}()
+			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 3)
+			executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
+				Transactions: [][]byte{},
+				Withdrawals:  []*pb.Withdrawal{},
+			}
+			executionPayloadBodies[1] = nil
+			executionPayloadBodies[2] = &pb.ExecutionPayloadBodyV1{
+				Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
+				Withdrawals: []*pb.Withdrawal{{
+					Index:          1,
+					ValidatorIndex: 1,
+					Address:        hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
+					Amount:         1,
+				}},
+			}

+			resp := map[string]interface{}{
+				"jsonrpc": "2.0",
+				"id":      1,
+				"result":  executionPayloadBodies,
+			}
+			err := json.NewEncoder(w).Encode(resp)
+			require.NoError(t, err)
+		}))
+		ctx := context.Background()

+		rpcClient, err := rpc.DialHTTP(srv.URL)
+		require.NoError(t, err)

+		service := &Service{}
+		service.rpcClient = rpcClient

+		results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
+		require.NoError(t, err)
+		require.Equal(t, 3, len(results))

+		for _, item := range results {
+			require.NotNil(t, item)
+		}
+	})
+	t.Run("full works, single item", func(t *testing.T) {
+		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("Content-Type", "application/json")
+			defer func() {
+				require.NoError(t, r.Body.Close())
+			}()
+			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 1)
+			executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
+				Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
+				Withdrawals: []*pb.Withdrawal{{
+					Index:          1,
+					ValidatorIndex: 1,
+					Address:        hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
+					Amount:         1,
+				}},
+			}

+			resp := map[string]interface{}{
+				"jsonrpc": "2.0",
+				"id":      1,
+				"result":  executionPayloadBodies,
+			}
+			err := json.NewEncoder(w).Encode(resp)
+			require.NoError(t, err)
+		}))
+		ctx := context.Background()

+		rpcClient, err := rpc.DialHTTP(srv.URL)
+		require.NoError(t, err)

+		service := &Service{}
+		service.rpcClient = rpcClient

+		results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
+		require.NoError(t, err)
+		require.Equal(t, 1, len(results))

+		for _, item := range results {
+			require.NotNil(t, item)
+		}
+	})
+	t.Run("full works, multiple items", func(t *testing.T) {
+		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("Content-Type", "application/json")
+			defer func() {
+				require.NoError(t, r.Body.Close())
+			}()
+			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 2)
+			executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
+				Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
+				Withdrawals: []*pb.Withdrawal{{
+					Index:          1,
+					ValidatorIndex: 1,
+					Address:        hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
+					Amount:         1,
+				}},
+			}
+			executionPayloadBodies[1] = &pb.ExecutionPayloadBodyV1{
+				Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
+				Withdrawals: []*pb.Withdrawal{{
+					Index:          2,
+					ValidatorIndex: 1,
+					Address:        hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
+					Amount:         1,
+				}},
+			}

+			resp := map[string]interface{}{
+				"jsonrpc": "2.0",
+				"id":      1,
+				"result":  executionPayloadBodies,
+			}
+			err := json.NewEncoder(w).Encode(resp)
+			require.NoError(t, err)
+		}))
+		ctx := context.Background()

+		rpcClient, err := rpc.DialHTTP(srv.URL)
+		require.NoError(t, err)

+		service := &Service{}
+		service.rpcClient = rpcClient

+		results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
+		require.NoError(t, err)
+		require.Equal(t, 2, len(results))

+		for _, item := range results {
+			require.NotNil(t, item)
+		}
+	})
+	t.Run("returning empty, null, empty should work properly", func(t *testing.T) {
+		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
// [A, B, C] but no B in the server means
|
||||
// we get [Abody, null, Cbody].
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 3)
|
||||
executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}
|
||||
executionPayloadBodies[1] = nil
|
||||
executionPayloadBodies[2] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
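The six subtests above and the six below all inline the same engine-API stub. A minimal sketch of a shared helper that would collapse that repetition, reusing this file's existing imports (httptest, http, json, require); the name newEngineStub is hypothetical, not part of the Prysm codebase:

// newEngineStub answers every JSON-RPC call with the given result, mirroring
// the inline handlers in these tests.
func newEngineStub(t *testing.T, result interface{}) *httptest.Server {
	t.Helper()
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		defer func() {
			require.NoError(t, r.Body.Close())
		}()
		resp := map[string]interface{}{
			"jsonrpc": "2.0",
			"id":      1,
			"result":  result,
		}
		require.NoError(t, json.NewEncoder(w).Encode(resp))
	}))
}
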
func TestCapella_PayloadBodiesByRange(t *testing.T) {
	resetFn := features.InitWithReset(&features.Flags{
		EnableOptionalEngineMethods: true,
	})
	defer resetFn()
	t.Run("empty response works", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", "application/json")
			defer func() {
				require.NoError(t, r.Body.Close())
			}()
			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 0)
			resp := map[string]interface{}{
				"jsonrpc": "2.0",
				"id":      1,
				"result":  executionPayloadBodies,
			}
			err := json.NewEncoder(w).Encode(resp)
			require.NoError(t, err)
		}))
		ctx := context.Background()

		rpcClient, err := rpc.DialHTTP(srv.URL)
		require.NoError(t, err)

		service := &Service{}
		service.rpcClient = rpcClient

		results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
		require.NoError(t, err)
		require.Equal(t, 0, len(results))

		for _, item := range results {
			require.NotNil(t, item)
		}
	})
	t.Run("single element response null works", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", "application/json")
			defer func() {
				require.NoError(t, r.Body.Close())
			}()
			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 1)
			executionPayloadBodies[0] = nil

			resp := map[string]interface{}{
				"jsonrpc": "2.0",
				"id":      1,
				"result":  executionPayloadBodies,
			}
			err := json.NewEncoder(w).Encode(resp)
			require.NoError(t, err)
		}))
		ctx := context.Background()

		rpcClient, err := rpc.DialHTTP(srv.URL)
		require.NoError(t, err)

		service := &Service{}
		service.rpcClient = rpcClient

		results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
		require.NoError(t, err)
		require.Equal(t, 1, len(results))

		for _, item := range results {
			require.NotNil(t, item)
		}
	})
	t.Run("empty, null, full works", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", "application/json")
			defer func() {
				require.NoError(t, r.Body.Close())
			}()
			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 3)
			executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
				Transactions: [][]byte{},
				Withdrawals:  []*pb.Withdrawal{},
			}
			executionPayloadBodies[1] = nil
			executionPayloadBodies[2] = &pb.ExecutionPayloadBodyV1{
				Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
				Withdrawals: []*pb.Withdrawal{{
					Index:          1,
					ValidatorIndex: 1,
					Address:        hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
					Amount:         1,
				}},
			}

			resp := map[string]interface{}{
				"jsonrpc": "2.0",
				"id":      1,
				"result":  executionPayloadBodies,
			}
			err := json.NewEncoder(w).Encode(resp)
			require.NoError(t, err)
		}))
		ctx := context.Background()

		rpcClient, err := rpc.DialHTTP(srv.URL)
		require.NoError(t, err)

		service := &Service{}
		service.rpcClient = rpcClient

		results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
		require.NoError(t, err)
		require.Equal(t, 3, len(results))

		for _, item := range results {
			require.NotNil(t, item)
		}
	})
	t.Run("full works, single item", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", "application/json")
			defer func() {
				require.NoError(t, r.Body.Close())
			}()
			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 1)
			executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
				Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
				Withdrawals: []*pb.Withdrawal{{
					Index:          1,
					ValidatorIndex: 1,
					Address:        hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
					Amount:         1,
				}},
			}

			resp := map[string]interface{}{
				"jsonrpc": "2.0",
				"id":      1,
				"result":  executionPayloadBodies,
			}
			err := json.NewEncoder(w).Encode(resp)
			require.NoError(t, err)
		}))
		ctx := context.Background()

		rpcClient, err := rpc.DialHTTP(srv.URL)
		require.NoError(t, err)

		service := &Service{}
		service.rpcClient = rpcClient

		results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
		require.NoError(t, err)
		require.Equal(t, 1, len(results))

		for _, item := range results {
			require.NotNil(t, item)
		}
	})
	t.Run("full works, multiple items", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", "application/json")
			defer func() {
				require.NoError(t, r.Body.Close())
			}()
			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 2)
			executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
				Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
				Withdrawals: []*pb.Withdrawal{{
					Index:          1,
					ValidatorIndex: 1,
					Address:        hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
					Amount:         1,
				}},
			}
			executionPayloadBodies[1] = &pb.ExecutionPayloadBodyV1{
				Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
				Withdrawals: []*pb.Withdrawal{{
					Index:          2,
					ValidatorIndex: 1,
					Address:        hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
					Amount:         1,
				}},
			}

			resp := map[string]interface{}{
				"jsonrpc": "2.0",
				"id":      1,
				"result":  executionPayloadBodies,
			}
			err := json.NewEncoder(w).Encode(resp)
			require.NoError(t, err)
		}))
		ctx := context.Background()

		rpcClient, err := rpc.DialHTTP(srv.URL)
		require.NoError(t, err)

		service := &Service{}
		service.rpcClient = rpcClient

		results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
		require.NoError(t, err)
		require.Equal(t, 2, len(results))

		for _, item := range results {
			require.NotNil(t, item)
		}
	})
	t.Run("returning empty, null, empty should work properly", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", "application/json")
			defer func() {
				require.NoError(t, r.Body.Close())
			}()
			// [A, B, C] but no B in the server means
			// we get [Abody, null, Cbody].
			executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 3)
			executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
				Transactions: [][]byte{},
				Withdrawals:  []*pb.Withdrawal{},
			}
			executionPayloadBodies[1] = nil
			executionPayloadBodies[2] = &pb.ExecutionPayloadBodyV1{
				Transactions: [][]byte{},
				Withdrawals:  []*pb.Withdrawal{},
			}

			resp := map[string]interface{}{
				"jsonrpc": "2.0",
				"id":      1,
				"result":  executionPayloadBodies,
			}
			err := json.NewEncoder(w).Encode(resp)
			require.NoError(t, err)
		}))
		ctx := context.Background()

		rpcClient, err := rpc.DialHTTP(srv.URL)
		require.NoError(t, err)

		service := &Service{}
		service.rpcClient = rpcClient

		results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
		require.NoError(t, err)
		require.Equal(t, 3, len(results))

		for _, item := range results {
			require.NotNil(t, item)
		}
	})
}

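The assertions above encode a guarantee: even when the engine returns JSON nulls for missing blocks, callers never see nil entries. A hedged caller-side sketch relying on that guarantee; the helper name is hypothetical and it assumes this package's Service type:

// countTransactions illustrates the non-nil guarantee the tests above check:
// missing blocks come back as empty bodies, never nil.
func countTransactions(ctx context.Context, s *Service, start, count uint64) (int, error) {
	bodies, err := s.GetPayloadBodiesByRange(ctx, start, count)
	if err != nil {
		return 0, err
	}
	total := 0
	for _, body := range bodies {
		// Safe without a nil check.
		total += len(body.Transactions)
	}
	return total, nil
}
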
@@ -34,4 +34,6 @@ var (
	ErrInvalidBlockHashPayloadStatus = errors.New("payload status is INVALID_BLOCK_HASH")
	// ErrNilResponse when the response is nil.
	ErrNilResponse = errors.New("nil response")
	// ErrRequestTooLarge when the request is too large.
	ErrRequestTooLarge = errors.New("request too large")
)

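A hedged sketch of how a caller might react to the new sentinel; the halving retry below is illustrative, not Prysm's actual strategy, and assumes the standard library errors package for errors.Is:

func fetchBodies(ctx context.Context, s *Service, hashes []common.Hash) ([]*pb.ExecutionPayloadBodyV1, error) {
	bodies, err := s.GetPayloadBodiesByHash(ctx, hashes)
	if err == nil {
		return bodies, nil
	}
	if !errors.Is(err, ErrRequestTooLarge) || len(hashes) < 2 {
		return nil, err
	}
	// The engine rejected the batch size: split in half and retry each part.
	mid := len(hashes) / 2
	left, err := fetchBodies(ctx, s, hashes[:mid])
	if err != nil {
		return nil, err
	}
	right, err := fetchBodies(ctx, s, hashes[mid:])
	if err != nil {
		return nil, err
	}
	return append(left, right...), nil
}
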
@@ -71,4 +71,8 @@ var (
		Name: "reconstructed_execution_payload_count",
		Help: "Count the number of execution payloads that are reconstructed using JSON-RPC from payload headers",
	})
	errRequestTooLargeCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "execution_payload_bodies_count",
		Help: "Count of payload body requests rejected because the number of requested bodies was too large",
	})
)

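A minimal sketch of where such a counter would fire, assuming a hypothetical package-level batch limit; the constant name and value are illustrative only:

const maxPayloadBodiesBatch = 1024 // hypothetical limit for illustration

func checkBatchSize(count int) error {
	if count > maxPayloadBodiesBatch {
		errRequestTooLargeCount.Inc() // surfaces oversized requests in metrics
		return ErrRequestTooLarge
	}
	return nil
}
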
@@ -46,8 +46,6 @@ func New() *ForkChoice {

// NodeCount returns the current number of nodes in the Store.
func (f *ForkChoice) NodeCount() int {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	return len(f.store.nodeByRoot)
}

@@ -58,16 +56,9 @@ func (f *ForkChoice) Head(
) ([32]byte, error) {
	ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Head")
	defer span.End()
	f.votesLock.Lock()
	defer f.votesLock.Unlock()

	calledHeadCount.Inc()

	// Using the write lock here because subsequent calls to `updateBalances`, `applyProposerBoostScore`,
	// `applyWeightChanges`, `updateBestDescendant`, and `head` require write operations on nodes.
	f.store.nodesLock.Lock()
	defer f.store.nodesLock.Unlock()

	if err := f.updateBalances(); err != nil {
		return [32]byte{}, errors.Wrap(err, "could not update balances")
	}
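With the internal nodesLock/votesLock calls removed throughout this diff, synchronization moves to the caller. A minimal sketch of the new convention, assuming the embedded sync.RWMutex on ForkChoice shown in the types.go hunk further below; the wrapper is hypothetical:

func computeHead(ctx context.Context, f *ForkChoice) ([32]byte, error) {
	f.Lock() // write lock: Head updates balances, weights and best descendants
	defer f.Unlock()
	return f.Head(ctx)
}
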
@@ -94,8 +85,6 @@ func (f *ForkChoice) Head(
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch primitives.Epoch) {
	_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessAttestation")
	defer span.End()
	f.votesLock.Lock()
	defer f.votesLock.Unlock()

	for _, index := range validatorIndices {
		// Validator indices will grow the vote cache.

@@ -161,7 +150,6 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro

// updateCheckpoints updates the checkpoints when inserting a new node.
func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkpoint) error {
	f.store.checkpointsLock.Lock()
	if jc.Epoch > f.store.justifiedCheckpoint.Epoch {
		if jc.Epoch > f.store.bestJustifiedCheckpoint.Epoch {
			f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,

@@ -175,7 +163,6 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
			f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
				Root: root}
			if err := f.updateJustifiedBalances(ctx, root); err != nil {
				f.store.checkpointsLock.Unlock()
				return errors.Wrap(err, "could not update justified balances")
			}
		} else {

@@ -186,25 +173,18 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
			}
			jSlot, err := slots.EpochStart(currentJcp.Epoch)
			if err != nil {
				f.store.checkpointsLock.Unlock()
				return err
			}
			jcRoot := bytesutil.ToBytes32(jc.Root)
			// Releasing here the checkpoints lock because
			// AncestorRoot acquires a lock on nodes and that can
			// cause a double lock.
			f.store.checkpointsLock.Unlock()
			root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
			if err != nil {
				return err
			}
			f.store.checkpointsLock.Lock()
			if root == currentRoot {
				f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
				f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
					Root: jcRoot}
				if err := f.updateJustifiedBalances(ctx, jcRoot); err != nil {
					f.store.checkpointsLock.Unlock()
					return errors.Wrap(err, "could not update justified balances")
				}
			}

@@ -214,14 +194,12 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
			jcRoot := bytesutil.ToBytes32(jc.Root)
			f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jcRoot}
			if err := f.updateJustifiedBalances(ctx, jcRoot); err != nil {
				f.store.checkpointsLock.Unlock()
				return errors.Wrap(err, "could not update justified balances")
			}
		}
	}
	// Update finalization
	if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
		f.store.checkpointsLock.Unlock()
		return nil
	}
	f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,

@@ -231,29 +209,21 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
		f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
			Root: root}
		if err := f.updateJustifiedBalances(ctx, root); err != nil {
			f.store.checkpointsLock.Unlock()
			return errors.Wrap(err, "could not update justified balances")
		}
	}
	f.store.checkpointsLock.Unlock()
	return f.store.prune(ctx)
}

// HasNode returns true if the node exists in fork choice store,
// false otherwise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()

	_, ok := f.store.nodeByRoot[root]
	return ok
}

// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()

	node, ok := f.store.nodeByRoot[root]
	if !ok || node == nil {
		return false

@@ -273,9 +243,6 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {

// IsOptimistic returns true if the given root has been optimistically synced.
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()

	if f.store.allTipsAreInvalid {
		return true, nil
	}

@@ -293,9 +260,6 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
	ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
	defer span.End()

	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()

	node, ok := f.store.nodeByRoot[root]
	if !ok || node == nil {
		return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")

@@ -317,12 +281,8 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
}

// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes. This function requires a lock in Store.nodesLock
// and votesLock
// validators' latest votes.
func (f *ForkChoice) updateBalances() error {
	// lock checkpoints for the justified balances
	f.store.checkpointsLock.RLock()
	defer f.store.checkpointsLock.RUnlock()
	newBalances := f.justifiedBalances

	for index, vote := range f.votes {

@@ -367,7 +327,6 @@ func (f *ForkChoice) updateBalances() error {
			return errors.Wrap(ErrNilNode, "could not update balances")
		}
		if currentNode.balance < oldBalance {
			f.store.proposerBoostLock.RLock()
			log.WithFields(logrus.Fields{
				"nodeRoot":   fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
				"oldBalance": oldBalance,

@@ -377,7 +336,6 @@ func (f *ForkChoice) updateBalances() error {
				"previousProposerBoostRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
				"previousProposerBoostScore": f.store.previousProposerBoostScore,
			}).Warning("node with invalid balance, setting it to zero")
			f.store.proposerBoostLock.RUnlock()
			currentNode.balance = 0
		} else {
			currentNode.balance -= oldBalance

@@ -405,8 +363,6 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {

// SetOptimisticToValid sets the node with the given root as a fully validated node
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
	f.store.nodesLock.Lock()
	defer f.store.nodesLock.Unlock()
	node, ok := f.store.nodeByRoot[root]
	if !ok || node == nil {
		return errors.Wrap(ErrNilNode, "could not set node to valid")

@@ -416,29 +372,21 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams

// BestJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) BestJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
	f.store.checkpointsLock.RLock()
	defer f.store.checkpointsLock.RUnlock()
	return f.store.bestJustifiedCheckpoint
}

// PreviousJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
	f.store.checkpointsLock.RLock()
	defer f.store.checkpointsLock.RUnlock()
	return f.store.prevJustifiedCheckpoint
}

// JustifiedCheckpoint of fork choice store.
func (f *ForkChoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
	f.store.checkpointsLock.RLock()
	defer f.store.checkpointsLock.RUnlock()
	return f.store.justifiedCheckpoint
}

// FinalizedCheckpoint of fork choice store.
func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
	f.store.checkpointsLock.RLock()
	defer f.store.checkpointsLock.RUnlock()
	return f.store.finalizedCheckpoint
}

@@ -451,11 +399,6 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.ValidatorIndex) {
	f.votesLock.RLock()
	defer f.votesLock.RUnlock()

	f.store.nodesLock.Lock()
	defer f.store.nodesLock.Unlock()
	// return early if the index was already included:
	if f.store.slashedIndices[index] {
		return

@@ -489,8 +432,6 @@ func (f *ForkChoice) UpdateJustifiedCheckpoint(ctx context.Context, jc *forkchoi
	if jc == nil {
		return errInvalidNilCheckpoint
	}
	f.store.checkpointsLock.Lock()
	defer f.store.checkpointsLock.Unlock()
	f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
	f.store.justifiedCheckpoint = jc
	f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}

@@ -505,8 +446,6 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
	if fc == nil {
		return errInvalidNilCheckpoint
	}
	f.store.checkpointsLock.Lock()
	defer f.store.checkpointsLock.Unlock()
	f.store.finalizedCheckpoint = fc
	return nil
}

@@ -516,9 +455,6 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
	ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
	defer span.End()

	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()

	n1, ok := f.store.nodeByRoot[r1]
	if !ok || n1 == nil {
		return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor

@@ -559,7 +495,7 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
	}
}

// InsertOptimisticChain inserts all nodes corresponding to blocks in the slice
// InsertChain inserts all nodes corresponding to blocks in the slice
// `blocks`. This slice must be ordered from child to parent. It includes all
// blocks **except** the first one (that is the one with the highest slot
// number). All blocks are assumed to be a strict chain

@@ -601,8 +537,6 @@ func (f *ForkChoice) SetOriginRoot(root [32]byte) {

// CachedHeadRoot returns the last cached head root
func (f *ForkChoice) CachedHeadRoot() [32]byte {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	node := f.store.headNode
	if node == nil {
		return [32]byte{}

@@ -612,8 +546,6 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {

// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	root := f.FinalizedCheckpoint().Root
	node, ok := f.store.nodeByRoot[root]
	if !ok || node == nil {

@@ -625,8 +557,6 @@ func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {

// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	root := f.JustifiedCheckpoint().Root
	node, ok := f.store.nodeByRoot[root]
	if !ok || node == nil {

@@ -692,8 +622,6 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {

// Weight returns the weight of the given root if found on the store
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	n, ok := f.store.nodeByRoot[root]
	if !ok || n == nil {
		return 0, ErrNilNode

@@ -702,7 +630,6 @@ func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
}

// updateJustifiedBalances updates the validators balances on the justified checkpoint pointed by root.
// This function requires a lock on checkpointsLock being held by the caller.
func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte) error {
	balances, err := f.balancesByRoot(ctx, root)
	if err != nil {

@@ -205,7 +205,6 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	f.store.nodesLock.Lock()
	f.store.nodeByRoot[[32]byte{'3'}].balance = 10
	require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
	require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)

@@ -213,7 +212,6 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {

	require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
	require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
	f.store.nodesLock.Unlock()

	r1 := [32]byte{'1'}
	f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: r1}

@@ -16,13 +16,12 @@ import (
// consider a block to be late, and thus a candidate to being reorged.
const orphanLateBlockFirstThreshold = 4

// processAttestationsThreshold is the number of seconds after which we
// ProcessAttestationsThreshold is the number of seconds after which we
// process attestations for the current slot
const processAttestationsThreshold = 10
const ProcessAttestationsThreshold = 10

// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node. This function requires a lock
// in Store.nodesLock
// using the current balance stored in each node.
func (n *Node) applyWeightChanges(ctx context.Context) error {
	// Recursively calling the children to sum their weights.
	childrenWeight := uint64(0)

@@ -43,7 +42,7 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
}

// updateBestDescendant updates the best descendant of this node and its
// children. This function assumes the caller has a lock on Store.nodesLock
// children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
	if ctx.Err() != nil {
		return ctx.Err()

@@ -149,7 +148,7 @@ func (n *Node) arrivedEarly(genesisTime uint64) (bool, error) {
// slot will have secs = 10 below.
func (n *Node) arrivedAfterOrphanCheck(genesisTime uint64) (bool, error) {
	secs, err := slots.SecondsSinceSlotStart(n.slot, genesisTime, n.timestamp)
	return secs >= processAttestationsThreshold, err
	return secs >= ProcessAttestationsThreshold, err
}

// nodeTreeDump appends to the given list all the nodes descending from this one

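Exporting the constant lets packages outside doublylinkedtree schedule work against the same ten-second cutoff. A sketch under that assumption; the package name and deadline helper are hypothetical, and the import path follows the v3 layout used elsewhere in this diff:

package blockchain

import (
	"time"

	"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doublylinkedtree"
)

// attestationCutoff returns the moment within a slot after which attestations
// for the current slot are processed.
func attestationCutoff(slotStart time.Time) time.Time {
	return slotStart.Add(doublylinkedtree.ProcessAttestationsThreshold * time.Second)
}
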
@@ -27,8 +27,6 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
	// The updated balances of each node is 100
	s := f.store

	s.nodesLock.Lock()
	defer s.nodesLock.Unlock()
	s.nodeByRoot[indexToHash(1)].balance = 100
	s.nodeByRoot[indexToHash(2)].balance = 100
	s.nodeByRoot[indexToHash(3)].balance = 100

@@ -55,8 +53,6 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {

	// The updated balances of each node is 100
	s := f.store
	s.nodesLock.Lock()
	defer s.nodesLock.Unlock()
	s.nodeByRoot[indexToHash(1)].weight = 400
	s.nodeByRoot[indexToHash(2)].weight = 400
	s.nodeByRoot[indexToHash(3)].weight = 400

@@ -298,7 +294,7 @@ func TestNode_TimeStampsChecks(t *testing.T) {
	require.Equal(t, false, late)

	// very late block
	driftGenesisTime(f, 3, processAttestationsThreshold+1)
	driftGenesisTime(f, 3, ProcessAttestationsThreshold+1)
	root = [32]byte{'c'}
	state, blkRoot, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
	require.NoError(t, err)

@@ -42,11 +42,9 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
	}

	// Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
	f.store.checkpointsLock.RLock()
	bjcp := f.store.bestJustifiedCheckpoint
	jcp := f.store.justifiedCheckpoint
	fcp := f.store.finalizedCheckpoint
	f.store.checkpointsLock.RUnlock()
	if bjcp.Epoch > jcp.Epoch {
		finalizedSlot, err := slots.EpochStart(fcp.Epoch)
		if err != nil {

@@ -62,13 +60,11 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
			return err
		}
		if r == fcp.Root {
			f.store.checkpointsLock.Lock()
			f.store.prevJustifiedCheckpoint = jcp
			f.store.justifiedCheckpoint = bjcp
			if err := f.updateJustifiedBalances(ctx, bjcp.Root); err != nil {
				log.Error("could not update justified balances")
			}
			f.store.checkpointsLock.Unlock()
		}
	}
	if !features.Get().DisablePullTips {

@@ -8,34 +8,28 @@ import (
)

func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [32]byte) ([][32]byte, error) {
	s.nodesLock.RLock()
	invalidRoots := make([][32]byte, 0)
	node, ok := s.nodeByRoot[root]
	if !ok {
		node, ok = s.nodeByRoot[parentRoot]
		if !ok || node == nil {
			s.nodesLock.RUnlock()
			return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
		}
		// return early if the parent is LVH
		if node.payloadHash == payloadHash {
			s.nodesLock.RUnlock()
			return invalidRoots, nil
		}
	} else {
		if node == nil {
			s.nodesLock.RUnlock()
			return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
		}
		if node.parent.root != parentRoot {
			s.nodesLock.RUnlock()
			return invalidRoots, errInvalidParentRoot
		}
	}
	firstInvalid := node
	for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
		if ctx.Err() != nil {
			s.nodesLock.RUnlock()
			return invalidRoots, ctx.Err()
		}
	}

@@ -44,20 +38,16 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
	if firstInvalid.parent == nil {
		// return early if the invalid node was not imported
		if node.root == parentRoot {
			s.nodesLock.RUnlock()
			return invalidRoots, nil
		}
		firstInvalid = node
	}
	s.nodesLock.RUnlock()
	return s.removeNode(ctx, firstInvalid)
}

// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
	s.nodesLock.Lock()
	defer s.nodesLock.Unlock()
	invalidRoots := make([][32]byte, 0)

	if node == nil {

@@ -96,7 +86,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
		}
	}
	invalidRoots = append(invalidRoots, node.root)
	s.proposerBoostLock.Lock()
	if node.root == s.proposerBoostRoot {
		s.proposerBoostRoot = [32]byte{}
	}

@@ -104,7 +93,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
		s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
		s.previousProposerBoostScore = 0
	}
	s.proposerBoostLock.Unlock()
	delete(s.nodeByRoot, node.root)
	delete(s.nodeByPayload, node.payloadHash)
	return invalidRoots, nil

@@ -237,19 +237,15 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	f.store.proposerBoostLock.Lock()
	f.store.proposerBoostRoot = [32]byte{'c'}
	f.store.previousProposerBoostScore = 10
	f.store.previousProposerBoostRoot = [32]byte{'b'}
	f.store.proposerBoostLock.Unlock()

	_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
	require.NoError(t, err)
	f.store.proposerBoostLock.RLock()
	require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
	require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
	require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
	f.store.proposerBoostLock.RUnlock()
}

// This is a regression test (10565)

@@ -10,23 +10,14 @@ import (

// resetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
func (f *ForkChoice) resetBoostedProposerRoot(_ context.Context) error {
	f.store.proposerBoostLock.Lock()
	f.store.proposerBoostRoot = [32]byte{}
	f.store.proposerBoostLock.Unlock()
	return nil
}

// applyProposerBoostScore applies the current proposer boost scores to the
// relevant nodes. This function requires a lock in Store.nodesLock.
// relevant nodes.
func (f *ForkChoice) applyProposerBoostScore() error {
	s := f.store
	s.proposerBoostLock.Lock()
	defer s.proposerBoostLock.Unlock()

	// acquire checkpoints lock for the justified balances
	s.checkpointsLock.RLock()
	defer s.checkpointsLock.RUnlock()

	proposerScore := uint64(0)
	if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
		previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]

@@ -53,7 +44,5 @@ func (f *ForkChoice) applyProposerBoostScore() error {

// ProposerBoost of fork choice store.
func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
	s.proposerBoostLock.RLock()
	defer s.proposerBoostLock.RUnlock()
	return s.proposerBoostRoot
}

@@ -1,10 +1,17 @@
package doublylinkedtree

import (
	"time"

	"github.com/prysmaticlabs/prysm/v3/config/features"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
)

// orphanLateBlockProposingEarly determines the maximum threshold that we
// consider the node is proposing early and sure to receive proposer boost
const orphanLateBlockProposingEarly = 2

// ShouldOverrideFCU returns whether the current forkchoice head is weak
// and thus may be reorged when proposing the next block.
// This function should only be called if the following two conditions are

@@ -28,9 +35,6 @@ import (
func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
	override = false

	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()

	// We only need to override FCU if our current head is from the current
	// slot. This differs from the spec implementation in that we assume
	// that we will call this function in the previous slot to proposing.

@@ -38,9 +42,11 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
	if head == nil {
		return
	}

	if head.slot != slots.CurrentSlot(f.store.genesisTime) {
		return
	}

	// Do not reorg on epoch boundaries
	if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
		return

@@ -55,9 +61,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
		return
	}
	// Only reorg if we have been finalizing
	f.store.checkpointsLock.RLock()
	finalizedEpoch := f.store.finalizedCheckpoint.Epoch
	f.store.checkpointsLock.RUnlock()
	if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
		return
	}

@@ -80,3 +84,67 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
	}
	return true
}

// GetProposerHead returns the block root that has to be used as ParentRoot by a
// proposer. It may not be the actual head of the canonical chain, in certain
// cases it may be its parent, when the last head block has arrived early and is
// considered safe to be orphaned.
//
// This function needs to be called only when proposing a block and all
// attestation processing has already happened.
func (f *ForkChoice) GetProposerHead() [32]byte {
	if features.Get().DisableReorgLateBlocks {
		return f.CachedHeadRoot()
	}
	head := f.store.headNode
	if head == nil {
		return [32]byte{}
	}

	// Only reorg blocks from the previous slot.
	if head.slot+1 != slots.CurrentSlot(f.store.genesisTime) {
		return head.root
	}
	// Do not reorg on epoch boundaries
	if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
		return head.root
	}
	// Only reorg blocks that arrive late
	early, err := head.arrivedEarly(f.store.genesisTime)
	if err != nil {
		log.WithError(err).Error("could not check if block arrived early")
		return head.root
	}
	if early {
		return head.root
	}
	// Only reorg if we have been finalizing
	finalizedEpoch := f.store.finalizedCheckpoint.Epoch
	if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
		return head.root
	}
	// Only orphan a single block
	parent := head.parent
	if parent == nil {
		return head.root
	}
	if head.slot > parent.slot+1 {
		return head.root
	}

	// Only orphan a block if the head LMD vote is weak
	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
		return head.root
	}

	// Only reorg if we are proposing early
	secs, err := slots.SecondsSinceSlotStart(head.slot+1, f.store.genesisTime, uint64(time.Now().Unix()))
	if err != nil {
		log.WithError(err).Error("could not check if proposing early")
		return head.root
	}
	if secs >= orphanLateBlockProposingEarly {
		return head.root
	}
	return parent.root
}

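A hedged sketch of the proposer flow these rules serve: right before building a block, ask forkchoice which parent to use. The wrapper below is hypothetical and assumes the external-locking convention introduced by this diff:

func parentForProposal(f *ForkChoice) [32]byte {
	f.RLock() // GetProposerHead only reads the store
	defer f.RUnlock()
	// Returns the head root, or the head's parent when the head arrived
	// late, carries a weak LMD vote, and we are proposing early enough.
	return f.GetProposerHead()
}
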
@@ -85,3 +85,84 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
		require.Equal(t, false, f.ShouldOverrideFCU())
	})
}

func TestForkChoice_GetProposerHead(t *testing.T) {
	f := setup(0, 0)
	f.numActiveValidators = 640
	f.justifiedBalances = make([]uint64, f.numActiveValidators)
	for i := range f.justifiedBalances {
		f.justifiedBalances[i] = uint64(10)
		f.store.committeeWeight += uint64(10)
	}
	f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
	ctx := context.Background()
	driftGenesisTime(f, 1, 0)
	parentRoot := [32]byte{'a'}
	st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, root))
	f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)

	driftGenesisTime(f, 3, 1)
	childRoot := [32]byte{'b'}
	st, root, err = prepareForkchoiceState(ctx, 2, childRoot, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, root))
	headRoot, err := f.Head(ctx)
	require.NoError(t, err)
	require.Equal(t, root, headRoot)
	f.store.headNode.timestamp -= params.BeaconConfig().SecondsPerSlot - orphanLateBlockFirstThreshold
	t.Run("head is weak", func(t *testing.T) {
		require.Equal(t, parentRoot, f.GetProposerHead())
	})
	t.Run("head is nil", func(t *testing.T) {
		saved := f.store.headNode
		f.store.headNode = nil
		require.Equal(t, [32]byte{}, f.GetProposerHead())
		f.store.headNode = saved
	})
	t.Run("head is not from previous slot", func(t *testing.T) {
		driftGenesisTime(f, 4, 0)
		require.Equal(t, childRoot, f.GetProposerHead())
		driftGenesisTime(f, 3, 1)
	})
	t.Run("head is from epoch boundary", func(t *testing.T) {
		saved := f.store.headNode.slot
		driftGenesisTime(f, params.BeaconConfig().SlotsPerEpoch, 0)
		f.store.headNode.slot = params.BeaconConfig().SlotsPerEpoch - 1
		require.Equal(t, childRoot, f.GetProposerHead())
		driftGenesisTime(f, 3, 1)
		f.store.headNode.slot = saved
	})
	t.Run("head is early", func(t *testing.T) {
		saved := f.store.headNode.timestamp
		f.store.headNode.timestamp = saved - 2
		require.Equal(t, childRoot, f.GetProposerHead())
		f.store.headNode.timestamp = saved
	})
	t.Run("chain not finalizing", func(t *testing.T) {
		saved := f.store.headNode.slot
		f.store.headNode.slot = 97
		driftGenesisTime(f, 98, 0)
		require.Equal(t, childRoot, f.GetProposerHead())
		f.store.headNode.slot = saved
		driftGenesisTime(f, 3, 1)
	})
	t.Run("Not single block reorg", func(t *testing.T) {
		saved := f.store.headNode.parent.slot
		f.store.headNode.parent.slot = 0
		require.Equal(t, childRoot, f.GetProposerHead())
		f.store.headNode.parent.slot = saved
	})
	t.Run("parent is nil", func(t *testing.T) {
		saved := f.store.headNode.parent
		f.store.headNode.parent = nil
		require.Equal(t, childRoot, f.GetProposerHead())
		f.store.headNode.parent = saved
	})
	t.Run("Head is strong", func(t *testing.T) {
		f.store.headNode.weight = f.store.committeeWeight
		require.Equal(t, childRoot, f.GetProposerHead())
	})
}

@@ -14,12 +14,10 @@ import (
)

// head starts from justified root and then follows the best descendant links
// to find the best block for head. This function assumes a lock on s.nodesLock
// to find the best block for head.
func (s *Store) head(ctx context.Context) ([32]byte, error) {
	ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
	defer span.End()
	s.checkpointsLock.RLock()
	defer s.checkpointsLock.RUnlock()

	if err := ctx.Err(); err != nil {
		return [32]byte{}, err

@@ -71,9 +69,6 @@ func (s *Store) insert(ctx context.Context,
	ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
	defer span.End()

	s.nodesLock.Lock()
	defer s.nodesLock.Unlock()

	// Return if the block has been inserted into Store before.
	if n, ok := s.nodeByRoot[root]; ok {
		return n, nil

@@ -115,16 +110,12 @@ func (s *Store) insert(ctx context.Context,
	currentSlot := slots.CurrentSlot(s.genesisTime)
	boostThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
	if currentSlot == slot && secondsIntoSlot < boostThreshold {
		s.proposerBoostLock.Lock()
		s.proposerBoostRoot = root
		s.proposerBoostLock.Unlock()
	}

	// Update best descendants
	s.checkpointsLock.RLock()
	jEpoch := s.justifiedCheckpoint.Epoch
	fEpoch := s.finalizedCheckpoint.Epoch
	s.checkpointsLock.RUnlock()
	if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
		return n, err
	}

@@ -147,7 +138,7 @@ func (s *Store) insert(ctx context.Context,

// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
// starting from `node` down to the finalized Node or to a leaf of the Fork
// choice store. This method assumes a lock on nodesLock.
// choice store.
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
	if ctx.Err() != nil {
		return ctx.Err()

@@ -173,12 +164,8 @@ func (s *Store) prune(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
	defer span.End()

	s.nodesLock.Lock()
	defer s.nodesLock.Unlock()
	s.checkpointsLock.RLock()
	finalizedRoot := s.finalizedCheckpoint.Root
	finalizedEpoch := s.finalizedCheckpoint.Epoch
	s.checkpointsLock.RUnlock()
	finalizedNode, ok := s.nodeByRoot[finalizedRoot]
	if !ok || finalizedNode == nil {
		return errors.WithMessage(errUnknownFinalizedRoot, fmt.Sprintf("%#x", finalizedRoot))

@@ -222,9 +209,6 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
	var roots [][32]byte
	var slots []primitives.Slot

	s.nodesLock.RLock()
	defer s.nodesLock.RUnlock()

	for root, node := range s.nodeByRoot {
		if len(node.children) == 0 {
			roots = append(roots, root)

@@ -236,28 +220,14 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {

// HighestReceivedBlockSlot returns the highest slot received by the forkchoice
func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	if f.store.highestReceivedNode == nil {
		return 0
	}
	return f.store.highestReceivedNode.slot
}

// HighestReceivedBlockRoot returns the highest slot root received by the forkchoice
func (f *ForkChoice) HighestReceivedBlockRoot() [32]byte {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	if f.store.highestReceivedNode == nil {
		return [32]byte{}
	}
	return f.store.highestReceivedNode.root
}

// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	count := uint64(0)
	lowerBound := slots.CurrentSlot(f.store.genesisTime)
	var err error

@@ -320,26 +320,6 @@ func TestStore_PruneMapsNodes(t *testing.T) {

}

func TestForkChoice_HighestReceivedBlockSlotRoot(t *testing.T) {
	f := setup(1, 1)
	s := f.store
	_, err := s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.Equal(t, primitives.Slot(100), s.highestReceivedNode.slot)
	require.Equal(t, primitives.Slot(100), f.HighestReceivedBlockSlot())
	require.Equal(t, [32]byte{'A'}, f.HighestReceivedBlockRoot())
	_, err = s.insert(context.Background(), 1000, [32]byte{'B'}, [32]byte{}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.Equal(t, primitives.Slot(1000), s.highestReceivedNode.slot)
	require.Equal(t, primitives.Slot(1000), f.HighestReceivedBlockSlot())
	require.Equal(t, [32]byte{'B'}, f.HighestReceivedBlockRoot())
	_, err = s.insert(context.Background(), 500, [32]byte{'C'}, [32]byte{}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.Equal(t, primitives.Slot(1000), s.highestReceivedNode.slot)
	require.Equal(t, primitives.Slot(1000), f.HighestReceivedBlockSlot())
	require.Equal(t, [32]byte{'B'}, f.HighestReceivedBlockRoot())
}

func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
	f := setup(1, 1)
	s := f.store

@@ -11,12 +11,12 @@ import (

// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
	sync.RWMutex
	store *Store
	votes []Vote // tracks individual validator's last vote.
	votesLock sync.RWMutex
	votes               []Vote   // tracks individual validator's last vote.
	balances            []uint64 // tracks individual validator's balances last accounted in votes.
	justifiedBalances   []uint64 // tracks individual validator's last justified balances.
	numActiveValidators uint64 // tracks the total number of active validators. Requires a checkpoints lock to read/write
	numActiveValidators uint64                      // tracks the total number of active validators.
	balancesByRoot      forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
}

@@ -31,16 +31,13 @@ type Store struct {
	proposerBoostRoot          [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
	previousProposerBoostRoot  [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
	previousProposerBoostScore uint64                       // previous proposer boosted root score.
	committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch. Requires a checkpoints lock to read/write
	committeeWeight         uint64                                 // tracks the total active validator balance divided by the number of slots per Epoch.
	treeRootNode            *Node                                  // the root node of the store tree.
	headNode                *Node                                  // last head Node
	nodeByRoot              map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
	nodeByPayload           map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
	slashedIndices          map[primitives.ValidatorIndex]bool     // the list of equivocating validator indices
	originRoot              [fieldparams.RootLength]byte           // The genesis block root
	nodesLock sync.RWMutex
	proposerBoostLock sync.RWMutex
	checkpointsLock sync.RWMutex
	genesisTime             uint64
	highestReceivedNode     *Node                                      // The highest slot node.
	receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.

@@ -16,9 +16,6 @@ import (
)

func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoch) error {
	s.nodesLock.Lock()
	defer s.nodesLock.Unlock()

	node, ok := s.nodeByRoot[root]
	if !ok || node == nil {
		return errors.Wrap(ErrNilNode, "could not set unrealized justified epoch")

@@ -31,9 +28,6 @@ func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoc
}

func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoch) error {
	s.nodesLock.Lock()
	defer s.nodesLock.Unlock()

	node, ok := s.nodeByRoot[root]
	if !ok || node == nil {
		return errors.Wrap(ErrNilNode, "could not set unrealized finalized epoch")

@@ -48,10 +42,6 @@ func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoc
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
// epochs stored within nodes. It should be called at the beginning of each epoch.
func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
	f.store.nodesLock.Lock()
	defer f.store.nodesLock.Unlock()
	f.store.checkpointsLock.Lock()
	defer f.store.checkpointsLock.Unlock()
	for _, node := range f.store.nodeByRoot {
		node.justifiedEpoch = node.unrealizedJustifiedEpoch
		node.finalizedEpoch = node.unrealizedFinalizedEpoch

@@ -81,16 +71,9 @@ func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
}

func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Checkpoint) (*ethpb.Checkpoint, *ethpb.Checkpoint) {
	s.nodesLock.Lock()
	defer s.nodesLock.Unlock()

	if node.parent == nil { // Nothing to do if the parent is nil.
		return jc, fc
	}

	s.checkpointsLock.Lock()
	defer s.checkpointsLock.Unlock()

	currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
	stateSlot := state.Slot()
	stateEpoch := slots.ToEpoch(stateSlot)

@@ -23,16 +23,12 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) {
	state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	f.store.nodesLock.RLock()
	require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
	require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
	f.store.nodesLock.RUnlock()
	require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 2))
	require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 2))
	f.store.nodesLock.RLock()
	require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
	require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
	f.store.nodesLock.RUnlock()

	require.ErrorIs(t, errInvalidUnrealizedJustifiedEpoch, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 0))
	require.ErrorIs(t, errInvalidUnrealizedFinalizedEpoch, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 0))

@@ -124,7 +124,7 @@ func TestVotes_CanFindHead(t *testing.T) {
	//         /
	//        5 <- head, justified epoch = 2
	//
	// We set this node's slot to be 64 so that when prunning below we do not prune its child
	// We set this node's slot to be 64 so that when pruning below we do not prune its child
	state, blkRoot, err = prepareForkchoiceState(context.Background(), 2*params.BeaconConfig().SlotsPerEpoch, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 2)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

@@ -16,6 +16,10 @@ type BalancesByRooter func(context.Context, [32]byte) ([]uint64, error)

// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
type ForkChoicer interface {
	Lock()
	Unlock()
	RLock()
	RUnlock()
	HeadRetriever        // to compute head.
	BlockProcessor       // to track new block for fork choice.
	AttestationProcessor // to track new attestation for fork choice.
@@ -26,9 +30,8 @@ type ForkChoicer interface {
|
||||
// HeadRetriever retrieves head root and optimistic info of the current chain.
|
||||
type HeadRetriever interface {
|
||||
Head(context.Context) ([32]byte, error)
|
||||
GetProposerHead() [32]byte
|
||||
CachedHeadRoot() [32]byte
|
||||
Tips() ([][32]byte, []primitives.Slot)
|
||||
IsOptimistic(root [32]byte) (bool, error)
|
||||
}
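The hunk above adds Lock/Unlock/RLock/RUnlock to ForkChoicer, pushing lock management onto callers. A hedged sketch of the intended call pattern (the wrapper function is illustrative, not part of the diff):

func cachedHead(fc ForkChoicer) [32]byte {
	fc.RLock() // read-only query; mutating calls would take Lock() instead
	defer fc.RUnlock()
	return fc.CachedHeadRoot()
}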

// BlockProcessor processes the block that's used for accounting fork choice.
@@ -40,7 +43,6 @@ type BlockProcessor interface {
// AttestationProcessor processes the attestation that's used for accounting fork choice.
type AttestationProcessor interface {
ProcessAttestation(context.Context, []uint64, [32]byte, primitives.Epoch)
InsertSlashedIndex(context.Context, primitives.ValidatorIndex)
}

// Getter returns fork choice related information.
@@ -58,10 +60,12 @@ type Getter interface {
BestJustifiedCheckpoint() *forkchoicetypes.Checkpoint
NodeCount() int
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
ReceivedBlocksLastEpoch() (uint64, error)
ForkChoiceDump(context.Context) (*v1.ForkChoiceDump, error)
Weight(root [32]byte) (uint64, error)
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)
ShouldOverrideFCU() bool
}

// Setter allows to set forkchoice information
@@ -74,4 +78,5 @@ type Setter interface {
SetOriginRoot([32]byte)
NewSlot(context.Context, primitives.Slot) error
SetBalancesByRooter(BalancesByRooter)
InsertSlashedIndex(context.Context, primitives.ValidatorIndex)
}

@@ -7,16 +7,8 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

// ProposerBoostRootArgs to call the BoostProposerRoot function.
type ProposerBoostRootArgs struct {
BlockRoot [32]byte
BlockSlot primitives.Slot
CurrentSlot primitives.Slot
SecondsIntoSlot uint64
}

// Checkpoint is an array version of ethpb.Checkpoint. It is used internally in
// forkchoice, while the slice version is used in the interface to legagy code
// forkchoice, while the slice version is used in the interface to legacy code
// in other packages
type Checkpoint struct {
Epoch primitives.Epoch

@@ -129,12 +129,11 @@ func configureInteropConfig(cliCtx *cli.Context) error {
if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return nil
}
genStateIsSet := cliCtx.IsSet(flags.InteropGenesisStateFlag.Name)
genTimeIsSet := cliCtx.IsSet(flags.InteropGenesisTimeFlag.Name)
numValsIsSet := cliCtx.IsSet(flags.InteropNumValidatorsFlag.Name)
votesIsSet := cliCtx.IsSet(flags.InteropMockEth1DataVotesFlag.Name)

if genStateIsSet || genTimeIsSet || numValsIsSet || votesIsSet {
if genTimeIsSet || numValsIsSet || votesIsSet {
if err := params.SetActive(params.InteropConfig().Copy()); err != nil {
return err
}
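The condition change above means a genesis-state file alone no longer flips the node onto the interop preset. Summarizing the new gating (an assumption based only on the lines shown):

// InteropGenesisStateFlag set alone   -> active config left as-is
// InteropGenesisTimeFlag set          -> interop config activated
// InteropNumValidatorsFlag set        -> interop config activated
// InteropMockEth1DataVotesFlag set    -> interop config activated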

@@ -143,18 +143,19 @@ func TestConfigureNetwork_ConfigFile(t *testing.T) {
"node2")), 0666))

require.NoError(t, set.Parse([]string{"test-command", "--" + cmd.ConfigFileFlag.Name, "flags_test.yaml"}))
comFlags := cmd.WrapFlags([]cli.Flag{
&cli.StringFlag{
Name: cmd.ConfigFileFlag.Name,
},
&cli.StringSliceFlag{
Name: cmd.BootstrapNode.Name,
},
})
command := &cli.Command{
Name: "test-command",
Flags: cmd.WrapFlags([]cli.Flag{
&cli.StringFlag{
Name: cmd.ConfigFileFlag.Name,
},
&cli.StringSliceFlag{
Name: cmd.BootstrapNode.Name,
},
}),
Name: "test-command",
Flags: comFlags,
Before: func(cliCtx *cli.Context) error {
return cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags)
return cmd.LoadFlagsFromConfig(cliCtx, comFlags)
},
Action: func(cliCtx *cli.Context) error {
//TODO: https://github.com/urfave/cli/issues/1197 right now does not set flag
@@ -165,7 +166,7 @@ func TestConfigureNetwork_ConfigFile(t *testing.T) {
return nil
},
}
require.NoError(t, command.Run(context))
require.NoError(t, command.Run(context, context.Args().Slice()...))
require.NoError(t, os.Remove("flags_test.yaml"))
}

@@ -219,17 +220,6 @@ func TestConfigureInterop(t *testing.T) {
},
"interop",
},
{
"genesis state set",
func() *cli.Context {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.String(flags.InteropGenesisStateFlag.Name, "", "")
assert.NoError(t, set.Set(flags.InteropGenesisStateFlag.Name, "/path/"))
return cli.NewContext(&app, set, nil)
},
"interop",
},
}

for _, tt := range tests {

@@ -7,7 +7,6 @@ import (
"bytes"
"context"
"fmt"
"math"
"os"
"os/signal"
"path/filepath"
@@ -67,9 +66,6 @@ import (

const testSkipPowFlag = "test-skip-pow"

// 128MB max message size when enabling debug endpoints.
const debugGrpcMaxMsgSize = 1 << 27
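For reference, 1 << 27 = 2^27 = 134,217,728 bytes, which is exactly 128 MiB, matching the comment above.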

// Used as a struct to keep cli flag options for configuring services
// for the beacon node. We keep this as a separate struct to not pollute the actual BeaconNode
// struct, as it is merely used to pass down configuration options into the appropriate services.
@@ -762,10 +758,9 @@ func (b *BeaconNode) registerRPCService() error {
}

genesisValidators := b.cliCtx.Uint64(flags.InteropNumValidatorsFlag.Name)
genesisStatePath := b.cliCtx.String(flags.InteropGenesisStateFlag.Name)
var depositFetcher depositcache.DepositFetcher
var chainStartFetcher execution.ChainStartFetcher
if genesisValidators > 0 || genesisStatePath != "" {
if genesisValidators > 0 {
var interopService *interopcoldstart.Service
if err := b.services.FetchService(&interopService); err != nil {
return err
@@ -787,9 +782,6 @@ func (b *BeaconNode) registerRPCService() error {

maxMsgSize := b.cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
if enableDebugRPCEndpoints {
maxMsgSize = int(math.Max(float64(maxMsgSize), debugGrpcMaxMsgSize))
}

p2pService := b.fetchP2P()
rpcService := rpc.NewService(b.ctx, &rpc.Config{
@@ -881,9 +873,6 @@ func (b *BeaconNode) registerGRPCGateway() error {
maxCallSize := b.cliCtx.Uint64(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
httpModules := b.cliCtx.String(flags.HTTPModules.Name)
timeout := b.cliCtx.Int(cmd.ApiTimeoutFlag.Name)
if enableDebugRPCEndpoints {
maxCallSize = uint64(math.Max(float64(maxCallSize), debugGrpcMaxMsgSize))
}

gatewayConfig := gateway.DefaultConfig(enableDebugRPCEndpoints, httpModules)
muxs := make([]*apigateway.PbMux, 0)
@@ -917,15 +906,13 @@
func (b *BeaconNode) registerDeterminsticGenesisService() error {
genesisTime := b.cliCtx.Uint64(flags.InteropGenesisTimeFlag.Name)
genesisValidators := b.cliCtx.Uint64(flags.InteropNumValidatorsFlag.Name)
genesisStatePath := b.cliCtx.String(flags.InteropGenesisStateFlag.Name)

if genesisValidators > 0 || genesisStatePath != "" {
if genesisValidators > 0 {
svc := interopcoldstart.NewService(b.ctx, &interopcoldstart.Config{
GenesisTime: genesisTime,
NumValidators: genesisValidators,
BeaconDB: b.db,
DepositCache: b.depositCache,
GenesisPath: genesisStatePath,
})
svc.Start()


@@ -114,7 +114,7 @@ func TestNodeStart_Ok_registerDeterministicGenesisService(t *testing.T) {
genesisBytes, err := genesisState.MarshalSSZ()
require.NoError(t, err)
require.NoError(t, os.WriteFile("genesis_ssz.json", genesisBytes, 0666))
set.String(flags.InteropGenesisStateFlag.Name, "genesis_ssz.json", "")
set.String("genesis-state", "genesis_ssz.json", "")
ctx := cli.NewContext(&app, set, nil)
node, err := New(ctx, WithBlockchainFlagOptions([]blockchain.Option{}),
WithBuilderFlagOptions([]builder.Option{}),

@@ -17,6 +17,8 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/doubly-linked-list:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

@@ -4,6 +4,8 @@ import (
"math"
"sync"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -18,6 +20,13 @@ import (
// we only do it when the map is smaller than this upper bound.
const blsChangesPoolThreshold = 2000

var (
blsToExecMessageInPoolTotal = promauto.NewGauge(prometheus.GaugeOpts{
Name: "bls_to_exec_message_pool_total",
Help: "The number of saved bls to exec messages in the operation pool.",
})
)
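The gauge is registered through promauto, so it attaches to the default registry at package init and only needs Inc/Dec calls afterwards. A minimal, self-contained sketch of the same pattern (standalone example with invented names, not Prysm code):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// poolGauge mirrors the Inc-on-insert / Dec-on-include lifecycle shown below.
var poolGauge = promauto.NewGauge(prometheus.GaugeOpts{
	Name: "example_pool_total",
	Help: "Number of objects currently held in the pool.",
})

func main() {
	poolGauge.Inc() // object inserted into the pool
	poolGauge.Inc()
	poolGauge.Dec() // object included in a block and removed
	fmt.Println(testutil.ToFloat64(poolGauge)) // 1
}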

// PoolManager maintains pending and seen BLS-to-execution-change objects.
// This pool is used by proposers to insert BLS-to-execution-change objects into new blocks.
type PoolManager interface {
@@ -116,6 +125,9 @@ func (p *Pool) InsertBLSToExecChange(change *ethpb.SignedBLSToExecutionChange) {

p.pending.Append(doublylinkedlist.NewNode(change))
p.m[change.Message.ValidatorIndex] = p.pending.Last()

blsToExecMessageInPoolTotal.Inc()

}

// MarkIncluded is used when an object has been included in a beacon block. Every block seen by this
@@ -134,6 +146,8 @@ func (p *Pool) MarkIncluded(change *ethpb.SignedBLSToExecutionChange) {
if p.numPending() == blsChangesPoolThreshold {
p.cycleMap()
}

blsToExecMessageInPoolTotal.Dec()
}

// ValidatorExists checks if the bls to execution change object exists

@@ -157,6 +157,16 @@ func TestBLSToExecChangesForInclusion(t *testing.T) {
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
assert.Equal(t, primitives.ValidatorIndex(30), changes[1].Message.ValidatorIndex)
})
t.Run("invalid change not returned", func(t *testing.T) {
pool := NewPool()
saveByte := signedChanges[1].Message.FromBlsPubkey[5]
signedChanges[1].Message.FromBlsPubkey[5] = 0xff
pool.InsertBLSToExecChange(signedChanges[1])
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
assert.Equal(t, 0, len(changes))
signedChanges[1].Message.FromBlsPubkey[5] = saveByte
})
}

func TestInsertBLSToExecChange(t *testing.T) {

@@ -4,33 +4,40 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"service.go",
"pool.go",
],
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits",
visibility = [
"//beacon-chain:__subpackages__",
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/doubly-linked-list:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

go_test(
name = "go_default_test",
size = "small",
srcs = ["service_test.go"],
srcs = ["pool_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"//time/slots:go_default_library",
],
)

@@ -1,8 +1,6 @@
package mock

import (
"context"

"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -14,12 +12,17 @@ type PoolMock struct {
}

// PendingExits --
func (m *PoolMock) PendingExits(_ state.ReadOnlyBeaconState, _ primitives.Slot, _ bool) []*eth.SignedVoluntaryExit {
return m.Exits
func (m *PoolMock) PendingExits() ([]*eth.SignedVoluntaryExit, error) {
return m.Exits, nil
}

// ExitsForInclusion --
func (m *PoolMock) ExitsForInclusion(_ state.ReadOnlyBeaconState, _ primitives.Slot) ([]*eth.SignedVoluntaryExit, error) {
return m.Exits, nil
}

// InsertVoluntaryExit --
func (m *PoolMock) InsertVoluntaryExit(_ context.Context, _ state.ReadOnlyBeaconState, exit *eth.SignedVoluntaryExit) {
func (m *PoolMock) InsertVoluntaryExit(exit *eth.SignedVoluntaryExit) {
m.Exits = append(m.Exits, exit)
}


139
beacon-chain/operations/voluntaryexits/pool.go
Normal file
@@ -0,0 +1,139 @@
package voluntaryexits

import (
"math"
"sync"

"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
doublylinkedlist "github.com/prysmaticlabs/prysm/v3/container/doubly-linked-list"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
)

// PoolManager maintains pending and seen voluntary exits.
// This pool is used by proposers to insert voluntary exits into new blocks.
type PoolManager interface {
PendingExits() ([]*ethpb.SignedVoluntaryExit, error)
ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slot) ([]*ethpb.SignedVoluntaryExit, error)
InsertVoluntaryExit(exit *ethpb.SignedVoluntaryExit)
MarkIncluded(exit *ethpb.SignedVoluntaryExit)
}
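A hedged sketch of a consumer of this interface (the wrapper function is illustrative and only uses methods declared above):

func logPendingExits(p PoolManager) error {
	exits, err := p.PendingExits()
	if err != nil {
		return err
	}
	for _, e := range exits {
		logrus.WithField("validatorIndex", e.Exit.ValidatorIndex).Debug("pending voluntary exit")
	}
	return nil
}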

// Pool is a concrete implementation of PoolManager.
type Pool struct {
lock sync.RWMutex
pending doublylinkedlist.List[*ethpb.SignedVoluntaryExit]
m map[types.ValidatorIndex]*doublylinkedlist.Node[*ethpb.SignedVoluntaryExit]
}

// NewPool returns an initialized pool.
func NewPool() *Pool {
return &Pool{
pending: doublylinkedlist.List[*ethpb.SignedVoluntaryExit]{},
m: make(map[types.ValidatorIndex]*doublylinkedlist.Node[*ethpb.SignedVoluntaryExit]),
}
}

// PendingExits returns all objects from the pool.
func (p *Pool) PendingExits() ([]*ethpb.SignedVoluntaryExit, error) {
p.lock.RLock()
defer p.lock.RUnlock()

result := make([]*ethpb.SignedVoluntaryExit, p.pending.Len())
node := p.pending.First()
var err error
for i := 0; node != nil; i++ {
result[i], err = node.Value()
if err != nil {
return nil, err
}
node, err = node.Next()
if err != nil {
return nil, err
}
}
return result, nil
}

// ExitsForInclusion returns objects that are ready for inclusion at the given slot. This method will not
// return more than the block enforced MaxVoluntaryExits.
func (p *Pool) ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slot) ([]*ethpb.SignedVoluntaryExit, error) {
p.lock.RLock()
length := int(math.Min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
result := make([]*ethpb.SignedVoluntaryExit, 0, length)
node := p.pending.First()
for node != nil && len(result) < length {
exit, err := node.Value()
if err != nil {
p.lock.RUnlock()
return nil, err
}
if exit.Exit.Epoch > slots.ToEpoch(slot) {
node, err = node.Next()
if err != nil {
p.lock.RUnlock()
return nil, err
}
continue
}
validator, err := state.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
logrus.WithError(err).Warningf("could not get validator at index %d", exit.Exit.ValidatorIndex)
node, err = node.Next()
if err != nil {
p.lock.RUnlock()
return nil, err
}
continue
}
if err = blocks.VerifyExitAndSignature(validator, state.Slot(), state.Fork(), exit, state.GenesisValidatorsRoot()); err != nil {
logrus.WithError(err).Warning("removing invalid exit from pool")
p.lock.RUnlock()
// MarkIncluded removes the invalid exit from the pool
p.MarkIncluded(exit)
p.lock.RLock()
} else {
result = append(result, exit)
}
node, err = node.Next()
if err != nil {
p.lock.RUnlock()
return nil, err
}
}
p.lock.RUnlock()
return result, nil
}
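At proposal time, a caller would pass the head state and the proposal slot, include the returned exits in the block, and afterwards mark each one as included so it leaves the pool. A sketch under those assumptions (the helper name is hypothetical):

func packExitsForBlock(p *Pool, st state.ReadOnlyBeaconState, slot types.Slot) ([]*ethpb.SignedVoluntaryExit, error) {
	exits, err := p.ExitsForInclusion(st, slot)
	if err != nil {
		return nil, err
	}
	// Once the block carrying these exits is processed, the block-processing
	// path calls p.MarkIncluded(e) for each exit e to drop it from the pool.
	return exits, nil
}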

// InsertVoluntaryExit into the pool.
func (p *Pool) InsertVoluntaryExit(exit *ethpb.SignedVoluntaryExit) {
p.lock.Lock()
defer p.lock.Unlock()

_, exists := p.m[exit.Exit.ValidatorIndex]
if exists {
return
}

p.pending.Append(doublylinkedlist.NewNode(exit))
p.m[exit.Exit.ValidatorIndex] = p.pending.Last()
}

// MarkIncluded is used when an exit has been included in a beacon block. Every block seen by this
// node should call this method to include the exit. This will remove the exit from the pool.
func (p *Pool) MarkIncluded(exit *ethpb.SignedVoluntaryExit) {
p.lock.Lock()
defer p.lock.Unlock()

node := p.m[exit.Exit.ValidatorIndex]
if node == nil {
return
}

delete(p.m, exit.Exit.ValidatorIndex)
p.pending.Remove(node)
}
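Design note: the map keyed by validator index plus the doubly linked list is what keeps every path cheap here. InsertVoluntaryExit appends in O(1) and records the node in the map, MarkIncluded resolves the node through the map and unlinks it in O(1), and the list preserves insertion order for PendingExits and ExitsForInclusion. The map also provides one-exit-per-validator deduplication without any sorting, which the old slice-based pool (removed below) had to maintain by hand.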
327
beacon-chain/operations/voluntaryexits/pool_test.go
Normal file
@@ -0,0 +1,327 @@
package voluntaryexits

import (
"testing"

"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/crypto/bls/common"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)

func TestPendingExits(t *testing.T) {
t.Run("empty pool", func(t *testing.T) {
pool := NewPool()
changes, err := pool.PendingExits()
require.NoError(t, err)
assert.Equal(t, 0, len(changes))
})
t.Run("non-empty pool", func(t *testing.T) {
pool := NewPool()
pool.InsertVoluntaryExit(&ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 0,
},
})
pool.InsertVoluntaryExit(&ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 1,
},
})
changes, err := pool.PendingExits()
require.NoError(t, err)
assert.Equal(t, 2, len(changes))
})
}

func TestExitsForInclusion(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
}
stateSlot := types.Slot(uint64(params.BeaconConfig().ShardCommitteePeriod) * uint64(params.BeaconConfig().SlotsPerEpoch))
spb.Slot = stateSlot
numValidators := 2 * params.BeaconConfig().MaxVoluntaryExits
validators := make([]*ethpb.Validator, numValidators)
exits := make([]*ethpb.VoluntaryExit, numValidators)
privKeys := make([]common.SecretKey, numValidators)

for i := range validators {
v := &ethpb.Validator{}
if i == len(validators)-2 {
// exit for this validator is invalid
v.ExitEpoch = 0
} else {
v.ExitEpoch = params.BeaconConfig().FarFutureEpoch
}
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()
v.PublicKey = pubkey

message := &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(i),
}
// exit for future slot
if i == len(validators)-1 {
message.Epoch = slots.ToEpoch(stateSlot) + 1
}

validators[i] = v
exits[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)

signedExits := make([]*ethpb.SignedVoluntaryExit, numValidators)
for i, message := range exits {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainVoluntaryExit, privKeys[i])
require.NoError(t, err)

signed := &ethpb.SignedVoluntaryExit{
Exit: message,
Signature: signature,
}
signedExits[i] = signed
}

t.Run("empty pool", func(t *testing.T) {
pool := NewPool()
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, 0, len(exits))
})
t.Run("less than MaxVoluntaryExits in pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < params.BeaconConfig().MaxVoluntaryExits-1; i++ {
pool.InsertVoluntaryExit(signedExits[i])
}
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxVoluntaryExits)-1, len(exits))
})
t.Run("MaxVoluntaryExits in pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < params.BeaconConfig().MaxVoluntaryExits; i++ {
pool.InsertVoluntaryExit(signedExits[i])
}
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxVoluntaryExits), len(exits))
})
t.Run("more than MaxVoluntaryExits in pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < numValidators; i++ {
pool.InsertVoluntaryExit(signedExits[i])
}
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxVoluntaryExits), len(exits))
for _, ch := range exits {
assert.NotEqual(t, types.ValidatorIndex(params.BeaconConfig().MaxVoluntaryExits), ch.Exit.ValidatorIndex)
}
})
t.Run("exit for future epoch not returned", func(t *testing.T) {
pool := NewPool()
pool.InsertVoluntaryExit(signedExits[len(signedExits)-1])
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, 0, len(exits))
})
t.Run("invalid exit not returned", func(t *testing.T) {
pool := NewPool()
pool.InsertVoluntaryExit(signedExits[len(signedExits)-2])
exits, err := pool.ExitsForInclusion(st, stateSlot)
require.NoError(t, err)
assert.Equal(t, 0, len(exits))
})
}

func TestInsertExit(t *testing.T) {
t.Run("empty pool", func(t *testing.T) {
pool := NewPool()
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
},
}
pool.InsertVoluntaryExit(exit)
require.Equal(t, 1, pool.pending.Len())
require.Equal(t, 1, len(pool.m))
n, ok := pool.m[0]
require.Equal(t, true, ok)
v, err := n.Value()
require.NoError(t, err)
assert.DeepEqual(t, exit, v)
})
t.Run("item in pool", func(t *testing.T) {
pool := NewPool()
old := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
},
}
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
},
}
pool.InsertVoluntaryExit(old)
pool.InsertVoluntaryExit(exit)
require.Equal(t, 2, pool.pending.Len())
require.Equal(t, 2, len(pool.m))
n, ok := pool.m[0]
require.Equal(t, true, ok)
v, err := n.Value()
require.NoError(t, err)
assert.DeepEqual(t, old, v)
n, ok = pool.m[1]
require.Equal(t, true, ok)
v, err = n.Value()
require.NoError(t, err)
assert.DeepEqual(t, exit, v)
})
t.Run("validator index already exists", func(t *testing.T) {
pool := NewPool()
old := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
},
Signature: []byte("old"),
}
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
},
Signature: []byte("exit"),
}
pool.InsertVoluntaryExit(old)
pool.InsertVoluntaryExit(exit)
assert.Equal(t, 1, pool.pending.Len())
require.Equal(t, 1, len(pool.m))
n, ok := pool.m[0]
require.Equal(t, true, ok)
v, err := n.Value()
require.NoError(t, err)
assert.DeepEqual(t, old, v)
})
}

func TestMarkIncluded(t *testing.T) {
t.Run("one element in pool", func(t *testing.T) {
pool := NewPool()
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
pool.InsertVoluntaryExit(exit)
pool.MarkIncluded(exit)
assert.Equal(t, 0, pool.pending.Len())
_, ok := pool.m[0]
assert.Equal(t, false, ok)
})
t.Run("first of multiple elements", func(t *testing.T) {
pool := NewPool()
first := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
second := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
}}
third := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(2),
}}
pool.InsertVoluntaryExit(first)
pool.InsertVoluntaryExit(second)
pool.InsertVoluntaryExit(third)
pool.MarkIncluded(first)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[0]
assert.Equal(t, false, ok)
})
t.Run("last of multiple elements", func(t *testing.T) {
pool := NewPool()
first := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
second := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
}}
third := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(2),
}}
pool.InsertVoluntaryExit(first)
pool.InsertVoluntaryExit(second)
pool.InsertVoluntaryExit(third)
pool.MarkIncluded(third)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[2]
assert.Equal(t, false, ok)
})
t.Run("in the middle of multiple elements", func(t *testing.T) {
pool := NewPool()
first := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
second := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
}}
third := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(2),
}}
pool.InsertVoluntaryExit(first)
pool.InsertVoluntaryExit(second)
pool.InsertVoluntaryExit(third)
pool.MarkIncluded(second)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[1]
assert.Equal(t, false, ok)
})
t.Run("not in pool", func(t *testing.T) {
pool := NewPool()
first := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(0),
}}
second := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(1),
}}
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: types.ValidatorIndex(2),
}}
pool.InsertVoluntaryExit(first)
pool.InsertVoluntaryExit(second)
pool.MarkIncluded(exit)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[0]
require.Equal(t, true, ok)
assert.NotNil(t, pool.m[0])
_, ok = pool.m[1]
require.Equal(t, true, ok)
assert.NotNil(t, pool.m[1])
})
}
@@ -1,125 +0,0 @@
package voluntaryexits

import (
"context"
"sort"
"sync"

"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)

// PoolManager maintains pending and seen voluntary exits.
// This pool is used by proposers to insert voluntary exits into new blocks.
type PoolManager interface {
PendingExits(state state.ReadOnlyBeaconState, slot primitives.Slot, noLimit bool) []*ethpb.SignedVoluntaryExit
InsertVoluntaryExit(ctx context.Context, state state.ReadOnlyBeaconState, exit *ethpb.SignedVoluntaryExit)
MarkIncluded(exit *ethpb.SignedVoluntaryExit)
}

// Pool is a concrete implementation of PoolManager.
type Pool struct {
lock sync.RWMutex
pending []*ethpb.SignedVoluntaryExit
}

// NewPool accepts a head fetcher (for reading the validator set) and returns an initialized
// voluntary exit pool.
func NewPool() *Pool {
return &Pool{
pending: make([]*ethpb.SignedVoluntaryExit, 0),
}
}

// PendingExits returns exits that are ready for inclusion at the given slot. This method will not
// return more than the block enforced MaxVoluntaryExits.
func (p *Pool) PendingExits(state state.ReadOnlyBeaconState, slot primitives.Slot, noLimit bool) []*ethpb.SignedVoluntaryExit {
p.lock.RLock()
defer p.lock.RUnlock()

// Allocate pending slice with a capacity of min(len(p.pending), maxVoluntaryExits) since the
// array cannot exceed the max and is typically less than the max value.
maxExits := params.BeaconConfig().MaxVoluntaryExits
if noLimit {
maxExits = uint64(len(p.pending))
}
pending := make([]*ethpb.SignedVoluntaryExit, 0, maxExits)
for _, e := range p.pending {
if e.Exit.Epoch > slots.ToEpoch(slot) {
continue
}
if v, err := state.ValidatorAtIndexReadOnly(e.Exit.ValidatorIndex); err == nil &&
v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch {
pending = append(pending, e)
if uint64(len(pending)) == maxExits {
break
}
}
}
return pending
}

// InsertVoluntaryExit into the pool. This method is a no-op if the pending exit already exists,
// or the validator is already exited.
func (p *Pool) InsertVoluntaryExit(ctx context.Context, state state.ReadOnlyBeaconState, exit *ethpb.SignedVoluntaryExit) {
ctx, span := trace.StartSpan(ctx, "exitPool.InsertVoluntaryExit")
defer span.End()
p.lock.Lock()
defer p.lock.Unlock()

// Prevent malformed messages from being inserted.
if exit == nil || exit.Exit == nil {
return
}

existsInPending, index := existsInList(p.pending, exit.Exit.ValidatorIndex)
// If the item exists in the pending list and includes a more favorable, earlier
// exit epoch, we replace it in the pending list. If it exists but the prior condition is false,
// we simply return.
if existsInPending {
if exit.Exit.Epoch < p.pending[index].Exit.Epoch {
p.pending[index] = exit
}
return
}

// Has the validator been exited already?
if v, err := state.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex); err != nil ||
v.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
return
}

// Insert into pending list and sort.
p.pending = append(p.pending, exit)
sort.Slice(p.pending, func(i, j int) bool {
return p.pending[i].Exit.ValidatorIndex < p.pending[j].Exit.ValidatorIndex
})
}

// MarkIncluded is used when an exit has been included in a beacon block. Every block seen by this
// node should call this method to include the exit. This will remove the exit from
// the pending exits slice.
func (p *Pool) MarkIncluded(exit *ethpb.SignedVoluntaryExit) {
p.lock.Lock()
defer p.lock.Unlock()
exists, index := existsInList(p.pending, exit.Exit.ValidatorIndex)
if exists {
// Exit we want is present at p.pending[index], so we remove it.
p.pending = append(p.pending[:index], p.pending[index+1:]...)
}
}

// Binary search to check if the index exists in the list of pending exits.
func existsInList(pending []*ethpb.SignedVoluntaryExit, searchingFor primitives.ValidatorIndex) (bool, int) {
i := sort.Search(len(pending), func(j int) bool {
return pending[j].Exit.ValidatorIndex >= searchingFor
})
if i < len(pending) && pending[i].Exit.ValidatorIndex == searchingFor {
return true, i
}
return false, -1
}
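The removed helper relied on InsertVoluntaryExit keeping `pending` sorted by validator index, so sort.Search could find an entry in O(log n). A standalone sketch of the same idiom, with example data rather than Prysm types:

package main

import (
	"fmt"
	"sort"
)

func main() {
	pending := []uint64{1, 4, 7, 9} // validator indices, kept sorted on insert
	searchingFor := uint64(7)
	// sort.Search returns the smallest index whose element is >= the target.
	i := sort.Search(len(pending), func(j int) bool { return pending[j] >= searchingFor })
	found := i < len(pending) && pending[i] == searchingFor
	fmt.Println(found, i) // true 2
}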
|
||||
@@ -1,530 +0,0 @@
|
||||
package voluntaryexits
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestPool_InsertVoluntaryExit(t *testing.T) {
|
||||
type fields struct {
|
||||
pending []*ethpb.SignedVoluntaryExit
|
||||
}
|
||||
type args struct {
|
||||
exit *ethpb.SignedVoluntaryExit
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want []*ethpb.SignedVoluntaryExit
|
||||
}{
|
||||
{
|
||||
name: "Prevent inserting nil exit",
|
||||
fields: fields{
|
||||
pending: make([]*ethpb.SignedVoluntaryExit, 0),
|
||||
},
|
||||
args: args{
|
||||
exit: nil,
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{},
|
||||
},
|
||||
{
|
||||
name: "Prevent inserting malformed exit",
|
||||
fields: fields{
|
||||
pending: make([]*ethpb.SignedVoluntaryExit, 0),
|
||||
},
|
||||
args: args{
|
||||
exit: ðpb.SignedVoluntaryExit{
|
||||
Exit: nil,
|
||||
},
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{},
|
||||
},
|
||||
{
|
||||
name: "Empty list",
|
||||
fields: fields{
|
||||
pending: make([]*ethpb.SignedVoluntaryExit, 0),
|
||||
},
|
||||
args: args{
|
||||
exit: ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate identical exit",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
exit: ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate exit in pending list",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
exit: ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate validator index",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
exit: ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 20,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate received with more favorable exit epoch",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
exit: ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 4,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 4,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Exit for already exited validator",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{},
|
||||
},
|
||||
args: args{
|
||||
exit: ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{},
|
||||
},
|
||||
{
|
||||
name: "Maintains sorted order",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
exit: ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 10,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 10,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 12,
|
||||
ValidatorIndex: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
ctx := context.Background()
|
||||
validators := []*ethpb.Validator{
|
||||
{ // 0
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
},
|
||||
{ // 1
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
},
|
||||
{ // 2 - Already exited.
|
||||
ExitEpoch: 15,
|
||||
},
|
||||
{ // 3
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p := &Pool{
|
||||
pending: tt.fields.pending,
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{Validators: validators})
|
||||
require.NoError(t, err)
|
||||
p.InsertVoluntaryExit(ctx, s, tt.args.exit)
|
||||
if len(p.pending) != len(tt.want) {
|
||||
t.Fatalf("Mismatched lengths of pending list. Got %d, wanted %d.", len(p.pending), len(tt.want))
|
||||
}
|
||||
for i := range p.pending {
|
||||
if !proto.Equal(p.pending[i], tt.want[i]) {
|
||||
t.Errorf("Pending exit at index %d does not match expected. Got=%v wanted=%v", i, p.pending[i], tt.want[i])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPool_MarkIncluded(t *testing.T) {
|
||||
type fields struct {
|
||||
pending []*ethpb.SignedVoluntaryExit
|
||||
}
|
||||
type args struct {
|
||||
exit *ethpb.SignedVoluntaryExit
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want fields
|
||||
}{
|
||||
{
|
||||
name: "Removes from pending list",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{ValidatorIndex: 1},
|
||||
},
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{ValidatorIndex: 2},
|
||||
},
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{ValidatorIndex: 3},
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
exit: ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{ValidatorIndex: 2},
|
||||
},
|
||||
},
|
||||
want: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{ValidatorIndex: 1},
|
||||
},
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{ValidatorIndex: 3},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p := &Pool{
|
||||
pending: tt.fields.pending,
|
||||
}
|
||||
p.MarkIncluded(tt.args.exit)
|
||||
if len(p.pending) != len(tt.want.pending) {
|
||||
t.Fatalf("Mismatched lengths of pending list. Got %d, wanted %d.", len(p.pending), len(tt.want.pending))
|
||||
}
|
||||
for i := range p.pending {
|
||||
if !proto.Equal(p.pending[i], tt.want.pending[i]) {
|
||||
t.Errorf("Pending exit at index %d does not match expected. Got=%v wanted=%v", i, p.pending[i], tt.want.pending[i])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPool_PendingExits(t *testing.T) {
|
||||
type fields struct {
|
||||
pending []*ethpb.SignedVoluntaryExit
|
||||
noLimit bool
|
||||
}
|
||||
type args struct {
|
||||
slot primitives.Slot
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want []*ethpb.SignedVoluntaryExit
|
||||
}{
|
||||
{
|
||||
name: "Empty list",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{},
|
||||
},
|
||||
args: args{
|
||||
slot: 100000,
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{},
|
||||
},
|
||||
{
|
||||
name: "All eligible",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 0}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 1}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 2}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 3}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 4}},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 1000000,
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 0}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 1}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 2}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 3}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 4}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "All eligible, above max",
|
||||
fields: fields{
|
||||
noLimit: true,
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 0}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 1}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 2}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 3}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 4}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 5}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 6}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 7}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 8}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 9}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 10}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 11}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 12}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 13}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 14}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 15}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 16}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 17}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 18}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 19}},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 1000000,
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 0}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 1}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 2}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 3}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 4}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 5}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 6}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 7}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 8}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 9}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 10}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 11}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 12}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 13}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 14}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 15}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 16}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 17}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 18}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 19}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "All eligible, block max",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 0}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 1}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 2}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 3}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 4}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 5}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 6}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 7}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 8}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 9}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 10}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 11}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 12}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 13}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 14}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 15}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 16}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 17}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 18}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 19}},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 1000000,
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 0}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 1}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 2}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 3}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 4}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 5}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 6}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 7}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 8}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 9}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 10}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 11}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 12}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 13}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 14}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 15}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Some eligible",
|
||||
fields: fields{
|
||||
pending: []*ethpb.SignedVoluntaryExit{
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 0}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 3}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 4}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 2}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 1}},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 2 * params.BeaconConfig().SlotsPerEpoch,
|
||||
},
|
||||
want: []*ethpb.SignedVoluntaryExit{
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 0}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 2}},
|
||||
{Exit: ðpb.VoluntaryExit{Epoch: 1}},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p := &Pool{
|
||||
pending: tt.fields.pending,
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{Validators: []*ethpb.Validator{{ExitEpoch: params.BeaconConfig().FarFutureEpoch}}})
|
||||
require.NoError(t, err)
|
||||
if got := p.PendingExits(s, tt.args.slot, tt.fields.noLimit); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("PendingExits() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -86,7 +86,6 @@ go_library(
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/muxer/mplex:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/protocol/identify:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
|
||||
@@ -14,7 +14,6 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
@@ -45,7 +44,6 @@ go_test(
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/features"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -13,11 +12,6 @@ func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(io.Discard)
|
||||
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnablePeerScorer: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
resetFlags := flags.Get()
|
||||
flags.Init(&flags.GlobalFlags{
|
||||
BlockBatchLimit: 64,
|
||||
|
||||
@@ -15,7 +15,6 @@ go_library(
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -40,7 +39,6 @@ go_test(
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/peerdata"
 	"github.com/prysmaticlabs/prysm/v3/cmd/beacon-chain/flags"
-	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/prysmaticlabs/prysm/v3/crypto/rand"
 	prysmTime "github.com/prysmaticlabs/prysm/v3/time"
 )
@@ -291,9 +290,6 @@ func (s *BlockProviderScorer) mapScoresAndPeers(
 func (s *BlockProviderScorer) FormatScorePretty(pid peer.ID) string {
 	s.store.RLock()
 	defer s.store.RUnlock()
-	if !features.Get().EnablePeerScorer {
-		return "disabled"
-	}
 	score := s.score(pid)
 	return fmt.Sprintf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]",
 		(score/s.MaxScore())*100, score, s.processedBlocks(pid), s.config.ProcessedBlocksCap)
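The body kept above formats a peer's block-provider score as a percentage of the maximum attainable score. The arithmetic in isolation, with hypothetical numbers (standard library fmt only):

	score, maxScore := 0.05, 0.2                  // hypothetical raw and max scores
	processed, capacity := int64(128), int64(512) // hypothetical processed-block counts
	fmt.Printf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]\n",
		(score/maxScore)*100, score, processed, capacity)
	// prints: [25.0%, raw: 0.05, blocks: 128/512]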
@@ -11,7 +11,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/scorers"
 	"github.com/prysmaticlabs/prysm/v3/cmd/beacon-chain/flags"
-	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/prysmaticlabs/prysm/v3/crypto/rand"
 	"github.com/prysmaticlabs/prysm/v3/testing/assert"
 	"github.com/prysmaticlabs/prysm/v3/time"
@@ -460,16 +459,6 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
 			tt.check(scorer)
 		})
 	}
-
-	t.Run("peer scorer disabled", func(t *testing.T) {
-		resetCfg := features.InitWithReset(&features.Flags{
-			EnablePeerScorer: false,
-		})
-		defer resetCfg()
-		peerStatuses := peerStatusGen()
-		scorer := peerStatuses.Scorers().BlockProviderScorer()
-		assert.Equal(t, "disabled", scorer.FormatScorePretty("peer1"))
-	})
 }
 
 func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
@@ -7,7 +7,6 @@ import (
 
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/scorers"
 	"github.com/prysmaticlabs/prysm/v3/cmd/beacon-chain/flags"
-	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/sirupsen/logrus"
 )
 
@@ -15,11 +14,6 @@ func TestMain(m *testing.M) {
 	logrus.SetLevel(logrus.DebugLevel)
 	logrus.SetOutput(io.Discard)
 
-	resetCfg := features.InitWithReset(&features.Flags{
-		EnablePeerScorer: true,
-	})
-	defer resetCfg()
-
 	resetFlags := flags.Get()
 	flags.Init(&flags.GlobalFlags{
 		BlockBatchLimit: 64,
@@ -7,7 +7,6 @@ import (
 
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/peerdata"
-	"github.com/prysmaticlabs/prysm/v3/config/features"
 )
 
 var _ Scorer = (*Service)(nil)
@@ -138,10 +137,8 @@ func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
 	if s.scorers.peerStatusScorer.isBadPeer(pid) {
 		return true
 	}
-	if features.Get().EnablePeerScorer {
-		if s.scorers.gossipScorer.isBadPeer(pid) {
-			return true
-		}
-	}
+	if s.scorers.gossipScorer.isBadPeer(pid) {
+		return true
+	}
 	return false
 }
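With the gate removed, IsBadPeerNoLock reduces to a plain disjunction over the individual scorers. A condensed sketch of the resulting shape (the scorer interface and string peer IDs are simplifications, not the package's real types):

	type scorer interface {
		isBadPeer(pid string) bool
	}

	// isBad is the ungated check: any scorer flagging the peer is enough.
	func isBad(pid string, scorers ...scorer) bool {
		for _, s := range scorers {
			if s.isBadPeer(pid) {
				return true
			}
		}
		return false
	}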
@@ -36,7 +36,6 @@ import (
 	"github.com/prysmaticlabs/go-bitfield"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/peerdata"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/scorers"
-	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v3/crypto/rand"
@@ -544,11 +543,6 @@ func (p *Status) Prune() {
 	p.store.Lock()
 	defer p.store.Unlock()
 
-	// Default to old method if flag isnt enabled.
-	if !features.Get().EnablePeerScorer {
-		p.deprecatedPrune()
-		return
-	}
 	// Exit early if there is nothing to prune.
 	if len(p.store.Peers()) <= p.store.Config().MaxPeers {
 		return
@@ -593,52 +587,6 @@
 	p.tallyIPTracker()
 }
 
-// Deprecated: This is the old peer pruning method based on
-// bad response counts.
-func (p *Status) deprecatedPrune() {
-	// Exit early if there is nothing to prune.
-	if len(p.store.Peers()) <= p.store.Config().MaxPeers {
-		return
-	}
-
-	notBadPeer := func(peerData *peerdata.PeerData) bool {
-		return peerData.BadResponses < p.scorers.BadResponsesScorer().Params().Threshold
-	}
-	type peerResp struct {
-		pid     peer.ID
-		badResp int
-	}
-	peersToPrune := make([]*peerResp, 0)
-	// Select disconnected peers with a smaller bad response count.
-	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) {
-			peersToPrune = append(peersToPrune, &peerResp{
-				pid:     pid,
-				badResp: peerData.BadResponses,
-			})
-		}
-	}
-
-	// Sort peers in ascending order, so the peers with the
-	// least amount of bad responses are pruned first. This
-	// is to protect the node from malicious/lousy peers so
-	// that their memory is still kept.
-	sort.Slice(peersToPrune, func(i, j int) bool {
-		return peersToPrune[i].badResp < peersToPrune[j].badResp
-	})
-
-	limitDiff := len(p.store.Peers()) - p.store.Config().MaxPeers
-	if limitDiff > len(peersToPrune) {
-		limitDiff = len(peersToPrune)
-	}
-	peersToPrune = peersToPrune[:limitDiff]
-	// Delete peers from map.
-	for _, peerData := range peersToPrune {
-		p.store.DeletePeerData(peerData.pid)
-	}
-	p.tallyIPTracker()
-}
-
 // BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
 // upon by the majority of peers. This method may not return the absolute highest finalized, but
 // the finalized epoch in which most peers can serve blocks (plurality voting).
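The deleted deprecatedPrune boils down to: take disconnected peers still under the bad-response threshold, sort them ascending by bad responses, and delete only as many as needed to get back under MaxPeers. A self-contained sketch of that selection, with simplified stand-in types (standard library sort only):

	type peerInfo struct {
		id           string
		badResponses int
		disconnected bool
	}

	// pruneCandidates mirrors the deleted selection: disconnected peers
	// below the threshold, fewest bad responses first, capped at the
	// overshoot beyond maxPeers.
	func pruneCandidates(all []peerInfo, maxPeers, threshold int) []string {
		if len(all) <= maxPeers {
			return nil
		}
		cand := make([]peerInfo, 0, len(all))
		for _, p := range all {
			if p.disconnected && p.badResponses < threshold {
				cand = append(cand, p)
			}
		}
		sort.Slice(cand, func(i, j int) bool {
			return cand[i].badResponses < cand[j].badResponses
		})
		if n := len(all) - maxPeers; n < len(cand) {
			cand = cand[:n]
		}
		ids := make([]string, 0, len(cand))
		for _, p := range cand {
			ids = append(ids, p.id)
		}
		return ids
	}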
@@ -746,9 +694,6 @@ func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (
 // bad response count. In the future scoring will be used
 // to determine the most suitable peers to take out.
 func (p *Status) PeersToPrune() []peer.ID {
-	if !features.Get().EnablePeerScorer {
-		return p.deprecatedPeersToPrune()
-	}
 	connLimit := p.ConnectedPeerLimit()
 	inBoundLimit := uint64(p.InboundLimit())
 	activePeers := p.Active()
@@ -812,71 +757,6 @@ func (p *Status) PeersToPrune() []peer.ID {
 	return ids
 }
 
-// Deprecated: Is used to represent the older method
-// of pruning which utilized bad response counts.
-func (p *Status) deprecatedPeersToPrune() []peer.ID {
-	connLimit := p.ConnectedPeerLimit()
-	inBoundLimit := p.InboundLimit()
-	activePeers := p.Active()
-	numInboundPeers := len(p.InboundConnected())
-	// Exit early if we are still below our max
-	// limit.
-	if uint64(len(activePeers)) <= connLimit {
-		return []peer.ID{}
-	}
-	p.store.Lock()
-	defer p.store.Unlock()
-
-	type peerResp struct {
-		pid     peer.ID
-		badResp int
-	}
-	peersToPrune := make([]*peerResp, 0)
-	// Select connected and inbound peers to prune.
-	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnected &&
-			peerData.Direction == network.DirInbound {
-			peersToPrune = append(peersToPrune, &peerResp{
-				pid:     pid,
-				badResp: peerData.BadResponses,
-			})
-		}
-	}
-
-	// Sort in descending order to favour pruning peers with a
-	// higher bad response count.
-	sort.Slice(peersToPrune, func(i, j int) bool {
-		return peersToPrune[i].badResp > peersToPrune[j].badResp
-	})
-
-	// Determine amount of peers to prune using our
-	// max connection limit.
-	amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
-	if err != nil {
-		// This should never happen
-		log.WithError(err).Error("Failed to determine amount of peers to prune")
-		return []peer.ID{}
-	}
-	// Also check for inbound peers above our limit.
-	excessInbound := uint64(0)
-	if numInboundPeers > inBoundLimit {
-		excessInbound = uint64(numInboundPeers - inBoundLimit)
-	}
-	// Prune the largest amount between excess peers and
-	// excess inbound peers.
-	if excessInbound > amountToPrune {
-		amountToPrune = excessInbound
-	}
-	if amountToPrune < uint64(len(peersToPrune)) {
-		peersToPrune = peersToPrune[:amountToPrune]
-	}
-	ids := make([]peer.ID, 0, len(peersToPrune))
-	for _, pr := range peersToPrune {
-		ids = append(ids, pr.pid)
-	}
-	return ids
-}
-
 // HighestEpoch returns the highest epoch reported epoch amongst peers.
 func (p *Status) HighestEpoch() primitives.Epoch {
 	p.store.RLock()
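The deleted deprecatedPeersToPrune sizes its prune set as the larger of two excesses, and only once the node is actually over its connection limit. The sizing arithmetic in isolation (a sketch, not the package's API):

	// amountToPrune mirrors the deleted sizing logic: nothing is pruned
	// below connLimit; otherwise prune the larger of the two excesses.
	func amountToPrune(active, connLimit, inbound, inboundLimit int) int {
		if active <= connLimit {
			return 0
		}
		n := active - connLimit
		if excess := inbound - inboundLimit; excess > n {
			n = excess
		}
		return n
	}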
@@ -15,7 +15,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/peerdata"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/scorers"
-	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/wrapper"
@@ -549,10 +548,6 @@ func TestPrune(t *testing.T) {
 }
 
 func TestPeerIPTracker(t *testing.T) {
-	resetCfg := features.InitWithReset(&features.Flags{
-		EnablePeerScorer: false,
-	})
-	defer resetCfg()
 	maxBadResponses := 2
 	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
 		PeerLimit: 30,
@@ -587,7 +582,7 @@ func TestPeerIPTracker(t *testing.T) {
 	p.Prune()
 
 	for _, pr := range badPeers {
-		assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
+		assert.Equal(t, true, p.IsBad(pr), "peer with good ip is regarded as bad")
 	}
 }
@@ -691,10 +686,6 @@ func TestAtInboundPeerLimit(t *testing.T) {
 }
 
 func TestPrunePeers(t *testing.T) {
-	resetCfg := features.InitWithReset(&features.Flags{
-		EnablePeerScorer: false,
-	})
-	defer resetCfg()
 	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
 		PeerLimit: 30,
 		ScorerParams: &scorers.Config{
@@ -745,13 +736,11 @@ func TestPrunePeers(t *testing.T) {
 	}
 
 	// Ensure it is in the descending order.
-	currCount, err := p.Scorers().BadResponsesScorer().Count(peersToPrune[0])
-	require.NoError(t, err)
+	currScore := p.Scorers().Score(peersToPrune[0])
 	for _, pid := range peersToPrune {
-		count, err := p.Scorers().BadResponsesScorer().Count(pid)
-		require.NoError(t, err)
-		assert.Equal(t, true, currCount >= count)
-		currCount = count
+		score := p.Scorers().BadResponsesScorer().Score(pid)
+		assert.Equal(t, true, currScore >= score)
+		currScore = score
 	}
 }
@@ -17,7 +17,6 @@ import (
 	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/protocol"
-	"github.com/libp2p/go-libp2p/p2p/protocol/identify"
 	"github.com/multiformats/go-multiaddr"
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v3/async"
@@ -133,7 +132,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
 	}
 
 	s.host = h
-	s.host.RemoveStreamHandler(identify.IDDelta)
 	// Gossipsub registration is done before we add in any new peers
 	// due to libp2p's gossipsub implementation not taking into
 	// account previously added peers when creating the gossipsub
@@ -165,7 +165,7 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
 	require.NoError(t, err)
 
 	s.stateNotifier = &mock.MockStateNotifier{}
 
 	// required params to addForkEntry in s.forkWatcher
 	s.genesisTime = time.Now()
 	beaconCfg := params.BeaconConfig().Copy()
@@ -51,7 +51,8 @@ func (_ *MockHost) Connect(_ context.Context, _ peer.AddrInfo) error {
 func (_ *MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {}
 
 // SetStreamHandlerMatch --
-func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(string) bool, network.StreamHandler) {}
+func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) {
+}
 
 // RemoveStreamHandler --
 func (_ *MockHost) RemoveStreamHandler(_ protocol.ID) {}
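The mock change tracks an upstream libp2p API update: SetStreamHandlerMatch's match callback now receives a protocol.ID instead of a string. A sketch of a matcher written against the new signature (matchPrefix is illustrative, not part of this codebase):

	import (
		"strings"

		"github.com/libp2p/go-libp2p/core/protocol"
	)

	// matchPrefix selects any protocol ID beginning with prefix, using
	// the new func(protocol.ID) bool shape.
	func matchPrefix(prefix string) func(protocol.ID) bool {
		return func(id protocol.ID) bool {
			return strings.HasPrefix(string(id), prefix)
		}
	}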
@@ -46,6 +46,7 @@ pkg_deb(
     package = "prysm-beacon-chain",
     postinst = "postinst.sh",
     preinst = "preinst.sh",
+    tags = ["no-remote"],
     version_file = "//runtime:version_file",
     visibility = ["//beacon-chain:__pkg__"],
 )
@@ -19,6 +19,7 @@ go_library(
         "//config/params:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//proto/eth/v2:go_default_library",
         "//runtime/version:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_r3labs_sse//:go_default_library",
Some files were not shown because too many files have changed in this diff.