Compare commits


1 Commit

Author: Potuz
SHA1: 76d1ca4a86
Message: Init sync fixes
- Only save finalized checkpoint to DB if it's newer than previous checkpoint
- Do not save Justified checkpoints to DB
Date: 2022-08-30 12:05:27 -03:00
2635 changed files with 69714 additions and 116017 deletions

.bazelrc

@@ -1,9 +1,9 @@
# Import bazelrc presets
import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# Print warnings for tests with inappropriate test size or timeout.
test --test_verbose_timeout_warnings
# Only build test targets when running bazel test //...
test --build_tests_only
test --test_output=errors
# E2E run with debug gotag
test:e2e --define gotags=debug
@@ -11,10 +11,26 @@ test:e2e --define gotags=debug
# Clearly indicate that coverage is enabled to disable certain nogo checks.
coverage --define=coverage_enabled=1
# Fix for rules_docker. See: https://github.com/bazelbuild/rules_docker/issues/842
build --host_force_python=PY2
test --host_force_python=PY2
run --host_force_python=PY2
# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
# is required within the sandbox. Network sandboxing only works on linux.
build --sandbox_default_allow_network=false
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --stamp
# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
test --incompatible_strict_action_env
run --incompatible_strict_action_env
build --define blst_disabled=false
test --define blst_disabled=false
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true
@@ -25,7 +41,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
# Release flags
build:release --compilation_mode=opt
build:release --stamp
build:release --config=llvm
# LLVM compiler for building C/C++ dependencies.
build:llvm --define compiler=llvm
@@ -56,6 +72,42 @@ build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# multi-arch cross-compiling toolchain configs:
#-----------------------------------------------
build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain
build:cross --host_platform=@io_bazel_rules_go//go/toolchain:linux_amd64
build:cross --host_crosstool_top=@prysm_toolchains//:hostonly_toolchain
# linux_amd64 config for cross compiler toolchain, not strictly necessary since host/exec env is amd64
build:linux_amd64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64_cgo
# osx_amd64 config for cross compiler toolchain
build:osx_amd64 --config=cross
build:osx_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64_cgo
build:osx_amd64 --compiler=osxcross
# osx_arm64 config for cross compiler toolchain
build:osx_arm64 --config=cross
build:osx_arm64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64_cgo
build:osx_arm64 --compiler=osxcross
build:osx_arm64 --cpu=aarch64
# windows
build:windows_amd64 --config=cross
build:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64_cgo
build:windows_amd64 --compiler=mingw-w64
# linux_arm64 conifg for cross compiler toolchain
build:linux_arm64 --config=cross
build:linux_arm64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo
build:linux_arm64 --copt=-funsafe-math-optimizations
build:linux_arm64 --copt=-ftree-vectorize
build:linux_arm64 --copt=-fomit-frame-pointer
build:linux_arm64 --cpu=aarch64
build:linux_arm64 --compiler=clang
build:linux_arm64 --copt=-march=armv8-a
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
@@ -63,5 +115,123 @@ build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s
# windows debug
build:windows_amd64_debug --config=windows_amd64
build:windows_amd64_debug --config=debug
# osx_amd64 debug config
build:osx_amd64_debug --config=debug
build:osx_amd64_debug --config=osx_amd64
# osx_arm64 debug config
build:osx_arm64_debug --config=debug
build:osx_arm64_debug --config=osx_arm64
# linux_arm64_debug
build:linux_arm64_debug --config=linux_arm64
build:linux_arm64_debug --config=debug
# linux_amd64_debug
build:linux_amd64_debug --config=linux_amd64
build:linux_amd64_debug --config=debug
# Docker Sandbox Configs
#-----------------------
# Note all docker sandbox configs must run from a linux x86_64 host
# build:docker-sandbox --experimental_docker_image=gcr.io/prysmaticlabs/rbe-worker:latest
build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
build:docker-sandbox --define=EXECUTOR=remote
build:docker-sandbox --experimental_docker_verbose
build:docker-sandbox --experimental_enable_docker_sandbox
build:docker-sandbox --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:docker-sandbox --host_javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --host_platform=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --extra_toolchains=@prysm_toolchains//:cc-toolchain-multiarch
# windows_amd64 docker sandbox build config
build:windows_amd64_docker --config=docker-sandbox --config=windows_amd64
build:windows_amd64_docker_debug --config=windows_amd64_docker --config=debug
# osx_amd64 docker sandbox build config
build:osx_amd64_docker --config=docker-sandbox --config=osx_amd64
build:osx_amd64_docker_debug --config=osx_amd64_docker --config=debug
# osx_arm64 docker sandbox build config
build:osx_arm64_docker --config=docker-sandbox --config=osx_arm64
build:osx_arm64_docker_debug --config=osx_arm64_docker --config=debug
# linux_arm64 docker sandbox build config
build:linux_arm64_docker --config=docker-sandbox --config=linux_arm64
build:linux_arm64_docker_debug --config=linux_arm64_docker --config=debug
# linux_amd64 docker sandbox build config
build:linux_amd64_docker --config=docker-sandbox --config=linux_amd64
build:linux_amd64_docker_debug --config=linux_amd64_docker --config=debug
# Remote Build Execution
#-----------------------
# Originally from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/bazel-2.0.0.bazelrc
#
# Depending on how many machines are in the remote execution instance, setting
# this higher can make builds faster by allowing more jobs to run in parallel.
# Setting it too high can result in jobs that timeout, however, while waiting
# for a remote machine to execute them.
build:remote --jobs=50
# Set several flags related to specifying the platform, toolchain and java
# properties.
# These flags should only be used as is for the rbe-ubuntu16-04 container
# and need to be adapted to work with other toolchain containers.
build:remote --host_javabase=@rbe_ubuntu_clang//java:jdk
build:remote --javabase=@rbe_ubuntu_clang//java:jdk
build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
# Platform flags:
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
build:remote --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
build:remote --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:remote --host_platform=@rbe_ubuntu_clang//config:platform
build:remote --platforms=@rbe_ubuntu_clang//config:platform
# Starting with Bazel 0.27.0 strategies do not need to be explicitly
# defined. See https://github.com/bazelbuild/bazel/issues/7480
build:remote --define=EXECUTOR=remote
# Enable remote execution so actions are performed on the remote systems.
# build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote --incompatible_strict_action_env=true
# Set a higher timeout value, just in case.
build:remote --remote_timeout=3600
# Enable authentication. This will pick up application default credentials by
# default. You can use --google_credentials=some_file.json to use a service
# account credential instead.
# build:remote --google_default_credentials=true
# Enable build without the bytes
# See: https://github.com/bazelbuild/bazel/issues/6862
build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
# Set bazel gotag
build --define gotags=bazel

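A rough usage sketch of how the configs above compose on the command line; the --config names are taken from this file, while the build targets are illustrative assumptions:

bazel build --config=release //cmd/beacon-chain
bazel test --config=e2e //...
bazel build --config=linux_arm64_docker //cmd/beacon-chain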

@@ -1 +1 @@
6.1.0
5.0.0


@@ -43,12 +43,4 @@ build --flaky_test_attempts=5
# Better caching
build:nostamp --nostamp
# Build metadata
build --build_metadata=ROLE=CI
build --build_metadata=REPO_URL=https://github.com/prysmaticlabs/prysm.git
build --workspace_status_command=./hack/workspace_status_ci.sh
# Buildbuddy
build --bes_results_url=https://app.buildbuddy.io/invocation/
build --bes_backend=grpcs://remote.buildbuddy.io
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh


@@ -11,7 +11,7 @@ name = "go"
enabled = true
[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm/v4"]
import_paths = ["github.com/prysmaticlabs/prysm/v3"]
[[analyzers]]
name = "test-coverage"

.github/CODEOWNERS

@@ -5,4 +5,12 @@
*.bzl @prestonvanloon
# Anyone on prylabs team can approve dependency updates.
deps.bzl @prysmaticlabs/core-team
deps.bzl @prysmaticlabs/core-team
# Radek and Nishant are responsible for changes that can affect the native state feature.
# See https://www.notion.so/prysmaticlabs/Native-Beacon-State-Redesign-6cc9744b4ec1439bb34fa829b36a35c1
/beacon-chain/state/fieldtrie/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v1/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v2/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v3/ @rkapka @nisdas @rauljordan
/beacon-chain/state/state-native/ @rkapka @nisdas @rauljordan


@@ -16,12 +16,12 @@ Existing issues often contain information about workarounds, resolution, or prog
### Description
<!-- ✍️ A clear and concise description of the problem or missing capability... -->
<!-- ✍️--> A clear and concise description of the problem or missing capability...
### Describe the solution you'd like
<!-- ✍️ If you have a solution in mind, please describe it. -->
<!-- ✍️--> If you have a solution in mind, please describe it.
### Describe alternatives you've considered
<!-- ✍️ Have you considered any alternative solutions or workarounds? -->
<!-- ✍️--> Have you considered any alternative solutions or workarounds?


@@ -26,14 +26,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.20
- name: Set up Go 1.18
uses: actions/setup-go@v3
with:
go-version: '1.20'
go-version: 1.18
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
@@ -43,16 +43,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.20
- name: Set up Go 1.18
uses: actions/setup-go@v3
with:
go-version: '1.20'
go-version: 1.18
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.52.2
version: v1.47.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
@@ -62,7 +62,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: '1.20'
go-version: 1.18
id: go
- name: Check out code into the Go module directory

.gitignore

@@ -38,6 +38,3 @@ metaData
# execution API authentication
jwt.hex
# manual testing
tmp


@@ -6,7 +6,7 @@ run:
- proto
- tools/analyzers
timeout: 10m
go: '1.19'
go: '1.18'
linters:
disable-all: true
@@ -17,10 +17,6 @@ linters:
- errcheck
- gosimple
- gocognit
- dupword
- nilerr
- whitespace
- misspell
linters-settings:
gocognit:


@@ -12,7 +12,7 @@ exports_files([
"LICENSE.md",
])
# gazelle:prefix github.com/prysmaticlabs/prysm/v4
# gazelle:prefix github.com/prysmaticlabs/prysm/v3
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl


@@ -1,4 +1,4 @@
# Dependency Management in Prysm
# Dependency Managagement in Prysm
Prysm is go project with many complicated dependencies, including some c++ based libraries. There
are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
@@ -28,7 +28,7 @@ including complicated c++ dependencies.
One key advantage of Bazel over vanilla `go build` is that Bazel automatically (re)builds generated
pb.go files at build time when file changes are present in any protobuf definition file or after
any updates to the protobuf compiler or other relevant dependencies. Vanilla go users should run
the following scripts often to ensure their generated files are up to date. Furthermore, Prysm
the following scripts often to ensure their generated files are up to date. Further more, Prysm
generates SSZ marshal related code based on defined data structures. These generated files must
also be updated and checked in as frequently.


@@ -2,16 +2,16 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![Consensus_Spec_Version 1.2.0-rc.3](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.2.0.rc.3-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.3)
[![Execution_API_Version 1.0.0-beta.1](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.1-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.1/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
### Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/prysmaticlabs).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).
### Staking on Mainnet

WORKSPACE

@@ -4,18 +4,15 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
http_archive(
name = "rules_pkg",
sha256 = "8c20f74bca25d2d442b327ae26768c02cf3c99e93fad0381f32be9aab1967675",
name = "bazel_toolchains",
sha256 = "8e0633dfb59f704594f19ae996a35650747adc621ada5e8b9fb588f808c89cb0",
strip_prefix = "bazel-toolchains-3.7.0",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
"https://github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
],
)
load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
http_archive(
name = "com_grail_bazel_toolchain",
sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
@@ -42,6 +39,10 @@ load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_tool
configure_prysm_toolchains()
load("@prysm//tools/cross-toolchain:rbe_toolchains_config.bzl", "rbe_toolchains_config")
rbe_toolchains_config()
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
@@ -75,8 +76,9 @@ http_archive(
http_archive(
name = "io_bazel_rules_docker",
sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
sha256 = "1f4e59843b61981a96835dc4ac377ad4da9f8c334ebe5e0bb3f58f80c09735f4",
strip_prefix = "rules_docker-0.19.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.19.0/rules_docker-v0.19.0.tar.gz"],
)
http_archive(
@@ -86,10 +88,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
sha256 = "16e9fca53ed6bd4ff4ad76facc9b7b651a89db1689a2877d6fd7b82aa824e366",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.34.0/rules_go-v0.34.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.34.0/rules_go-v0.34.0.zip",
],
)
@@ -108,6 +110,13 @@ git_repository(
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
http_archive(
name = "fuzzit_linux",
build_file_content = "exports_files([\"fuzzit\"])",
sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774",
urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
@@ -120,36 +129,32 @@ load(
"container_pull",
)
# Pulled gcr.io/distroless/cc-debian11:latest on 2022-02-23
container_pull(
name = "cc_image_base_amd64",
digest = "sha256:2a0daf90a7deb78465bfca3ef2eee6e91ce0a5706059f05d79d799a51d339523",
name = "cc_image_base",
digest = "sha256:2c4bb6b7236db0a55ec54ba8845e4031f5db2be957ac61867872bf42e56c4deb",
registry = "gcr.io",
repository = "distroless/cc-debian11",
repository = "distroless/cc",
)
# Pulled gcr.io/distroless/cc-debian11:debug on 2022-02-23
container_pull(
name = "cc_debug_image_base_amd64",
digest = "sha256:7bd596f5f200588f13a69c268eea6ce428b222b67cd7428d6a7fef95e75c052a",
name = "cc_debug_image_base",
digest = "sha256:3680c61e81f68fc00bfb5e1ec65e8e678aaafa7c5f056bc2681c29527ebbb30c",
registry = "gcr.io",
repository = "distroless/cc-debian11",
repository = "distroless/cc",
)
# Pulled from gcr.io/distroless/base-debian11:latest on 2022-02-23
container_pull(
name = "go_image_base_amd64",
digest = "sha256:34e682800774ecbd0954b1663d90238505f1ba5543692dbc75feef7dd4839e90",
name = "go_image_base",
digest = "sha256:ba7a315f86771332e76fa9c3d423ecfdbb8265879c6f1c264d6fff7d4fa460a4",
registry = "gcr.io",
repository = "distroless/base-debian11",
repository = "distroless/base",
)
# Pulled from gcr.io/distroless/base-debian11:debug on 2022-02-23
container_pull(
name = "go_debug_image_base_amd64",
digest = "sha256:0f503c6bfd207793bc416f20a35bf6b75d769a903c48f180ad73f60f7b60d7bd",
name = "go_debug_image_base",
digest = "sha256:efd8711717d9e9b5d0dbb20ea10876dab0609c923bc05321b912f9239090ca80",
registry = "gcr.io",
repository = "distroless/base-debian11",
repository = "distroless/base",
)
container_pull(
@@ -159,15 +164,35 @@ container_pull(
repository = "pinglamb/alpine-glibc",
)
container_pull(
name = "fuzzit_base",
digest = "sha256:24a39a4360b07b8f0121eb55674a2e757ab09f0baff5569332fefd227ee4338f",
registry = "gcr.io",
repository = "fuzzit-public/stretch-llvm8",
)
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
go_rules_dependencies()
go_register_toolchains(
go_version = "1.20.3",
go_version = "1.18.5",
nogo = "@//:nogo",
)
http_archive(
name = "prysm_testnet_site",
build_file_content = """
proto_library(
name = "faucet_proto",
srcs = ["src/proto/faucet.proto"],
visibility = ["//visibility:public"],
)""",
sha256 = "29742136ff9faf47343073c4569a7cf21b8ed138f726929e09e3c38ab83544f7",
strip_prefix = "prysm-testnet-site-5c711600f0a77fc553b18cf37b880eaffef4afdb",
url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/5c711600f0a77fc553b18cf37b880eaffef4afdb.tar.gz",
)
http_archive(
name = "io_kubernetes_build",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -190,22 +215,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
http_archive(
name = "eip4881_spec_tests",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "89cb659498c0d196fc9f957f8b849b2e1a5c041c3b2b3ae5432ac5c26944297e",
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.3.0"
consensus_spec_version = "v1.2.0-rc.3"
bls_test_version = "v0.1.1"
@@ -221,7 +231,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
sha256 = "18ca21497f41042cdbe60e2333b100d218b2994fb514964b9deb23daf615a12f",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -237,7 +247,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
sha256 = "47b8f6fabe39b4a69f13054ba74e26ab51581ddbd359c18cf0f03317474e299c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -253,7 +263,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
sha256 = "a061efc05429b169393c32dc2633a948269461b0fe681f11d41e170a880dcc71",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -268,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
sha256 = "753d51c6a6cc6df101c897e4bea77f73b271f50aeda74440f412514d4bd88a86",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -299,9 +309,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2701e1e1a3ec10c673fe7dbdbbe6f02c8ae8c922aebbf6e720d8c72d5458aafe",
strip_prefix = "eth2-networks-7b4897888cebef23801540236f73123e21774954",
url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
sha256 = "82b01a48b143fe0f2fb7fb5f5dd385c1f934335a12d7954f08b1d45d77427b5e",
strip_prefix = "eth2-networks-674f7a1d01d9c18345456eab76e3871b3df2126b",
url = "https://github.com/eth-clients/eth2-networks/archive/674f7a1d01d9c18345456eab76e3871b3df2126b.tar.gz",
)
http_archive(
@@ -332,9 +342,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
sha256 = "e0c0b5dc609b3a221e74c720f483c595441f2ad5e38bb8aa3522636039945a6f",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.1/prysm-web-ui.tar.gz",
],
)


@@ -8,7 +8,7 @@ go_library(
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
importpath = "github.com/prysmaticlabs/prysm/v3/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
@@ -20,7 +20,6 @@ go_library(
"//encoding/ssz/detect:go_default_library",
"//io/file:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",


@@ -6,25 +6,25 @@ import (
"path"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v3/io/file"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
log "github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
sb []byte
bb []byte
st state.BeaconState
b interfaces.ReadOnlySignedBeaconBlock
b interfaces.SignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
@@ -49,12 +49,12 @@ func (o *OriginData) StateBytes() []byte {
return o.sb
}
// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
// BlockBytes returns the ssz-encoded bytes of the downloaded SignedBeaconBlock value.
func (o *OriginData) BlockBytes() []byte {
return o.bb
}
func fname(prefix string, vu *detect.VersionedUnmarshaler, slot primitives.Slot, root [32]byte) string {
func fname(prefix string, vu *detect.VersionedUnmarshaler, slot types.Slot, root [32]byte) string {
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
}
@@ -74,9 +74,6 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}
if s.Slot() != s.LatestBlockHeader().Slot {
return nil, fmt.Errorf("finalized state slot does not match latest block header slot %d != %d", s.Slot(), s.LatestBlockHeader().Slot)
}
sr, err := s.HashTreeRoot(ctx)
if err != nil {
@@ -103,8 +100,8 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
}
log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", br, realBlockRoot)
return &OriginData{
st: s,
b: b,
@@ -116,17 +113,17 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
}, nil
}
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
BlockRoot [32]byte
StateRoot [32]byte
Epoch primitives.Epoch
Epoch types.Epoch
}
// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
@@ -238,7 +235,7 @@ func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubje
// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (primitives.Epoch, error) {
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
headBytes, err := client.GetState(ctx, IdHead)
if err != nil {
return 0, err

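A minimal sketch of how the OriginData accessors above might be consumed, assuming an already-constructed *beacon.Client and hypothetical output file names (import path per the v4 side of the diff):

package example

import (
	"context"
	"os"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/api/client/beacon"
)

// saveOrigin downloads the finalized checkpoint-sync origin data via
// DownloadFinalizedData and writes the ssz-encoded state and block to disk.
func saveOrigin(ctx context.Context, client *beacon.Client) error {
	od, err := beacon.DownloadFinalizedData(ctx, client)
	if err != nil {
		return errors.Wrap(err, "could not download finalized origin data")
	}
	// StateBytes and BlockBytes are the accessors defined in this file;
	// the output file names here are assumptions, not Prysm conventions.
	if err := os.WriteFile("origin_state.ssz", od.StateBytes(), 0o600); err != nil {
		return err
	}
	return os.WriteFile("origin_block.ssz", od.BlockBytes(), 0o600)
}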

@@ -10,21 +10,21 @@ import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks/testing"
"github.com/prysmaticlabs/prysm/v3/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
type testRT struct {
@@ -102,7 +102,7 @@ func TestFname(t *testing.T) {
Config: params.MainnetConfig(),
Fork: version.Phase0,
}
slot := primitives.Slot(23)
slot := types.Slot(23)
prefix := "block"
var root [32]byte
copy(root[:], []byte{0x23, 0x23, 0x23})
@@ -335,7 +335,7 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
require.Equal(t, expectedEpoch, actualEpoch)
}
func forkForEpoch(cfg *params.BeaconChainConfig, epoch primitives.Epoch) (*ethpb.Fork, error) {
func forkForEpoch(cfg *params.BeaconChainConfig, epoch types.Epoch) (*ethpb.Fork, error) {
os := forks.NewOrderedSchedule(cfg)
currentVersion, err := os.VersionForEpoch(epoch)
if err != nil {
@@ -357,7 +357,7 @@ func forkForEpoch(cfg *params.BeaconChainConfig, epoch primitives.Epoch) (*ethpb
}, nil
}
func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, primitives.Epoch) {
func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, types.Epoch) {
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
@@ -398,7 +398,11 @@ func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, val
if err := st.SetValidators(validators); err != nil {
return err
}
return st.SetBalances(balances)
if err := st.SetBalances(balances); err != nil {
return err
}
return nil
}
func TestDownloadFinalizedData(t *testing.T) {
@@ -414,7 +418,6 @@ func TestDownloadFinalizedData(t *testing.T) {
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))
// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())


@@ -16,28 +16,25 @@ import (
"text/template"
"time"
"github.com/prysmaticlabs/prysm/v4/network/forks"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v3/network/forks"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
const (
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getConfigSpecPath = "/eth/v1/config/spec"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
)
// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
@@ -64,7 +61,7 @@ func IdFromRoot(r [32]byte) StateOrBlockId {
// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromSlot(s primitives.Slot) StateOrBlockId {
func IdFromSlot(s types.Slot) StateOrBlockId {
return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
}
@@ -149,6 +146,7 @@ func withSSZEncoding() reqOption {
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
log.Printf("requesting %s", u.String())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
@@ -254,20 +252,6 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
return ofs, nil
}
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
body, err := c.get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
fsr := &v1.SpecResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
return fsr, nil
}
type NodeVersion struct {
implementation string
semver string
@@ -353,66 +337,12 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
return nil, err
}
return &WeakSubjectivityData{
Epoch: primitives.Epoch(epoch),
Epoch: types.Epoch(epoch),
BlockRoot: bytesutil.ToBytes32(blockRoot),
StateRoot: bytesutil.ToBytes32(stateRoot),
}, nil
}
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewBuffer(body))
if err != nil {
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.hc.Do(req)
if err != nil {
return err
}
defer func() {
err = resp.Body.Close()
}()
if resp.StatusCode != http.StatusOK {
decoder := json.NewDecoder(resp.Body)
decoder.DisallowUnknownFields()
errorJson := &apimiddleware.IndexedVerificationFailureErrorJson{}
if err := decoder.Decode(errorJson); err != nil {
return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
}
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
"validator_index": w.ValidatorIndex,
"withdrawal_address": w.ToExecutionAddress,
}).Error(failure.Message)
}
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
}
return nil
}
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
body, err := c.get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
err = json.Unmarshal(body, poolResponse)
if err != nil {
return nil, err
}
return poolResponse, nil
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
@@ -458,7 +388,7 @@ func (f *forkResponse) Fork() (*ethpb.Fork, error) {
return &ethpb.Fork{
CurrentVersion: cSlice,
PreviousVersion: pSlice,
Epoch: primitives.Epoch(epoch),
Epoch: types.Epoch(epoch),
}, nil
}
@@ -483,7 +413,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(uint64(epoch)),
Epoch: types.Epoch(uint64(epoch)),
})
}
sort.Sort(ofs)

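A short sketch tying GetWeakSubjectivity to CheckpointString from this file; the flag name printed in the output is an assumption:

package example

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client/beacon"
)

// printWSCheckpoint asks the beacon node for the current weak subjectivity
// data and prints it in the block_root:epoch form documented above.
func printWSCheckpoint(ctx context.Context, client *beacon.Client) error {
	wsd, err := client.GetWeakSubjectivity(ctx)
	if err != nil {
		return err
	}
	// e.g. --weak-subjectivity-checkpoint=0x1c35...:74888 (flag name assumed).
	fmt.Printf("--weak-subjectivity-checkpoint=%s\n", wsd.CheckpointString())
	return nil
}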

@@ -4,7 +4,7 @@ import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
func TestParseNodeVersion(t *testing.T) {


@@ -1,5 +1,6 @@
/*
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/
*/
package beacon


@@ -3,29 +3,20 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"bid.go",
"client.go",
"errors.go",
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
importpath = "github.com/prysmaticlabs/prysm/v3/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
@@ -41,12 +32,10 @@ go_test(
embed = [":go_default_library"],
deps = [
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",


@@ -1,201 +0,0 @@
package builder
import (
"math/big"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
// SignedBid is an interface describing the method set of a signed builder bid.
type SignedBid interface {
Message() (Bid, error)
Signature() []byte
Version() int
IsNil() bool
}
// Bid is an interface describing the method set of a builder bid.
type Bid interface {
Header() (interfaces.ExecutionData, error)
Value() []byte
Pubkey() []byte
Version() int
IsNil() bool
HashTreeRoot() ([32]byte, error)
HashTreeRootWith(hh *ssz.Hasher) error
}
type signedBuilderBid struct {
p *ethpb.SignedBuilderBid
}
// WrappedSignedBuilderBid is a constructor which wraps a protobuf signed bit into an interface.
func WrappedSignedBuilderBid(p *ethpb.SignedBuilderBid) (SignedBid, error) {
w := signedBuilderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBid) Message() (Bid, error) {
return WrappedBuilderBid(b.p.Message)
}
// Signature --
func (b signedBuilderBid) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBid) Version() int {
return version.Bellatrix
}
// IsNil --
func (b signedBuilderBid) IsNil() bool {
return b.p == nil
}
type signedBuilderBidCapella struct {
p *ethpb.SignedBuilderBidCapella
}
// WrappedSignedBuilderBidCapella is a constructor which wraps a protobuf signed bit into an interface.
func WrappedSignedBuilderBidCapella(p *ethpb.SignedBuilderBidCapella) (SignedBid, error) {
w := signedBuilderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBidCapella) Message() (Bid, error) {
return WrappedBuilderBidCapella(b.p.Message)
}
// Signature --
func (b signedBuilderBidCapella) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBidCapella) Version() int {
return version.Capella
}
// IsNil --
func (b signedBuilderBidCapella) IsNil() bool {
return b.p == nil
}
type builderBid struct {
p *ethpb.BuilderBid
}
// WrappedBuilderBid is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBid(p *ethpb.BuilderBid) (Bid, error) {
w := builderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Header --
func (b builderBid) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}
// Version --
func (b builderBid) Version() int {
return version.Bellatrix
}
// Value --
func (b builderBid) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBid) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBid) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBid) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBid) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
type builderBidCapella struct {
p *ethpb.BuilderBidCapella
}
// WrappedBuilderBidCapella is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
w := builderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
if b.p == nil {
return nil, errors.New("builder bid is nil")
}
// We have to convert big endian to little endian because the value is coming from the execution layer.
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
}
// Version --
func (b builderBidCapella) Version() int {
return version.Capella
}
// Value --
func (b builderBidCapella) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBidCapella) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBidCapella) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBidCapella) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBidCapella) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}

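A minimal sketch of how the SignedBid/Bid wrappers in the bid.go listing are used; the incoming protobuf value is assumed to come from an already-decoded builder response, and the import path follows the v4 side of the diff where bid.go exists:

package example

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// inspectBid wraps a decoded Capella builder bid in the version-aware
// interfaces from bid.go and reads the fields those interfaces expose.
func inspectBid(p *ethpb.SignedBuilderBidCapella) error {
	sb, err := builder.WrappedSignedBuilderBidCapella(p)
	if err != nil {
		return err
	}
	bid, err := sb.Message()
	if err != nil {
		return err
	}
	if _, err := bid.Header(); err != nil { // interfaces.ExecutionData wrapping the payload header
		return err
	}
	fmt.Printf("version=%d value=%#x pubkey=%#x\n", bid.Version(), bid.Value(), bid.Pubkey())
	return nil
}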

@@ -9,19 +9,14 @@ import (
"net"
"net/http"
"net/url"
"strings"
"text/template"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
v1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -35,12 +30,17 @@ const (
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var errNotBlinded = errors.New("submitted block is not blinded")
var submitBlindedBlockTimeout = 3 * time.Second
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
type observer interface {
observe(r *http.Request) error
}
@@ -83,15 +83,6 @@ func (*requestLogger) observe(r *http.Request) (e error) {
var _ observer = &requestLogger{}
// BuilderClient provides a collection of helper methods for calling Builder API endpoints.
type BuilderClient interface {
NodeURL() string
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
Status(ctx context.Context) error
}
// Client provides a collection of helper methods for calling Builder API endpoints.
type Client struct {
hc *http.Client
@@ -103,8 +94,7 @@ type Client struct {
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
endpoint := covertEndPoint(host)
u, err := urlForHost(endpoint.Url)
u, err := urlForHost(host)
if err != nil {
return nil, err
}
@@ -156,7 +146,6 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
if err != nil {
return
}
req.Header.Add("User-Agent", version.BuildData())
for _, o := range opts {
o(req)
}
@@ -189,9 +178,9 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))
func execHeaderPath(slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (string, error) {
func execHeaderPath(slot types.Slot, parentHash [32]byte, pubkey [48]byte) (string, error) {
v := struct {
Slot primitives.Slot
Slot types.Slot
ParentHash string
Pubkey string
}{
@@ -207,8 +196,8 @@ func execHeaderPath(slot primitives.Slot, parentHash [32]byte, pubkey [48]byte)
return b.String(), nil
}
// GetHeader is used by a proposing validator to request an execution payload header from the Builder node.
func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error) {
// GetHeader is used by a proposing validator to request an ExecutionPayloadHeader from the Builder node.
func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubkey [48]byte) (*ethpb.SignedBuilderBid, error) {
path, err := execHeaderPath(slot, parentHash, pubkey)
if err != nil {
return nil, err
@@ -217,34 +206,11 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
if err != nil {
return nil, err
}
v := &VersionResponse{}
if err := json.Unmarshal(hb, v); err != nil {
hr := &ExecHeaderResponse{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
switch strings.ToLower(v.Version) {
case strings.ToLower(version.String(version.Capella)):
hr := &ExecHeaderResponseCapella{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidCapella(p)
case strings.ToLower(version.String(version.Bellatrix)):
hr := &ExecHeaderResponse{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrap(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBid(p)
default:
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}
return hr.ToProto()
}
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
@@ -275,81 +241,22 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
}
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
if !sb.IsBlinded() {
return nil, errNotBlinded
// The response is the full ExecutionPayload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
v := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: sb}
body, err := json.Marshal(v)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
switch sb.Version() {
case version.Bellatrix:
psb, err := sb.PbBlindedBellatrixBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayload(p)
case version.Capella:
psb, err := sb.PbBlindedCapellaBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
}
ep := &ExecPayloadResponseCapella{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayloadCapella(p, 0)
default:
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
return ep.ToProto()
}
// Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy
@@ -367,6 +274,9 @@ func non200Err(response *http.Response) error {
if err != nil {
body = "(Unable to read response body.)"
} else {
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
@@ -375,34 +285,13 @@ func non200Err(response *http.Response) error {
log.WithError(ErrNoContent).Debug(msg)
return ErrNoContent
case 400:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrBadRequest).Debug(msg)
return errors.Wrap(ErrBadRequest, errMessage.Message)
case 404:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrNotFound).Debug(msg)
return errors.Wrap(ErrNotFound, errMessage.Message)
case 500:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, errMessage.Message)
default:
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
}
}
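For reference, the error body that non200Err decodes is the plain {"code","message"} object declared as ErrorMessage at the end of the types file later in this diff. A small self-contained decoding sketch (the literal payload is illustrative):

package main

import (
    "encoding/json"
    "fmt"
)

// ErrorMessage mirrors the builder error shape shown in the types file.
type ErrorMessage struct {
    Code    int    `json:"code"`
    Message string `json:"message"`
}

func main() {
    body := []byte(`{"code":400,"message":"invalid signature"}`)
    var em ErrorMessage
    if err := json.Unmarshal(body, &em); err != nil {
        panic(err)
    }
    fmt.Printf("code=%d message=%q\n", em.Code, em.Message)
}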
func covertEndPoint(ep string) network.Endpoint {
return network.Endpoint{
Url: ep,
Auth: network.AuthorizationData{ // Auth is not used for builder.
Method: authorization.None,
Value: "",
}}
}

View File

@@ -6,21 +6,18 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net/http"
"net/url"
"strconv"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
type roundtrip func(*http.Request) (*http.Response, error)
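The tests below hand this function type to http.Client as its Transport. The adapter method that makes it satisfy http.RoundTripper sits outside the displayed hunk, so the sketch below assumes the usual one-line wiring:

package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

type roundtrip func(*http.Request) (*http.Response, error)

// RoundTrip lets a bare function satisfy http.RoundTripper (assumed wiring).
func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) { return fn(r) }

func main() {
    hc := &http.Client{
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
            // Serve a canned response without touching the network.
            return &http.Response{
                StatusCode: http.StatusOK,
                Body:       io.NopCloser(strings.NewReader("ok")),
                Request:    r,
            }, nil
        }),
    }
    resp, err := hc.Get("http://stubbed.invalid/eth/v1/builder/status")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    b, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.StatusCode, string(b))
}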
@@ -84,7 +81,7 @@ func TestClient_Status(t *testing.T) {
func TestClient_RegisterValidator(t *testing.T) {
ctx := context.Background()
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}]`
expectedPath := "/eth/v1/builder/validators"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -114,7 +111,6 @@ func TestClient_RegisterValidator(t *testing.T) {
Timestamp: 42,
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
}
@@ -122,223 +118,80 @@ func TestClient_RegisterValidator(t *testing.T) {
func TestClient_GetHeader(t *testing.T) {
ctx := context.Background()
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
message := ErrorMessage{
Code: 500,
Message: "Internal server error",
}
resp, err := json.Marshal(message)
require.NoError(t, err)
return &http.Response{
StatusCode: http.StatusInternalServerError,
Body: io.NopCloser(bytes.NewBuffer(resp)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
var slot types.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNotOK)
t.Run("server error", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
message := ErrorMessage{
Code: 500,
Message: "Internal server error",
}
resp, err := json.Marshal(message)
require.NoError(t, err)
return &http.Response{
StatusCode: http.StatusInternalServerError,
Body: io.NopCloser(bytes.NewBuffer(resp)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNotOK)
})
t.Run("header not available", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusNoContent,
Body: io.NopCloser(bytes.NewBuffer([]byte("No header is available."))),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNoContent)
})
t.Run("bellatrix", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedSig := ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
require.Equal(t, true, bytes.Equal(expectedSig, h.Signature()))
expectedTxRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.TransactionsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedTxRoot, withdrawalsRoot))
require.Equal(t, uint64(1), bidHeader.GasUsed())
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseCapella)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseUnknownVersion)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorContains(t, "unsupported header version", err)
})
hc = &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
Request: r.Clone(ctx),
}, nil
}),
}
c = &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedSig := ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
require.Equal(t, true, bytes.Equal(expectedSig, h.Signature))
expectedTxRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.Equal(t, true, bytes.Equal(expectedTxRoot, h.Message.Header.TransactionsRoot))
require.Equal(t, uint64(1), h.Message.Header.GasUsed)
value := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", h.Message.Value))
}
func TestSubmitBlindedBlock(t *testing.T) {
ctx := context.Background()
t.Run("bellatrix", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas()))
require.Equal(t, uint64(1), ep.GasLimit())
})
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
require.NoError(t, err)
ep, err := c.SubmitBlindedBlock(ctx, sbb)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)), // send a Capella payload
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, err = c.SubmitBlindedBlock(ctx, sbbb)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
require.NoError(t, err)
_, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
require.ErrorIs(t, err, errNotBlinded)
})
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb := testSignedBlindedBeaconBlockBellatrix(t)
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash))
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas))
require.Equal(t, uint64(1), ep.GasLimit)
}
func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaconBlockBellatrix {
@@ -483,149 +336,6 @@ func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaco
}
}
func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconBlockCapella {
return &eth.SignedBlindedBeaconBlockCapella{
Block: &eth.BlindedBeaconBlockCapella{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyCapella{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: &eth.Eth1Data{
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
AttesterSlashings: []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
Attestations: []*eth.Attestation{
{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
Deposits: []*eth.Deposit{
{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
Data: &eth.Deposit_Data{
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Amount: 1,
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
VoluntaryExits: []*eth.SignedVoluntaryExit{
{
Exit: &eth.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderCapella{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
}
func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)

File diff suppressed because one or more lines are too long

View File

@@ -1 +0,0 @@
{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"14074904626401341155369551180448584754667373453244490859944217516317499064576","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}

View File

@@ -1,15 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["mock.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing",
visibility = ["//visibility:public"],
deps = [
"//api/client/builder:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)

View File

@@ -1,50 +0,0 @@
package testing
import (
"context"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// MockClient is a mock implementation of BuilderClient.
type MockClient struct {
RegisteredVals map[[48]byte]bool
}
// NewClient creates a new, correctly initialized mock.
func NewClient() MockClient {
return MockClient{RegisteredVals: map[[48]byte]bool{}}
}
// NodeURL --
func (MockClient) NodeURL() string {
return ""
}
// GetHeader --
func (MockClient) GetHeader(_ context.Context, _ primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
return nil, nil
}
// RegisterValidator --
func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
for _, r := range svr {
b := bytesutil.ToBytes48(r.Message.Pubkey)
m.RegisteredVals[b] = true
}
return nil
}
// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
return nil, nil
}
// Status --
func (MockClient) Status(_ context.Context) error {
return nil
}
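A hedged usage sketch for the mock above, assuming the prysm v4 module paths listed in the BUILD file are importable; the test name and pubkey bytes are illustrative:

package testing_test

import (
    "context"
    "testing"

    buildertesting "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

func TestMockRegisterValidator(t *testing.T) {
    m := buildertesting.NewClient()
    pub := make([]byte, 48)
    pub[0] = 0xaa
    reg := &ethpb.SignedValidatorRegistrationV1{
        Message: &ethpb.ValidatorRegistrationV1{Pubkey: pub},
    }
    if err := m.RegisterValidator(context.Background(), []*ethpb.SignedValidatorRegistrationV1{reg}); err != nil {
        t.Fatal(err)
    }
    // The mock records each registered pubkey in RegisteredVals.
    if !m.RegisteredVals[bytesutil.ToBytes48(pub)] {
        t.Fatal("expected pubkey to be marked as registered")
    }
}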

View File

@@ -8,10 +8,9 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)
type SignedValidatorRegistration struct {
@@ -24,8 +23,8 @@ type ValidatorRegistration struct {
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Message *ValidatorRegistration `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
Message: &ValidatorRegistration{r.Message},
Signature: r.SignedValidatorRegistrationV1.Signature,
@@ -37,8 +36,8 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
}
o := struct {
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Message *ValidatorRegistration `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
@@ -50,10 +49,10 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
GasLimit string `json:"gas_limit,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}{
FeeRecipient: r.FeeRecipient,
GasLimit: fmt.Sprintf("%d", r.GasLimit),
@@ -67,10 +66,10 @@ func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
}
o := struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
GasLimit string `json:"gas_limit,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
@@ -89,43 +88,29 @@ func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
return nil
}
var errInvalidUint256 = errors.New("invalid Uint256")
var errDecodeUint256 = errors.New("unable to decode into Uint256")
type Uint256 struct {
*big.Int
}
func isValidUint256(bi *big.Int) bool {
return bi.Cmp(big.NewInt(0)) >= 0 && bi.BitLen() <= 256
}
func stringToUint256(s string) (Uint256, error) {
func stringToUint256(s string) Uint256 {
bi := new(big.Int)
_, ok := bi.SetString(s, 10)
if !ok || !isValidUint256(bi) {
return Uint256{}, errors.Wrapf(errDecodeUint256, "value=%s", s)
}
return Uint256{Int: bi}, nil
bi.SetString(s, 10)
return Uint256{Int: bi}
}
// sszBytesToUint256 creates a Uint256 from a ssz-style (little-endian byte slice) representation.
func sszBytesToUint256(b []byte) (Uint256, error) {
bi := bytesutil.LittleEndianBytesToBigInt(b)
if !isValidUint256(bi) {
return Uint256{}, errors.Wrapf(errDecodeUint256, "value=%s", b)
}
return Uint256{Int: bi}, nil
func sszBytesToUint256(b []byte) Uint256 {
bi := new(big.Int)
return Uint256{Int: bi.SetBytes(bytesutil.ReverseByteOrder(b))}
}
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
func (s Uint256) SSZBytes() []byte {
if !isValidUint256(s.Int) {
return []byte{}
}
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
}
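A small worked example of the 32-byte little-endian form that SSZBytes produces and sszBytesToUint256 consumes, using only the standard library. The decimal literal is the base-fee value used in the tests above (2^248):

package main

import (
    "fmt"
    "math/big"
)

func main() {
    v, ok := new(big.Int).SetString("452312848583266388373324160190187140051835877600158453279131187530910662656", 10)
    if !ok {
        panic("bad literal")
    }
    be := v.Bytes() // big-endian, no leading zeros
    le := make([]byte, 32)
    for i, b := range be {
        le[len(be)-1-i] = b // reverse into little-endian; high bytes stay zero-padded
    }
    fmt.Printf("%#x\n", le)
    // Round-trip back to the decimal value by reversing again.
    back := make([]byte, 32)
    for i, b := range le {
        back[31-i] = b
    }
    fmt.Println(new(big.Int).SetBytes(back).String())
}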
var errUnmarshalUint256Failed = errors.New("unable to UnmarshalText into a Uint256 value")
func (s *Uint256) UnmarshalJSON(t []byte) error {
start := 0
end := len(t)
@@ -144,10 +129,7 @@ func (s *Uint256) UnmarshalText(t []byte) error {
}
z, ok := s.SetString(string(t), 10)
if !ok {
return errors.Wrapf(errDecodeUint256, "value=%s", t)
}
if !isValidUint256(z) {
return errors.Wrapf(errDecodeUint256, "value=%s", t)
return errors.Wrapf(errUnmarshalUint256Failed, "value=%s", string(t))
}
s.Int = z
return nil
@@ -164,9 +146,6 @@ func (s Uint256) MarshalJSON() ([]byte, error) {
}
func (s Uint256) MarshalText() ([]byte, error) {
if !isValidUint256(s.Int) {
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
}
return []byte(s.String()), nil
}
@@ -182,16 +161,12 @@ func (s Uint64String) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%d", s)), nil
}
type VersionResponse struct {
Version string `json:"version"`
}
type ExecHeaderResponse struct {
Version string `json:"version"`
Version string `json:"version,omitempty"`
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBid `json:"message"`
} `json:"data"`
Signature hexutil.Bytes `json:"signature,omitempty"`
Message *BuilderBid `json:"message,omitempty"`
} `json:"data,omitempty"`
}
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
@@ -237,35 +212,31 @@ func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
}
type BuilderBid struct {
Header *ExecutionPayloadHeader `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
Header *ExecutionPayloadHeader `json:"header,omitempty"`
Value Uint256 `json:"value,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}
type ExecutionPayloadHeader struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
BlockNumber Uint64String `json:"block_number,omitempty"`
GasLimit Uint64String `json:"gas_limit,omitempty"`
GasUsed Uint64String `json:"gas_used,omitempty"`
Timestamp Uint64String `json:"timestamp,omitempty"`
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
TransactionsRoot hexutil.Bytes `json:"transactions_root,omitempty"`
*v1.ExecutionPayloadHeader
}
func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeader
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas)
if err != nil {
return []byte{}, errors.Wrapf(err, "invalid BaseFeePerGas")
}
return json.Marshal(&MarshalCaller{
ParentHash: h.ExecutionPayloadHeader.ParentHash,
FeeRecipient: h.ExecutionPayloadHeader.FeeRecipient,
@@ -278,7 +249,7 @@ func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
GasUsed: Uint64String(h.ExecutionPayloadHeader.GasUsed),
Timestamp: Uint64String(h.ExecutionPayloadHeader.Timestamp),
ExtraData: h.ExecutionPayloadHeader.ExtraData,
BaseFeePerGas: baseFeePerGas,
BaseFeePerGas: sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas),
BlockHash: h.ExecutionPayloadHeader.BlockHash,
TransactionsRoot: h.ExecutionPayloadHeader.TransactionsRoot,
})
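The MarshalJSON methods above avoid infinite recursion by converting the receiver to a locally declared type that has the same fields but none of the methods. A tiny self-contained illustration of that trick; the type and field here are illustrative, not the builder's own:

package main

import (
    "encoding/json"
    "fmt"
)

type Header struct {
    GasUsed uint64 `json:"gas_used,string"`
}

func (h *Header) MarshalJSON() ([]byte, error) {
    type MarshalCaller Header // same fields, no methods -> json.Marshal will not recurse
    return json.Marshal((*MarshalCaller)(h))
}

func main() {
    b, err := json.Marshal(&Header{GasUsed: 21000})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // {"gas_used":"21000"}
}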
@@ -298,25 +269,25 @@ func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
}
type ExecPayloadResponse struct {
Version string `json:"version"`
Data ExecutionPayload `json:"data"`
Version string `json:"version,omitempty"`
Data ExecutionPayload `json:"data,omitempty"`
}
type ExecutionPayload struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
BlockNumber Uint64String `json:"block_number,omitempty"`
GasLimit Uint64String `json:"gas_limit,omitempty"`
GasUsed Uint64String `json:"gas_used,omitempty"`
Timestamp Uint64String `json:"timestamp,omitempty"`
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
Transactions []hexutil.Bytes `json:"transactions,omitempty"`
}
func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
@@ -346,254 +317,6 @@ func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
}, nil
}
// FromProto converts a proto execution payload type to our builder
// compatible payload type.
func FromProto(payload *v1.ExecutionPayload) (ExecutionPayload, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayload{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
return ExecutionPayload{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
}, nil
}
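A hedged sketch of using FromProto above to turn a proto execution payload into the builder JSON shape. It assumes the prysm v4 module is importable and that FromProto is exported from the builder package as shown; the zero-valued fixed-size fields are purely for illustration:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/prysmaticlabs/prysm/v4/api/client/builder"
    v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
)

func main() {
    proto := &v1.ExecutionPayload{
        ParentHash:    make([]byte, 32),
        FeeRecipient:  make([]byte, 20),
        StateRoot:     make([]byte, 32),
        ReceiptsRoot:  make([]byte, 32),
        LogsBloom:     make([]byte, 256),
        PrevRandao:    make([]byte, 32),
        BlockNumber:   1,
        GasLimit:      30_000_000,
        GasUsed:       21_000,
        Timestamp:     1,
        ExtraData:     []byte{},
        BaseFeePerGas: make([]byte, 32), // little-endian zero
        BlockHash:     make([]byte, 32),
        Transactions:  [][]byte{},
    }
    p, err := builder.FromProto(proto)
    if err != nil {
        panic(err)
    }
    out, err := json.Marshal(p)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}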
// FromProtoCapella converts a proto execution payload type for capella to our
// builder compatible payload type.
func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCapella, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayloadCapella{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
withdrawals := make([]Withdrawal, len(payload.Withdrawals))
for i, w := range payload.Withdrawals {
withdrawals[i] = Withdrawal{
Index: Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
Address: w.Address,
Amount: Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
}
}
return ExecutionPayloadCapella{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}
type ExecHeaderResponseCapella struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidCapella `json:"message"`
} `json:"data"`
}
func (ehr *ExecHeaderResponseCapella) ToProto() (*eth.SignedBuilderBidCapella, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
return nil, err
}
return &eth.SignedBuilderBidCapella{
Message: bb,
Signature: ehr.Data.Signature,
}, nil
}
func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
header, err := bb.Header.ToProto()
if err != nil {
return nil, err
}
return &eth.BuilderBidCapella{
Header: header,
Value: bb.Value.SSZBytes(),
Pubkey: bb.Pubkey,
}, nil
}
func (h *ExecutionPayloadHeaderCapella) ToProto() (*v1.ExecutionPayloadHeaderCapella, error) {
return &v1.ExecutionPayloadHeaderCapella{
ParentHash: h.ParentHash,
FeeRecipient: h.FeeRecipient,
StateRoot: h.StateRoot,
ReceiptsRoot: h.ReceiptsRoot,
LogsBloom: h.LogsBloom,
PrevRandao: h.PrevRandao,
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: h.ExtraData,
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
BlockHash: h.BlockHash,
TransactionsRoot: h.TransactionsRoot,
WithdrawalsRoot: h.WithdrawalsRoot,
}, nil
}
type BuilderBidCapella struct {
Header *ExecutionPayloadHeaderCapella `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
type ExecutionPayloadHeaderCapella struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
WithdrawalsRoot hexutil.Bytes `json:"withdrawals_root"`
*v1.ExecutionPayloadHeaderCapella
}
func (h *ExecutionPayloadHeaderCapella) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeaderCapella
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeaderCapella.BaseFeePerGas)
if err != nil {
return []byte{}, errors.Wrapf(err, "invalid BaseFeePerGas")
}
return json.Marshal(&MarshalCaller{
ParentHash: h.ExecutionPayloadHeaderCapella.ParentHash,
FeeRecipient: h.ExecutionPayloadHeaderCapella.FeeRecipient,
StateRoot: h.ExecutionPayloadHeaderCapella.StateRoot,
ReceiptsRoot: h.ExecutionPayloadHeaderCapella.ReceiptsRoot,
LogsBloom: h.ExecutionPayloadHeaderCapella.LogsBloom,
PrevRandao: h.ExecutionPayloadHeaderCapella.PrevRandao,
BlockNumber: Uint64String(h.ExecutionPayloadHeaderCapella.BlockNumber),
GasLimit: Uint64String(h.ExecutionPayloadHeaderCapella.GasLimit),
GasUsed: Uint64String(h.ExecutionPayloadHeaderCapella.GasUsed),
Timestamp: Uint64String(h.ExecutionPayloadHeaderCapella.Timestamp),
ExtraData: h.ExecutionPayloadHeaderCapella.ExtraData,
BaseFeePerGas: baseFeePerGas,
BlockHash: h.ExecutionPayloadHeaderCapella.BlockHash,
TransactionsRoot: h.ExecutionPayloadHeaderCapella.TransactionsRoot,
WithdrawalsRoot: h.ExecutionPayloadHeaderCapella.WithdrawalsRoot,
})
}
func (h *ExecutionPayloadHeaderCapella) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeaderCapella
uc := &UnmarshalCaller{}
if err := json.Unmarshal(b, uc); err != nil {
return err
}
ep := ExecutionPayloadHeaderCapella(*uc)
*h = ep
var err error
h.ExecutionPayloadHeaderCapella, err = h.ToProto()
return err
}
type ExecPayloadResponseCapella struct {
Version string `json:"version"`
Data ExecutionPayloadCapella `json:"data"`
}
type ExecutionPayloadCapella struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
Withdrawals []Withdrawal `json:"withdrawals"`
}
func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
return r.Data.ToProto()
}
func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = p.Transactions[i]
}
withdrawals := make([]*v1.Withdrawal, len(p.Withdrawals))
for i, w := range p.Withdrawals {
withdrawals[i] = &v1.Withdrawal{
Index: w.Index.Uint64(),
ValidatorIndex: types.ValidatorIndex(w.ValidatorIndex.Uint64()),
Address: w.Address,
Amount: w.Amount.Uint64(),
}
}
return &v1.ExecutionPayloadCapella{
ParentHash: p.ParentHash,
FeeRecipient: p.FeeRecipient,
StateRoot: p.StateRoot,
ReceiptsRoot: p.ReceiptsRoot,
LogsBloom: p.LogsBloom,
PrevRandao: p.PrevRandao,
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: p.ExtraData,
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
BlockHash: p.BlockHash,
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}
type Withdrawal struct {
Index Uint256 `json:"index"`
ValidatorIndex Uint256 `json:"validator_index"`
Address hexutil.Bytes `json:"address"`
Amount Uint256 `json:"amount"`
}
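Given the Uint256 and hexutil.Bytes field types, a capella withdrawal travels over the wire as decimal strings plus a 0x-prefixed address. A small sketch decoding an illustrative literal (the values mirror the capella test above, but the literal itself is not taken from a real builder response):

package main

import (
    "encoding/json"
    "fmt"
)

// withdrawalJSON mirrors the wire shape implied by the Withdrawal type above.
type withdrawalJSON struct {
    Index          string `json:"index"`
    ValidatorIndex string `json:"validator_index"`
    Address        string `json:"address"`
    Amount         string `json:"amount"`
}

func main() {
    raw := []byte(`{"index":"1","validator_index":"1","address":"0xcf8e0d4e9587369b2301d0790347320302cc0943","amount":"1"}`)
    var w withdrawalJSON
    if err := json.Unmarshal(raw, &w); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", w)
}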
type SignedBlindedBeaconBlockBellatrix struct {
*eth.SignedBlindedBeaconBlockBellatrix
}
@@ -608,8 +331,8 @@ type BlindedBeaconBlockBodyBellatrix struct {
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockBellatrix `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Message *BlindedBeaconBlockBellatrix `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
@@ -619,10 +342,10 @@ func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
Body *BlindedBeaconBlockBodyBellatrix `json:"body"`
ProposerIndex string `json:"proposer_index,omitempty"`
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
Body *BlindedBeaconBlockBodyBellatrix `json:"body,omitempty"`
}{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
@@ -638,8 +361,8 @@ type ProposerSlashing struct {
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1"`
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2"`
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1,omitempty"`
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2,omitempty"`
}{
SignedHeader1: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_1},
SignedHeader2: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_2},
@@ -652,8 +375,8 @@ type SignedBeaconBlockHeader struct {
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Header *BeaconBlockHeader `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Header *BeaconBlockHeader `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
Header: &BeaconBlockHeader{h.SignedBeaconBlockHeader.Header},
Signature: h.SignedBeaconBlockHeader.Signature,
@@ -666,11 +389,11 @@ type BeaconBlockHeader struct {
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
BodyRoot hexutil.Bytes `json:"body_root"`
Slot string `json:"slot,omitempty"`
ProposerIndex string `json:"proposer_index,omitempty"`
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
BodyRoot hexutil.Bytes `json:"body_root,omitempty"`
}{
Slot: fmt.Sprintf("%d", h.BeaconBlockHeader.Slot),
ProposerIndex: fmt.Sprintf("%d", h.BeaconBlockHeader.ProposerIndex),
@@ -690,9 +413,9 @@ func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
indices[i] = fmt.Sprintf("%d", a.AttestingIndices[i])
}
return json.Marshal(struct {
AttestingIndices []string `json:"attesting_indices"`
Data *AttestationData `json:"data"`
Signature hexutil.Bytes `json:"signature"`
AttestingIndices []string `json:"attesting_indices,omitempty"`
Data *AttestationData `json:"data,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
AttestingIndices: indices,
Data: &AttestationData{a.IndexedAttestation.Data},
@@ -706,8 +429,8 @@ type AttesterSlashing struct {
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Attestation1 *IndexedAttestation `json:"attestation_1"`
Attestation2 *IndexedAttestation `json:"attestation_2"`
Attestation1 *IndexedAttestation `json:"attestation_1,omitempty"`
Attestation2 *IndexedAttestation `json:"attestation_2,omitempty"`
}{
Attestation1: &IndexedAttestation{s.Attestation_1},
Attestation2: &IndexedAttestation{s.Attestation_2},
@@ -720,8 +443,8 @@ type Checkpoint struct {
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
Root hexutil.Bytes `json:"root"`
Epoch string `json:"epoch,omitempty"`
Root hexutil.Bytes `json:"root,omitempty"`
}{
Epoch: fmt.Sprintf("%d", c.Checkpoint.Epoch),
Root: c.Checkpoint.Root,
@@ -734,11 +457,11 @@ type AttestationData struct {
func (a *AttestationData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
Index string `json:"index"`
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root"`
Source *Checkpoint `json:"source"`
Target *Checkpoint `json:"target"`
Slot string `json:"slot,omitempty"`
Index string `json:"index,omitempty"`
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root,omitempty"`
Source *Checkpoint `json:"source,omitempty"`
Target *Checkpoint `json:"target,omitempty"`
}{
Slot: fmt.Sprintf("%d", a.AttestationData.Slot),
Index: fmt.Sprintf("%d", a.AttestationData.CommitteeIndex),
@@ -754,9 +477,9 @@ type Attestation struct {
func (a *Attestation) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
AggregationBits hexutil.Bytes `json:"aggregation_bits"`
Data *AttestationData `json:"data"`
Signature hexutil.Bytes `json:"signature" ssz-size:"96"`
AggregationBits hexutil.Bytes `json:"aggregation_bits,omitempty"`
Data *AttestationData `json:"data,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty" ssz-size:"96"`
}{
AggregationBits: hexutil.Bytes(a.Attestation.AggregationBits),
Data: &AttestationData{a.Attestation.Data},
@@ -770,10 +493,10 @@ type DepositData struct {
func (d *DepositData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
PublicKey hexutil.Bytes `json:"pubkey"`
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature hexutil.Bytes `json:"signature"`
PublicKey hexutil.Bytes `json:"pubkey,omitempty"`
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials,omitempty"`
Amount string `json:"amount,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
PublicKey: d.PublicKey,
WithdrawalCredentials: d.WithdrawalCredentials,
@@ -806,8 +529,8 @@ type SignedVoluntaryExit struct {
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *VoluntaryExit `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Message *VoluntaryExit `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
Signature: sve.SignedVoluntaryExit.Signature,
Message: &VoluntaryExit{sve.SignedVoluntaryExit.Exit},
@@ -820,8 +543,8 @@ type VoluntaryExit struct {
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
ValidatorIndex string `json:"validator_index"`
Epoch string `json:"epoch,omitempty"`
ValidatorIndex string `json:"validator_index,omitempty"`
}{
Epoch: fmt.Sprintf("%d", ve.Epoch),
ValidatorIndex: fmt.Sprintf("%d", ve.ValidatorIndex),
@@ -834,8 +557,8 @@ type SyncAggregate struct {
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits"`
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature"`
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits,omitempty"`
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature,omitempty"`
}{
SyncCommitteeBits: hexutil.Bytes(s.SyncAggregate.SyncCommitteeBits),
SyncCommitteeSignature: s.SyncAggregate.SyncCommitteeSignature,
@@ -848,9 +571,9 @@ type Eth1Data struct {
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
DepositRoot hexutil.Bytes `json:"deposit_root"`
DepositCount string `json:"deposit_count"`
BlockHash hexutil.Bytes `json:"block_hash"`
DepositRoot hexutil.Bytes `json:"deposit_root,omitempty"`
DepositCount string `json:"deposit_count,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
}{
DepositRoot: e.DepositRoot,
DepositCount: fmt.Sprintf("%d", e.DepositCount),
@@ -880,16 +603,16 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
}
return json.Marshal(struct {
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti hexutil.Bytes `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
Attestations []*Attestation `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
RandaoReveal hexutil.Bytes `json:"randao_reveal,omitempty"`
Eth1Data *Eth1Data `json:"eth1_data,omitempty"`
Graffiti hexutil.Bytes `json:"graffiti,omitempty"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings,omitempty"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings,omitempty"`
Attestations []*Attestation `json:"attestations,omitempty"`
Deposits []*Deposit `json:"deposits,omitempty"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits,omitempty"`
SyncAggregate *SyncAggregate `json:"sync_aggregate,omitempty"`
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header,omitempty"`
}{
RandaoReveal: b.RandaoReveal,
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
@@ -904,126 +627,6 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
})
}
type SignedBLSToExecutionChange struct {
*eth.SignedBLSToExecutionChange
}
func (ch *SignedBLSToExecutionChange) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BLSToExecutionChange `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Signature: ch.Signature,
Message: &BLSToExecutionChange{ch.Message},
})
}
type BLSToExecutionChange struct {
*eth.BLSToExecutionChange
}
func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
ValidatorIndex string `json:"validator_index"`
FromBlsPubkey hexutil.Bytes `json:"from_bls_pubkey"`
ToExecutionAddress hexutil.Bytes `json:"to_execution_address"`
}{
ValidatorIndex: fmt.Sprintf("%d", ch.ValidatorIndex),
FromBlsPubkey: ch.FromBlsPubkey,
ToExecutionAddress: ch.ToExecutionAddress,
})
}
type SignedBlindedBeaconBlockCapella struct {
*eth.SignedBlindedBeaconBlockCapella
}
type BlindedBeaconBlockCapella struct {
*eth.BlindedBeaconBlockCapella
}
type BlindedBeaconBlockBodyCapella struct {
*eth.BlindedBeaconBlockBodyCapella
}
func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockCapella `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Message: &BlindedBeaconBlockCapella{b.Block},
Signature: b.Signature,
})
}
func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
Body *BlindedBeaconBlockBodyCapella `json:"body"`
}{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
ParentRoot: b.ParentRoot,
StateRoot: b.StateRoot,
Body: &BlindedBeaconBlockBodyCapella{b.Body},
})
}
func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
sve := make([]*SignedVoluntaryExit, len(b.VoluntaryExits))
for i := range b.VoluntaryExits {
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.VoluntaryExits[i]}
}
deps := make([]*Deposit, len(b.Deposits))
for i := range b.Deposits {
deps[i] = &Deposit{Deposit: b.Deposits[i]}
}
atts := make([]*Attestation, len(b.Attestations))
for i := range b.Attestations {
atts[i] = &Attestation{Attestation: b.Attestations[i]}
}
atsl := make([]*AttesterSlashing, len(b.AttesterSlashings))
for i := range b.AttesterSlashings {
atsl[i] = &AttesterSlashing{AttesterSlashing: b.AttesterSlashings[i]}
}
pros := make([]*ProposerSlashing, len(b.ProposerSlashings))
for i := range b.ProposerSlashings {
pros[i] = &ProposerSlashing{ProposerSlashing: b.ProposerSlashings[i]}
}
chs := make([]*SignedBLSToExecutionChange, len(b.BlsToExecutionChanges))
for i := range b.BlsToExecutionChanges {
chs[i] = &SignedBLSToExecutionChange{SignedBLSToExecutionChange: b.BlsToExecutionChanges[i]}
}
return json.Marshal(struct {
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti hexutil.Bytes `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
Attestations []*Attestation `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderCapella `json:"execution_payload_header"`
}{
RandaoReveal: b.RandaoReveal,
Eth1Data: &Eth1Data{b.Eth1Data},
Graffiti: b.Graffiti,
ProposerSlashings: pros,
AttesterSlashings: atsl,
Attestations: atts,
Deposits: deps,
VoluntaryExits: sve,
BLSToExecutionChanges: chs,
SyncAggregate: &SyncAggregate{b.SyncAggregate},
ExecutionPayloadHeader: &ExecutionPayloadHeaderCapella{ExecutionPayloadHeaderCapella: b.ExecutionPayloadHeader},
})
}
type ErrorMessage struct {
Code int `json:"code"`
Message string `json:"message"`

View File

@@ -14,10 +14,9 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/go-bitfield"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
v1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
func ezDecode(t *testing.T, s string) []byte {
@@ -88,62 +87,6 @@ var testExampleHeaderResponse = `{
}
}`
var testExampleHeaderResponseCapella = `{
"version": "capella",
"data": {
"message": {
"header": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`
var testExampleHeaderResponseUnknownVersion = `{
"version": "bad",
"data": {
"message": {
"header": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`
func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
hr := &ExecHeaderResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
@@ -243,115 +186,9 @@ func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
}
}
func TestExecutionHeaderResponseCapellaUnmarshal(t *testing.T) {
hr := &ExecHeaderResponseCapella{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseCapella), hr))
cases := []struct {
expected string
actual string
name string
}{
{
expected: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
actual: hexutil.Encode(hr.Data.Signature),
name: "Signature",
},
{
expected: "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
actual: hexutil.Encode(hr.Data.Message.Pubkey),
name: "ExecHeaderResponse.Pubkey",
},
{
expected: "652312848583266388373324160190187140051835877600158453279131187530910662656",
actual: hr.Data.Message.Value.String(),
name: "ExecHeaderResponse.Value",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(hr.Data.Message.Header.ParentHash),
name: "ExecHeaderResponse.ExecutionPayloadHeader.ParentHash",
},
{
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
actual: hexutil.Encode(hr.Data.Message.Header.FeeRecipient),
name: "ExecHeaderResponse.ExecutionPayloadHeader.FeeRecipient",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(hr.Data.Message.Header.StateRoot),
name: "ExecHeaderResponse.ExecutionPayloadHeader.StateRoot",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(hr.Data.Message.Header.ReceiptsRoot),
name: "ExecHeaderResponse.ExecutionPayloadHeader.ReceiptsRoot",
},
{
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
actual: hexutil.Encode(hr.Data.Message.Header.LogsBloom),
name: "ExecHeaderResponse.ExecutionPayloadHeader.LogsBloom",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(hr.Data.Message.Header.PrevRandao),
name: "ExecHeaderResponse.ExecutionPayloadHeader.PrevRandao",
},
{
expected: "1",
actual: fmt.Sprintf("%d", hr.Data.Message.Header.BlockNumber),
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockNumber",
},
{
expected: "1",
actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasLimit),
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasLimit",
},
{
expected: "1",
actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasUsed),
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasUsed",
},
{
expected: "1",
actual: fmt.Sprintf("%d", hr.Data.Message.Header.Timestamp),
name: "ExecHeaderResponse.ExecutionPayloadHeader.Timestamp",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(hr.Data.Message.Header.ExtraData),
name: "ExecHeaderResponse.ExecutionPayloadHeader.ExtraData",
},
{
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
actual: fmt.Sprintf("%d", hr.Data.Message.Header.BaseFeePerGas),
name: "ExecHeaderResponse.ExecutionPayloadHeader.BaseFeePerGas",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(hr.Data.Message.Header.BlockHash),
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockHash",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(hr.Data.Message.Header.TransactionsRoot),
name: "ExecHeaderResponse.ExecutionPayloadHeader.TransactionsRoot",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(hr.Data.Message.Header.WithdrawalsRoot),
name: "ExecHeaderResponse.ExecutionPayloadHeader.WithdrawalsRoot",
},
}
for _, c := range cases {
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
}
}
func TestExecutionHeaderResponseToProto(t *testing.T) {
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
v, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
v := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
hr := &ExecHeaderResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
p, err := hr.ToProto()
@@ -405,67 +242,6 @@ func TestExecutionHeaderResponseToProto(t *testing.T) {
require.DeepEqual(t, expected, p)
}
func TestExecutionHeaderResponseCapellaToProto(t *testing.T) {
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
v, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
hr := &ExecHeaderResponseCapella{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseCapella), hr))
p, err := hr.ToProto()
require.NoError(t, err)
signature, err := hexutil.Decode("0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
require.NoError(t, err)
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
require.NoError(t, err)
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
require.NoError(t, err)
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
txRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
withdrawalsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
expected := &eth.SignedBuilderBidCapella{
Message: &eth.BuilderBidCapella{
Header: &v1.ExecutionPayloadHeaderCapella{
ParentHash: parentHash,
FeeRecipient: feeRecipient,
StateRoot: stateRoot,
ReceiptsRoot: receiptsRoot,
LogsBloom: logsBloom,
PrevRandao: prevRandao,
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: extraData,
BaseFeePerGas: bfpg.SSZBytes(),
BlockHash: blockHash,
TransactionsRoot: txRoot,
WithdrawalsRoot: withdrawalsRoot,
},
Value: v.SSZBytes(),
Pubkey: pubkey,
},
Signature: signature,
}
require.DeepEqual(t, expected, p)
}
var testExampleExecutionPayload = `{
"version": "bellatrix",
"data": {
@@ -488,36 +264,6 @@ var testExampleExecutionPayload = `{
}
}`
var testExampleExecutionPayloadCapella = `{
"version": "capella",
"data": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions": [
"0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
],
"withdrawals": [
{
"index": "1",
"validator_index": "1",
"address": "0xcf8e0d4e9587369b2301d0790347320302cc0943",
"amount": "1"
}
]
}
}`
func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
epr := &ExecPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
@@ -600,95 +346,6 @@ func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
require.Equal(t, txHash, hexutil.Encode(epr.Data.Transactions[0]))
}
func TestExecutionPayloadResponseCapellaUnmarshal(t *testing.T) {
epr := &ExecPayloadResponseCapella{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadCapella), epr))
cases := []struct {
expected string
actual string
name string
}{
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ParentHash),
name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
},
{
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
actual: hexutil.Encode(epr.Data.FeeRecipient),
name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.StateRoot),
name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ReceiptsRoot),
name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
},
{
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
actual: hexutil.Encode(epr.Data.LogsBloom),
name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.PrevRandao),
name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.BlockNumber),
name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.GasLimit),
name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.GasUsed),
name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.Timestamp),
name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExtraData),
name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
},
{
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
actual: fmt.Sprintf("%d", epr.Data.BaseFeePerGas),
name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.BlockHash),
name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
},
}
for _, c := range cases {
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
}
require.Equal(t, 1, len(epr.Data.Transactions))
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
require.Equal(t, txHash, hexutil.Encode(epr.Data.Transactions[0]))
require.Equal(t, 1, len(epr.Data.Withdrawals))
w := epr.Data.Withdrawals[0]
assert.Equal(t, uint64(1), w.Index.Uint64())
assert.Equal(t, uint64(1), w.ValidatorIndex.Uint64())
assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.Address.String())
assert.Equal(t, uint64(1), w.Amount.Uint64())
}
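
The Capella payload response adds a withdrawals list whose quantities arrive as decimal strings and whose address is 0x-hex, as the assertions above show. A rough, self-contained sketch of decoding one withdrawal object (the field types here are illustrative stand-ins, not the package's actual ones):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// withdrawalJSON sketches the shape of a builder-API withdrawal object:
// index, validator_index and amount are decimal strings, address is 0x-hex.
type withdrawalJSON struct {
	Index          string         `json:"index"`
	ValidatorIndex string         `json:"validator_index"`
	Address        common.Address `json:"address"`
	Amount         string         `json:"amount"`
}

func main() {
	raw := `{"index":"1","validator_index":"1","address":"0xcf8e0d4e9587369b2301d0790347320302cc0943","amount":"1"}`
	var w withdrawalJSON
	if err := json.Unmarshal([]byte(raw), &w); err != nil {
		panic(err)
	}
	fmt.Println(w.Index, w.ValidatorIndex, w.Address.Hex(), w.Amount)
}
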
func TestExecutionPayloadResponseToProto(t *testing.T) {
hr := &ExecPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), hr))
@@ -716,8 +373,7 @@ func TestExecutionPayloadResponseToProto(t *testing.T) {
require.NoError(t, err)
txList := [][]byte{tx}
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
expected := &v1.ExecutionPayload{
ParentHash: parentHash,
FeeRecipient: feeRecipient,
@@ -737,65 +393,6 @@ func TestExecutionPayloadResponseToProto(t *testing.T) {
require.DeepEqual(t, expected, p)
}
func TestExecutionPayloadResponseCapellaToProto(t *testing.T) {
hr := &ExecPayloadResponseCapella{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadCapella), hr))
p, err := hr.ToProto()
require.NoError(t, err)
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
require.NoError(t, err)
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
require.NoError(t, err)
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
tx, err := hexutil.Decode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")
require.NoError(t, err)
txList := [][]byte{tx}
address, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943")
require.NoError(t, err)
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
expected := &v1.ExecutionPayloadCapella{
ParentHash: parentHash,
FeeRecipient: feeRecipient,
StateRoot: stateRoot,
ReceiptsRoot: receiptsRoot,
LogsBloom: logsBloom,
PrevRandao: prevRandao,
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: extraData,
BaseFeePerGas: bfpg.SSZBytes(),
BlockHash: blockHash,
Transactions: txList,
Withdrawals: []*v1.Withdrawal{
{
Index: 1,
ValidatorIndex: 1,
Address: address,
Amount: 1,
},
},
}
require.DeepEqual(t, expected, p)
}
func pbEth1Data() *eth.Eth1Data {
return &eth.Eth1Data{
DepositRoot: make([]byte, 32),
@@ -984,8 +581,7 @@ func TestProposerSlashings(t *testing.T) {
}
func pbExecutionPayloadHeader(t *testing.T) *v1.ExecutionPayloadHeader {
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
return &v1.ExecutionPayloadHeader{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
@@ -1004,28 +600,6 @@ func pbExecutionPayloadHeader(t *testing.T) *v1.ExecutionPayloadHeader {
}
}
func pbExecutionPayloadHeaderCapella(t *testing.T) *v1.ExecutionPayloadHeaderCapella {
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
return &v1.ExecutionPayloadHeaderCapella{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: bfpg.SSZBytes(),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
}
}
func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
h := &ExecutionPayloadHeader{
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
@@ -1036,16 +610,6 @@ func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
require.Equal(t, expected, string(b))
}
func TestExecutionPayloadHeaderCapella_MarshalJSON(t *testing.T) {
h := &ExecutionPayloadHeaderCapella{
ExecutionPayloadHeaderCapella: pbExecutionPayloadHeaderCapella(t),
}
b, err := json.Marshal(h)
require.NoError(t, err)
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}`
require.Equal(t, expected, string(b))
}
var testBuilderBid = `{
"version":"bellatrix",
"data":{
@@ -1097,49 +661,6 @@ func TestMathBigUnmarshal(t *testing.T) {
require.NoError(t, u256.UnmarshalText([]byte("452312848583266388373324160190187140051835877600158453279131187530910662656")))
}
func TestIsValidUint256(t *testing.T) {
value, ok := new(big.Int), false
// negative uint256.max - 1
_, ok = value.SetString("-10000000000000000000000000000000000000000000000000000000000000000", 16)
require.Equal(t, true, ok)
require.Equal(t, 257, value.BitLen())
require.Equal(t, false, isValidUint256(value))
// negative uint256.max
_, ok = value.SetString("-ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16)
require.Equal(t, true, ok)
require.Equal(t, 256, value.BitLen())
require.Equal(t, false, isValidUint256(value))
// negative number
_, ok = value.SetString("-1", 16)
require.Equal(t, true, ok)
require.Equal(t, false, isValidUint256(value))
// uint256.min
_, ok = value.SetString("0", 16)
require.Equal(t, true, ok)
require.Equal(t, true, isValidUint256(value))
// positive number
_, ok = value.SetString("1", 16)
require.Equal(t, true, ok)
require.Equal(t, true, isValidUint256(value))
// uint256.max
_, ok = value.SetString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16)
require.Equal(t, true, ok)
require.Equal(t, 256, value.BitLen())
require.Equal(t, true, isValidUint256(value))
// uint256.max + 1
_, ok = value.SetString("10000000000000000000000000000000000000000000000000000000000000000", 16)
require.Equal(t, true, ok)
require.Equal(t, 257, value.BitLen())
require.Equal(t, false, isValidUint256(value))
}
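
TestIsValidUint256 pins down the accepted range for the builder's Uint256 type: the value must be non-negative and fit in 256 bits (a BitLen() of at most 256). A minimal sketch of a check with those semantics, assuming the unexported helper applies the same two conditions:

package main

import (
	"fmt"
	"math/big"
	"strings"
)

// isValidUint256 reports whether v fits an unsigned 256-bit integer, mirroring
// the two properties the test cases above assert: non-negative sign and a bit
// length of at most 256. (Assumption: the package's helper uses equivalent logic.)
func isValidUint256(v *big.Int) bool {
	return v.Sign() >= 0 && v.BitLen() <= 256
}

func main() {
	max, _ := new(big.Int).SetString(strings.Repeat("f", 64), 16) // 2^256 - 1
	tooBig := new(big.Int).Add(max, big.NewInt(1))                // 2^256

	fmt.Println(isValidUint256(big.NewInt(-1))) // false: negative
	fmt.Println(isValidUint256(big.NewInt(0)))  // true:  uint256.min
	fmt.Println(isValidUint256(max))            // true:  BitLen == 256
	fmt.Println(isValidUint256(tooBig))         // false: BitLen == 257
}
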
func TestUint256Unmarshal(t *testing.T) {
base10 := "452312848583266388373324160190187140051835877600158453279131187530910662656"
bi := new(big.Int)
@@ -1156,36 +677,6 @@ func TestUint256Unmarshal(t *testing.T) {
require.Equal(t, expected, string(m))
}
func TestUint256UnmarshalNegative(t *testing.T) {
m := "-1"
var value Uint256
err := value.UnmarshalText([]byte(m))
require.ErrorContains(t, "unable to decode into Uint256", err)
}
func TestUint256UnmarshalMin(t *testing.T) {
m := "0"
var value Uint256
err := value.UnmarshalText([]byte(m))
require.NoError(t, err)
}
func TestUint256UnmarshalMax(t *testing.T) {
// 2**256-1 (uint256.max)
m := "115792089237316195423570985008687907853269984665640564039457584007913129639935"
var value Uint256
err := value.UnmarshalText([]byte(m))
require.NoError(t, err)
}
func TestUint256UnmarshalTooBig(t *testing.T) {
// 2**256 (one more than uint256.max)
m := "115792089237316195423570985008687907853269984665640564039457584007913129639936"
var value Uint256
err := value.UnmarshalText([]byte(m))
require.ErrorContains(t, "unable to decode into Uint256", err)
}
func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
expected, err := os.ReadFile("testdata/blinded-block.json")
require.NoError(t, err)
@@ -1215,43 +706,12 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
require.Equal(t, string(expected[0:len(expected)-1]), string(m))
}
func TestMarshalBlindedBeaconBlockBodyCapella(t *testing.T) {
expected, err := os.ReadFile("testdata/blinded-block-capella.json")
require.NoError(t, err)
b := &BlindedBeaconBlockCapella{BlindedBeaconBlockCapella: &eth.BlindedBeaconBlockCapella{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyCapella{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: pbEth1Data(),
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
ProposerSlashings: []*eth.ProposerSlashing{pbProposerSlashing(t)},
AttesterSlashings: []*eth.AttesterSlashing{pbAttesterSlashing(t)},
Attestations: []*eth.Attestation{pbAttestation(t)},
Deposits: []*eth.Deposit{pbDeposit(t)},
VoluntaryExits: []*eth.SignedVoluntaryExit{pbSignedVoluntaryExit(t)},
SyncAggregate: pbSyncAggregate(),
ExecutionPayloadHeader: pbExecutionPayloadHeaderCapella(t),
},
}}
m, err := json.Marshal(b)
require.NoError(t, err)
// string error output is easier to deal with
// -1 end slice index on expected is to get rid of trailing newline
// if you update this fixture and this test breaks, you probably removed the trailing newline
require.Equal(t, string(expected[0:len(expected)-1]), string(m))
}
func TestRoundTripUint256(t *testing.T) {
vs := "4523128485832663883733241601901871400518358776001584532791311875309106626"
u, err := stringToUint256(vs)
require.NoError(t, err)
u := stringToUint256(vs)
sb := u.SSZBytes()
require.Equal(t, 32, len(sb))
uu, err := sszBytesToUint256(sb)
require.NoError(t, err)
uu := sszBytesToUint256(sb)
require.Equal(t, true, bytes.Equal(u.SSZBytes(), uu.SSZBytes()))
require.Equal(t, vs, uu.String())
}
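
TestRoundTripUint256 depends on SSZBytes producing a fixed 32-byte encoding that sszBytesToUint256 can invert. A self-contained sketch of that round trip, assuming the standard SSZ uint256 layout of 32 little-endian bytes (the helpers below are illustrations, not the package's actual functions):

package main

import (
	"fmt"
	"math/big"
)

// uint256ToSSZ encodes v as 32 little-endian bytes, the assumed SSZ layout.
func uint256ToSSZ(v *big.Int) []byte {
	be := v.FillBytes(make([]byte, 32)) // big-endian, left-padded to 32 bytes
	le := make([]byte, 32)
	for i := range be {
		le[i] = be[31-i]
	}
	return le
}

// sszToUint256 inverts uint256ToSSZ by reversing back to big-endian.
func sszToUint256(le []byte) *big.Int {
	be := make([]byte, 32)
	for i := range le {
		be[i] = le[31-i]
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	v, ok := new(big.Int).SetString("4523128485832663883733241601901871400518358776001584532791311875309106626", 10)
	if !ok {
		panic("bad literal")
	}
	ssz := uint256ToSSZ(v)
	fmt.Println(len(ssz))                      // 32
	fmt.Println(sszToUint256(ssz).Cmp(v) == 0) // true
}
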
@@ -1279,16 +739,6 @@ func TestExecutionPayloadHeaderRoundtrip(t *testing.T) {
require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
}
func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
expected, err := os.ReadFile("testdata/execution-payload-capella.json")
require.NoError(t, err)
hu := &ExecutionPayloadHeaderCapella{}
require.NoError(t, json.Unmarshal(expected, hu))
m, err := json.Marshal(hu)
require.NoError(t, err)
require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
}
func TestErrorMessage_non200Err(t *testing.T) {
mockRequest := &http.Request{
URL: &url.URL{Path: "example.com"},
@@ -1375,4 +825,5 @@ func TestErrorMessage_non200Err(t *testing.T) {
}
})
}
}

View File

@@ -8,7 +8,7 @@ go_library(
"modifiers.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway",
importpath = "github.com/prysmaticlabs/prysm/v3/api/gateway",
visibility = [
"//beacon-chain:__subpackages__",
"//validator:__subpackages__",

View File

@@ -10,7 +10,7 @@ go_library(
"process_request.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware",
importpath = "github.com/prysmaticlabs/prysm/v3/api/gateway/apimiddleware",
visibility = ["//visibility:public"],
deps = [
"//api/grpc:go_default_library",

View File

@@ -7,7 +7,7 @@ import (
"strings"
"github.com/gorilla/mux"
butil "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
butil "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/wealdtech/go-bytesutil"
)

View File

@@ -6,8 +6,8 @@ import (
"testing"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
func TestHandleURLParameters(t *testing.T) {

View File

@@ -10,7 +10,7 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v3/api/grpc"
)
// DeserializeRequestBodyIntoContainer deserializes the request's body into an endpoint-specific struct.

View File

@@ -8,9 +8,9 @@ import (
"strings"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/api/grpc"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/sirupsen/logrus/hooks/test"
)

View File

@@ -13,8 +13,8 @@ import (
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/runtime"
"github.com/prysmaticlabs/prysm/v3/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v3/runtime"
"github.com/rs/cors"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
@@ -70,16 +70,15 @@ type Gateway struct {
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
g := &Gateway{
ctx: ctx,
cfg: &config{},
cfg: &config{
router: mux.NewRouter(),
},
}
for _, opt := range opts {
if err := opt(g); err != nil {
return nil, err
}
}
if g.cfg.router == nil {
g.cfg.router = mux.NewRouter()
}
return g, nil
}
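
The constructor change above moves the default router into the config literal, so every Option sees a fully initialized config instead of relying on a nil check after the option loop. A simplified sketch of that functional-options-with-defaults pattern (the types and the WithRouter option are stand-ins, not the real gateway API):

package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

// config and Gateway are simplified stand-ins for the types in the diff above.
type config struct {
	router *mux.Router
}

type Gateway struct {
	cfg *config
}

type Option func(g *Gateway) error

// WithRouter illustrates overriding the default via an option (hypothetical
// helper; the package exposes SetRouter instead).
func WithRouter(r *mux.Router) Option {
	return func(g *Gateway) error {
		g.cfg.router = r
		return nil
	}
}

func New(opts ...Option) (*Gateway, error) {
	// The default is set up front, so options and later code can assume it is non-nil.
	g := &Gateway{cfg: &config{router: mux.NewRouter()}}
	for _, opt := range opts {
		if err := opt(g); err != nil {
			return nil, err
		}
	}
	return g, nil
}

func main() {
	g, err := New(WithRouter(mux.NewRouter()))
	if err != nil {
		panic(err)
	}
	fmt.Println(g.cfg.router != nil) // true
}
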

View File

@@ -10,10 +10,10 @@ import (
"testing"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v3/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)

View File

@@ -5,11 +5,16 @@ import (
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v3/api/gateway/apimiddleware"
)
type Option func(g *Gateway) error
func (g *Gateway) SetRouter(r *mux.Router) *Gateway {
g.cfg.router = r
return g
}
func WithPbHandlers(handlers []*PbMux) Option {
return func(g *Gateway) error {
g.cfg.pbHandlers = handlers

View File

@@ -6,7 +6,7 @@ go_library(
"grpcutils.go",
"parameters.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/grpc",
importpath = "github.com/prysmaticlabs/prysm/v3/api/grpc",
visibility = ["//visibility:public"],
deps = [
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -7,8 +7,8 @@ import (
"testing"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"

View File

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["pagination.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/pagination",
importpath = "github.com/prysmaticlabs/prysm/v3/api/pagination",
visibility = ["//visibility:public"],
deps = [
"//config/params:go_default_library",

View File

@@ -6,7 +6,7 @@ import (
"strconv"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v3/config/params"
)
// StartAndEndPage takes in the requested page token, wanted page size, total page size.

View File

@@ -3,9 +3,9 @@ package pagination_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/api/pagination"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/api/pagination"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
func TestStartAndEndPage(t *testing.T) {

View File

@@ -8,7 +8,7 @@ go_library(
"multilock.go",
"scatter.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/async",
importpath = "github.com/prysmaticlabs/prysm/v3/async",
visibility = ["//visibility:public"],
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)

View File

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["abool.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/async/abool",
importpath = "github.com/prysmaticlabs/prysm/v3/async/abool",
visibility = ["//visibility:public"],
)

View File

@@ -6,8 +6,8 @@ import (
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/async"
"github.com/prysmaticlabs/prysm/v3/testing/require"
log "github.com/sirupsen/logrus"
)

View File

@@ -7,10 +7,10 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v3/async"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)
func TestDebounce_NoEvents(t *testing.T) {

View File

@@ -6,7 +6,7 @@ go_library(
"feed.go",
"subscription.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/async/event",
importpath = "github.com/prysmaticlabs/prysm/v3/async/event",
visibility = ["//visibility:public"],
deps = ["//time/mclock:go_default_library"],
)

View File

@@ -19,7 +19,7 @@ package event_test
import (
"fmt"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v3/async/event"
)
func ExampleFeed_acknowledgedEvents() {

View File

@@ -20,7 +20,7 @@ import (
"fmt"
"sync"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v3/async/event"
)
// This example demonstrates how SubscriptionScope can be used to control the lifetime of

View File

@@ -19,7 +19,7 @@ package event_test
import (
"fmt"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v3/async/event"
)
func ExampleNewSubscription() {

View File

@@ -23,7 +23,7 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
)
func TestFeedPanics(t *testing.T) {

View File

@@ -21,7 +21,7 @@ import (
"sync"
"time"
"github.com/prysmaticlabs/prysm/v4/time/mclock"
"github.com/prysmaticlabs/prysm/v3/time/mclock"
)
// waitQuotient is divided against the max backoff time, in order to have N requests based on the full

View File

@@ -23,7 +23,7 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
var errInts = errors.New("error in subscribeInts")

View File

@@ -6,7 +6,7 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v3/async"
)
func TestEveryRuns(t *testing.T) {

View File

@@ -3,9 +3,7 @@ Copyright 2017 Albert Tedja
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

View File

@@ -3,9 +3,7 @@ Copyright 2017 Albert Tedja
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

View File

@@ -14,7 +14,7 @@ type WorkerResults struct {
// Scatter scatters a computation across multiple goroutines.
// This breaks the task into a number of chunks and executes those chunks in parallel with the function provided.
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
// Results returned are collected and presented a a set of WorkerResults, which can be reassembled by the calling function.
// Any error that occurs in the workers will be passed back to the calling function.
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
if inputLen <= 0 {

View File

@@ -5,9 +5,9 @@ import (
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/async"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
func TestDouble(t *testing.T) {

View File

@@ -4,10 +4,8 @@ go_library(
name = "go_default_library",
srcs = [
"chain_info.go",
"chain_info_forkchoice.go",
"error.go",
"execution_engine.go",
"forkchoice_update_execution.go",
"head.go",
"head_sync_committee_info.go",
"init_sync_process_block.go",
@@ -23,9 +21,10 @@ go_library(
"receive_attestation.go",
"receive_block.go",
"service.go",
"state_balance_cache.go",
"weak_subjectivity_checks.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain",
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
@@ -52,22 +51,19 @@ go_library(
"//beacon-chain/execution:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -107,7 +103,6 @@ go_test(
"chain_info_test.go",
"checktags_test.go",
"execution_engine_test.go",
"forkchoice_update_execution_test.go",
"head_sync_committee_info_test.go",
"head_test.go",
"init_test.go",
@@ -120,7 +115,6 @@ go_test(
"receive_attestation_test.go",
"receive_block_test.go",
"service_test.go",
"setup_test.go",
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
@@ -138,8 +132,9 @@ go_test(
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -170,7 +165,6 @@ go_test(
"mock_test.go",
"receive_block_test.go",
"service_norace_test.go",
"setup_test.go",
],
embed = [":go_default_library"],
gc_goopts = [

View File

@@ -5,19 +5,18 @@ import (
"context"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)
@@ -29,31 +28,18 @@ type ChainInfoFetcher interface {
CanonicalFetcher
ForkFetcher
HeadDomainFetcher
ForkchoiceFetcher
}
// ForkchoiceFetcher defines a common interface for methods that directly access
// forkchoice information. These typically require a lock, so external callers
// should use the methods in this blockchain package, which take care of locking
// forkchoice.
type ForkchoiceFetcher interface {
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
CachedHeadRoot() [32]byte
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(uint64)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, [32]byte) error
ForkChoiceDump(context.Context) (*ethpbv1.ForkChoiceDump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte
// HeadUpdater defines a common interface for methods in the blockchain service
// which allow updating the head info.
type HeadUpdater interface {
UpdateHead(context.Context) error
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
type TimeFetcher interface {
GenesisTime() time.Time
CurrentSlot() primitives.Slot
CurrentSlot() types.Slot
}
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
@@ -64,34 +50,28 @@ type GenesisFetcher interface {
// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieve head related data.
type HeadFetcher interface {
HeadSlot() primitives.Slot
HeadSlot() types.Slot
HeadRoot(ctx context.Context) ([]byte, error)
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
HeadState(ctx context.Context) (state.BeaconState, error)
HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error)
HeadValidatorsIndices(ctx context.Context, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error)
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
HeadGenesisValidatorsRoot() [32]byte
HeadETH1Data() *ethpb.Eth1Data
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ChainHeads() ([][32]byte, []primitives.Slot)
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ChainHeads() ([][32]byte, []types.Slot)
HeadSyncCommitteeFetcher
HeadDomainFetcher
}
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
type ForkFetcher interface {
ForkChoicer() forkchoice.ForkChoicer
CurrentFork() *ethpb.Fork
GenesisFetcher
TimeFetcher
}
// TemporalOracle is like ForkFetcher minus CurrentFork()
type TemporalOracle interface {
GenesisFetcher
TimeFetcher
}
// CanonicalFetcher retrieves the current chain's canonical information.
type CanonicalFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
@@ -103,9 +83,7 @@ type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
UnrealizedJustifiedPayloadBlockHash() [32]byte
FinalizedBlockHash() [32]byte
InForkchoice([32]byte) bool
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
}
@@ -117,30 +95,30 @@ type OptimisticModeFetcher interface {
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
cp := s.ForkChoicer().FinalizedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.PreviousJustifiedCheckpoint()
cp := s.ForkChoicer().PreviousJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
cp := s.ForkChoicer().JustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// BestJustifiedCheckpt returns the best justified checkpoint from store.
func (s *Service) BestJustifiedCheckpt() *ethpb.Checkpoint {
cp := s.ForkChoicer().BestJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
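
Each accessor above hands back the checkpoint root through bytesutil.SafeCopyBytes, so callers receive their own copy rather than a slice aliasing forkchoice's internal state. A minimal sketch of that defensive-copy idiom (safeCopy is an illustrative stand-in):

package main

import "fmt"

// safeCopy mirrors the behaviour relied on above: nil stays nil, otherwise the
// caller receives a fresh slice so mutating it cannot touch the source buffer.
func safeCopy(b []byte) []byte {
	if b == nil {
		return nil
	}
	out := make([]byte, len(b))
	copy(out, b)
	return out
}

func main() {
	root := [32]byte{0x01}
	cp := safeCopy(root[:])
	cp[0] = 0xff
	fmt.Println(root[0]) // prints 1; the original root is untouched
}
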
// HeadSlot returns the slot of the head of the chain.
func (s *Service) HeadSlot() primitives.Slot {
func (s *Service) HeadSlot() types.Slot {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -179,7 +157,7 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
// HeadBlock returns the head block of the chain.
// If the head is nil from service struct,
// it will attempt to get the head block from DB.
func (s *Service) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -209,35 +187,13 @@ func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
}
// HeadStateReadOnly returns the read only head state of the chain.
// If the head is nil from service struct, it will attempt to get the
// head state from DB. Any callers of this method MUST only use the
// state instance to read fields from the state. Any type assertions back
// to the concrete type and subsequent use of it could lead to corruption
// of the state.
func (s *Service) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.HeadStateReadOnly")
defer span.End()
s.headLock.RLock()
defer s.headLock.RUnlock()
ok := s.hasHeadState()
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
if ok {
return s.headStateReadOnly(ctx), nil
}
return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
}
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return []primitives.ValidatorIndex{}, nil
return []types.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
}
@@ -298,8 +254,6 @@ func (s *Service) CurrentFork() *ethpb.Fork {
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
@@ -309,8 +263,14 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
}
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Head roots and head slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []types.Slot) {
return s.cfg.ForkChoiceStore.Tips()
}
// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
@@ -320,7 +280,7 @@ func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLen
}
// HeadValidatorIndexToPublicKey returns the pubkey of the validator `index` in current head state.
func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
@@ -333,8 +293,13 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti
return v.PublicKey(), nil
}
// ForkChoicer returns the forkchoice interface.
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return false, nil
}
@@ -342,13 +307,14 @@ func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
headRoot := s.head.root
s.headLock.RUnlock()
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if s.cfg.ForkChoiceStore.AllTipsAreInvalid() {
return true, nil
}
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
if err == nil {
return optimistic, nil
}
if !errors.Is(err, doublylinkedtree.ErrNilNode) {
if err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return true, err
}
// If forkchoice does not have the head root, then the node is considered
@@ -359,46 +325,20 @@ func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
// IsFinalized returns true if the input root is finalized.
// It first checks latest finalized root then checks finalized root index in DB.
func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if s.cfg.ForkChoiceStore.FinalizedCheckpoint().Root == root {
if s.ForkChoicer().FinalizedCheckpoint().Root == root {
return true
}
// If node exists in our store, then it is not
// finalized.
if s.cfg.ForkChoiceStore.HasNode(root) {
return false
}
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
}
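The order of the checks above matters: match the finalized checkpoint root itself, treat anything still present in forkchoice as not yet finalized, and only then consult the DB's finalized-block index. A condensed, self-contained sketch of that decision, with plain booleans standing in for the store and DB lookups:

package main

import "fmt"

// isFinalized condenses the check order: the finalized checkpoint root itself,
// then presence in forkchoice (still a non-finalized descendant), and only
// then the DB's finalized-block index.
func isFinalized(root, finalizedRoot [32]byte, inForkchoice, finalizedInDB bool) bool {
    if root == finalizedRoot {
        return true
    }
    if inForkchoice {
        return false
    }
    return finalizedInDB
}

func main() {
    f := [32]byte{'f'}
    fmt.Println(isFinalized(f, f, false, false))           // true: it is the finalized checkpoint itself
    fmt.Println(isFinalized([32]byte{'a'}, f, true, true))  // false: still present in forkchoice
}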
// InForkchoice returns true if the given root is found in forkchoice
// This in particular means that the block root is a descendant of the
// finalized checkpoint.
func (s *Service) InForkchoice(root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
s.cfg.ForkChoiceStore.RUnlock()
if err == nil {
return optimistic, nil
}
if !errors.Is(err, doublylinkedtree.ErrNilNode) {
if err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return false, err
}
// if the requested root is the headroot and the root is not found in
@@ -417,10 +357,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
}
if ss == nil {
ss, err = s.recoverStateSummary(ctx, root)
if err != nil {
return true, err
}
return true, errInvalidNilSummary
}
validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
if err != nil {
@@ -446,10 +383,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, err
}
if lastValidated == nil {
lastValidated, err = s.recoverStateSummary(ctx, root)
if err != nil {
return false, err
}
return false, errInvalidNilSummary
}
if ss.Slot > lastValidated.Slot {
@@ -458,57 +392,12 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return !isCanonical, nil
}
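The tail of IsOptimisticForRoot reduces to a slot comparison against the last validated checkpoint plus a canonicality check. Roughly, and leaving out the summary-recovery and error paths shown above (a sketch, not the full method):

package main

import "fmt"

// optimisticForRoot condenses the final decision: anything newer than the last
// validated slot is optimistic; at or before it, only non-canonical branches
// remain optimistic.
func optimisticForRoot(summarySlot, lastValidatedSlot uint64, isCanonical bool) bool {
    if summarySlot > lastValidatedSlot {
        return true
    }
    return !isCanonical
}

func main() {
    fmt.Println(optimisticForRoot(11, 10, true))  // true: past the last validated slot
    fmt.Println(optimisticForRoot(9, 10, true))   // false: within the validated range and canonical
    fmt.Println(optimisticForRoot(9, 10, false))  // true: validated range but non-canonical
}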
// Ancestor returns the block root of an ancestry block from the input block root.
//
// Spec pseudocode definition:
//
// def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
//     block = store.blocks[root]
//     if block.slot > slot:
//         return get_ancestor(store, block.parent_root, slot)
//     elif block.slot == slot:
//         return root
//     else:
//         # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
//         return root
func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
defer span.End()
r := bytesutil.ToBytes32(root)
// Get the ancestor root from the fork choice store instead of recursively looking up blocks in the DB.
// This is the optimal path.
s.cfg.ForkChoiceStore.RLock()
ar, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
s.cfg.ForkChoiceStore.RUnlock()
if err != nil {
// Try getting the ancestor root from the DB when retrieval from the fork choice store fails.
// This is the second line of defense for retrieving the ancestor root.
ar, err = s.ancestorByDB(ctx, r, slot)
if err != nil {
return nil, err
}
}
return ar[:], nil
}
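The spec pseudocode walks parent links until it reaches a block at or below the target slot; the DB fallback performs essentially the same walk over stored blocks. A self-contained sketch of that walk over a toy in-memory block map (not the actual ancestorByDB implementation):

package main

import "fmt"

type block struct {
    slot       uint64
    parentRoot [32]byte
}

// getAncestor mirrors the spec pseudocode over a simple in-memory block map:
// walk parent links until reaching a block at or below the target slot.
func getAncestor(store map[[32]byte]block, root [32]byte, slot uint64) [32]byte {
    b, ok := store[root]
    if !ok {
        return root // unknown root; nothing better to return in this sketch
    }
    if b.slot > slot {
        return getAncestor(store, b.parentRoot, slot)
    }
    // Either an exact slot match, or the root is older than the queried slot
    // (a skip slot): return the most recent root prior to the slot.
    return root
}

func main() {
    a, b, c := [32]byte{'a'}, [32]byte{'b'}, [32]byte{'c'}
    store := map[[32]byte]block{
        a: {slot: 1},
        b: {slot: 2, parentRoot: a},
        c: {slot: 5, parentRoot: b}, // slots 3 and 4 were skipped
    }
    fmt.Println(getAncestor(store, c, 3) == b) // true: most recent root prior to slot 3
}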
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}
func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
if s.cfg.BeaconDB.HasBlock(ctx, blockRoot) {
b, err := s.cfg.BeaconDB.Block(ctx, blockRoot)
if err != nil {
return nil, err
}
summary := &ethpb.StateSummary{Slot: b.Block().Slot(), Root: blockRoot[:]}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, summary); err != nil {
return nil, err
}
return summary, nil
}
return nil, errBlockDoesNotExist
// ForkChoiceStore returns the fork choice store in the service.
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}

View File

@@ -1,94 +0,0 @@
package blockchain
import (
"context"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)
// CachedHeadRoot returns the corresponding value from Forkchoice
func (s *Service) CachedHeadRoot() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.CachedHeadRoot()
}
// GetProposerHead returns the corresponding value from forkchoice
func (s *Service) GetProposerHead() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.GetProposerHead()
}
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
}
// HighestReceivedBlockSlot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
}
// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ReceivedBlocksLastEpoch()
}
// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)
}
// NewSlot returns the corresponding value from forkchoice
func (s *Service) NewSlot(ctx context.Context, slot primitives.Slot) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.NewSlot(ctx, slot)
}
// ProposerBoost wraps the corresponding method from forkchoice
func (s *Service) ProposerBoost() [32]byte {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.ProposerBoost()
}
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Head roots and head slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.Tips()
}
// UnrealizedJustifiedPayloadBlockHash returns unrealized justified payload block hash from forkchoice.
func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}
// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}
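Every wrapper in this file follows the same locking discipline: read-only accessors take the store's read lock, while mutating calls such as SetGenesisTime, InsertNode and NewSlot take the full lock, and anything handed back to callers is copied. A toy stand-in showing that pattern with a sync.RWMutex (the store below is illustrative, not the forkchoice interface):

package main

import (
    "fmt"
    "sync"
)

// store is a toy stand-in for a forkchoice store guarded by a RWMutex:
// read-only accessors take RLock, mutating calls take the full Lock.
type store struct {
    mu      sync.RWMutex
    genesis uint64
    heads   [][32]byte
}

func (s *store) SetGenesisTime(ts uint64) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.genesis = ts
}

func (s *store) Tips() [][32]byte {
    s.mu.RLock()
    defer s.mu.RUnlock()
    // Return a copy, mirroring how the checkpoint accessors copy roots.
    return append([][32]byte{}, s.heads...)
}

func main() {
    s := &store{heads: [][32]byte{{'a'}}}
    s.SetGenesisTime(1606824023)
    fmt.Println(len(s.Tips()), s.genesis) // 1 1606824023
}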

View File

@@ -4,14 +4,12 @@ import (
"context"
"testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)
func TestHeadSlot_DataRace(t *testing.T) {
@@ -34,7 +32,7 @@ func TestHeadSlot_DataRace(t *testing.T) {
func TestHeadRoot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
@@ -56,7 +54,7 @@ func TestHeadBlock_DataRace(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wsb},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
@@ -76,15 +74,12 @@ func TestHeadBlock_DataRace(t *testing.T) {
func TestHeadState_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
root := bytesutil.ToBytes32(bytesutil.PadTo([]byte{'s'}, 32))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), root))
require.NoError(t, beaconDB.SaveState(context.Background(), st, root))
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))

View File

@@ -5,21 +5,24 @@ import (
"testing"
"time"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
v3 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v3"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"google.golang.org/protobuf/proto"
)
@@ -32,7 +35,7 @@ var _ ForkFetcher = (*Service)(nil)
// insert into forkchoice
func prepareForkchoiceState(
_ context.Context,
slot primitives.Slot,
slot types.Slot,
blockRoot [32]byte,
parentRoot [32]byte,
payloadHash [32]byte,
@@ -58,7 +61,7 @@ func prepareForkchoiceState(
}
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
st, err := v3.InitializeFromProto(base)
return st, blockRoot, err
}
@@ -70,9 +73,23 @@ func TestHeadRoot_Nil(t *testing.T) {
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
}
func TestService_ForkChoiceStore(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
p := c.ForkChoiceStore()
require.Equal(t, types.Epoch(0), p.FinalizedCheckpoint().Epoch)
}
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -81,73 +98,50 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
cp = service.CurrentJustifiedCheckpt()
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
// check that forkchoice has the right genesis root as the node root
root, err := fcs.Head(ctx)
root, err := fcs.Head(ctx, []uint64{})
require.NoError(t, err)
require.Equal(t, service.originBlockRoot, root)
}
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
jroot := [32]byte{'j'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, jroot))
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, cp))
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(cp))
jp := service.CurrentJustifiedCheckpt()
assert.Equal(t, cp.Epoch, jp.Epoch, "Unexpected justified epoch")
require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
}
func TestFinalizedBlockHash(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
r := [32]byte{'f'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, r))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(cp))
h := service.FinalizedBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, r, fcs.FinalizedCheckpoint().Root)
}
func TestUnrealizedJustifiedBlockHash(t *testing.T) {
ctx := context.Background()
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
h := service.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, [32]byte{'j'}, service.cfg.ForkChoiceStore.JustifiedCheckpoint().Root)
}
func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.SetSlot(100)
c.head = &head{block: b, state: s}
assert.Equal(t, primitives.Slot(100), c.HeadSlot())
c.head = &head{slot: 100, state: s}
assert.Equal(t, types.Slot(100), c.HeadSlot())
}
func TestHeadRoot_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -157,8 +151,16 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
}
func TestHeadRoot_UseDB(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
@@ -177,7 +179,7 @@ func TestHeadRoot_UseDB(t *testing.T) {
func TestHeadBlock_CanRetrieve(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 1
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -192,13 +194,13 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
}
func TestHeadState_CanRetrieve(t *testing.T) {
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
headState, err := c.HeadState(context.Background())
require.NoError(t, err)
assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Incorrect head state received")
}
func TestGenesisTime_CanRetrieve(t *testing.T) {
@@ -209,7 +211,7 @@ func TestGenesisTime_CanRetrieve(t *testing.T) {
func TestCurrentFork_CanRetrieve(t *testing.T) {
f := &ethpb.Fork{Epoch: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Fork: f})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Fork: f})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
@@ -234,7 +236,7 @@ func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
c := &Service{}
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
require.NoError(t, err)
c.head = &head{state: s}
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
@@ -248,7 +250,7 @@ func TestHeadETH1Data_Nil(t *testing.T) {
func TestHeadETH1Data_CanRetrieve(t *testing.T) {
d := &ethpb.Eth1Data{DepositCount: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Eth1Data: d})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Eth1Data: d})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
@@ -304,22 +306,12 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
root = c.HeadGenesisValidatorsRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
}
//
//  A <- B <- C
//   \    \
//    \    ---------- E
//     ---------- D
func TestService_ChainHeads(t *testing.T) {
func TestService_ChainHeads_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -335,9 +327,44 @@ func TestService_ChainHeads(t *testing.T) {
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
roots, slots := c.ChainHeads()
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
}
//
//  A <- B <- C
//   \    \
//    \    ---------- E
//     ---------- D
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
roots, slots := c.ChainHeads()
require.Equal(t, 3, len(roots))
rootMap := map[[32]byte]primitives.Slot{{'c'}: 102, {'d'}: 103, {'e'}: 104}
rootMap := map[[32]byte]types.Slot{{'c'}: 102, {'d'}: 103, {'e'}: 104}
for i, root := range roots {
slot, ok := rootMap[root]
require.Equal(t, true, ok)
@@ -358,7 +385,7 @@ func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
i, e := c.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(v.PublicKey))
require.Equal(t, true, e)
require.Equal(t, primitives.ValidatorIndex(0), i)
require.Equal(t, types.ValidatorIndex(0), i)
}
func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
@@ -367,12 +394,12 @@ func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
require.Equal(t, false, e)
require.Equal(t, primitives.ValidatorIndex(0), idx)
require.Equal(t, types.ValidatorIndex(0), idx)
c.head = &head{state: nil}
i, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
require.Equal(t, false, e)
require.Equal(t, primitives.ValidatorIndex(0), i)
require.Equal(t, types.ValidatorIndex(0), i)
}
func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
@@ -403,7 +430,29 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
}
func TestService_IsOptimistic(t *testing.T) {
func TestService_IsOptimistic_ProtoArray(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = 0
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = 0
@@ -412,7 +461,7 @@ func TestService_IsOptimistic(t *testing.T) {
ctx := context.Background()
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
@@ -433,9 +482,9 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
require.Equal(t, false, opt)
}
func TestService_IsOptimisticForRoot(t *testing.T) {
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -450,10 +499,27 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DB(t *testing.T) {
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -477,10 +543,74 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
@@ -506,7 +636,7 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -542,31 +672,12 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
}
func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
_, err = c.IsOptimisticForRoot(ctx, br)
assert.NoError(t, err)
summ, err := beaconDB.StateSummary(ctx, br)
assert.NoError(t, err)
assert.NotNil(t, summ)
assert.Equal(t, 10, int(summ.Slot))
assert.DeepEqual(t, br[:], summ.Root)
}
func TestService_IsFinalized(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
r1 := [32]byte{'a'}
require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
require.NoError(t, c.ForkChoiceStore().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Root: r1,
}))
b := util.NewBeaconBlock()

View File

@@ -8,28 +8,29 @@ var (
// ErrInvalidBlockHashPayloadStatus is returned when the payload has invalid block hash.
ErrInvalidBlockHashPayloadStatus = invalidBlock{error: errors.New("received an INVALID_BLOCK_HASH payload from execution engine")}
// ErrUndefinedExecutionEngineError is returned when the execution engine returns an error that is not defined
ErrUndefinedExecutionEngineError = errors.New("received an undefined execution engine error")
ErrUndefinedExecutionEngineError = errors.New("received an undefined ee error")
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errNilFinalizedCheckpoint is returned when a nil finalized checkpt is returned from a state.
errNilFinalizedCheckpoint = errors.New("nil finalized checkpoint returned from state")
// errNilJustifiedCheckpoint is returned when a nil justified checkpt is returned from a state.
errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
errBlockDoesNotExist = errors.New("could not find block in DB")
errNilJustifiedCheckpoint = errors.New("nil finalized checkpoint returned from state")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// errNotOptimisticCandidate is returned when a block is not a valid optimistic candidate block.
errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
// errNilStateFromStategen is returned when a nil state is returned from the state generator.
errNilStateFromStategen = errors.New("justified state can't be nil")
// errWSBlockNotFound is returned when a block is not found in the WS cache or DB.
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
errNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
)
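These are sentinel errors: call sites generally wrap them with context, and callers match them with errors.Is. A minimal sketch of that pattern (the lookupSummary call site is hypothetical, not an actual Prysm function):

package main

import (
    "errors"
    "fmt"
)

var errInvalidNilSummary = errors.New("nil summary returned from the DB")

// lookupSummary is a hypothetical call site that wraps the sentinel with context.
func lookupSummary() error {
    return fmt.Errorf("IsOptimisticForRoot: %w", errInvalidNilSummary)
}

func main() {
    err := lookupSummary()
    fmt.Println(errors.Is(err, errInvalidNilSummary)) // true despite the wrapping
}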
// An invalid block is the block that fails state transition based on the core protocol rules.

View File

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
func TestIsInvalidBlock(t *testing.T) {

View File

@@ -5,23 +5,20 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -32,7 +29,7 @@ var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
type notifyForkchoiceUpdateArg struct {
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.ReadOnlyBeaconBlock
headBlock interfaces.BeaconBlock
}
// notifyForkchoiceUpdate signals the execution engine of the fork choice updates. The execution engine should:
@@ -61,8 +58,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithError(err).Error("Could not get execution payload for head block")
return nil, nil
}
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
@@ -70,7 +67,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
if err != nil {
log.WithError(err).Error("Could not get head payload attribute")
return nil, nil
}
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err != nil {
@@ -82,14 +83,18 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
err := s.optimisticCandidateBlock(ctx, headBlk)
if err != nil {
log.WithError(err).Error("Optimistic block failed to be candidate")
}
return payloadID, nil
case execution.ErrInvalidPayloadStatus:
forkchoiceUpdatedInvalidNodeCount.Inc()
newPayloadInvalidNodeCount.Inc()
headRoot := arg.headRoot
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
@@ -99,12 +104,12 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
return nil, nil
}
r, err := s.cfg.ForkChoiceStore.Head(ctx)
r, err := s.cfg.ForkChoiceStore.Head(ctx, s.justifiedBalances.balances)
if err != nil {
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidCount": len(invalidRoots),
}).Warn("Pruned invalid blocks, could not update head root")
return nil, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
}
@@ -132,10 +137,10 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
}
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidCount": len(invalidRoots),
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
}).Warn("Pruned invalid blocks")
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
@@ -149,8 +154,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithError(err).Error("Could not set head root to valid")
return nil, nil
}
// If the forkchoice update call has an attribute, update the proposer payload ID cache.
if hasAttr && payloadID != nil {
if hasAttr && payloadID != nil { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
@@ -180,10 +184,10 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
return bytesutil.ToBytes32(payload.BlockHash()), nil
}
// notifyNewPayload signals execution engine on a new payload.
// notifyForkchoiceUpdate signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
postStateHeader *enginev1.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
@@ -218,14 +222,14 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
return false, nil
return false, s.optimisticCandidateBlock(ctx, blk.Block())
case execution.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(blk.Block().ParentRoot()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
@@ -233,9 +237,9 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
return false, err
}
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
"slot": blk.Block().Slot(),
"blockRoot": fmt.Sprintf("%#x", root),
"invalidCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return false, invalidBlock{
invalidAncestorRoots: invalidRoots,
@@ -249,30 +253,55 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
}
}
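The switch above maps the engine's newPayload verdict onto three outcomes: a nil error means VALID, SYNCING/ACCEPTED keeps the block optimistic, and INVALID must surface so the caller prunes the invalid branch. A condensed, self-contained sketch of that mapping (the sentinel values below are toy stand-ins, not the execution package's exported errors):

package main

import (
    "errors"
    "fmt"
)

// Toy stand-ins for the execution engine status errors used above.
var (
    errAcceptedSyncingPayloadStatus = errors.New("payload is SYNCING or ACCEPTED")
    errInvalidPayloadStatus         = errors.New("payload is INVALID")
)

// handleNewPayloadStatus condenses the switch: nil means the EL said VALID,
// SYNCING/ACCEPTED imports the block optimistically, and INVALID bubbles up
// so the caller can prune the invalid ancestors.
func handleNewPayloadStatus(err error) (validated bool, fatal error) {
    switch {
    case err == nil:
        return true, nil
    case errors.Is(err, errAcceptedSyncingPayloadStatus):
        return false, nil // imported optimistically
    case errors.Is(err, errInvalidPayloadStatus):
        return false, err
    default:
        return false, err
    }
}

func main() {
    ok, err := handleNewPayloadStatus(nil)
    fmt.Println(ok, err) // true <nil>
}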
// optimisticCandidateBlock returns an error if this block can't be optimistically synced.
// It replaces the boolean return value in the spec code with `errNotOptimisticCandidate`.
//
// Spec pseudocode definition:
// def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
//     if is_execution_block(opt_store.blocks[block.parent_root]):
//         return True
//
//     if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot:
//         return True
//
//     return False
func (s *Service) optimisticCandidateBlock(ctx context.Context, blk interfaces.BeaconBlock) error {
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
return nil
}
parent, err := s.getBlock(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
if err != nil {
return err
}
parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
if err != nil {
return err
}
if parentIsExecutionBlock {
return nil
}
return errNotOptimisticCandidate
}
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
// Root is `[32]byte{}` since we are retrieving the proposer ID of a given slot; the root was not known when the entry was inserted at assignment.
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
if !ok && !features.Get().PrepareAllPayloads { // There's no need to build attribute if there is no proposer for slot.
return false, emptyAttri, 0
if !ok { // There's no need to build attribute if there is no proposer for slot.
return false, nil, 0, nil
}
// Get previous randao.
st = st.Copy()
if slot > st.Slot() {
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
}
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
if err != nil {
return false, nil, 0, err
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
log.WithError(err).Error("Could not get randao mix to get payload attribute")
return false, emptyAttri, 0
return false, nil, 0, nil
}
// Get fee recipient.
@@ -290,8 +319,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
"Please refer to our documentation for instructions")
}
case err != nil:
log.WithError(err).Error("Could not get fee recipient to get payload attribute")
return false, emptyAttri, 0
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
default:
feeRecipient = recipient
}
@@ -299,44 +327,14 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
log.WithError(err).Error("Could not get timestamp to get payload attribute")
return false, emptyAttri, 0
return false, nil, 0, err
}
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return false, emptyAttri, 0
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
Withdrawals: withdrawals,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri, 0
}
case version.Bellatrix:
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri, 0
}
default:
log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
return false, emptyAttri, 0
attr := &enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
}
return true, attr, proposerID
return true, attr, proposerID, nil
}
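The attribute's timestamp is plain slot arithmetic, which is what slots.ToTime encapsulates: genesis time plus slot times seconds-per-slot. A small sketch of that conversion (the constants are illustrative only):

package main

import (
    "fmt"
    "time"
)

// slotToTime mirrors the slot-to-wall-clock conversion used to fill the
// payload attribute timestamp: genesis time plus slot * SECONDS_PER_SLOT.
func slotToTime(genesisUnix, slot, secondsPerSlot uint64) time.Time {
    return time.Unix(int64(genesisUnix+slot*secondsPerSlot), 0)
}

func main() {
    genesis := uint64(1606824023) // mainnet genesis, used only as an example value
    fmt.Println(slotToTime(genesis, 1, 12).Unix()) // 1606824035
}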
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.

File diff suppressed because it is too large

View File

@@ -1,134 +0,0 @@
package blockchain
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
func (s *Service) isNewProposer(slot primitives.Slot) bool {
_, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
return ok || features.Get().PrepareAllPayloads
}
func (s *Service) isNewHead(r [32]byte) bool {
s.headLock.RLock()
defer s.headLock.RUnlock()
currentHeadRoot := s.originBlockRoot
if s.head != nil {
currentHeadRoot = s.headRoot()
}
return r != currentHeadRoot || r == [32]byte{}
}
func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
if !s.hasBlockInInitSyncOrDB(ctx, r) {
return nil, nil, errors.New("block does not exist")
}
newHeadBlock, err := s.getBlock(ctx, r)
if err != nil {
return nil, nil, err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
return nil, nil, err
}
return headState, newHeadBlock, nil
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
// It returns true if the new head is updated.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) (bool, error) {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
isNewHead := s.isNewHead(newHeadRoot)
if !isNewHead {
return false, nil
}
isNewProposer := s.isNewProposer(proposingSlot)
if isNewProposer && !features.Get().DisableReorgLateBlocks {
if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return false, nil
}
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return false, nil
}
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headBlock: headBlock.Block(),
})
if err != nil {
return false, errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}
// Only need to prune attestations from pool if the head has changed.
if err := s.pruneAttsFromPool(headBlock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}
return true, nil
}
// shouldOverrideFCU checks whether the incoming block is still subject to being
// reorged by the next proposer.
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
currentSlot := s.CurrentSlot()
if proposingSlot == currentSlot {
proposerHead := s.cfg.ForkChoiceStore.GetProposerHead()
if proposerHead != newHeadRoot {
return true
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
params.BeaconConfig().SecondsPerSlot)
lateBlockFailedAttemptSecondThreshold.Inc()
} else {
if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
return true
}
secs, err := slots.SecondsSinceSlotStart(currentSlot,
uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
}
if secs >= doublylinkedtree.ProcessAttestationsThreshold {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
doublylinkedtree.ProcessAttestationsThreshold)
lateBlockFailedAttemptFirstThreshold.Inc()
}
}
return false
}
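Both branches above are time-based: at the proposing slot the reorg attempt is abandoned once attestations are due (SecondsPerSlot), and one slot earlier it is abandoned once the attestation-processing threshold has passed. A small sketch of the seconds-into-slot computation that drives those cutoffs; secondsIntoSlot and the 12-second/10-second figures are illustrative assumptions, not the package's constants.

package main

import (
    "fmt"
    "time"
)

// secondsIntoSlot returns how many seconds have elapsed inside the current
// slot, given a genesis time and a slot duration. This is the quantity the
// threshold comparisons above are made against.
func secondsIntoSlot(genesis time.Time, secondsPerSlot uint64, now time.Time) uint64 {
    elapsed := uint64(now.Sub(genesis).Seconds())
    return elapsed % secondsPerSlot
}

func main() {
    genesis := time.Now().Add(-26 * time.Second) // hypothetical: 26s after genesis with 12s slots
    secs := secondsIntoSlot(genesis, 12, time.Now())
    fmt.Printf("%d seconds into the slot, attestations counted: %v\n", secs, secs >= 10)
}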

View File

@@ -1,224 +0,0 @@
package blockchain
import (
"context"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestService_isNewProposer(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.Equal(t, false, service.isNewProposer(service.CurrentSlot()+1))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
require.Equal(t, true, service.isNewProposer(service.CurrentSlot()+1))
}
func TestService_isNewHead(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.Equal(t, true, service.isNewHead([32]byte{}))
service.head = &head{root: [32]byte{1}}
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{1}))
// Nil head should use origin root
service.head = nil
service.originBlockRoot = [32]byte{3}
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{3}))
}
func TestService_getHeadStateAndBlock(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
_, _, err := service.getStateAndBlock(context.Background(), [32]byte{})
require.ErrorContains(t, "block does not exist", err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Signature: []byte{1}}))
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), blk))
st, _ := util.DeterministicGenesisState(t, 1)
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), st, r))
gotState, err := service.cfg.BeaconDB.State(context.Background(), r)
require.NoError(t, err)
require.DeepEqual(t, st.ToProto(), gotState.ToProto())
gotBlk, err := service.cfg.BeaconDB.Block(context.Background(), r)
require.NoError(t, err)
require.DeepEqual(t, blk, gotBlk)
}
func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
_, err = service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1)
require.NoError(t, err)
hookErr := "could not notify forkchoice update"
invalidStateErr := "could not get state summary: could not find block in DB"
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1)
require.NoError(t, err)
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
service.head = &head{
root: [32]byte{'a'},
block: nil, /* should not panic if notify head uses correct head */
}
// Block in Cache
b := util.NewBeaconBlock()
b.Block.Slot = 2
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
r1, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, r1, wsb))
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
root: r1,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot())
require.NoError(t, err)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
// Block in DB
b = util.NewBeaconBlock()
b.Block.Slot = 3
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
r1, err = b.Block.HashTreeRoot()
require.NoError(t, err)
st, _ = util.DeterministicGenesisState(t, 1)
service.head = &head{
root: r1,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
require.Equal(t, true, has)
require.Equal(t, primitives.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1)
require.NoError(t, err)
require.Equal(t, service.headRoot(), headRoot)
}
func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
service.cfg.ExecutionEngineCaller = &mockExecution.EngineClient{}
require.NoError(t, beaconDB.SaveState(ctx, st, bellatrixBlkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
sb, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, sb))
r, err := sb.Block().HashTreeRoot()
require.NoError(t, err)
// Set head to be the same but proposing next slot
service.head.root = r
service.head.block = sb
service.head.state = st
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
_, err = service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1)
require.NoError(t, err)
}
func TestShouldOverrideFCU(t *testing.T) {
hook := logTest.NewGlobal()
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.Equal(t, primitives.Slot(2), service.CurrentSlot())
require.Equal(t, true, service.shouldOverrideFCU(headRoot, 2))
require.LogsDoNotContain(t, hook, "12 seconds")
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 2))
require.LogsContain(t, hook, "12 seconds")
head, err := fcs.Head(ctx)
require.NoError(t, err)
require.Equal(t, headRoot, head)
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
require.LogsDoNotContain(t, hook, "10 seconds")
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
require.LogsContain(t, hook, "10 seconds")
}

View File

@@ -6,21 +6,20 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/math"
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -28,9 +27,14 @@ import (
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balances from cache.
// This function is only used in spec-tests; it saves the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
jp := s.CurrentJustifiedCheckpt()
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(jp.Root))
if err != nil {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", jp.Root)
return errors.Wrap(err, msg)
}
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
return errors.Wrap(err, "could not update head")
}
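In the older code path shown above, the justified balances are looked up by checkpoint root before forkchoice computes a head. A minimal sketch of such a root-keyed balance getter; balanceCache and the load callback are hypothetical names, not the service's justifiedBalances implementation.

package main

import "fmt"

// balanceCache memoizes validator balances per justified checkpoint root, a
// sketch of the lookup performed before asking forkchoice for a head.
type balanceCache struct {
    root     [32]byte
    balances []uint64
}

func (c *balanceCache) get(root [32]byte, load func([32]byte) ([]uint64, error)) ([]uint64, error) {
    if c.balances != nil && c.root == root {
        return c.balances, nil // reuse balances for the same justified checkpoint
    }
    b, err := load(root)
    if err != nil {
        return nil, fmt.Errorf("could not read balances for state w/ justified checkpoint %#x: %w", root, err)
    }
    c.root, c.balances = root, b
    return b, nil
}

func main() {
    c := &balanceCache{}
    load := func([32]byte) ([]uint64, error) { return []uint64{32, 32, 31}, nil }
    b, _ := c.get([32]byte{1}, load)
    fmt.Println(b)
}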
@@ -47,23 +51,30 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
// This defines the current chain service's view of head.
type head struct {
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
slot types.Slot // current head slot.
root [32]byte // current head root.
block interfaces.SignedBeaconBlock // current head block.
state state.BeaconState // current head state.
}
// This saves head info to the local service cache; it also saves the
// new head root to the DB.
// Caller of the method MUST acquire a lock on forkchoice.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.ReadOnlySignedBeaconBlock, headState state.BeaconState) error {
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
// Do nothing if head hasn't changed.
if !s.isNewHead(newHeadRoot) {
var oldHeadRoot [32]byte
s.headLock.RLock()
if s.head == nil {
oldHeadRoot = s.originBlockRoot
} else {
oldHeadRoot = s.head.root
}
s.headLock.RUnlock()
if newHeadRoot == oldHeadRoot {
return nil
}
if err := blocks.BeaconBlockIsNil(headBlock); err != nil {
return err
}
@@ -80,7 +91,6 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
s.headLock.RLock()
oldHeadBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
return errors.Wrap(err, "could not get old head block")
}
oldStateRoot := oldHeadBlock.Block().StateRoot()
@@ -89,43 +99,23 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
newHeadSlot := headBlock.Block().Slot()
newStateRoot := headBlock.Block().StateRoot()
r, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get old head root")
}
oldHeadRoot := bytesutil.ToBytes32(r)
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
// A chain re-org occurred, so we fire an event notifying the rest of the services.
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != oldHeadRoot {
commonRoot, forkSlot, err := s.ForkChoicer().CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not find common ancestor root")
commonRoot = params.BeaconConfig().ZeroHash
}
dis := headSlot + newHeadSlot - 2*forkSlot
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
}
newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"newRoot": fmt.Sprintf("%#x", newHeadRoot),
"newWeight": newWeight,
"oldSlot": fmt.Sprintf("%d", headSlot),
"oldRoot": fmt.Sprintf("%#x", oldHeadRoot),
"oldWeight": oldWeight,
"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
"distance": dis,
"depth": dep,
"distance": headSlot + newHeadSlot - 2*forkSlot,
"depth": math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
}).Info("Chain reorg occurred")
reorgDistance.Observe(float64(dis))
reorgDepth.Observe(float64(dep))
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
isOptimistic, err := s.IsOptimistic(ctx)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
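The reorg metrics logged above follow directly from the three slots involved: distance counts the slots on both branches combined (oldHeadSlot + newHeadSlot - 2*forkSlot) and depth is the longer of the two branches. A tiny worked example, assuming an old head at slot 10, a new head at slot 12 and a common ancestor at slot 9:

package main

import "fmt"

// With an old head at slot 10, a new head at slot 12 and the fork (common
// ancestor) at slot 9: distance = 10 + 12 - 2*9 = 4, depth = max(1, 3) = 3.
func main() {
    oldHeadSlot, newHeadSlot, forkSlot := uint64(10), uint64(12), uint64(9)
    distance := oldHeadSlot + newHeadSlot - 2*forkSlot
    depth := oldHeadSlot - forkSlot
    if newHeadSlot-forkSlot > depth {
        depth = newHeadSlot - forkSlot
    }
    fmt.Println("distance:", distance, "depth:", depth) // distance: 4 depth: 3
}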
@@ -136,14 +126,14 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot[:],
NewHeadState: newStateRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: slots.ToEpoch(newHeadSlot),
ExecutionOptimistic: isOptimistic,
},
})
if err := s.saveOrphanedOperations(ctx, oldHeadRoot, newHeadRoot); err != nil {
if err := s.saveOrphanedAtts(ctx, oldHeadRoot, newHeadRoot); err != nil {
return err
}
reorgCount.Inc()
@@ -162,7 +152,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot[:], newHeadRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, newHeadRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
@@ -173,7 +163,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// This gets called to update the canonical root mapping. It does not save the head block
// root in the DB. With the inception of the initial-sync-cache-state flag, it uses the finalized
// checkpoint as an anchor to resume sync, so the head no longer needs to be saved on a per-slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.SignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return err
}
@@ -196,7 +186,7 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
}
// This sets head view object which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
func (s *Service) setHead(root [32]byte, block interfaces.SignedBeaconBlock, state state.BeaconState) error {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -206,6 +196,7 @@ func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBl
return err
}
s.head = &head{
slot: block.Block().Slot(),
root: root,
block: bCp,
state: state.Copy(),
@@ -216,7 +207,7 @@ func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBl
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.SignedBeaconBlock, state state.BeaconState) error {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -226,6 +217,7 @@ func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySig
return err
}
s.head = &head{
slot: block.Block().Slot(),
root: root,
block: bCp,
state: state,
@@ -235,11 +227,8 @@ func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySig
// This returns the head slot.
// This is a lock free version.
func (s *Service) headSlot() primitives.Slot {
if s.head == nil || s.head.block == nil || s.head.block.Block() == nil {
return 0
}
return s.head.block.Block().Slot()
func (s *Service) headSlot() types.Slot {
return s.head.slot
}
// This returns the head root.
@@ -256,7 +245,7 @@ func (s *Service) headRoot() [32]byte {
// This returns the head block.
// It does a full copy on head block for immutability.
// This is a lock free version.
func (s *Service) headBlock() (interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) headBlock() (interfaces.SignedBeaconBlock, error) {
return s.head.block.Copy()
}
@@ -270,16 +259,6 @@ func (s *Service) headState(ctx context.Context) state.BeaconState {
return s.head.state.Copy()
}
// This returns a read only version of the head state.
// It does not perform a copy of the head state.
// This is a lock free version.
func (s *Service) headStateReadOnly(ctx context.Context) state.ReadOnlyBeaconState {
ctx, span := trace.StartSpan(ctx, "blockChain.headStateReadOnly")
defer span.End()
return s.head.state
}
// This returns the genesis validators root of the head state.
// This is a lock free version.
func (s *Service) headGenesisValidatorsRoot() [32]byte {
@@ -289,14 +268,14 @@ func (s *Service) headGenesisValidatorsRoot() [32]byte {
// This returns the validator referenced by the provided index in
// the head state.
// This is a lock free version.
func (s *Service) headValidatorAtIndex(index primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
func (s *Service) headValidatorAtIndex(index types.ValidatorIndex) (state.ReadOnlyValidator, error) {
return s.head.state.ValidatorAtIndexReadOnly(index)
}
// This returns the validator index referenced by the provided pubkey in
// the head state.
// This is a lock free version.
func (s *Service) headValidatorIndexAtPubkey(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
func (s *Service) headValidatorIndexAtPubkey(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool) {
return s.head.state.ValidatorIndexByPubkey(pubKey)
}
@@ -310,7 +289,7 @@ func (s *Service) hasHeadState() bool {
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(
ctx context.Context,
newHeadSlot primitives.Slot,
newHeadSlot types.Slot,
newHeadState state.BeaconState,
newHeadStateRoot,
newHeadRoot []byte,
@@ -318,7 +297,7 @@ func (s *Service) notifyNewHeadEvent(
previousDutyDependentRoot := s.originBlockRoot[:]
currentDutyDependentRoot := s.originBlockRoot[:]
var previousDutyEpoch primitives.Epoch
var previousDutyEpoch types.Epoch
currentDutyEpoch := slots.ToEpoch(newHeadSlot)
if currentDutyEpoch > 0 {
previousDutyEpoch = currentDutyEpoch.Sub(1)
@@ -362,10 +341,10 @@ func (s *Service) notifyNewHeadEvent(
return nil
}
// This saves the Attestations and BLSToExecChanges between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out attestations that are more than one epoch old as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, _, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, newHeadRoot, orphanedRoot)
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, _, err := s.ForkChoicer().CommonAncestor(ctx, newHeadRoot, orphanedRoot)
switch {
// Exit early if there's no common ancestor and the root doesn't exist; there would be nothing to save.
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
@@ -403,30 +382,7 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
}
saveOrphanedAttCount.Inc()
}
for _, as := range orphanedBlk.Block().Body().AttesterSlashings() {
if err := s.cfg.SlashingPool.InsertAttesterSlashing(ctx, s.headStateReadOnly(ctx), as); err != nil {
log.WithError(err).Error("Could not insert reorg attester slashing")
}
}
for _, vs := range orphanedBlk.Block().Body().ProposerSlashings() {
if err := s.cfg.SlashingPool.InsertProposerSlashing(ctx, s.headStateReadOnly(ctx), vs); err != nil {
log.WithError(err).Error("Could not insert reorg proposer slashing")
}
}
for _, v := range orphanedBlk.Block().Body().VoluntaryExits() {
s.cfg.ExitPool.InsertVoluntaryExit(v)
}
if orphanedBlk.Version() >= version.Capella {
changes, err := orphanedBlk.Block().Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
for _, c := range changes {
s.cfg.BLSToExecPool.InsertBLSToExecChange(c)
}
}
parentRoot := orphanedBlk.Block().ParentRoot()
orphanedRoot = bytesutil.ToBytes32(parentRoot[:])
orphanedRoot = bytesutil.ToBytes32(orphanedBlk.Block().ParentRoot())
}
return nil
}
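The loop above walks the orphaned branch parent by parent until it reaches the common ancestor, re-inserting each orphaned block's operations into the pools. A self-contained sketch of that walk, with a plain map standing in for the block store and strings standing in for roots (both are assumptions for illustration):

package main

import "fmt"

// parent maps each block root to its parent root; a stand-in for the DB
// lookups performed by getBlock above.
var parent = map[string]string{
    "d": "c", "c": "b", "b": "a", // orphaned branch d -> c -> b, with ancestor a
}

// collectOrphaned walks from orphanedRoot back toward commonAncestor and
// returns the roots whose operations would be re-inserted into the pools.
func collectOrphaned(orphanedRoot, commonAncestor string) []string {
    var out []string
    for r := orphanedRoot; r != commonAncestor; {
        out = append(out, r)
        p, ok := parent[r]
        if !ok {
            break // unknown parent: nothing more to walk (the real code checks forkchoice errors)
        }
        r = p
    }
    return out
}

func main() {
    fmt.Println(collectOrphaned("d", "a")) // [d c b]
}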

View File

@@ -5,17 +5,17 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/async"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
// Initialize the state cache for sync committees.
@@ -24,36 +24,36 @@ var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return the caller's sync committee indices and public keys with respect to the current head state.
type HeadSyncCommitteeFetcher interface {
HeadSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error)
HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) ([][]byte, error)
HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error)
}
// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
// The head sync committee domain functions return the caller's domain data with respect to slot and head state.
type HeadDomainFetcher interface {
HeadSyncCommitteeDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
HeadSyncSelectionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
HeadSyncContributionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
}
// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
}
// HeadSyncSelectionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
}
// HeadSyncContributionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
}
// HeadSyncCommitteeIndices returns the sync committee index position using the head state. Input `slot` is taken into consideration,
// where a validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at an epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the validator will be considered using next period sync committee.
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the valiator will be considered using next period sync committee.
//
// Spec definition:
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
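Because duties are keyed to slot + 1, the last slot of a sync-committee period already signs for the next period's committee, which is what the test cases further below exercise. A small sketch of the boundary check, using mainnet-style constants (32 slots per epoch, 256 epochs per period) as illustrative assumptions:

package main

import "fmt"

const (
    slotsPerEpoch                = 32  // assumed mainnet value
    epochsPerSyncCommitteePeriod = 256 // assumed mainnet value
)

// usesNextCommittee reports whether duties for `slot` fall under the next
// sync committee, i.e. whether epoch(slot+1) starts a new period while
// epoch(slot) is still in the previous one.
func usesNextCommittee(slot uint64) bool {
    currentEpoch := slot / slotsPerEpoch
    nextSlotEpoch := (slot + 1) / slotsPerEpoch
    return currentEpoch/epochsPerSyncCommitteePeriod != nextSlotEpoch/epochsPerSyncCommitteePeriod
}

func main() {
    periodSlots := uint64(epochsPerSyncCommitteePeriod * slotsPerEpoch)
    fmt.Println(usesNextCommittee(periodSlots - 2)) // false: still the current period
    fmt.Println(usesNextCommittee(periodSlots - 1)) // true: last slot signs for the next period
}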
@@ -61,7 +61,7 @@ func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot prim
// [compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)
// rather than for the range
// [compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)
func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
nextSlotEpoch := slots.ToEpoch(slot + 1)
currentEpoch := slots.ToEpoch(slot)
@@ -79,7 +79,7 @@ func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index primitives
// headCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
@@ -89,7 +89,7 @@ func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index pri
// headNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
@@ -99,7 +99,7 @@ func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index primit
// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) ([][]byte, error) {
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
@@ -125,7 +125,7 @@ func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.
}
// returns calculated domain using input `domain` and `slot`.
func (s *Service) domainWithHeadState(ctx context.Context, slot primitives.Slot, domain [4]byte) ([]byte, error) {
func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, domain [4]byte) ([]byte, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
@@ -135,7 +135,7 @@ func (s *Service) domainWithHeadState(ctx context.Context, slot primitives.Slot,
// returns the head state advanced up to `slot`. It utilizes the cache `syncCommitteeHeadState` by retrieving with `slot` as the key.
// On a cache miss, it processes the head state up to `slot` and fills the cache with `slot` as the key.
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives.Slot) (state.BeaconState, error) {
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot) (state.BeaconState, error) {
var headState state.BeaconState
var err error
mLock := async.NewMultilock(fmt.Sprintf("%s-%d", "syncHeadState", slot))
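getSyncCommitteeHeadState caches one advanced head state per slot and serializes concurrent callers so the expensive state processing runs only once. A minimal sketch of that cache-by-slot pattern; slotStateCache and the compute callback are hypothetical, while the real code uses async.NewMultilock and a dedicated cache type.

package main

import (
    "fmt"
    "sync"
)

// slotStateCache keeps one entry keyed by slot, guarded by a mutex so
// concurrent callers for the same slot perform the state advance only once.
type slotStateCache struct {
    mu    sync.Mutex
    slot  uint64
    state string // stand-in for a beacon state advanced to `slot`
}

func (c *slotStateCache) get(slot uint64, compute func(uint64) string) string {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.state != "" && c.slot == slot {
        return c.state // cache hit
    }
    c.state = compute(slot) // cache miss: advance the head state up to `slot`
    c.slot = slot
    return c.state
}

func main() {
    c := &slotStateCache{}
    advance := func(slot uint64) string { return fmt.Sprintf("state@%d", slot) }
    fmt.Println(c.get(7, advance)) // computed
    fmt.Println(c.get(7, advance)) // served from cache
}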

View File

@@ -4,15 +4,53 @@ import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
dbtest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
func TestService_headSyncCommitteeFetcher_Errors(t *testing.T) {
beaconDB := dbtest.SetupDB(t)
c := &Service{
cfg: &config{
StateGen: stategen.New(beaconDB),
},
}
c.head = &head{}
_, err := c.headCurrentSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.headNextSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(0), types.CommitteeIndex(0))
require.ErrorContains(t, "nil state", err)
}
func TestService_HeadDomainFetcher_Errors(t *testing.T) {
beaconDB := dbtest.SetupDB(t)
c := &Service{
cfg: &config{
StateGen: stategen.New(beaconDB),
},
}
c.head = &head{}
_, err := c.HeadSyncCommitteeDomain(context.Background(), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.HeadSyncSelectionProofDomain(context.Background(), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.HeadSyncSelectionProofDomain(context.Background(), types.Slot(0))
require.ErrorContains(t, "nil state", err)
}
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
@@ -20,18 +58,18 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {
// Current period
slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// Current period where slot-2 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
require.DeepEqual(t, a, b)
// Next period where slot-1 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
require.DeepNotEqual(t, a, b)
}
@@ -43,7 +81,7 @@ func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
@@ -57,7 +95,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should not be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
@@ -71,7 +109,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), primitives.Slot(slot), 0)
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(slot), 0)
require.NoError(t, err)
// Any subcommittee should match the subcommittee size.

View File

@@ -7,20 +7,22 @@ import (
"testing"
"time"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -29,12 +31,12 @@ func TestSaveHead_Same(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
r := [32]byte{'A'}
service.head = &head{root: r}
service.head = &head{slot: 0, root: r}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), r, b, st))
assert.Equal(t, primitives.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}
@@ -48,10 +50,11 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
slot: 0,
root: oldRoot,
block: oldBlock,
}
@@ -63,11 +66,11 @@ func TestSaveHead_Different(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, bytesutil.ToBytes32(wsb.Block().ParentRoot()), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -77,7 +80,7 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
require.NoError(t, err)
@@ -87,7 +90,7 @@ func TestSaveHead_Different(t *testing.T) {
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
}
func TestSaveHead_Different_Reorg(t *testing.T) {
@@ -101,16 +104,17 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
slot: 0,
root: oldRoot,
block: oldBlock,
}
reorgChainParent := [32]byte{'B'}
state, blkRoot, err = prepareForkchoiceState(ctx, 0, reorgChainParent, oldRoot, oldBlock.Block().ParentRoot(), ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 0, reorgChainParent, oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -122,7 +126,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -132,7 +136,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
require.NoError(t, err)
@@ -144,12 +148,26 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
require.LogsContain(t, hook, "Chain reorg occurred")
require.LogsContain(t, hook, "distance=1")
require.LogsContain(t, hook, "depth=1")
}
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
ctx := context.Background()
state, _ := util.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
balances, err := service.justifiedBalances.get(ctx, r)
require.NoError(t, err)
require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
}
func Test_notifyNewHeadEvent(t *testing.T) {
t.Run("genesis_state_root", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
@@ -216,46 +234,62 @@ func Test_notifyNewHeadEvent(t *testing.T) {
})
}
func TestRetrieveHead_ReadOnly(t *testing.T) {
func TestSaveOrphanedAtts_NoCommonAncestor_Protoarray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
// this test does not make sense in doubly linked tree since it enforces
// that the finalized node is a common ancestor
service.cfg.ForkChoiceStore = protoarray.New()
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// -4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
service.head = &head{
root: oldRoot,
block: oldBlock,
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
newHeadSignedBlock := util.NewBeaconBlock()
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
rOnlyState, err := service.HeadStateReadOnly(ctx)
require.NoError(t, err)
assert.Equal(t, rOnlyState, service.head.state, "Head is not the same object")
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestSaveOrphanedAtts(t *testing.T) {
@@ -306,11 +340,11 @@ func TestSaveOrphanedAtts(t *testing.T) {
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
@@ -324,25 +358,18 @@ func TestSaveOrphanedAtts(t *testing.T) {
require.DeepEqual(t, wantAtts, atts)
}
func TestSaveOrphanedOps(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ShardCommitteePeriod = 0
params.OverrideBeaconConfig(config)
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
service.head = &head{state: st}
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
@@ -359,17 +386,6 @@ func TestSaveOrphanedOps(t *testing.T) {
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig := util.DefaultBlockGenConfig()
blkConfig.NumBLSChanges = 5
blkConfig.NumProposerSlashings = 1
blkConfig.NumAttesterSlashings = 1
blkConfig.NumVoluntaryExits = 1
blk3, err := util.GenerateFullBlock(st, keys, blkConfig, 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
@@ -378,93 +394,25 @@ func TestSaveOrphanedOps(t *testing.T) {
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingAttesterSlashings(ctx, st, false)))
exits, err := service.cfg.ExitPool.PendingExits()
require.NoError(t, err)
require.Equal(t, 1, len(exits))
}
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.cfg.BLSToExecPool = blstoexec.NewPool()
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisStateCapella(t, 64)
blkConfig := util.DefaultBlockGenConfig()
blkConfig.NumBLSChanges = 5
blkG, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 1)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig.NumBLSChanges = 10
blk1, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 2)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig.NumBLSChanges = 15
blk2, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 3)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlockCapella()
blkConfig.NumBLSChanges = 0
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlockCapella{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
pending, err := service.cfg.BLSToExecPool.PendingBLSToExecChanges()
require.NoError(t, err)
require.Equal(t, 15, len(pending))
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
DisableForkchoiceDoublyLinkedTree: false,
})
defer resetCfg()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
@@ -511,11 +459,11 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
@@ -530,6 +478,11 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
}
func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
DisableForkchoiceDoublyLinkedTree: false,
})
defer resetCfg()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
@@ -570,18 +523,27 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
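The orphaned-operations tests above exercise one idea: when the head switches branches, the abandoned branch is walked back to the common ancestor and its attestations, slashings, and exits are re-inserted into the operation pools so they are not lost. A minimal, self-contained sketch of that walk, using placeholder types rather than Prysm's real service and pools:

```go
package main

import "fmt"

// Hypothetical, simplified types for this sketch only; the real service walks
// forkchoice/DB blocks and feeds the attestation, slashing, and exit pools.
type block struct {
	root   string
	parent string
	atts   []string
	exits  []string
}

// reinsertOrphanedOps walks from the orphaned head back to the common
// ancestor with the new head and collects the operations that would
// otherwise be dropped, mirroring what the tests above assert.
func reinsertOrphanedOps(chain map[string]block, orphaned, ancestor string) (atts, exits []string) {
	for r := orphaned; r != ancestor; r = chain[r].parent {
		b := chain[r]
		atts = append(atts, b.atts...)
		exits = append(exits, b.exits...)
	}
	return atts, exits
}

func main() {
	// 0 -- 1 -- 2   (orphaned branch)
	//  \-- 4        (new head)
	chain := map[string]block{
		"1": {root: "1", parent: "0", atts: []string{"att1"}},
		"2": {root: "2", parent: "1", atts: []string{"att2"}, exits: []string{"exit2"}},
	}
	atts, exits := reinsertOrphanedOps(chain, "2", "0")
	fmt.Println(atts, exits) // [att2 att1] [exit2]
}
```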
func TestUpdateHead_noSavedChanges(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)
@@ -606,9 +568,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{1, 2}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
newRoot, err := service.cfg.ForkChoiceStore.Head(ctx)
newRoot, err := service.cfg.ForkChoiceStore.Head(ctx, []uint64{1, 2})
require.NoError(t, err)
require.NotEqual(t, headRoot, newRoot)
require.Equal(t, headRoot, service.headRoot())


@@ -4,13 +4,13 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
)
// This saves a beacon block to the initial sync blocks cache. It rate limits how many blocks
// the cache keeps in memory (2 epochs worth of blocks) and saves them to DB when it hits this limit.
func (s *Service) saveInitSyncBlock(ctx context.Context, r [32]byte, b interfaces.ReadOnlySignedBeaconBlock) error {
func (s *Service) saveInitSyncBlock(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
s.initSyncBlocksLock.Lock()
s.initSyncBlocks[r] = b
numBlocks := len(s.initSyncBlocks)
@@ -43,7 +43,7 @@ func (s *Service) hasBlockInInitSyncOrDB(ctx context.Context, r [32]byte) bool {
// Returns block for a given root `r` from either the initial sync blocks cache or the DB.
// Error is returned if the block is not found in either cache or DB.
func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.SignedBeaconBlock, error) {
s.initSyncBlocksLock.RLock()
// Check cache first because it's faster.
@@ -64,11 +64,11 @@ func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.ReadOnly
// This retrieves all the beacon blocks from the initial sync blocks cache, the returned
// blocks are unordered.
func (s *Service) getInitSyncBlocks() []interfaces.ReadOnlySignedBeaconBlock {
func (s *Service) getInitSyncBlocks() []interfaces.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
blks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, len(s.initSyncBlocks))
blks := make([]interfaces.SignedBeaconBlock, 0, len(s.initSyncBlocks))
for _, b := range s.initSyncBlocks {
blks = append(blks, b)
}
@@ -79,5 +79,5 @@ func (s *Service) getInitSyncBlocks() []interfaces.ReadOnlySignedBeaconBlock {
func (s *Service) clearInitSyncBlocks() {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
s.initSyncBlocks = make(map[[32]byte]interfaces.SignedBeaconBlock)
}
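The docstring above describes a bounded cache: blocks accumulate in memory during initial sync and are flushed to the DB once roughly two epochs' worth have been collected. A hedged sketch of that save-then-flush pattern, with a placeholder block type and an assumed flush hook standing in for the DB write:

```go
package main

import (
	"fmt"
	"sync"
)

// Minimal sketch of the "cache then flush" idea; the real service stores
// signed beacon blocks and writes them to the beacon DB. The limit here is
// arbitrary and stands in for "two epochs worth of blocks".
type initSyncCache struct {
	mu     sync.Mutex
	blocks map[[32]byte]string       // value type is a placeholder for a block
	limit  int                       // flush threshold
	flush  func(map[[32]byte]string) // assumed DB write hook
}

func (c *initSyncCache) save(root [32]byte, blk string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.blocks[root] = blk
	if len(c.blocks) >= c.limit {
		c.flush(c.blocks)
		c.blocks = make(map[[32]byte]string)
	}
}

func main() {
	c := &initSyncCache{
		blocks: make(map[[32]byte]string),
		limit:  2,
		flush:  func(m map[[32]byte]string) { fmt.Println("flushed", len(m), "blocks") },
	}
	c.save([32]byte{1}, "block-1")
	c.save([32]byte{2}, "block-2") // hits the limit and flushes
}
```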


@@ -4,10 +4,10 @@ import (
"context"
"testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)
func TestService_getBlock(t *testing.T) {


@@ -1,7 +1,7 @@
package blockchain
import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v3/config/params"
)
func init() {


@@ -6,22 +6,21 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
)
var log = logrus.WithField("prefix", "blockchain")
// logs state transition related data every slot.
func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
func logStateTransitionData(b interfaces.BeaconBlock) error {
log := log.WithField("slot", b.Slot())
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
@@ -38,41 +37,36 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() >= version.Altair {
if b.Version() == version.Altair || b.Version() == version.Bellatrix {
agg, err := b.Body().SyncAggregate()
if err != nil {
return err
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() >= version.Bellatrix {
if b.Version() == version.Bellatrix {
p, err := b.Body().Execution()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case err != nil:
if err != nil {
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
log = log.WithField("txCount", len(txs))
}
log.Info("Finished applying state transition")
return nil
}
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
startTime, err := slots.ToTime(genesisTime, block.Slot())
if err != nil {
return err
}
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
@@ -82,11 +76,10 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
"deposits": len(block.Body().Deposits()),
}).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
@@ -101,7 +94,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
}
// logs payload related data every slot.
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
func logPayload(block interfaces.BeaconBlock) error {
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")
@@ -117,24 +110,12 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
fields := logrus.Fields{
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}
if block.Version() >= version.Capella {
withdrawals, err := payload.Withdrawals()
if err != nil {
return errors.Wrap(err, "could not get withdrawals")
}
fields["withdrawals"] = len(withdrawals)
changes, err := block.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
fields["blsToExecutionChanges"] = len(changes)
}
log.WithFields(fields).Debug("Synced new payload")
}).Debug("Synced new payload")
return nil
}
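The hunk above changes how log fields are gated on the block's fork version (a `>= version.Altair` style check versus an explicit Altair/Bellatrix list). A small standalone sketch of the version-gating idea, using illustrative constants rather than Prysm's `runtime/version` package:

```go
package main

import "fmt"

// Illustrative fork ordering for this sketch only. The point is the
// ">= fork" gating used above: a field introduced at a fork is logged for
// that fork and every later one.
const (
	Phase0 = iota
	Altair
	Bellatrix
	Capella
)

func logFields(ver int) map[string]any {
	fields := map[string]any{"slot": 0}
	if ver >= Altair {
		fields["syncBitsCount"] = 0 // sync aggregate exists from Altair on
	}
	if ver >= Bellatrix {
		fields["payloadHash"] = "0x010203" // execution payload from Bellatrix on
	}
	if ver >= Capella {
		fields["withdrawals"] = 0 // withdrawals from Capella on
	}
	return fields
}

func main() {
	fmt.Println(logFields(Bellatrix))
}
```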


@@ -3,11 +3,11 @@ package blockchain
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -25,11 +25,11 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
tests := []struct {
name string
b func() interfaces.ReadOnlyBeaconBlock
b func() interfaces.BeaconBlock
want string
}{
{name: "empty block body",
b: func() interfaces.ReadOnlyBeaconBlock {
b: func() interfaces.BeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
require.NoError(t, err)
return wb
@@ -37,7 +37,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
},
{name: "has attestation",
b: func() interfaces.ReadOnlyBeaconBlock {
b: func() interfaces.BeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}})
require.NoError(t, err)
return wb
@@ -45,7 +45,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has deposit",
b: func() interfaces.ReadOnlyBeaconBlock {
b: func() interfaces.BeaconBlock {
wb, err := blocks.NewBeaconBlock(
&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
@@ -56,7 +56,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
},
{name: "has attester slashing",
b: func() interfaces.ReadOnlyBeaconBlock {
b: func() interfaces.BeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
AttesterSlashings: []*ethpb.AttesterSlashing{{}}}})
require.NoError(t, err)
@@ -65,7 +65,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
},
{name: "has proposer slashing",
b: func() interfaces.ReadOnlyBeaconBlock {
b: func() interfaces.BeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
ProposerSlashings: []*ethpb.ProposerSlashing{{}}}})
require.NoError(t, err)
@@ -74,7 +74,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
},
{name: "has exit",
b: func() interfaces.ReadOnlyBeaconBlock {
b: func() interfaces.BeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}})
require.NoError(t, err)
@@ -83,7 +83,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
},
{name: "has everything",
b: func() interfaces.ReadOnlyBeaconBlock {
b: func() interfaces.BeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}},
@@ -96,7 +96,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },
b: func() interfaces.BeaconBlock { return wrappedPayloadBlk },
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
},
}


@@ -6,15 +6,15 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
)
var (
@@ -111,18 +111,6 @@ var (
Name: "beacon_reorgs_total",
Help: "Count the number of times beacon chain has a reorg",
})
LateBlockAttemptedReorgCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_late_block_attempted_reorgs",
Help: "Count the number of times a proposer served by this beacon has attempted a late block reorg",
})
lateBlockFailedAttemptFirstThreshold = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_failed_reorg_attempts_first_threshold",
Help: "Count the number of times a proposer served by this beacon attempted a late block reorg but desisted in the first threshold",
})
lateBlockFailedAttemptSecondThreshold = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_failed_reorg_attempts_second_threshold",
Help: "Count the number of times a proposer served by this beacon attempted a late block reorg but desisted in the second threshold",
})
saveOrphanedAttCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "saved_orphaned_att_total",
Help: "Count the number of times an orphaned attestation is saved",
@@ -142,6 +130,14 @@ var (
Name: "sync_head_state_hit",
Help: "The number of sync head state requests that are present in the cache.",
})
stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_hit",
Help: "Count the number of state balance cache hits.",
})
stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_miss",
Help: "Count the number of state balance cache hits.",
})
newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_valid_node_count",
Help: "Count the number of valid nodes after newPayload EE call",
@@ -162,61 +158,14 @@ var (
Name: "forkchoice_updated_optimistic_node_count",
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
})
forkchoiceUpdatedInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_invalid_node_count",
Help: "Count the number of invalid nodes after forkchoiceUpdated EE call",
missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "missed_payload_id_filled_count",
Help: "",
})
txsPerSlotCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "txs_per_slot_count",
Help: "Count the number of txs per slot",
})
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "on_block_processing_milliseconds",
Help: "Total time in milliseconds to complete a call to onBlock()",
})
stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "state_transition_processing_milliseconds",
Help: "Total time to call a state transition in onBlock()",
})
processAttsElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "process_attestations_milliseconds",
Help: "Captures latency for process attestations (forkchoice) in milliseconds",
Buckets: []float64{1, 5, 20, 100, 500, 1000},
},
)
newAttHeadElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "new_att_head_milliseconds",
Help: "Captures latency for new attestation head in milliseconds",
Buckets: []float64{1, 5, 20, 100, 500, 1000},
},
)
newBlockHeadElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "new_block_head_milliseconds",
Help: "Captures latency for new block head in milliseconds",
Buckets: []float64{1, 5, 20, 100, 500, 1000},
},
)
reorgDistance = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "reorg_distance",
Help: "Captures distance of reorgs. Distance is defined as the number of blocks between the old head and the new head",
Buckets: []float64{1, 2, 4, 8, 16, 32, 64},
},
)
reorgDepth = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "reorg_depth",
Help: "Captures depth of reorgs. Depth is defined as the number of blocks between the head and the common ancestor",
Buckets: []float64{1, 2, 4, 8, 16, 32},
},
)
)
// reportSlotMetrics reports slot related metrics.
func reportSlotMetrics(stateSlot, headSlot, clockSlot primitives.Slot, finalizedCheckpoint *ethpb.Checkpoint) {
func reportSlotMetrics(stateSlot, headSlot, clockSlot types.Slot, finalizedCheckpoint *ethpb.Checkpoint) {
clockTimeSlot.Set(float64(clockSlot))
beaconSlot.Set(float64(stateSlot))
beaconHeadSlot.Set(float64(headSlot))
@@ -228,7 +177,7 @@ func reportSlotMetrics(stateSlot, headSlot, clockSlot primitives.Slot, finalized
// reportEpochMetrics reports epoch related metrics.
func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconState) error {
currentEpoch := primitives.Epoch(postState.Slot() / params.BeaconConfig().SlotsPerEpoch)
currentEpoch := types.Epoch(postState.Slot() / params.BeaconConfig().SlotsPerEpoch)
// Validator instances
pendingInstances := 0
@@ -247,7 +196,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
slashingEffectiveBalance := uint64(0)
for i, validator := range postState.Validators() {
bal, err := postState.BalanceAtIndex(primitives.ValidatorIndex(i))
bal, err := postState.BalanceAtIndex(types.ValidatorIndex(i))
if err != nil {
log.WithError(err).Error("Could not load validator balance")
continue
@@ -317,8 +266,9 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
var b *precompute.Balance
var v []*precompute.Validator
var err error
if headState.Version() == version.Phase0 {
switch headState.Version() {
case version.Phase0:
// Validator participation should be viewed on the canonical chain.
v, b, err = precompute.New(ctx, headState)
if err != nil {
return err
@@ -327,7 +277,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
if err != nil {
return err
}
} else if headState.Version() >= version.Altair {
case version.Altair, version.Bellatrix:
v, b, err = altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return err
@@ -336,10 +286,9 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
if err != nil {
return err
}
} else {
return errors.Errorf("invalid state type provided: %T", headState.ToProtoUnsafe())
default:
return errors.Errorf("invalid state type provided: %T", headState.InnerStateUnsafe())
}
prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
prevEpochTargetBalances.Set(float64(b.PrevEpochTargetAttested))
@@ -353,7 +302,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
return nil
}
func reportAttestationInclusion(blk interfaces.ReadOnlyBeaconBlock) {
func reportAttestationInclusion(blk interfaces.BeaconBlock) {
for _, att := range blk.Body().Attestations() {
attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
}
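The metrics file above follows a common pattern: counters for event totals and histograms for latencies, registered once via `promauto` and updated from the hot path. A minimal standalone sketch of that pattern with illustrative metric names (not Prysm's):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Registered once at package init time via promauto, then updated wherever
// the corresponding event happens. Names and buckets here are illustrative.
var (
	exampleReorgCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "example_reorgs_total",
		Help: "Count the number of reorgs seen by this sketch",
	})
	exampleProcessingTime = promauto.NewHistogram(prometheus.HistogramOpts{
		Name:    "example_block_processing_milliseconds",
		Help:    "Latency for processing a block in milliseconds",
		Buckets: []float64{1, 5, 20, 100, 500, 1000},
	})
)

func processBlock() {
	start := time.Now()
	// ... block processing would happen here ...
	exampleProcessingTime.Observe(float64(time.Since(start).Milliseconds()))
}

func main() {
	exampleReorgCount.Inc()
	processBlock()
	fmt.Println("metrics registered and updated")
}
```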


@@ -4,9 +4,9 @@ import (
"context"
"testing"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)
func TestReportEpochMetrics_BadHeadState(t *testing.T) {


@@ -1,23 +1,23 @@
package blockchain
import (
"context"
"errors"
"testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
)
func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
cs := startup.NewClockSynchronizer()
fcs := protoarray.New()
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithClockSynchronizer(cs),
}
}
@@ -25,6 +25,26 @@ func testServiceOptsWithDB(t *testing.T) []Option {
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
cs := startup.NewClockSynchronizer()
return []Option{WithClockSynchronizer(cs)}
return []Option{
withStateBalanceCache(satisfactoryStateBalanceCache()),
}
}
type mockStateByRooter struct {
state state.BeaconState
err error
}
var _ stateByRooter = &mockStateByRooter{}
func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
return m.state, m.err
}
// returns an instance of the state balance cache that can be used
// to satisfy the requirement for one in NewService, but which will
// always return an error if used.
func satisfactoryStateBalanceCache() *stateBalanceCache {
err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
}


@@ -1,22 +1,20 @@
package blockchain
import (
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/async/event"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)
type Option func(s *Service) error
@@ -101,14 +99,6 @@ func WithSlashingPool(p slashings.PoolManager) Option {
}
}
// WithBLSToExecPool to keep track of BLS to Execution address changes.
func WithBLSToExecPool(p blstoexec.PoolManager) Option {
return func(s *Service) error {
s.cfg.BLSToExecPool = p
return nil
}
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
return func(s *Service) error {
@@ -157,6 +147,13 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option {
}
}
func withStateBalanceCache(c *stateBalanceCache) Option {
return func(s *Service) error {
s.justifiedBalances = c
return nil
}
}
// WithFinalizedStateAtStartUp to store finalized state at start up.
func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return func(s *Service) error {
@@ -164,11 +161,3 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return nil
}
}
func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
return func(s *Service) error {
s.clockSetter = gs
s.clockWaiter = gs
return nil
}
}
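The options file above uses the functional-options pattern: each `WithX` helper returns a closure that mutates the service configuration, and `NewService` applies the closures in order. A self-contained sketch of the pattern with placeholder fields:

```go
package main

import "fmt"

// Placeholder config and service types; the real package configures DB,
// state generator, fork choice store, pools, and so on the same way.
type config struct {
	dbPath   string
	poolSize int
}

type service struct{ cfg config }

type option func(*service) error

func withDBPath(p string) option {
	return func(s *service) error { s.cfg.dbPath = p; return nil }
}

func withPoolSize(n int) option {
	return func(s *service) error { s.cfg.poolSize = n; return nil }
}

// newService applies each option in order and fails fast on the first error.
func newService(opts ...option) (*service, error) {
	s := &service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newService(withDBPath("/tmp/db"), withPoolSize(64))
	fmt.Println(s.cfg, err)
}
```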


@@ -10,35 +10,33 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
)
// validateMergeBlock validates terminal block hash in the event of manual overrides before checking for total difficulty.
//
// def validate_merge_block(block: ReadOnlyBeaconBlock) -> None:
// def validate_merge_block(block: BeaconBlock) -> None:
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
//
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
//
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
// # Check if `pow_block` is available
// assert pow_block is not None
// pow_parent = get_pow_block(pow_block.parent_hash)
// # Check if `pow_parent` is available
// assert pow_parent is not None
// # Check if `pow_block` is a valid terminal PoW block
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
// # Check if `pow_block` is available
// assert pow_block is not None
// pow_parent = get_pow_block(pow_block.parent_hash)
// # Check if `pow_parent` is available
// assert pow_parent is not None
// # Check if `pow_block` is a valid terminal PoW block
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBeaconBlock) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return err
}
@@ -49,13 +47,9 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.ReadOnlyS
if payload.IsNil() {
return errors.New("nil execution payload")
}
ok, err := canUseValidatedTerminalBlockHash(b.Block().Slot(), payload)
if err != nil {
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
return errors.Wrap(err, "could not validate terminal block hash")
}
if ok {
return nil
}
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash())
if err != nil {
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
@@ -97,7 +91,6 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
if blk == nil {
return nil, nil, errors.New("pow block is nil")
}
blk.Version = version.Bellatrix
blkTDBig, err := hexutil.DecodeBig(blk.TotalDifficulty)
if err != nil {
return nil, nil, errors.Wrap(err, "could not decode merge block total difficulty")
@@ -109,34 +102,32 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
return blk.ParentHash[:], blkTDUint256, nil
}
// canUseValidatedTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// spec code:
// if TERMINAL_BLOCK_HASH != Hash32():
//
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
func canUseValidatedTerminalBlockHash(blkSlot primitives.Slot, payload interfaces.ExecutionData) (bool, error) {
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) error {
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
return false, nil
return nil
}
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
return false, errors.New("terminal block hash activation epoch not reached")
return errors.New("terminal block hash activation epoch not reached")
}
if !bytes.Equal(payload.ParentHash(), params.BeaconConfig().TerminalBlockHash.Bytes()) {
return false, errors.New("parent hash does not match terminal block hash")
return errors.New("parent hash does not match terminal block hash")
}
return true, nil
return nil
}
// validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty.
//
// def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool:
//
// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
// return is_total_difficulty_reached and is_parent_total_difficulty_valid
// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
// return is_total_difficulty_reached and is_parent_total_difficulty_valid
func validateTerminalBlockDifficulties(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
b, ok := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
if !ok {

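The spec comment above defines `is_valid_terminal_pow_block` as a pair of total-difficulty comparisons: the block must have reached the terminal total difficulty while its parent must still be below it. A standalone sketch of that rule using `math/big` instead of `uint256` (the terminal total difficulty below is mainnet's published value, used only as sample input):

```go
package main

import (
	"fmt"
	"math/big"
)

// validTerminalPowBlock mirrors the spec rule quoted above.
func validTerminalPowBlock(blockTD, parentTD, terminalTD *big.Int) bool {
	reached := blockTD.Cmp(terminalTD) >= 0    // block.total_difficulty >= TTD
	parentBelow := parentTD.Cmp(terminalTD) < 0 // parent.total_difficulty < TTD
	return reached && parentBelow
}

func main() {
	ttd, _ := new(big.Int).SetString("58750000000000000000000", 10)
	blockTD, _ := new(big.Int).SetString("58750000000000000000001", 10)
	parentTD, _ := new(big.Int).SetString("58749999999999999999999", 10)
	fmt.Println(validTerminalPowBlock(blockTD, parentTD, ttd)) // true
}
```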

@@ -1,20 +1,23 @@
package blockchain
import (
"context"
"fmt"
"math/big"
"testing"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
mocks "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
func Test_validTerminalPowBlock(t *testing.T) {
@@ -104,8 +107,16 @@ func Test_validateMergeBlock(t *testing.T) {
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -146,8 +157,16 @@ func Test_validateMergeBlock(t *testing.T) {
}
func Test_getBlkParentHashAndTD(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -194,37 +213,20 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
func Test_validateTerminalBlockHash(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{})
require.NoError(t, err)
ok, err := canUseValidatedTerminalBlockHash(1, wrapped)
require.NoError(t, err)
require.Equal(t, false, ok)
require.NoError(t, validateTerminalBlockHash(1, wrapped))
cfg := params.BeaconConfig()
cfg.TerminalBlockHash = [32]byte{0x01}
params.OverrideBeaconConfig(cfg)
ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
require.ErrorContains(t, "terminal block hash activation epoch not reached", err)
require.Equal(t, false, ok)
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, wrapped))
cfg.TerminalBlockHashActivationEpoch = 0
params.OverrideBeaconConfig(cfg)
ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
require.ErrorContains(t, "parent hash does not match terminal block hash", err)
require.Equal(t, false, ok)
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, wrapped))
wrapped, err = blocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{
ParentHash: cfg.TerminalBlockHash.Bytes(),
})
require.NoError(t, err)
ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
require.NoError(t, err)
require.Equal(t, true, ok)
service, tr := minimalTestService(t)
ctx := tr.ctx
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
blk.SetSlot(1)
require.NoError(t, blk.SetExecution(wrapped))
require.NoError(t, service.validateMergeBlock(ctx, blk))
require.NoError(t, validateTerminalBlockHash(1, wrapped))
}


@@ -5,11 +5,12 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)
@@ -18,25 +19,24 @@ import (
// The delay is handled by the caller in `processAttestations`.
//
// Spec pseudocode definition:
// def on_attestation(store: Store, attestation: Attestation) -> None:
// """
// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
//
// def on_attestation(store: Store, attestation: Attestation) -> None:
// """
// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
// An ``attestation`` that is asserted as invalid may be valid at a later time,
// consider scheduling it for later processing in such case.
// """
// validate_on_attestation(store, attestation)
// store_target_checkpoint_state(store, attestation.data.target)
//
// An ``attestation`` that is asserted as invalid may be valid at a later time,
// consider scheduling it for later processing in such case.
// """
// validate_on_attestation(store, attestation)
// store_target_checkpoint_state(store, attestation.data.target)
// # Get state at the `target` to fully validate attestation
// target_state = store.checkpoint_states[attestation.data.target]
// indexed_attestation = get_indexed_attestation(target_state, attestation)
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
//
// # Get state at the `target` to fully validate attestation
// target_state = store.checkpoint_states[attestation.data.target]
// indexed_attestation = get_indexed_attestation(target_state, attestation)
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
//
// # Update latest messages for attesting indices
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, disparity time.Duration) error {
// # Update latest messages for attesting indices
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
defer span.End()
@@ -62,7 +62,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
genesisTime := uint64(s.genesisTime.Unix())
// Verify attestation target is from current epoch or previous epoch.
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Add(disparity).Unix()), tgt); err != nil {
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
return err
}
@@ -71,11 +71,11 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
return errors.Wrap(err, "could not verify attestation beacon block")
}
// Note that LMD GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
// Note that LMD GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
// validate_aggregate_proof.go and validate_beacon_attestation.go
// Verify attestations can only affect the fork choice of subsequent slots.
if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, disparity); err != nil {
if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return err
}
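The check above only admits an attestation into fork choice once the clock has reached the start of the slot after the attestation's slot, within an allowed gossip clock disparity. A sketch of that timing rule with illustrative mainnet-style constants (12-second slots, 500 ms disparity), not Prysm's `slots.VerifyTime`:

```go
package main

import (
	"fmt"
	"time"
)

// Illustrative constants for this sketch only.
const (
	secondsPerSlot = 12
	maxDisparity   = 500 * time.Millisecond
)

// canProcessAttestation reports whether an attestation for attSlot may affect
// fork choice yet: the local clock must be at or past the start of slot
// attSlot+1, minus the allowed clock disparity.
func canProcessAttestation(genesis time.Time, attSlot uint64, now time.Time) bool {
	nextSlotStart := genesis.Add(time.Duration(attSlot+1) * secondsPerSlot * time.Second)
	return !now.Before(nextSlotStart.Add(-maxDisparity))
}

func main() {
	genesis := time.Now().Add(-25 * time.Second)               // we are 25s in, i.e. slot 2
	fmt.Println(canProcessAttestation(genesis, 1, time.Now())) // true: slot 2 has started
	fmt.Println(canProcessAttestation(genesis, 2, time.Now())) // false: slot 3 not reached
}
```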


@@ -6,30 +6,19 @@ import (
"strconv"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/async"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
@@ -43,36 +32,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
if cachedState != nil && !cachedState.IsNil() {
return cachedState, nil
}
// Try the next slot cache for the early epoch calls, this should mostly have been covered already
// but is cheap
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not compute epoch start")
}
cachedState = transition.NextSlotState(c.Root, slot)
if cachedState != nil && !cachedState.IsNil() {
if cachedState.Slot() != slot {
cachedState, err = transition.ProcessSlots(ctx, cachedState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}
}
if err := s.checkpointStateCache.AddCheckpointState(c, cachedState); err != nil {
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return cachedState, nil
}
// Do not process attestations for old non viable checkpoints otherwise
ok, err := s.cfg.ForkChoiceStore.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: [32]byte(c.Root), Epoch: c.Epoch})
if err != nil {
return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
}
if !ok {
return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
}
// Fallback to state regeneration.
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
@@ -95,13 +55,14 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return baseState, nil
}
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := primitives.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
currentSlot := types.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
currentEpoch := slots.ToEpoch(currentSlot)
var prevEpoch primitives.Epoch
var prevEpoch types.Epoch
// Prevents previous epoch underflow
if currentEpoch > 1 {
prevEpoch = currentEpoch - 1

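The `verifyAttTargetEpoch` helper above derives the current epoch from wall-clock and genesis time, guards the previous-epoch subtraction against underflow, and accepts only current- or previous-epoch targets. A minimal sketch of the same check with hard-coded illustrative constants:

```go
package main

import "fmt"

// Illustrative constants for this sketch only.
const (
	secondsPerSlot = 12
	slotsPerEpoch  = 32
)

// verifyTargetEpoch accepts a target only if it is in the current or the
// previous epoch, mirroring the helper above.
func verifyTargetEpoch(genesisTime, nowTime, targetEpoch uint64) error {
	currentSlot := (nowTime - genesisTime) / secondsPerSlot
	currentEpoch := currentSlot / slotsPerEpoch
	var prevEpoch uint64
	if currentEpoch > 1 { // prevents underflow during the first two epochs
		prevEpoch = currentEpoch - 1
	}
	if targetEpoch != currentEpoch && targetEpoch != prevEpoch {
		return fmt.Errorf("target epoch %d not in {%d, %d}", targetEpoch, prevEpoch, currentEpoch)
	}
	return nil
}

func main() {
	genesis := uint64(0)
	now := uint64(3 * slotsPerEpoch * secondsPerSlot) // start of epoch 3
	fmt.Println(verifyTargetEpoch(genesis, now, 2))   // nil: previous epoch
	fmt.Println(verifyTargetEpoch(genesis, now, 1))   // error: too old
}
```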

@@ -5,42 +5,46 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
_, err := blockTree1(t, beaconDB, []byte{'g'})
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(doublylinkedtree.New()),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -51,7 +55,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 32
blkWithValidState.Block.Slot = 2
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)
blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
@@ -66,10 +70,6 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))
service.head = &head{
state: st,
}
tests := []struct {
name string
a *ethpb.Attestation
@@ -80,6 +80,11 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
@@ -113,7 +118,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := service.OnAttestation(ctx, tt.a, 0)
err := service.OnAttestation(ctx, tt.a)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
@@ -123,10 +128,154 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}
}
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(doublylinkedtree.New()),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
require.NoError(t, err)
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 2
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)
blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
s, err = util.NewBeaconState()
require.NoError(t, err)
err = s.SetFork(&ethpb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
})
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))
tests := []struct {
name string
a *ethpb.Attestation
wantedErr string
}{
{
name: "attestation's data slot not aligned with target vote",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}}),
wantedErr: "target epoch 100 does not match current epoch",
},
{
name: "process nil attestation",
a: nil,
wantedErr: "attestation can't be nil",
},
{
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantedErr: "attestation's data can't be nil",
},
{
name: "process nil field (a.Target) in attestation",
a: &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Target: nil,
Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
},
AggregationBits: make([]byte, 1),
Signature: make([]byte, 96),
},
wantedErr: "attestation's target can't be nil",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := service.OnAttestation(ctx, tt.a)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
assert.NoError(t, err)
}
})
}
}
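The "slot 32 does not match target epoch 0" expectation in the table above is plain slot-to-epoch arithmetic: with mainnet's 32 slots per epoch, an attestation at slot SlotsPerEpoch (= 32) sits in epoch 1, so a target vote for epoch 0 is inconsistent. A minimal illustrative sketch of the two conversions these checks rely on (the constant mirrors the mainnet value; the helpers are not Prysm functions):

package main

import "fmt"

// slotsPerEpoch mirrors mainnet's params.BeaconConfig().SlotsPerEpoch.
const slotsPerEpoch = uint64(32)

// epochOf returns the epoch a slot belongs to.
func epochOf(slot uint64) uint64 { return slot / slotsPerEpoch }

// epochStart returns the first slot of an epoch, the arithmetic slots.EpochStart performs (ignoring overflow handling).
func epochStart(epoch uint64) uint64 { return epoch * slotsPerEpoch }

func main() {
	fmt.Println(epochOf(32))   // 1: slot 32 sits in epoch 1, so a target epoch of 0 mismatches
	fmt.Println(epochStart(1)) // 32: first slot of epoch 1, the slot the b32 blocks in later tests are built at
}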
func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 1, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 1, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -142,12 +291,19 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
func TestStore_SaveCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -168,9 +324,6 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s1, err := service.getAttPreState(ctx, cp1)
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
@@ -178,17 +331,8 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
s2, err := service.getAttPreState(ctx, cp2)
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s2, err = service.getAttPreState(ctx, cp2)
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
s1, err = service.getAttPreState(ctx, cp1)
@@ -207,30 +351,26 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s3, err := service.getAttPreState(ctx, cp3)
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
}
func TestStore_UpdateCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
epoch := primitives.Epoch(1)
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
epoch := types.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -240,16 +380,8 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
epoch = 2
blk = util.NewBeaconBlock()
blk.Block.Slot = 64
r2, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)
@@ -260,7 +392,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
cached, err = service.checkpointStateCache.StateByCheckpoint(newCheckpoint)
require.NoError(t, err)
require.DeepSSZEqual(t, returned.ToProtoUnsafe(), cached.ToProtoUnsafe())
require.DeepSSZEqual(t, returned.InnerStateUnsafe(), cached.InnerStateUnsafe())
}
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
@@ -329,21 +461,124 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
}
func TestGetAttPreState_HeadState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
epoch := primitives.Epoch(1)
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
require.NoError(t, transition.UpdateNextSlotCache(ctx, checkpoint.Root, baseState))
_, err = service.getAttPreState(ctx, checkpoint)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
st, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.ErrorContains(t, "Root and finalized store are not consistent", err)
}
func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.ErrorContains(t, "Root and finalized store are not consistent", err)
}
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: r32}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)
}
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r32}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc))
_, err = service.cfg.ForkChoiceStore.Head(ctx, []uint64{})
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().SlotsPerEpoch, st.Slot())
}

View File

@@ -6,28 +6,29 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"github.com/prysmaticlabs/prysm/v3/async/event"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)
@@ -45,60 +46,58 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// computation in this method and methods it calls into.
//
// Spec pseudocode definition:
// def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
// block = signed_block.message
// # Parent block must be known
// assert block.parent_root in store.block_states
// # Make a copy of the state to avoid mutability issues
// pre_state = copy(store.block_states[block.parent_root])
// # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
// assert get_current_slot(store) >= block.slot
//
// def on_block(store: Store, signed_block: ReadOnlySignedBeaconBlock) -> None:
// block = signed_block.message
// # Parent block must be known
// assert block.parent_root in store.block_states
// # Make a copy of the state to avoid mutability issues
// pre_state = copy(store.block_states[block.parent_root])
// # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
// assert get_current_slot(store) >= block.slot
// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// assert block.slot > finalized_slot
// # Check block is a descendant of the finalized block at the checkpoint finalized slot
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
//
// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// assert block.slot > finalized_slot
// # Check block is a descendant of the finalized block at the checkpoint finalized slot
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
// # Check the block is valid and compute the post-state
// state = pre_state.copy()
// state_transition(state, signed_block, True)
// # Add new block to the store
// store.blocks[hash_tree_root(block)] = block
// # Add new state for this block to the store
// store.block_states[hash_tree_root(block)] = state
//
// # Check the block is valid and compute the post-state
// state = pre_state.copy()
// state_transition(state, signed_block, True)
// # Add new block to the store
// store.blocks[hash_tree_root(block)] = block
// # Add new state for this block to the store
// store.block_states[hash_tree_root(block)] = state
// # Update justified checkpoint
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
// store.best_justified_checkpoint = state.current_justified_checkpoint
// if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
// store.justified_checkpoint = state.current_justified_checkpoint
//
// # Update justified checkpoint
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
// store.best_justified_checkpoint = state.current_justified_checkpoint
// if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
// store.justified_checkpoint = state.current_justified_checkpoint
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
//
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
// # Potentially update justified if different from store
// if store.justified_checkpoint != state.current_justified_checkpoint:
// # Update justified if new justified is later than store justified
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
// return
//
// # Potentially update justified if different from store
// if store.justified_checkpoint != state.current_justified_checkpoint:
// # Update justified if new justified is later than store justified
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
// return
//
// # Update justified if store justified is not in chain with finalized checkpoint
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
// # Update justified if store justified is not in chain with finalized checkpoint
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
return invalidBlock{error: err}
}
startTime := time.Now()
b := signed.Block()
preState, err := s.getBlockPreState(ctx, b)
@@ -106,14 +105,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
return err
}
// Verify that the parent block is in forkchoice
if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
return ErrNotDescendantOfFinalized
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
currStoreJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
@@ -121,13 +115,10 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
if err != nil {
return err
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
return err
@@ -141,7 +132,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
@@ -152,7 +142,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}
s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -187,45 +176,21 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}()
}
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
return errors.Wrap(err, msg)
}
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
log.WithError(err).Warn("Could not update head")
}
if blockRoot != headRoot {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
}
log.WithFields(logrus.Fields{
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
"receivedWeight": receivedWeight,
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
} else {
// Updating next slot state cache can happen in the background. It shouldn't block the rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
if err := s.notifyEngineIfChangedHead(ctx, headRoot); err != nil {
return err
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
// verify conditions for FCU, notifies FCU, and saves the new head.
// This function also prunes attestations, other similar operations happen in prunePostBlockOperationPools.
if _, err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
return err
}
@@ -240,19 +205,21 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
},
})
// Save justified check point to db.
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
// Updating next slot state cache can happen in the background. It shouldn't block the rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}
}()
// Save finalized check point to db and more.
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := s.ForkChoicer().FinalizedCheckpoint()
if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return err
@@ -263,13 +230,12 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}
go func() {
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
State: signed.Block().StateRoot(),
ExecutionOptimistic: isOptimistic,
},
})
@@ -283,20 +249,17 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()
}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState); err != nil {
return err
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
return s.handleEpochBoundary(ctx, postState)
}
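The checkpoint bookkeeping in the pseudocode above reduces to a few monotonic epoch comparisons. The sketch below is illustrative only: the store layout and field names are invented, and it deliberately omits the should_update_justified_checkpoint and ancestry conditions that gate the final justified update in the spec.

package main

import "fmt"

type checkpoint struct {
	epoch uint64
	root  string
}

type store struct {
	justified     checkpoint
	bestJustified checkpoint
	finalized     checkpoint
}

// applyCheckpoints mirrors the justified/finalized updates from the on_block pseudocode,
// with the justified update applied unconditionally instead of behind the spec's extra checks.
func (s *store) applyCheckpoints(stateJustified, stateFinalized checkpoint) {
	if stateJustified.epoch > s.justified.epoch {
		if stateJustified.epoch > s.bestJustified.epoch {
			s.bestJustified = stateJustified
		}
		s.justified = stateJustified
	}
	if stateFinalized.epoch > s.finalized.epoch {
		s.finalized = stateFinalized
		if stateJustified.epoch > s.justified.epoch {
			s.justified = stateJustified
		}
	}
}

func main() {
	s := &store{}
	s.applyCheckpoints(checkpoint{epoch: 1, root: "j1"}, checkpoint{epoch: 0, root: "f0"})
	s.applyCheckpoints(checkpoint{epoch: 2, root: "j2"}, checkpoint{epoch: 1, root: "j1"})
	fmt.Println(s.justified.epoch, s.finalized.epoch) // 2 1
}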
func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionData, error) {
func getStateVersionAndPayload(st state.BeaconState) (int, *enginev1.ExecutionPayloadHeader, error) {
if st == nil {
return 0, nil, errors.New("nil state")
}
var preStateHeader interfaces.ExecutionData
var preStateHeader *enginev1.ExecutionPayloadHeader
var err error
preStateVersion := st.Version()
switch preStateVersion {
@@ -310,7 +273,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock,
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
blockRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -332,7 +295,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
if err := s.verifyBlkPreState(ctx, b); err != nil {
return err
}
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
if err != nil {
return err
}
@@ -347,10 +310,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
sigSet := bls.NewSet()
sigSet := &bls.SignatureBatch{
Signatures: [][]byte{},
PublicKeys: []bls.PublicKey{},
Messages: [][32]byte{},
}
type versionAndHeader struct {
version int
header interfaces.ExecutionData
header *enginev1.ExecutionPayloadHeader
}
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
@@ -387,13 +354,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
}
sigSet.Join(set)
}
var verify bool
if features.Get().EnableVerboseSigVerification {
verify, err = sigSet.VerifyVerbosely()
} else {
verify, err = sigSet.Verify()
}
verify, err := sigSet.Verify()
if err != nil {
return invalidBlock{error: err}
}
@@ -432,12 +393,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
tracing.AnnotateError(span, err)
return err
}
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
@@ -445,22 +400,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
}
}
}
// Save boundary states that will be useful for forkchoice
for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return err
}
}
// Also saves the last post state which to be used as pre state for the next batch.
lastBR := blockRoots[len(blks)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return err
}
// Insert all nodes but the last one to forkchoice
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
if err := s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes); err != nil {
return errors.Wrap(err, "could not insert batch to forkchoice")
}
// Insert the last block to forkchoice
lastBR := blockRoots[len(blks)-1]
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
}
@@ -470,7 +415,17 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return err
}
}
// Also saves the last post state which to be used as pre state for the next batch.
lastB := blks[len(blks)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return err
}
arg := &notifyForkchoiceUpdateArg{
headState: preState,
headRoot: lastBR,
@@ -487,7 +442,6 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
var err error
if postState.Slot()+1 == s.nextEpochBoundarySlot {
copied := postState.Copy()
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
@@ -502,27 +456,27 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
s.headLock.RLock()
st := s.head.state
s.headLock.RUnlock()
if err := reportEpochMetrics(ctx, postState, st); err != nil {
return err
}
var err error
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
return err
}
// Update caches at epoch boundary slot.
// The following updates have shortcut to return nil cheaply if fulfilled during boundary slot - 1.
// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
return err
}
headSt, err := s.HeadState(ctx)
if err != nil {
return err
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
return err
}
}
return nil
@@ -530,11 +484,11 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// This feeds the block into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, root [32]byte, st state.BeaconState) error {
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockToForkchoiceStore")
defer span.End()
if !s.cfg.ForkChoiceStore.HasNode(blk.ParentRoot()) {
if !s.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(blk.ParentRoot())) {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
@@ -542,12 +496,16 @@ func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfac
}
}
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, st, root); err != nil {
return err
}
return nil
}
// This feeds the attestations included in the block into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.BeaconBlock, st state.BeaconState) error {
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
@@ -570,19 +528,18 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
// InsertSlashingsToForkChoiceStore inserts attester slashing indices into the fork choice store.
// It is the caller's responsibility to ensure the slashing object is valid before calling this function.
// This function requires a write lock on forkchoice.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
for _, slashing := range slashings {
indices := blocks.SlashableAttesterIndices(slashing)
for _, index := range indices {
s.cfg.ForkChoiceStore.InsertSlashedIndex(ctx, primitives.ValidatorIndex(index))
s.ForkChoicer().InsertSlashedIndex(ctx, types.ValidatorIndex(index))
}
}
}
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.ReadOnlySignedBeaconBlock, st state.BeaconState) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
@@ -594,9 +551,18 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
return nil
}
// This removes the attestations in block `b` from the attestation mem pool.
func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlock) error {
atts := headBlock.Block().Body().Attestations()
// This removes the attestations from the mem pool. It will only remove the attestations if the input root `r` is canonical,
// meaning the block `b` is part of the canonical chain.
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
canonical, err := s.IsCanonical(ctx, r)
if err != nil {
return err
}
if !canonical {
return nil
}
atts := b.Block().Body().Attestations()
for _, att := range atts {
if helpers.IsAggregated(att) {
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
@@ -612,7 +578,7 @@ func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlo
}
// validateMergeTransitionBlock validates the merge transition block.
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) error {
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader *enginev1.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) error {
// Skip validation if block is older than Bellatrix.
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return nil
@@ -639,7 +605,11 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// Skip validation if the block is not a merge transition block.
// To reach here, the payload must be non-empty. If the state header is empty, then it's at transition.
empty, err := consensusblocks.IsEmptyExecutionData(stateHeader)
wh, err := consensusblocks.WrappedExecutionPayloadHeader(stateHeader)
if err != nil {
return err
}
empty, err := consensusblocks.IsEmptyExecutionData(wh)
if err != nil {
return err
}
@@ -651,20 +621,45 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) spawnLateBlockTasksLoop() {
func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case ti := <-ticker.C:
if !atHalfSlot(ti) {
continue
}
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, s.headRoot())
// There exists a proposer for the next slot, but we haven't called FCU with the payload attribute yet.
if has && id == [8]byte{} {
headBlock, err := s.headBlock()
if err != nil {
log.WithError(err).Error("Could not get head block")
} else {
if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: s.headState(ctx),
headRoot: s.headRoot(),
headBlock: headBlock.Block(),
}); err != nil {
log.WithError(err).Error("Could not prepare payload on empty ID")
}
}
missedPayloadIDFilledCount.Inc()
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
@@ -673,47 +668,8 @@ func (s *Service) spawnLateBlockTasksLoop() {
}()
}
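The slot-ticker offset used above is where the "4 seconds into the slot" figure in the comment below comes from: the attestation threshold is one third of the slot, and with mainnet's 12-second slots that is 4 seconds. A tiny illustrative calculation (the constant mirrors the mainnet value; the slot ticker itself is not reproduced):

package main

import (
	"fmt"
	"time"
)

// secondsPerSlot mirrors mainnet's params.BeaconConfig().SecondsPerSlot.
const secondsPerSlot = uint64(12)

func main() {
	attThreshold := secondsPerSlot / 3
	offset := time.Duration(attThreshold) * time.Second
	fmt.Println(offset) // 4s: how far into each slot the late-block tasks fire
}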
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing the next slot,
// and it also updates the next slot cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
if s.CurrentSlot() == s.HeadSlot() {
return
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.MissedSlot,
})
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists a proposer for the next slot, but we haven't called FCU with the payload attribute yet.
if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
return
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: headRoot,
headBlock: headBlock.Block(),
})
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
if err = transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
// Returns true if time `t` falls on the halfway second of the slot.
func atHalfSlot(t time.Time) bool {
s := params.BeaconConfig().SecondsPerSlot
return uint64(t.Second())%s == s/2
}
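The half-slot check above is modular arithmetic on the wall-clock second. A standalone sketch, assuming mainnet's 12-second slots (the secondsPerSlot constant and the main harness are illustrative, not part of this diff):

package main

import (
	"fmt"
	"time"
)

// secondsPerSlot mirrors mainnet's params.BeaconConfig().SecondsPerSlot.
const secondsPerSlot = uint64(12)

// atHalfSlot reports whether the wall-clock second lands on the midpoint of a slot,
// matching the modulo check in the routine above.
func atHalfSlot(t time.Time) bool {
	return uint64(t.Second())%secondsPerSlot == secondsPerSlot/2
}

func main() {
	// With 12-second slots the check fires at seconds 6, 18, 30, 42 and 54 of each minute.
	for sec := 0; sec < 60; sec++ {
		ts := time.Date(2022, 8, 30, 12, 0, sec, 0, time.UTC)
		if atHalfSlot(ts) {
			fmt.Println("half-slot tick at second", sec)
		}
	}
}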

View File

@@ -1,32 +1,36 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v3/math"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)
// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() primitives.Slot {
func (s *Service) CurrentSlot() types.Slot {
return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
func (s *Service) getBlockPreState(ctx context.Context, b interfaces.BeaconBlock) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
defer span.End()
@@ -35,7 +39,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
return nil, err
}
preState, err := s.cfg.StateGen.StateByRoot(ctx, b.ParentRoot())
preState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot()))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot())
}
@@ -57,18 +61,22 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
}
// verifyBlkPreState validates that the input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBeaconBlock) error {
func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
defer span.End()
parentRoot := b.ParentRoot()
parentRoot := bytesutil.ToBytes32(b.ParentRoot())
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// subset of the block object.
// a subset of the block object.
if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
return errors.New("could not reconstruct parent state")
}
if err := s.VerifyFinalizedBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot())); err != nil {
return err
}
has, err := s.cfg.StateGen.HasState(ctx, parentRoot)
if err != nil {
return err
@@ -82,10 +90,39 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
return nil
}
// VerifyFinalizedBlkDescendant validates that the input block root is a descendant of the
// current finalized block root.
func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyFinalizedBlkDescendant")
defer span.End()
finalized := s.ForkChoicer().FinalizedCheckpoint()
fRoot := s.ensureRootNotZeros(finalized.Root)
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
}
bFinalizedRoot, err := s.ancestor(ctx, root[:], fSlot)
if err != nil {
return errors.Wrap(err, "could not get finalized block root")
}
if bFinalizedRoot == nil {
return fmt.Errorf("no finalized block known for block %#x", bytesutil.Trunc(root[:]))
}
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), fSlot, bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)
return invalidBlock{error: err}
}
return nil
}
// verifyBlkFinalizedSlot validates that the input block's slot is not less than or equal
// to the current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b interfaces.ReadOnlyBeaconBlock) error {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
finalized := s.ForkChoicer().FinalizedCheckpoint()
finalizedSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
@@ -105,11 +142,8 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
defer span.End()
// return early if new checkpoint is not newer than the one in DB
currentFinalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return err
}
if cp.Epoch <= currentFinalized.Epoch {
currentFinalizedEpoch := s.FinalizedCheckpt().Epoch
if cp.Epoch <= currentFinalizedEpoch {
return nil
}
@@ -126,7 +160,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
fRoot := bytesutil.ToBytes32(cp.Root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil && !errors.Is(err, doublylinkedtree.ErrNilNode) {
if err != nil && err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return err
}
if !optimistic {
@@ -135,49 +169,86 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
return err
}
}
go func() {
// We do not pass in the parent context from the method as this method call
// is meant to be asynchronous and run in the background rather than being
// tied to the execution of a block.
if err := s.cfg.StateGen.MigrateToCold(s.ctx, fRoot); err != nil {
log.WithError(err).Error("could not migrate to cold")
}
}()
if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not migrate to cold")
}
return nil
}
// ancestor returns the block root of an ancestry block from the input block root.
//
// Spec pseudocode definition:
// def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
// block = store.blocks[root]
// if block.slot > slot:
// return get_ancestor(store, block.parent_root, slot)
// elif block.slot == slot:
// return root
// else:
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
// return root
func (s *Service) ancestor(ctx context.Context, root []byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
defer span.End()
r := bytesutil.ToBytes32(root)
// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
// This is the optimal path.
ar, err := s.ancestorByForkChoiceStore(ctx, r, slot)
if err != nil {
// Try getting the ancestor root from the DB when retrieval from the fork choice store fails.
// This is the second line of defense for retrieving ancestor root.
ar, err = s.ancestorByDB(ctx, r, slot)
if err != nil {
return nil, err
}
}
return ar, nil
}
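The get_ancestor pseudocode quoted above can be exercised on a toy block map. The sketch below is illustrative only: the store type and test data are invented, and it mirrors the spec recursion rather than Prysm's forkchoice or DB lookups.

package main

import "fmt"

// toyBlock carries only the fields get_ancestor needs.
type toyBlock struct {
	slot       uint64
	parentRoot string
}

// toyStore maps block roots to blocks, standing in for store.blocks.
type toyStore map[string]toyBlock

// getAncestor walks parent links until it reaches a block at or below the requested slot,
// following the pseudocode above.
func getAncestor(store toyStore, root string, slot uint64) string {
	block := store[root]
	if block.slot > slot {
		return getAncestor(store, block.parentRoot, slot)
	}
	// block.slot == slot, or the root is older than the queried slot (a skip slot):
	// return the most recent root at or prior to the slot.
	return root
}

func main() {
	store := toyStore{
		"genesis": {slot: 0, parentRoot: ""},
		"a":       {slot: 31, parentRoot: "genesis"},
		"b":       {slot: 33, parentRoot: "a"}, // slot 32 was skipped
	}
	fmt.Println(getAncestor(store, "b", 32)) // "a": the most recent ancestor at or before slot 32
}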
// This retrieves an ancestor root using the fork choice store. The lookup loops through a flat array structure.
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByForkChoiceStore")
defer span.End()
if !s.cfg.ForkChoiceStore.HasParent(r) {
return nil, errors.New("could not find root in fork choice store")
}
root, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
return root[:], err
}
// This retrieves an ancestor root using the DB. The lookup recursively walks the DB and is slower than `ancestorByForkChoiceStore`.
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.Slot) (root [32]byte, err error) {
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByDB")
defer span.End()
root = [32]byte{}
// Stop recursive ancestry lookup if context is cancelled.
if ctx.Err() != nil {
err = ctx.Err()
return
return nil, ctx.Err()
}
signed, err := s.getBlock(ctx, r)
if err != nil {
return root, err
return nil, err
}
b := signed.Block()
if b.Slot() == slot || b.Slot() < slot {
return r, nil
return r[:], nil
}
return s.ancestorByDB(ctx, b.ParentRoot(), slot)
return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot()), slot)
}
// This retrieves missing blocks from the DB (i.e. blocks that couldn't be received over sync) and inserts them into the fork choice store.
// This is useful for the block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock,
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.BeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
// Fork choice only matters from last finalized slot.
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := s.ForkChoicer().FinalizedCheckpoint()
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
@@ -185,7 +256,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
// As long as parent node is not in fork choice store, and parent node is in DB.
root := blk.ParentRoot()
root := bytesutil.ToBytes32(blk.ParentRoot())
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
if err != nil {
@@ -194,7 +265,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if b.Block().Slot() <= fSlot {
break
}
root = b.Block().ParentRoot()
root = bytesutil.ToBytes32(b.Block().ParentRoot())
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
JustifiedCheckpoint: jCheckpoint,
FinalizedCheckpoint: fCheckpoint}
@@ -203,10 +274,10 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if len(pendingNodes) == 1 {
return nil
}
if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
return ErrNotDescendantOfFinalized
if root != s.ensureRootNotZeros(finalized.Root) {
return errNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)
}
// inserts finalized deposits into our finalized deposit trie.
@@ -230,9 +301,7 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
eth1DepositIndex -= 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex)); err != nil {
return err
}
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
@@ -240,6 +309,23 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
return nil
}
// This deletes the input attestations from the attestation pool so that proposers don't include them in a future block.
func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
for _, att := range atts {
if helpers.IsAggregated(att) {
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
}
}
return nil
}
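The deletion path above branches on whether an attestation is aggregated. A self-contained sketch of the same dispatch over a toy pool (the pool type, the isAggregated rule and the data are invented approximations; Prysm's helpers.IsAggregated and AttPool have richer semantics):

package main

import "fmt"

// toyAtt stands in for an attestation, keeping only the aggregation bits.
type toyAtt struct{ bits []byte }

// isAggregated treats an attestation with more than one bit set as aggregated,
// a rough stand-in for helpers.IsAggregated for the purpose of this sketch.
func isAggregated(a toyAtt) bool {
	count := 0
	for _, b := range a.bits {
		for i := uint(0); i < 8; i++ {
			if b&(1<<i) != 0 {
				count++
			}
		}
	}
	return count > 1
}

// toyPool mimics the two deletion paths the service uses.
type toyPool struct{ aggregated, unaggregated int }

func (p *toyPool) deleteAggregated(_ toyAtt) error   { p.aggregated++; return nil }
func (p *toyPool) deleteUnaggregated(_ toyAtt) error { p.unaggregated++; return nil }

// deletePoolAtts mirrors the branching in the method above.
func deletePoolAtts(p *toyPool, atts []toyAtt) error {
	for _, att := range atts {
		if isAggregated(att) {
			if err := p.deleteAggregated(att); err != nil {
				return err
			}
		} else {
			if err := p.deleteUnaggregated(att); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	pool := &toyPool{}
	_ = deletePoolAtts(pool, []toyAtt{{bits: []byte{0b0000_0001}}, {bits: []byte{0b0000_0110}}})
	fmt.Println(pool.aggregated, pool.unaggregated) // 1 1
}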
// This ensures that the input root defaults to the genesis root instead of zero hashes. This is needed for handling
// the fork choice justification routine.
func (s *Service) ensureRootNotZeros(root [32]byte) [32]byte {

File diff suppressed because it is too large Load Diff

View File

@@ -7,37 +7,33 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/async/event"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// reorgLateBlockCountAttestations is the time until the end of the slot in which we count
// attestations to see if we will reorg the incoming block
const reorgLateBlockCountAttestations = 2 * time.Second
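To make the constant above concrete: the offset slot ticker used further down fires reorgLateBlockCountAttestations before each slot boundary, i.e. two seconds before the end of the current slot, which is when attestations are counted to decide whether a late block should be reorged. A small sketch with a hypothetical helper (not part of this diff):
// lateBlockTickTime returns when the late-block attestation count would fire
// for a given slot: reorgLateBlockCountAttestations before that slot ends.
// Sketch only; genesis and secondsPerSlot are assumed inputs.
func lateBlockTickTime(genesis time.Time, slot, secondsPerSlot uint64) time.Time {
	slotEnd := genesis.Add(time.Duration((slot+1)*secondsPerSlot) * time.Second)
	return slotEnd.Add(-reorgLateBlockCountAttestations)
}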
// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error)
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error)
}
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
type AttestationReceiver interface {
AttestationStateFetcher
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
InForkchoice([32]byte) bool
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
}
// AttestationTargetState returns the pre state of attestation.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
ss, err := slots.EpochStart(target.Epoch)
if err != nil {
return nil, err
@@ -45,9 +41,6 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
// We acquire the lock here instead of in getAttPreState because that function is called from UpdateHead, which holds a write lock
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.getAttPreState(ctx, target)
}
@@ -57,7 +50,7 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
if err != nil {
return err
}
r, err := s.Ancestor(ctx, a.Data.BeaconBlockRoot, targetSlot)
r, err := s.ancestor(ctx, a.Data.BeaconBlockRoot, targetSlot)
if err != nil {
return err
}
@@ -67,14 +60,48 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
return nil
}
// VerifyFinalizedConsistency verifies input root is consistent with finalized store.
// When the input root is not consistent with the finalized store, we know it is not
// on the finalized checkpoint chain that leads to the current canonical chain, and it should be rejected accordingly.
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root has an ancestor that aligns with the finalized checkpoint.
// In this case, we could exit early to save on additional computation.
blockRoot := bytesutil.ToBytes32(root)
if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
return nil
}
f := s.FinalizedCheckpt()
ss, err := slots.EpochStart(f.Epoch)
if err != nil {
return err
}
r, err := s.ancestor(ctx, root, ss)
if err != nil {
return err
}
if !bytes.Equal(f.Root, r) {
return errors.New("Root and finalized store are not consistent")
}
return nil
}
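As a usage illustration (not taken from this diff; validateAttConsistency is a hypothetical wrapper), a caller validating a gossiped attestation could gate on this check roughly like so:
// validateAttConsistency rejects attestations whose beacon block root does not
// descend from the finalized checkpoint, using VerifyFinalizedConsistency above.
// Hypothetical wrapper, sketch only.
func (s *Service) validateAttConsistency(ctx context.Context, att *ethpb.Attestation) error {
	if err := s.VerifyFinalizedConsistency(ctx, att.Data.BeaconBlockRoot); err != nil {
		return errors.Wrap(err, "attestation is not consistent with the finalized checkpoint")
	}
	return nil
}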
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine() {
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
@@ -88,78 +115,111 @@ func (s *Service) spawnProcessAttestationsRoutine() {
}
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
pat := slots.NewSlotTickerWithOffset(s.genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
return
case <-pat.C():
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
case <-st.C():
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("could not process new slot")
if err := s.ForkChoicer().NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("Could not process new slot")
return
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, s.CurrentSlot())
if err := s.UpdateHead(s.ctx); err != nil {
log.WithError(err).Error("Could not process attestations and update head")
return
}
}
}
}()
}
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// The caller of this function MUST hold a lock in forkchoice
func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.UpdateHead")
defer span.End()
start := time.Now()
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
// This function is only called at 10 seconds or 0 seconds into the slot
disparity := params.BeaconNetworkConfig().MaximumGossipClockDisparity
if !features.Get().DisableReorgLateBlocks {
disparity += reorgLateBlockCountAttestations
// It requires no external inputs.
func (s *Service) UpdateHead(ctx context.Context) error {
// Return early when there are no fork choice attestations; there is nothing to process to update head.
// This covers the condition when the node is still initial-syncing to the head of the chain.
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
return nil
}
s.processAttestations(ctx, disparity)
processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
// Only one process can process attestations and update head at a time.
s.processAttestationsLock.Lock()
defer s.processAttestationsLock.Unlock()
start = time.Now()
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
s.processAttestations(ctx)
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
log.WithError(err).Error("Could not compute head from new attestations")
// Fallback to our current head root in the event of a failure.
s.headLock.RLock()
newHeadRoot = s.headRoot()
s.headLock.RUnlock()
return err
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
changed, err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot)
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
log.WithError(err).Error("could not update forkchoice")
log.WithError(err).Warn("Resolving fork due to new attestation")
}
if changed {
s.headLock.RLock()
s.headLock.RLock()
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
s.headLock.RUnlock()
}
s.headLock.RUnlock()
if err := s.notifyEngineIfChangedHead(ctx, newHeadRoot); err != nil {
return err
}
return nil
}
// This calls notifyForkchoiceUpdate in the event that the head has changed.
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) error {
s.headLock.RLock()
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
s.headLock.RUnlock()
return nil
}
s.headLock.RUnlock()
if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
log.Debug("New head does not exist in DB. Do nothing")
return nil // We don't have the block, don't notify the engine and update head.
}
newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get new head block")
return nil
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get state from db")
return nil
}
arg := &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
}
_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
if err != nil {
return err
}
if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}
return nil
}
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context, disparity time.Duration) {
func (s *Service) processAttestations(ctx context.Context) {
atts := s.cfg.AttPool.ForkchoiceAttestations()
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.Data.Slot + 1
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
continue
}
@@ -177,7 +237,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
continue
}
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
if err := s.receiveAttestationNoPubsub(ctx, a); err != nil {
log.WithFields(logrus.Fields{
"slot": a.Data.Slot,
"committeeIndex": a.Data.CommitteeIndex,
@@ -194,11 +254,11 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save latest head info
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation, disparity time.Duration) error {
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
defer span.End()
if err := s.OnAttestation(ctx, att, disparity); err != nil {
if err := s.OnAttestation(ctx, att); err != nil {
return errors.Wrap(err, "could not process attestation")
}

View File

@@ -5,18 +5,21 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -27,18 +30,22 @@ var (
func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
helpers.ClearCache()
service, _ := minimalTestService(t)
beaconDB := testDB.SetupDB(t)
service.genesisTime = time.Now()
chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()
e := primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := service.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
e := types.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := chainService.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -61,8 +68,11 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
}
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -85,10 +95,13 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
}
func TestProcessAttestations_Ok(t *testing.T) {
service, tr := minimalTestService(t)
hook := logTest.NewGlobal()
ctx := tr.ctx
ctx := context.Background()
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
@@ -106,20 +119,99 @@ func TestProcessAttestations_Ok(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx, 0)
service.processAttestations(ctx)
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
}
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
func TestNotifyEngineIfChangedHead(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
require.NoError(t, service.notifyEngineIfChangedHead(ctx, service.headRoot()))
hookErr := "could not notify forkchoice update"
invalidStateErr := "Could not get state from db"
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
require.NoError(t, service.notifyEngineIfChangedHead(ctx, [32]byte{'a'}))
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
service.head = &head{
root: [32]byte{'a'},
block: nil, /* should not panic if notify head uses correct head */
}
// Block in Cache
b := util.NewBeaconBlock()
b.Block.Slot = 2
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
r1, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, r1, wsb))
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
slot: 1,
root: r1,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
require.NoError(t, service.notifyEngineIfChangedHead(ctx, r1))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
// Block in DB
b = util.NewBeaconBlock()
b.Block.Slot = 3
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
r1, err = b.Block.HashTreeRoot()
require.NoError(t, err)
st, _ = util.DeterministicGenesisState(t, 1)
service.head = &head{
slot: 1,
root: r1,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
require.NoError(t, service.notifyEngineIfChangedHead(ctx, r1))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
require.NoError(t, service.notifyEngineIfChangedHead(ctx, [32]byte{}))
require.Equal(t, service.headRoot(), headRoot)
}
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
fcs := doublylinkedtree.New()
opts = append(opts,
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fcs),
)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 0, Root: service.originBlockRoot}))
copied := genesisState.Copy()
// Generate a new block for attesters to attest
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
@@ -134,54 +226,12 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
// Generate attestations for this block in Slot 1
// Generate attestations for this block in Slot 1
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
require.NoError(t, err)
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
// Verify the target is in forkchoice
// Verify the target is in forkchoice
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot))
require.Equal(t, true, fcs.HasNode(service.originBlockRoot))
// Insert a new block to forkchoice
b, err := util.GenerateFullBlock(genesisState, pks, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
b.Block.ParentRoot = service.originBlockRoot[:]
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.Equal(t, 3, fcs.NodeCount())
service.head.root = r // Old head
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
service.UpdateHead(ctx, 0)
require.Equal(t, tRoot, service.headRoot())
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
}
func TestService_UpdateHead_NoAtts(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 0, Root: service.originBlockRoot}))
copied := genesisState.Copy()
// Generate a new block
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
tRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, tRoot))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)
// Insert a new block to forkchoice
ojc := &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}
@@ -195,10 +245,11 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.Equal(t, 3, fcs.NodeCount())
service.head.root = r // Old head
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
service.UpdateHead(ctx, 0)
require.Equal(t, r, service.headRoot())
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.NoError(t, err, service.UpdateHead(ctx))
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
require.Equal(t, tRoot, service.head.root) // Validate head is the new one
}

View File

@@ -1,30 +1,27 @@
package blockchain
import (
"bytes"
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)
// This defines after how many epochs of non-finality the runtime will begin to save hot states to the DB.
var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
var epochsSinceFinalitySaveHotStateDB = types.Epoch(100)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error
ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.SignedBeaconBlock, blkRoots [][32]byte) error
HasBlock(ctx context.Context, root [32]byte) bool
}
@@ -35,10 +32,10 @@ type SlashingReceiver interface {
// ReceiveBlock is a function that defines the operations (minus pubsub)
// that are performed on a received block. The operations consist of:
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
receivedTime := time.Now()
@@ -47,8 +44,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the new block.
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
@@ -56,9 +51,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
// Handle post block operations such as pruning exits and bls messages if incoming block is the head
if err := s.prunePostBlockOperationPools(ctx, blockCopy, blockRoot); err != nil {
log.WithError(err).Error("Could not prune canonical objects from pool ")
// Handle post block operations such as attestations and exits.
if err := s.handlePostBlockOperations(blockCopy.Block()); err != nil {
return err
}
// Have we been finalizing? Should we start saving hot states to db?
@@ -67,13 +62,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
// Reports on block and fork choice metrics.
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
finalized := s.FinalizedCheckpt()
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
// Log block sync status.
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
justified := s.CurrentJustifiedCheckpt()
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
@@ -92,13 +85,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.SignedBeaconBlock, blkRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the incoming newly received block batches, one by one.
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
err := errors.Wrap(err, "could not process block in batch")
@@ -123,15 +113,14 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
})
// Reports on blockCopy and fork choice metrics.
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
finalized := s.FinalizedCheckpt()
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
}
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := s.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
}
@@ -152,66 +141,34 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
}
// prunePostBlockOperationPools only runs on a new head; otherwise it returns nil.
func (s *Service) prunePostBlockOperationPools(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock, root [32]byte) error {
headRoot, err := s.HeadRoot(ctx)
if err != nil {
func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
// Delete the processed block attestations from attestation pool.
if err := s.deletePoolAtts(b.Body().Attestations()); err != nil {
return err
}
// By comparing with the current head root, which has already gone through forkchoice,
// we can assume that if they are equal, the current block root is canonical.
if !bytes.Equal(headRoot, root[:]) {
return nil
}
// Mark block exits as seen so we don't include same ones in future blocks.
for _, e := range blk.Block().Body().VoluntaryExits() {
for _, e := range b.Body().VoluntaryExits() {
s.cfg.ExitPool.MarkIncluded(e)
}
// Mark block BLS changes as seen so we don't include same ones in future blocks.
if err := s.markIncludedBlockBLSToExecChanges(blk.Block()); err != nil {
return errors.Wrap(err, "could not process BLSToExecutionChanges")
}
// Mark slashings as seen so we don't include same ones in future blocks.
for _, as := range blk.Block().Body().AttesterSlashings() {
// Mark attester slashings as seen so we don't include same ones in future blocks.
for _, as := range b.Body().AttesterSlashings() {
s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
}
for _, ps := range blk.Block().Body().ProposerSlashings() {
s.cfg.SlashingPool.MarkIncludedProposerSlashing(ps)
}
return nil
}
func (s *Service) markIncludedBlockBLSToExecChanges(headBlock interfaces.ReadOnlyBeaconBlock) error {
if headBlock.Version() < version.Capella {
return nil
}
changes, err := headBlock.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
for _, change := range changes {
s.cfg.BLSToExecPool.MarkIncluded(change)
}
return nil
}
// This checks whether it's time to start saving hot state to DB.
// It's time when there have been `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
// Requires a read lock on forkchoice
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` from underflowing.
var sinceFinality primitives.Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
var sinceFinality types.Epoch
finalized := s.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
}
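The rest of checkSaveHotStateDB falls outside this hunk. Illustratively, the rule described in the comment above boils down to comparing epochs since finality against the threshold; a rough sketch with a hypothetical helper, not the repository's code:
// shouldSaveHotStateToDB reports whether hot states should start being saved
// to the DB: true once there have been epochsSinceFinalitySaveHotStateDB
// epochs without finality. Sketch only.
func shouldSaveHotStateToDB(currentEpoch, finalizedEpoch types.Epoch) bool {
	if currentEpoch <= finalizedEpoch {
		return false // also guards against underflow
	}
	return currentEpoch-finalizedEpoch >= epochsSinceFinalitySaveHotStateDB
}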

View File

@@ -6,17 +6,21 @@ import (
"testing"
"time"
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
blockchainTesting "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -24,12 +28,12 @@ func TestService_ReceiveBlock(t *testing.T) {
ctx := context.Background()
genesis, keys := util.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot primitives.Slot) *ethpb.SignedBeaconBlock {
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
blk, err := util.GenerateFullBlock(genesis, keys, conf, slot)
assert.NoError(t, err)
return blk
}
//params.SetupTestConfigCleanupWithLock(t)
params.SetupTestConfigCleanupWithLock(t)
bc := params.BeaconConfig().Copy()
bc.ShardCommitteePeriod = 0 // Required for voluntary exits test in reasonable time.
params.OverrideBeaconConfig(bc)
@@ -92,8 +96,7 @@ func TestService_ReceiveBlock(t *testing.T) {
),
},
check: func(t *testing.T, s *Service) {
pending, err := s.cfg.ExitPool.PendingExits()
require.NoError(t, err)
pending := s.cfg.ExitPool.PendingExits(genesis, 1, true /* no limit */)
if len(pending) != 0 {
t.Errorf(
"Did not mark the correct number of exits. Got %d pending but wanted %d",
@@ -120,15 +123,21 @@ func TestService_ReceiveBlock(t *testing.T) {
for _, tt := range tests {
wg.Add(1)
t.Run(tt.name, func(t *testing.T) {
s, tr := minimalTestService(t,
WithFinalizedStateAtStartUp(genesis),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
beaconDB := tr.db
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New()),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
WithFinalizedStateAtStartUp(genesis),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize it here.
_ = s.cfg.StateNotifier.StateFeed()
require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -150,16 +159,24 @@ func TestService_ReceiveBlock(t *testing.T) {
}
func TestService_ReceiveBlockUpdateHead(t *testing.T) {
s, tr := minimalTestService(t,
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New()),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize it here.
_ = s.cfg.StateNotifier.StateFeed()
require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -185,7 +202,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
ctx := context.Background()
genesis, keys := util.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot primitives.Slot) *ethpb.SignedBeaconBlock {
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
blk, err := util.GenerateFullBlock(genesis, keys, conf, slot)
assert.NoError(t, err)
return blk
@@ -206,8 +223,8 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
block: genFullBlock(t, util.DefaultBlockGenConfig(), 2 /*slot*/),
},
check: func(t *testing.T, s *Service) {
assert.Equal(t, primitives.Slot(2), s.head.state.Slot(), "Incorrect head state slot")
assert.Equal(t, primitives.Slot(2), s.head.block.Block().Slot(), "Incorrect head block slot")
assert.Equal(t, types.Slot(2), s.head.state.Slot(), "Incorrect head state slot")
assert.Equal(t, types.Slot(2), s.head.block.Block().Slot(), "Incorrect head block slot")
},
},
{
@@ -225,14 +242,22 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s, _ := minimalTestService(t, WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
err := s.saveGenesisData(ctx, genesis)
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(tt.args.block)
require.NoError(t, err)
blks := []interfaces.ReadOnlySignedBeaconBlock{wsb}
blks := []interfaces.SignedBeaconBlock{wsb}
roots := [][32]byte{root}
err = s.ReceiveBlockBatch(ctx, blks, roots)
if tt.wantedErr != "" {
@@ -246,7 +271,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}
func TestService_HasBlock(t *testing.T) {
s, _ := minimalTestService(t)
opts := testServiceOptsWithDB(t)
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
r := [32]byte{'a'}
if s.HasBlock(context.Background(), r) {
t.Error("Should not have block")
@@ -266,8 +294,10 @@ func TestService_HasBlock(t *testing.T) {
}
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -277,9 +307,9 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -291,57 +321,11 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}
func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
service, tr := minimalTestService(t)
pool := tr.blsPool
t.Run("pre Capella block", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyBellatrix{}
pbb := &ethpb.BeaconBlockBellatrix{
Body: body,
}
blk, err := blocks.NewBeaconBlock(pbb)
require.NoError(t, err)
require.NoError(t, service.markIncludedBlockBLSToExecChanges(blk))
})
t.Run("Post Capella no changes", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyCapella{}
pbb := &ethpb.BeaconBlockCapella{
Body: body,
}
blk, err := blocks.NewBeaconBlock(pbb)
require.NoError(t, err)
require.NoError(t, service.markIncludedBlockBLSToExecChanges(blk))
})
t.Run("Post Capella some changes", func(t *testing.T) {
idx := primitives.ValidatorIndex(123)
change := &ethpb.BLSToExecutionChange{
ValidatorIndex: idx,
}
signedChange := &ethpb.SignedBLSToExecutionChange{
Message: change,
}
body := &ethpb.BeaconBlockBodyCapella{
BlsToExecutionChanges: []*ethpb.SignedBLSToExecutionChange{signedChange},
}
pbb := &ethpb.BeaconBlockCapella{
Body: body,
}
blk, err := blocks.NewBeaconBlock(pbb)
require.NoError(t, err)
pool.InsertBLSToExecChange(signedChange)
require.Equal(t, true, pool.ValidatorExists(idx))
require.NoError(t, service.markIncludedBlockBLSToExecChanges(blk))
require.Equal(t, false, pool.ValidatorExists(idx))
})
}

View File

@@ -11,55 +11,55 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
f "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/async/event"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
f "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot primitives.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
processAttestationsLock sync.Mutex
}
// config options for the service.
@@ -72,7 +72,6 @@ type config struct {
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
MaxRoutines int
StateNotifier statefeed.Notifier
@@ -86,8 +85,6 @@ type config struct {
ExecutionEngineCaller execution.EngineCaller
}
var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")
// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
@@ -97,18 +94,21 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
cfg: &config{ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache()},
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
cfg: &config{},
}
for _, opt := range opts {
if err := opt(srv); err != nil {
return nil, err
}
}
if srv.clockSetter == nil {
return nil, ErrMissingClockSetter
}
var err error
if srv.justifiedBalances == nil {
srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
if err != nil {
return nil, err
}
}
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
return nil, err
@@ -129,8 +129,8 @@ func (s *Service) Start() {
log.Fatal(err)
}
}
s.spawnProcessAttestationsRoutine()
s.spawnLateBlockTasksLoop()
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed())
}
// Stop the blockchain service's main event loop and associated goroutines.
@@ -191,13 +191,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
}
spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
if justified == nil {
return errNilJustifiedCheckpoint
}
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
@@ -206,24 +199,29 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errNilFinalizedCheckpoint
}
var forkChoicer f.ForkChoicer
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
if !features.Get().DisableForkchoiceDoublyLinkedTree {
forkChoicer = doublylinkedtree.New()
} else {
forkChoicer = protoarray.New()
}
s.cfg.ForkChoiceStore = forkChoicer
if err := forkChoicer.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
if err := forkChoicer.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
forkChoicer.SetGenesisTime(uint64(s.genesisTime.Unix()))
st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint state")
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, fRoot); err != nil {
if err := forkChoicer.InsertNode(s.ctx, st, fRoot); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
if !features.Get().EnableStartOptimistic {
@@ -232,7 +230,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(s.ctx, fRoot); err != nil {
if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
@@ -244,10 +242,13 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
}
vr := bytesutil.ToBytes32(saved.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
return errors.Wrap(err, "failed to initialize blockchain service")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
},
})
return nil
}
@@ -364,10 +365,15 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
}
// We send out a state initialized event to the rest of the services
// running in the beacon node.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(),
},
})
}
// initializes the state and genesis block of the beacon chain to persistent storage
@@ -427,8 +433,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
s.originBlockRoot = genesisBlkRoot
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
log.WithError(err).Fatal("Could not process genesis block for fork choice")
}
@@ -449,7 +453,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
// 1.) Check fork choice store.
// 2.) Check DB.
// Checking 1.) is ten times faster than checking 2.)
// this function requires a lock in forkchoice
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
if s.cfg.ForkChoiceStore.HasNode(root) {
return true

View File

@@ -5,10 +5,10 @@ import (
"io"
"testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/sirupsen/logrus"
)

View File

@@ -8,36 +8,72 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/container/trie"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v3/async/event"
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/container/trie"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)
type mockBeaconNode struct {
stateFeed *event.Feed
}
// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return mbn.stateFeed
}
type mockBroadcaster struct {
broadcastCalled bool
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
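The blank-identifier line above is Go's compile-time guard that mockBroadcaster keeps satisfying p2p.Broadcaster: the conversion is checked at build time, costs nothing at runtime, and breaks the build the moment the interface gains a method the mock lacks. A minimal, self-contained sketch of the same idiom (Greeter and consoleGreeter are illustrative names, not Prysm types):

package main

import "fmt"

// Greeter is a hypothetical interface used only to illustrate the assertion idiom.
type Greeter interface {
	Greet(name string) string
}

type consoleGreeter struct{}

func (consoleGreeter) Greet(name string) string { return "hello, " + name }

// Compile-time check: the build fails if consoleGreeter ever stops satisfying Greeter.
var _ Greeter = (*consoleGreeter)(nil)

func main() {
	fmt.Println(consoleGreeter{}.Greet("prysm"))
}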
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
ctx := context.Background()
var web3Service *execution.Service
@@ -48,7 +84,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
require.NoError(t, err)
@@ -82,8 +118,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
depositCache, err := depositcache.New()
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
stateGen := stategen.New(beaconDB)
// Save a state in stategen for the purposes of testing a service stop / shutdown.
require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
@@ -92,15 +127,11 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithDepositCache(depositCache),
WithChainStartFetcher(web3Service),
WithAttestationPool(attestations.NewPool()),
WithSlashingPool(slashings.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithForkChoiceStore(doublylinkedtree.New()),
WithAttestationService(attService),
WithStateGen(stateGen),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithClockSynchronizer(startup.NewClockSynchronizer()),
}
chainService, err := NewService(ctx, opts...)
@@ -117,19 +148,16 @@ func TestChainStartStop_Initialized(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
ss := &ethpb.StateSummary{
Slot: 1,
@@ -154,18 +182,15 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
chainService.cfg.FinalizedStateAtStartUp = s
// Test the start function.
@@ -228,14 +253,12 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -256,9 +279,14 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
}
func TestChainService_InitializeChainInfo(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -270,18 +298,16 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, c.StartFromSavedState(headState))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
@@ -290,7 +316,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
assert.DeepEqual(t, headBlock, pb, "Head block incorrect")
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
@@ -301,9 +327,14 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
}
func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -315,26 +346,25 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB := tr.ctx, tr.db
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
ss := &ethpb.StateSummary{
Slot: finalizedSlot,
Root: headRoot[:],
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: headRoot[:], Epoch: slots.ToEpoch(finalizedSlot)}))
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.StartFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.originBlockRoot, "Genesis block root incorrect")
pb, err := c.head.block.Proto()
require.NoError(t, err)
@@ -344,9 +374,8 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fc := doublylinkedtree.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, fc), ForkChoiceStore: fc},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: doublylinkedtree.New()},
}
blk := util.NewBeaconBlock()
blk.Block.Slot = 1
@@ -366,6 +395,25 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
}
}
func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
}
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
}
func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -389,10 +437,10 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
ctx: ctx,
cancel: cancel,
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
}
bb := util.NewBeaconBlock()
r, err := bb.Block.HashTreeRoot()
@@ -405,21 +453,17 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
}
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(context.Background(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
stateChannel := make(chan *feed.Event, 1)
stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
service.onExecutionChainStart(context.Background(), time.Now())
stateEvent := <-stateChannel
require.Equal(t, int(stateEvent.Type), statefeed.Initialized)
_, ok := stateEvent.Data.(*statefeed.InitializedData)
require.Equal(t, true, ok)
}
func BenchmarkHasBlockDB(b *testing.B) {
@@ -441,6 +485,27 @@ func BenchmarkHasBlockDB(b *testing.B) {
}
}
func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
}
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}
func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
@@ -451,7 +516,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := state_native.InitializeFromProtoPhase0(bs)
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
@@ -468,10 +533,15 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
EnableStartOptimistic: true,
})
defer resetFn()
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
@@ -482,17 +552,14 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
@@ -502,19 +569,3 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, op)
}
// MockClockSetter satisfies the ClockSetter interface for testing the conditions where blockchain.Service should
// call SetGenesis.
type MockClockSetter struct {
G *startup.Clock
Err error
}
var _ startup.ClockSetter = &MockClockSetter{}
// SetClock satisfies the ClockSetter interface.
// The value is written to an exported field 'G' so that it can be accessed in tests.
func (s *MockClockSetter) SetClock(g *startup.Clock) error {
s.G = g
return s.Err
}
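Because SetClock records the clock in the exported G field and returns whatever is stored in Err, the same mock can drive the failure path as well as the happy path exercised in TestProcessChainStartTime_ReceivedFeed above. A hedged fragment for use inside a test (the errors import and the startup.NewClock constructor signature are assumptions, not shown in this diff):

// Hedged sketch: assumes "errors" is imported and startup.NewClock(genesisTime, genesisValidatorsRoot) exists.
mgs := &MockClockSetter{Err: errors.New("clock already set")}
clock := startup.NewClock(time.Now(), [32]byte{})
err := mgs.SetClock(clock)
require.NotEqual(t, nil, err)  // the injected error is returned to the caller
require.Equal(t, clock, mgs.G) // the clock is still captured in G for inspection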

View File

@@ -1,115 +0,0 @@
package blockchain
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)
type mockBeaconNode struct {
stateFeed *event.Feed
}
// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return mbn.stateFeed
}
type mockBroadcaster struct {
broadcastCalled bool
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
type testServiceRequirements struct {
ctx context.Context
db db.Database
fcs forkchoice.ForkChoicer
sg *stategen.State
notif statefeed.Notifier
cs *startup.ClockSynchronizer
attPool attestations.Pool
attSrv *attestations.Service
blsPool *blstoexec.Pool
dc *depositcache.DepositCache
}
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
sg := stategen.New(beaconDB, fcs)
notif := &mockBeaconNode{}
fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
cs := startup.NewClockSynchronizer()
attPool := attestations.NewPool()
attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
require.NoError(t, err)
blsPool := blstoexec.NewPool()
dc, err := depositcache.New()
require.NoError(t, err)
req := &testServiceRequirements{
ctx: ctx,
db: beaconDB,
fcs: fcs,
sg: sg,
notif: notif,
cs: cs,
attPool: attPool,
attSrv: attSrv,
blsPool: blsPool,
dc: dc,
}
defOpts := []Option{WithDatabase(req.db),
WithStateNotifier(req.notif),
WithStateGen(req.sg),
WithForkChoiceStore(req.fcs),
WithClockSynchronizer(req.cs),
WithAttestationPool(req.attPool),
WithAttestationService(req.attSrv),
WithBLSToExecPool(req.blsPool),
WithDepositCache(dc),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)
s, err := NewService(req.ctx, opts...)
require.NoError(t, err)
return s, req
}
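Because the variadic opts are appended after defOpts, the last WithX call for a given field wins when NewService applies the options in order, so callers can both extend and replace the default wiring. A hedged fragment illustrating the pattern (headState is assumed to be prepared by the calling test, and the blockchain testing package is assumed to be imported as mock, as in the test file earlier in this diff):

// Defaults only: DB, stategen, fork choice, clock synchronizer and the various pools from defOpts.
service, tr := minimalTestService(t)
_ = service
_ = tr.db // the requirements struct hands back the wired-in DB, stategen, fork choice, etc. for assertions

// The variadic opts run after defOpts, so a later WithStateNotifier replaces the default
// mockBeaconNode notifier, and WithFinalizedStateAtStartUp adds a setting the defaults omit.
overridden, _ := minimalTestService(t,
	WithStateNotifier(&mock.MockStateNotifier{}),
	WithFinalizedStateAtStartUp(headState), // headState prepared by the calling test
)
_ = overridden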

View File

@@ -0,0 +1,81 @@
package blockchain
import (
"context"
"errors"
"sync"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
)
type stateBalanceCache struct {
sync.Mutex
balances []uint64
root [32]byte
stateGen stateByRooter
}
type stateByRooter interface {
StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}
// newStateBalanceCache exists to remind us that stateBalanceCache needs a state gen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
if sg == nil {
return nil, errors.New("can't initialize state balance cache without stategen")
}
return &stateBalanceCache{stateGen: sg}, nil
}
// update is called by get() when the requested root doesn't match
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// WARNING: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return nil, err
}
if justifiedState == nil || justifiedState.IsNil() {
return nil, errNilStateFromStategen
}
epoch := time.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}
if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil {
return nil, err
}
c.balances = justifiedBalances
c.root = justifiedRoot
return c.balances, nil
}
// get takes an explicit justifiedRoot so it can invalidate the singleton cache key
// when the justified root changes, and takes a context so that the long-running stategen
// read path can connect to the upstream cancellation/timeout chain.
func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
c.Lock()
defer c.Unlock()
if justifiedRoot != [32]byte{} && justifiedRoot == c.root {
stateBalanceCacheHit.Inc()
return c.balances, nil
}
return c.update(ctx, justifiedRoot)
}
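Taken together, get() and update() implement a single-entry cache keyed by the current justified root: reads for the same root are served from memory, and a new justified root forces one full stategen read plus a pass over every validator. A hedged sketch of the intended call pattern (the justifiedBalances wrapper and the rootA/rootB values are illustrative only):

// justifiedBalances is a hypothetical caller showing the intended read path.
func justifiedBalances(ctx context.Context, sg *stategen.State, rootA, rootB [32]byte) error {
	cache, err := newStateBalanceCache(sg) // refuses a nil stategen
	if err != nil {
		return err
	}
	// First read for rootA misses: update() loads the state from stategen and records
	// effective balances for active validators (and 0 for everyone else).
	if _, err := cache.get(ctx, rootA); err != nil {
		return err
	}
	// Same root again: served from the cached slice, no stategen read.
	if _, err := cache.get(ctx, rootA); err != nil {
		return err
	}
	// A different justified root invalidates the single cached entry and triggers another update().
	if _, err := cache.get(ctx, rootB); err != nil {
		return err
	}
	return nil
}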

Some files were not shown because too many files have changed in this diff.