Compare commits

..

2 Commits

Author SHA1 Message Date
Preston Van Loon
9fd5575f65 Update web-ui to beta.4 (#9047)
* add site data

* gofmt

* fix embed

* fmt

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
(cherry picked from commit 9dc4c49fbd)
2021-06-16 08:44:02 -05:00
Preston Van Loon
f5428a2c60 Revert "Invert attest-timely flag" (#9044)
This reverts commit 5fe98ab260.

(cherry picked from commit 6971245e05)
2021-06-15 18:11:06 -05:00
2184 changed files with 84071 additions and 126699 deletions

View File

@@ -21,24 +21,34 @@ run --host_force_python=PY2
build --sandbox_default_allow_network=false
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --workspace_status_command=./scripts/workspace_status.sh
build --stamp
# Use mainnet protobufs at runtime
run --define ssz=mainnet
test --define ssz=mainnet
build --define ssz=mainnet
# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
test --incompatible_strict_action_env
run --incompatible_strict_action_env
# Disable kafka by default, it takes a long time to build...
build --define kafka_enabled=false
test --define kafka_enabled=false
run --define kafka_enabled=false
build --define blst_disabled=false
test --define blst_disabled=false
run --define blst_disabled=false
build:kafka_enabled --define kafka_enabled=true
build:kafka_enabled --define gotags=kafka_enabled
build:blst_disabled --define blst_disabled=true
build:blst_disabled --define gotags=blst_disabled
build:minimal --//proto:network=minimal
build:minimal --@io_bazel_rules_go//go/config:tags=minimal
# Release flags
build:release --compilation_mode=opt
build:release --config=llvm
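The named configs declared above (`kafka_enabled`, `blst_disabled`, `minimal`, `release`) are selected with `--config=<name>` on the Bazel command line. A minimal sketch of how they might be combined; the `//cmd/beacon-chain` target label is illustrative and may not match the tree at either end of this comparison:

```bash
# Defaults from above: mainnet SSZ, kafka disabled, blst enabled.
bazel build //cmd/beacon-chain

# Optimized binary via the release flags (opt compilation, llvm config).
bazel build --config=release //cmd/beacon-chain

# Opt back into kafka, or switch to minimal network constants.
bazel build --config=kafka_enabled //cmd/beacon-chain
bazel build --config=minimal //cmd/beacon-chain
```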

View File

@@ -1 +1 @@
4.2.1
3.7.0

View File

@@ -10,7 +10,7 @@
# Prysm specific remote-cache properties.
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_toplevel
build:remote-cache --remote_download_minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
@@ -22,7 +22,7 @@ build:remote-cache --incompatible_strict_action_env=true
# Import workspace options.
import %workspace%/.bazelrc
startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
startup --host_jvm_args=-Xmx1000m --host_jvm_args=-Xms1000m
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
@@ -43,7 +43,3 @@ build --flaky_test_attempts=5
# Disable flaky test detection for fuzzing.
test:fuzz --flaky_test_attempts=1
# Better caching
build:nostamp --nostamp
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh
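The `remote-cache` config points at a `bazel-remote-cache` host, so it is only useful where that endpoint resolves (the project's CI); a sketch of selecting it together with the `nostamp` config defined in the same file:

```bash
# Assumes grpc://bazel-remote-cache:9092 is reachable, e.g. on Prysm's CI workers.
bazel build --config=remote-cache --config=nostamp //...
```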

View File

@@ -25,22 +25,6 @@ jobs:
with:
path: ./
- name: GoImports checker
id: goimports
uses: Jerome1337/goimports-action@v1.0.2
with:
goimports-path: ./
- name: Gosec security scanner
uses: securego/gosec@master
with:
args: '-exclude=G307 -exclude-dir=crypto/bls/herumi ./...'
- name: Golangci-lint
uses: golangci/golangci-lint-action@v2
with:
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go
build:
name: Build
runs-on: ubuntu-latest
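The removed workflow steps pass their arguments straight through to `gosec` and `golangci-lint`, so the same checks can be approximated locally; a sketch assuming both tools are installed on PATH:

```bash
# Mirrors the gosec step's args.
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

# Mirrors the golangci-lint step's args.
golangci-lint run --print-issued-lines --sort-results --no-config --timeout=10m \
  --disable-all -E deadcode -E errcheck -E gosimple \
  --skip-files=validator/web/site_data.go
```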

View File

@@ -14,7 +14,6 @@ exports_files([
# gazelle:prefix github.com/prysmaticlabs/prysm
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
gazelle(
name = "gazelle",
@@ -144,6 +143,12 @@ common_files = {
"//:README.md": "README.md",
}
toolchain(
name = "built_cmake_toolchain",
toolchain = "@rules_foreign_cc//tools/build_defs/native_tools:built_cmake",
toolchain_type = "@rules_foreign_cc//tools/build_defs:cmake_toolchain",
)
string_setting(
name = "gotags",
build_setting_default = "",
@@ -152,9 +157,3 @@ string_setting(
"libfuzzer",
],
)
sh_binary(
name = "prysm_sh",
srcs = ["prysm.sh"],
visibility = ["//visibility:public"],
)
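With the `# gazelle:` directives and the `gazelle` target above, BUILD files for Go packages are regenerated rather than written by hand, for example:

```bash
# Regenerate Go BUILD rules using the gazelle directives declared at the top of this file.
bazel run //:gazelle
```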

View File

@@ -175,7 +175,7 @@ We consider two types of contributions to our repo and categorize them as follow
### Part-Time Contributors
Anyone can become a part-time contributor and help out on implementing Ethereum consensus. The responsibilities of a part-time contributor include:
Anyone can become a part-time contributor and help out on implementing eth2. The responsibilities of a part-time contributor include:
- Engaging in Gitter conversations, asking questions on how to begin contributing to the project
- Opening up GitHub issues to express interest in code to implement

View File

@@ -33,8 +33,8 @@ generates SSZ marshal related code based on defined data structures. These gener
also be updated and checked in as frequently.
```bash
./hack/update-go-pbs.sh
./hack/update-go-ssz.sh
./scripts/update-go-pbs.sh
./scripts/update-go-ssz.sh
```
*Recommendation: Use go build only for local development and use bazel build for production.*

View File

@@ -1,6 +1,6 @@
# Prysm Client Interoperability Guide
This README details how to set up Prysm for interop testing for usage with other Ethereum consensus clients.
This README details how to set up Prysm for interop testing for usage with other Ethereum 2.0 clients.
## Installation & Setup

View File

@@ -1,11 +1,11 @@
# Prysm: An Ethereum Consensus Implementation Written in Go
# Prysm: An Ethereum 2.0 Implementation Written in Go
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![ETH2.0_Spec_Version 1.0.0](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v1.0.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.0.0)
[![ETH2.0_Spec_Version 1.0.0](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v1.0.0-blue.svg)](https://github.com/ethereum/eth2.0-specs/tree/v1.0.0)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum 2.0](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
### Getting Started

View File

@@ -1,11 +0,0 @@
# Security Policy
## Supported Versions
[Releases](https://github.com/prysmaticlabs/prysm/releases/) contains all available releases. We recommend using the [most recently released version](https://github.com/prysmaticlabs/prysm/releases/latest).
## Reporting a Vulnerability
Please see our signed [security.txt](https://github.com/prysmaticlabs/prysm/blob/develop/.well-known/security.txt) for preferred encryption and reporting destination.
**Please do not file a public ticket** mentioning the vulnerability, as doing so could increase the likelihood of the vulnerability being used before a fix has been created, released and installed on the network.

View File

@@ -5,7 +5,7 @@ Effective as of Oct 14, 2020
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.
### About Prysm
Prysm is a client implementation for Ethereum consensus protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
Prysm is a client implementation for Ethereum 2.0 Phase 0 protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
### Licensing Terms
Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.
@@ -13,7 +13,7 @@ Prysm is a fully open-source software program licensed pursuant to the GNU Gener
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.
### Risks of Operating Prysm
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
The use of Prysm and acting as a validator on the Ethereum 2.0 network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.

WORKSPACE
View File

@@ -76,9 +76,9 @@ http_archive(
http_archive(
name = "io_bazel_rules_docker",
sha256 = "1f4e59843b61981a96835dc4ac377ad4da9f8c334ebe5e0bb3f58f80c09735f4",
strip_prefix = "rules_docker-0.19.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.19.0/rules_docker-v0.19.0.tar.gz"],
sha256 = "59d5b42ac315e7eadffa944e86e90c2990110a1c8075f1cd145f487e999d22b3",
strip_prefix = "rules_docker-0.17.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.17.0/rules_docker-v0.17.0.tar.gz"],
)
http_archive(
@@ -88,8 +88,6 @@ http_archive(
# Required until https://github.com/bazelbuild/rules_go/pull/2450 merges otherwise nilness
# nogo check fails for certain third_party dependencies.
"//third_party:io_bazel_rules_go.patch",
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "7c10271940c6bce577d51a075ae77728964db285dac0a46614a7934dc34303e6",
urls = [
@@ -139,34 +137,6 @@ load(
"container_pull",
)
container_pull(
name = "cc_image_base",
digest = "sha256:2c4bb6b7236db0a55ec54ba8845e4031f5db2be957ac61867872bf42e56c4deb",
registry = "gcr.io",
repository = "distroless/cc",
)
container_pull(
name = "cc_debug_image_base",
digest = "sha256:3680c61e81f68fc00bfb5e1ec65e8e678aaafa7c5f056bc2681c29527ebbb30c",
registry = "gcr.io",
repository = "distroless/cc",
)
container_pull(
name = "go_image_base",
digest = "sha256:ba7a315f86771332e76fa9c3d423ecfdbb8265879c6f1c264d6fff7d4fa460a4",
registry = "gcr.io",
repository = "distroless/base",
)
container_pull(
name = "go_debug_image_base",
digest = "sha256:efd8711717d9e9b5d0dbb20ea10876dab0609c923bc05321b912f9239090ca80",
registry = "gcr.io",
repository = "distroless/base",
)
container_pull(
name = "alpine_cc_linux_amd64",
digest = "sha256:752aa0c9a88461ffc50c5267bb7497ef03a303e38b2c8f7f2ded9bebe5f1f00e",
@@ -190,6 +160,36 @@ go_register_toolchains(
nogo = "@//:nogo",
)
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
gazelle_dependencies()
load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")
go_embed_data_dependencies()
load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")
gometalinter_dependencies()
load(
"@io_bazel_rules_docker//go:image.bzl",
_go_image_repos = "repositories",
)
# Golang images
# This is using gcr.io/distroless/base
_go_image_repos()
# CC images
# This is using gcr.io/distroless/base
load(
"@io_bazel_rules_docker//cc:image.bzl",
_cc_image_repos = "repositories",
)
_cc_image_repos()
http_archive(
name = "prysm_testnet_site",
build_file_content = """
@@ -225,12 +225,8 @@ filegroup(
url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.1.3"
bls_test_version = "v0.1.1"
http_archive(
name = "consensus_spec_tests_general",
name = "eth2_spec_tests_general",
build_file_content = """
filegroup(
name = "test_data",
@@ -241,12 +237,12 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "e572f8c57e2dbbaeee056a600dc9d08396010dd5134a3a95e43c540470acf6f5",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
sha256 = "deacc076365c727d653ac064894ecf0d1b0a675d86704dc8de271259f6a7314b",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.1.0-alpha.3/general.tar.gz",
)
http_archive(
name = "consensus_spec_tests_minimal",
name = "eth2_spec_tests_minimal",
build_file_content = """
filegroup(
name = "test_data",
@@ -257,12 +253,12 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "7e2f62eaae9fd541690cc61d252556d0c5deb585ca1873aacbeb5b02d06f1362",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
sha256 = "6e9886af3d2f024e563249d70388129e28e3e92f742f289238ed9b7ec7a7f930",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.1.0-alpha.3/minimal.tar.gz",
)
http_archive(
name = "consensus_spec_tests_mainnet",
name = "eth2_spec_tests_mainnet",
build_file_content = """
filegroup(
name = "test_data",
@@ -273,64 +269,21 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "05cbb89810c8acd6c57c4773ddfd167305cd4539960e9b4d7b69e1a988b35ad2",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
http_archive(
name = "consensus_spec",
build_file_content = """
filegroup(
name = "spec_data",
srcs = glob([
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "0cef67b08448f7eb43bf66c464451c9e7a4852df8ef90555cca6d440e3436882",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
http_archive(
name = "bls_spec_tests",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "93c7d006e7c5b882cbd11dc9ec6c5d0e07f4a8c6b27a32f964eb17cf2db9763a",
url = "https://github.com/ethereum/bls12-381-tests/releases/download/%s/bls_tests_yaml.tar.gz" % bls_test_version,
)
http_archive(
name = "eth2_networks",
build_file_content = """
filegroup(
name = "configs",
srcs = glob([
"shared/**/config.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "9dc47bf6b14aed7fac8833e35ab83a69131b43fa5789b3256bf1ac3d4861aeb8",
strip_prefix = "eth2-networks-7fa1b868985ee24aad65567f9250cf7fa86f97b1",
url = "https://github.com/eth2-clients/eth2-networks/archive/7fa1b868985ee24aad65567f9250cf7fa86f97b1.tar.gz",
sha256 = "a7b3d0ffc02a567250f424d69b2474fdc9477cd56eada60af7474560b46a8527",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.1.0-alpha.3/mainnet.tar.gz",
)
http_archive(
name = "com_github_bazelbuild_buildtools",
sha256 = "7a182df18df1debabd9e36ae07c8edfa1378b8424a04561b674d933b965372b3",
strip_prefix = "buildtools-f2aed9ee205d62d45c55cfabbfd26342f8526862",
url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
sha256 = "b5d7dbc6832f11b6468328a376de05959a1a9e4e9f5622499d3bab509c26b46a",
strip_prefix = "buildtools-bf564b4925ab5876a3f64d8b90fab7f769013d42",
url = "https://github.com/bazelbuild/buildtools/archive/bf564b4925ab5876a3f64d8b90fab7f769013d42.zip",
)
load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")
buildifier_dependencies()
git_repository(
name = "com_google_protobuf",
commit = "436bd7880e458532901c58f4d9d1ea23fa7edd52",
@@ -338,9 +291,34 @@ git_repository(
shallow_since = "1617835118 -0700",
)
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
protobuf_deps()
# Group the sources of the library so that CMake rule have access to it
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
http_archive(
name = "rules_foreign_cc",
sha256 = "b85ce66a3410f7370d1a9a61dfe3a29c7532b7637caeb2877d8d0dfd41d77abb",
strip_prefix = "rules_foreign_cc-3515b20a2417c4dd51c8a4a8cac1f6ecf3c6d934",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/3515b20a2417c4dd51c8a4a8cac1f6ecf3c6d934.zip",
)
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
rules_foreign_cc_dependencies([
"@prysm//:built_cmake_toolchain",
])
http_archive(
name = "librdkafka",
build_file_content = all_content,
sha256 = "3b99a36c082a67ef6295eabd4fb3e32ab0bff7c6b0d397d6352697335f4e57eb",
strip_prefix = "librdkafka-1.4.2",
urls = ["https://github.com/edenhill/librdkafka/archive/v1.4.2.tar.gz"],
)
http_archive(
name = "sigp_beacon_fuzz_corpora",
build_file = "//third_party:beacon-fuzz/corpora.BUILD",
@@ -362,9 +340,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "0a3d94428ea28916276694c517b82b364122063fdbf924f54ee9ae0bc500289f",
sha256 = "54ce527b83d092da01127f2e3816f4d5cfbab69354caba8537f1ea55889b6d7c",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.1/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.4/prysm-web-ui.tar.gz",
],
)
@@ -377,42 +355,8 @@ load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
bls_dependencies()
load(
"@io_bazel_rules_docker//go:image.bzl",
_go_image_repos = "repositories",
)
load("@com_github_ethereum_go_ethereum//:deps.bzl", "geth_dependencies")
# Golang images
# This is using gcr.io/distroless/base
_go_image_repos()
# CC images
# This is using gcr.io/distroless/base
load(
"@io_bazel_rules_docker//cc:image.bzl",
_cc_image_repos = "repositories",
)
_cc_image_repos()
load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")
go_embed_data_dependencies()
load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")
gometalinter_dependencies()
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
gazelle_dependencies()
load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")
buildifier_dependencies()
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
protobuf_deps()
geth_dependencies()
# Do NOT add new go dependencies here! Refer to DEPENDENCIES.md!
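Each spec-test archive above wraps its YAML files in a public `test_data` filegroup, so test rules elsewhere in the repository can pull them in through the external-repository label. A sketch of such a consumer; the `go_test` name and srcs are hypothetical, and the archive name differs between the two sides of this diff (`consensus_spec_tests_mainnet` versus `eth2_spec_tests_mainnet`):

```python
# Hypothetical consumer; only the @consensus_spec_tests_mainnet//:test_data label
# (or @eth2_spec_tests_mainnet//:test_data on the other side of this diff)
# comes from the WORKSPACE above.
go_test(
    name = "mainnet_spectest",
    srcs = ["mainnet_test.go"],
    data = ["@consensus_spec_tests_mainnet//:test_data"],
)
```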

View File

@@ -1,40 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"api_middleware.go",
"log.go",
"param_handling.go",
"process_field.go",
"process_request.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/api/gateway/apimiddleware",
visibility = ["//visibility:public"],
deps = [
"//api/grpc:go_default_library",
"//encoding/bytesutil:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"param_handling_test.go",
"process_request_test.go",
],
embed = [":go_default_library"],
deps = [
"//api/grpc:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)
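Given the `importpath` in the `go_default_library` rule above, this BUILD file presumably lives at `api/gateway/apimiddleware`, so the package and its tests can be exercised with Bazel; the labels below are inferred from that import path:

```bash
bazel build //api/gateway/apimiddleware:go_default_library
bazel test //api/gateway/apimiddleware:go_default_test
```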

View File

@@ -1,233 +0,0 @@
package apimiddleware
import (
"net/http"
"reflect"
"github.com/gorilla/mux"
)
// ApiProxyMiddleware is a proxy between an Ethereum consensus API HTTP client and grpc-gateway.
// The purpose of the proxy is to handle HTTP requests and gRPC responses in such a way that:
// - Ethereum consensus API requests can be handled by grpc-gateway correctly
// - gRPC responses can be returned as spec-compliant Ethereum consensus API responses
type ApiProxyMiddleware struct {
GatewayAddress string
EndpointCreator EndpointFactory
}
// EndpointFactory is responsible for creating new instances of Endpoint values.
type EndpointFactory interface {
Create(path string) (*Endpoint, error)
Paths() []string
IsNil() bool
}
// Endpoint is a representation of an API HTTP endpoint that should be proxied by the middleware.
type Endpoint struct {
Path string // The path of the HTTP endpoint.
GetResponse interface{} // The struct corresponding to the JSON structure used in a GET response.
PostRequest interface{} // The struct corresponding to the JSON structure used in a POST request.
PostResponse interface{} // The struct corresponding to the JSON structure used in a POST response.
RequestURLLiterals []string // Names of URL parameters that should not be base64-encoded.
RequestQueryParams []QueryParam // Query parameters of the request.
Err ErrorJson // The struct corresponding to the error that should be returned in case of a request failure.
Hooks HookCollection // A collection of functions that can be invoked at various stages of the request/response cycle.
CustomHandlers []CustomHandler // Functions that will be executed instead of the default request/response behaviour.
}
// RunDefault expresses whether the default processing logic should be carried out after running a pre hook.
type RunDefault bool
// DefaultEndpoint returns an Endpoint with default configuration, e.g. DefaultErrorJson for error handling.
func DefaultEndpoint() Endpoint {
return Endpoint{
Err: &DefaultErrorJson{},
}
}
// QueryParam represents a single query parameter's metadata.
type QueryParam struct {
Name string
Hex bool
Enum bool
}
// CustomHandler is a function that can be invoked at the very beginning of the request,
// essentially replacing the whole default request/response logic with custom logic for a specific endpoint.
type CustomHandler = func(m *ApiProxyMiddleware, endpoint Endpoint, w http.ResponseWriter, req *http.Request) (handled bool)
// HookCollection contains hooks that can be used to amend the default request/response cycle with custom logic for a specific endpoint.
type HookCollection struct {
OnPreDeserializeRequestBodyIntoContainer func(endpoint *Endpoint, w http.ResponseWriter, req *http.Request) (RunDefault, ErrorJson)
OnPostDeserializeRequestBodyIntoContainer func(endpoint *Endpoint, w http.ResponseWriter, req *http.Request) ErrorJson
OnPreDeserializeGrpcResponseBodyIntoContainer func([]byte, interface{}) (RunDefault, ErrorJson)
OnPreSerializeMiddlewareResponseIntoJson func(interface{}) (RunDefault, []byte, ErrorJson)
}
// fieldProcessor applies the processing function f to a value when the tag is present on the field.
type fieldProcessor struct {
tag string
f func(value reflect.Value) error
}
// Run starts the proxy, registering all proxy endpoints.
func (m *ApiProxyMiddleware) Run(gatewayRouter *mux.Router) {
for _, path := range m.EndpointCreator.Paths() {
m.handleApiPath(gatewayRouter, path, m.EndpointCreator)
}
}
func (m *ApiProxyMiddleware) handleApiPath(gatewayRouter *mux.Router, path string, endpointFactory EndpointFactory) {
gatewayRouter.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
endpoint, err := endpointFactory.Create(path)
if err != nil {
errJson := InternalServerErrorWithMessage(err, "could not create endpoint")
WriteError(w, errJson, nil)
return
}
for _, handler := range endpoint.CustomHandlers {
if handler(m, *endpoint, w, req) {
return
}
}
if req.Method == "POST" {
if errJson := deserializeRequestBodyIntoContainerWrapped(endpoint, req, w); errJson != nil {
WriteError(w, errJson, nil)
return
}
if errJson := ProcessRequestContainerFields(endpoint.PostRequest); errJson != nil {
WriteError(w, errJson, nil)
return
}
if errJson := SetRequestBodyToRequestContainer(endpoint.PostRequest, req); errJson != nil {
WriteError(w, errJson, nil)
return
}
}
if errJson := m.PrepareRequestForProxying(*endpoint, req); errJson != nil {
WriteError(w, errJson, nil)
return
}
grpcResp, errJson := ProxyRequest(req)
if errJson != nil {
WriteError(w, errJson, nil)
return
}
grpcRespBody, errJson := ReadGrpcResponseBody(grpcResp.Body)
if errJson != nil {
WriteError(w, errJson, nil)
return
}
var respJson []byte
if !GrpcResponseIsEmpty(grpcRespBody) {
respHasError, errJson := HandleGrpcResponseError(endpoint.Err, grpcResp, grpcRespBody, w)
if errJson != nil {
WriteError(w, errJson, nil)
return
}
if respHasError {
return
}
var resp interface{}
if req.Method == "GET" {
resp = endpoint.GetResponse
} else {
resp = endpoint.PostResponse
}
if errJson := deserializeGrpcResponseBodyIntoContainerWrapped(endpoint, grpcRespBody, resp); errJson != nil {
WriteError(w, errJson, nil)
return
}
if errJson := ProcessMiddlewareResponseFields(resp); errJson != nil {
WriteError(w, errJson, nil)
return
}
respJson, errJson = serializeMiddlewareResponseIntoJsonWrapped(endpoint, respJson, resp)
if errJson != nil {
WriteError(w, errJson, nil)
return
}
}
if errJson := WriteMiddlewareResponseHeadersAndBody(grpcResp, respJson, w); errJson != nil {
WriteError(w, errJson, nil)
return
}
if errJson := Cleanup(grpcResp.Body); errJson != nil {
WriteError(w, errJson, nil)
return
}
})
}
func deserializeRequestBodyIntoContainerWrapped(endpoint *Endpoint, req *http.Request, w http.ResponseWriter) ErrorJson {
runDefault := true
if endpoint.Hooks.OnPreDeserializeRequestBodyIntoContainer != nil {
run, errJson := endpoint.Hooks.OnPreDeserializeRequestBodyIntoContainer(endpoint, w, req)
if errJson != nil {
return errJson
}
if !run {
runDefault = false
}
}
if runDefault {
if errJson := DeserializeRequestBodyIntoContainer(req.Body, endpoint.PostRequest); errJson != nil {
return errJson
}
}
if endpoint.Hooks.OnPostDeserializeRequestBodyIntoContainer != nil {
if errJson := endpoint.Hooks.OnPostDeserializeRequestBodyIntoContainer(endpoint, w, req); errJson != nil {
return errJson
}
}
return nil
}
func deserializeGrpcResponseBodyIntoContainerWrapped(endpoint *Endpoint, grpcResponseBody []byte, resp interface{}) ErrorJson {
runDefault := true
if endpoint.Hooks.OnPreDeserializeGrpcResponseBodyIntoContainer != nil {
run, errJson := endpoint.Hooks.OnPreDeserializeGrpcResponseBodyIntoContainer(grpcResponseBody, resp)
if errJson != nil {
return errJson
}
if !run {
runDefault = false
}
}
if runDefault {
if errJson := DeserializeGrpcResponseBodyIntoContainer(grpcResponseBody, resp); errJson != nil {
return errJson
}
}
return nil
}
func serializeMiddlewareResponseIntoJsonWrapped(endpoint *Endpoint, respJson []byte, resp interface{}) ([]byte, ErrorJson) {
runDefault := true
var errJson ErrorJson
if endpoint.Hooks.OnPreSerializeMiddlewareResponseIntoJson != nil {
var run RunDefault
run, respJson, errJson = endpoint.Hooks.OnPreSerializeMiddlewareResponseIntoJson(resp)
if errJson != nil {
return nil, errJson
}
if !run {
runDefault = false
}
}
if runDefault {
respJson, errJson = SerializeMiddlewareResponseIntoJson(resp)
if errJson != nil {
return nil, errJson
}
}
return respJson, nil
}
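Taken together, `EndpointFactory`, `Endpoint`, and `ApiProxyMiddleware.Run` are all that is needed to stand the proxy up in front of grpc-gateway. A minimal sketch, assuming the package is importable at the path declared in the BUILD file above; the endpoint path, response struct, and addresses are illustrative only:

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
	"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
)

// exampleFactory serves a single illustrative GET endpoint.
type exampleFactory struct{}

// versionResponse is a made-up response container for the sketch.
type versionResponse struct {
	Version string `json:"version"`
}

func (f *exampleFactory) Paths() []string { return []string{"/eth/v1/node/version"} }
func (f *exampleFactory) IsNil() bool     { return f == nil }
func (f *exampleFactory) Create(path string) (*apimiddleware.Endpoint, error) {
	e := apimiddleware.DefaultEndpoint() // DefaultErrorJson error handling
	e.Path = path
	e.GetResponse = &versionResponse{}
	return &e, nil
}

func main() {
	proxy := &apimiddleware.ApiProxyMiddleware{
		GatewayAddress:  "127.0.0.1:8080", // where grpc-gateway listens (illustrative)
		EndpointCreator: &exampleFactory{},
	}
	router := mux.NewRouter()
	proxy.Run(router) // registers a handler for every path the factory reports
	_ = http.ListenAndServe("127.0.0.1:8081", router)
}
```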

View File

@@ -1,5 +0,0 @@
package apimiddleware
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "apimiddleware")

View File

@@ -1,103 +0,0 @@
package apimiddleware
import (
"encoding/base64"
"net/http"
"net/url"
"strings"
"github.com/gorilla/mux"
butil "github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/wealdtech/go-bytesutil"
)
// HandleURLParameters processes URL parameters, allowing parameterized URLs to be safely and correctly proxied to grpc-gateway.
func HandleURLParameters(url string, req *http.Request, literals []string) ErrorJson {
segments := strings.Split(url, "/")
segmentsLoop:
for i, s := range segments {
// We only care about segments which are parameterized.
if isRequestParam(s) {
// Don't do anything with parameters which should be forwarded literally to gRPC.
for _, l := range literals {
if s == "{"+l+"}" {
continue segmentsLoop
}
}
routeVar := mux.Vars(req)[s[1:len(s)-1]]
bRouteVar := []byte(routeVar)
if butil.IsHex(bRouteVar) {
var err error
bRouteVar, err = bytesutil.FromHexString(string(bRouteVar))
if err != nil {
return InternalServerErrorWithMessage(err, "could not process URL parameter")
}
}
// Converting hex to base64 may result in a value which malforms the URL.
// We use URLEncoding to safely escape such values.
base64RouteVar := base64.URLEncoding.EncodeToString(bRouteVar)
// Merge segments back into the full URL.
splitPath := strings.Split(req.URL.Path, "/")
splitPath[i] = base64RouteVar
req.URL.Path = strings.Join(splitPath, "/")
}
}
return nil
}
// HandleQueryParameters processes query parameters, allowing them to be safely and correctly proxied to grpc-gateway.
func HandleQueryParameters(req *http.Request, params []QueryParam) ErrorJson {
queryParams := req.URL.Query()
normalizeQueryValues(queryParams)
for key, vals := range queryParams {
for _, p := range params {
if key == p.Name {
if p.Hex {
queryParams.Del(key)
for _, v := range vals {
b := []byte(v)
if butil.IsHex(b) {
var err error
b, err = bytesutil.FromHexString(v)
if err != nil {
return InternalServerErrorWithMessage(err, "could not process query parameter")
}
}
queryParams.Add(key, base64.URLEncoding.EncodeToString(b))
}
}
if p.Enum {
queryParams.Del(key)
for _, v := range vals {
// gRPC expects uppercase enum values.
queryParams.Add(key, strings.ToUpper(v))
}
}
}
}
}
req.URL.RawQuery = queryParams.Encode()
return nil
}
// isRequestParam verifies whether the passed string is a request parameter.
// Request parameters are enclosed in { and }.
func isRequestParam(s string) bool {
return len(s) > 2 && s[0] == '{' && s[len(s)-1] == '}'
}
func normalizeQueryValues(queryParams url.Values) {
// Replace comma-separated values with individual values.
for key, vals := range queryParams {
splitVals := make([]string, 0)
for _, v := range vals {
splitVals = append(splitVals, strings.Split(v, ",")...)
}
queryParams[key] = splitVals
}
}

View File

@@ -1,124 +0,0 @@
package apimiddleware
import (
"bytes"
"net/http/httptest"
"testing"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestHandleURLParameters(t *testing.T) {
var body bytes.Buffer
t.Run("no_params", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example/bar", &body)
errJson := HandleURLParameters("/not_param", request, []string{})
require.Equal(t, true, errJson == nil)
assert.Equal(t, "/bar", request.URL.Path)
})
t.Run("with_params", func(t *testing.T) {
muxVars := make(map[string]string)
muxVars["bar_param"] = "bar"
muxVars["quux_param"] = "quux"
request := httptest.NewRequest("GET", "http://foo.example/bar/baz/quux", &body)
request = mux.SetURLVars(request, muxVars)
errJson := HandleURLParameters("/{bar_param}/not_param/{quux_param}", request, []string{})
require.Equal(t, true, errJson == nil)
assert.Equal(t, "/YmFy/baz/cXV1eA==", request.URL.Path)
})
t.Run("with_literal", func(t *testing.T) {
muxVars := make(map[string]string)
muxVars["bar_param"] = "bar"
request := httptest.NewRequest("GET", "http://foo.example/bar/baz", &body)
request = mux.SetURLVars(request, muxVars)
errJson := HandleURLParameters("/{bar_param}/not_param/", request, []string{"bar_param"})
require.Equal(t, true, errJson == nil)
assert.Equal(t, "/bar/baz", request.URL.Path)
})
t.Run("with_hex", func(t *testing.T) {
muxVars := make(map[string]string)
muxVars["hex_param"] = "0x626172"
request := httptest.NewRequest("GET", "http://foo.example/0x626172/baz", &body)
request = mux.SetURLVars(request, muxVars)
errJson := HandleURLParameters("/{hex_param}/not_param/", request, []string{})
require.Equal(t, true, errJson == nil)
assert.Equal(t, "/YmFy/baz", request.URL.Path)
})
}
func TestHandleQueryParameters(t *testing.T) {
var body bytes.Buffer
t.Run("regular_params", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example?bar=bar&baz=baz", &body)
errJson := HandleQueryParameters(request, []QueryParam{{Name: "bar"}, {Name: "baz"}})
require.Equal(t, true, errJson == nil)
query := request.URL.Query()
v, ok := query["bar"]
require.Equal(t, true, ok, "query param not found")
require.Equal(t, 1, len(v), "wrong number of query param values")
assert.Equal(t, "bar", v[0])
v, ok = query["baz"]
require.Equal(t, true, ok, "query param not found")
require.Equal(t, 1, len(v), "wrong number of query param values")
assert.Equal(t, "baz", v[0])
})
t.Run("hex_and_enum_params", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example?hex=0x626172&baz=baz", &body)
errJson := HandleQueryParameters(request, []QueryParam{{Name: "hex", Hex: true}, {Name: "baz", Enum: true}})
require.Equal(t, true, errJson == nil)
query := request.URL.Query()
v, ok := query["hex"]
require.Equal(t, true, ok, "query param not found")
require.Equal(t, 1, len(v), "wrong number of query param values")
assert.Equal(t, "YmFy", v[0])
v, ok = query["baz"]
require.Equal(t, true, ok, "query param not found")
require.Equal(t, 1, len(v), "wrong number of query param values")
assert.Equal(t, "BAZ", v[0])
})
}
func TestIsRequestParam(t *testing.T) {
tests := []struct {
s string
b bool
}{
{"", false},
{"{", false},
{"}", false},
{"{}", false},
{"{x}", true},
{"{very_long_parameter_name_with_underscores}", true},
}
for _, tt := range tests {
b := isRequestParam(tt.s)
assert.Equal(t, tt.b, b)
}
}
func TestNormalizeQueryValues(t *testing.T) {
input := make(map[string][]string)
input["key"] = []string{"value1", "value2,value3,value4", "value5"}
normalizeQueryValues(input)
require.Equal(t, 5, len(input["key"]))
assert.Equal(t, "value1", input["key"][0])
assert.Equal(t, "value2", input["key"][1])
assert.Equal(t, "value3", input["key"][2])
assert.Equal(t, "value4", input["key"][3])
assert.Equal(t, "value5", input["key"][4])
}

View File

@@ -1,108 +0,0 @@
package apimiddleware
import (
"encoding/base64"
"fmt"
"reflect"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/wealdtech/go-bytesutil"
)
// processField calls each processor function on any field that has the matching tag set.
// It is a recursive function.
func processField(s interface{}, processors []fieldProcessor) error {
kind := reflect.TypeOf(s).Kind()
if kind != reflect.Ptr && kind != reflect.Slice && kind != reflect.Array {
return fmt.Errorf("processing fields of kind '%v' is unsupported", kind)
}
t := reflect.TypeOf(s).Elem()
v := reflect.Indirect(reflect.ValueOf(s))
for i := 0; i < t.NumField(); i++ {
switch v.Field(i).Kind() {
case reflect.Slice:
sliceElem := t.Field(i).Type.Elem()
kind := sliceElem.Kind()
// Recursively process slices of struct pointers.
if kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct {
for j := 0; j < v.Field(i).Len(); j++ {
if err := processField(v.Field(i).Index(j).Interface(), processors); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}
// Process each string in string slices.
if kind == reflect.String {
for _, proc := range processors {
_, hasTag := t.Field(i).Tag.Lookup(proc.tag)
if hasTag {
for j := 0; j < v.Field(i).Len(); j++ {
if err := proc.f(v.Field(i).Index(j)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}
}
}
// Recursively process struct pointers.
case reflect.Ptr:
if v.Field(i).Elem().Kind() == reflect.Struct {
if err := processField(v.Field(i).Interface(), processors); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
default:
field := t.Field(i)
for _, proc := range processors {
if _, hasTag := field.Tag.Lookup(proc.tag); hasTag {
if err := proc.f(v.Field(i)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}
}
}
return nil
}
func hexToBase64Processor(v reflect.Value) error {
b, err := bytesutil.FromHexString(v.String())
if err != nil {
return err
}
v.SetString(base64.StdEncoding.EncodeToString(b))
return nil
}
func base64ToHexProcessor(v reflect.Value) error {
if v.String() == "" {
return nil
}
b, err := base64.StdEncoding.DecodeString(v.String())
if err != nil {
return err
}
v.SetString(hexutil.Encode(b))
return nil
}
func enumToLowercaseProcessor(v reflect.Value) error {
v.SetString(strings.ToLower(v.String()))
return nil
}
func timeToUnixProcessor(v reflect.Value) error {
t, err := time.Parse(time.RFC3339, v.String())
if err != nil {
return err
}
v.SetString(strconv.FormatUint(uint64(t.Unix()), 10))
return nil
}
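The processors above are driven purely by struct tags (`hex`, `enum`, `time`; the tag's presence matters, not its value). An in-package sketch of what a round trip over a tagged container looks like; the struct and its values are made up for illustration:

```go
// Sketch (would live inside package apimiddleware, since processField is unexported).
type blockResponseJson struct {
	Root      string `json:"root" hex:"true"`       // base64 <-> "0x..." hex
	Status    string `json:"status" enum:"true"`    // case normalization
	Timestamp string `json:"timestamp" time:"true"` // RFC3339 -> unix seconds
}

func exampleProcess() error {
	resp := &blockResponseJson{
		Root:      "Zm9v", // base64 of "foo", as grpc-gateway returns it
		Status:    "PENDING",
		Timestamp: "2006-01-02T15:04:05Z",
	}
	// Same processor set that ProcessMiddlewareResponseFields wires up; afterwards
	// Root == "0x666f6f", Status == "pending", Timestamp == "1136214245".
	return processField(resp, []fieldProcessor{
		{tag: "hex", f: base64ToHexProcessor},
		{tag: "enum", f: enumToLowercaseProcessor},
		{tag: "time", f: timeToUnixProcessor},
	})
}
```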

View File

@@ -1,255 +0,0 @@
package apimiddleware
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/api/grpc"
)
// DeserializeRequestBodyIntoContainer deserializes the request's body into an endpoint-specific struct.
func DeserializeRequestBodyIntoContainer(body io.Reader, requestContainer interface{}) ErrorJson {
decoder := json.NewDecoder(body)
decoder.DisallowUnknownFields()
if err := decoder.Decode(&requestContainer); err != nil {
if strings.Contains(err.Error(), "json: unknown field") {
e := errors.Wrap(err, "could not decode request body")
return &DefaultErrorJson{
Message: e.Error(),
Code: http.StatusBadRequest,
}
}
return InternalServerErrorWithMessage(err, "could not decode request body")
}
return nil
}
// ProcessRequestContainerFields processes fields of an endpoint-specific container according to field tags.
func ProcessRequestContainerFields(requestContainer interface{}) ErrorJson {
if err := processField(requestContainer, []fieldProcessor{
{
tag: "hex",
f: hexToBase64Processor,
},
}); err != nil {
return InternalServerErrorWithMessage(err, "could not process request data")
}
return nil
}
// SetRequestBodyToRequestContainer makes the endpoint-specific container the new body of the request.
func SetRequestBodyToRequestContainer(requestContainer interface{}, req *http.Request) ErrorJson {
// Serialize the struct, which now includes a base64-encoded value, into JSON.
j, err := json.Marshal(requestContainer)
if err != nil {
return InternalServerErrorWithMessage(err, "could not marshal request")
}
// Set the body to the new JSON.
req.Body = ioutil.NopCloser(bytes.NewReader(j))
req.Header.Set("Content-Length", strconv.Itoa(len(j)))
req.ContentLength = int64(len(j))
return nil
}
// PrepareRequestForProxying applies additional logic to the request so that it can be correctly proxied to grpc-gateway.
func (m *ApiProxyMiddleware) PrepareRequestForProxying(endpoint Endpoint, req *http.Request) ErrorJson {
req.URL.Scheme = "http"
req.URL.Host = m.GatewayAddress
req.RequestURI = ""
if errJson := HandleURLParameters(endpoint.Path, req, endpoint.RequestURLLiterals); errJson != nil {
return errJson
}
if errJson := HandleQueryParameters(req, endpoint.RequestQueryParams); errJson != nil {
return errJson
}
// We have to add the prefix after handling parameters because adding the prefix changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path
return nil
}
// ProxyRequest proxies the request to grpc-gateway.
func ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
// We do not use http.DefaultClient because it does not have any timeout.
netClient := &http.Client{Timeout: time.Minute * 2}
grpcResp, err := netClient.Do(req)
if err != nil {
return nil, InternalServerErrorWithMessage(err, "could not proxy request")
}
if grpcResp == nil {
return nil, &DefaultErrorJson{Message: "nil response from gRPC-gateway", Code: http.StatusInternalServerError}
}
return grpcResp, nil
}
// ReadGrpcResponseBody reads the body from the grpc-gateway's response.
func ReadGrpcResponseBody(r io.Reader) ([]byte, ErrorJson) {
body, err := ioutil.ReadAll(r)
if err != nil {
return nil, InternalServerErrorWithMessage(err, "could not read response body")
}
return body, nil
}
// HandleGrpcResponseError acts on an error that resulted from a grpc-gateway's response.
func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []byte, w http.ResponseWriter) (bool, ErrorJson) {
responseHasError := false
if err := json.Unmarshal(respBody, errJson); err != nil {
return false, InternalServerErrorWithMessage(err, "could not unmarshal error")
}
if errJson.Msg() != "" {
responseHasError = true
// Something went wrong, but the request completed, meaning we can write headers and the error message.
for h, vs := range resp.Header {
for _, v := range vs {
w.Header().Set(h, v)
}
}
// Set code to HTTP code because unmarshalled body contained gRPC code.
errJson.SetCode(resp.StatusCode)
WriteError(w, errJson, resp.Header)
}
return responseHasError, nil
}
// GrpcResponseIsEmpty determines whether the grpc-gateway's response body contains no data.
func GrpcResponseIsEmpty(grpcResponseBody []byte) bool {
return len(grpcResponseBody) == 0 || string(grpcResponseBody) == "{}"
}
// DeserializeGrpcResponseBodyIntoContainer deserializes the grpc-gateway's response body into an endpoint-specific struct.
func DeserializeGrpcResponseBodyIntoContainer(body []byte, responseContainer interface{}) ErrorJson {
if err := json.Unmarshal(body, &responseContainer); err != nil {
return InternalServerErrorWithMessage(err, "could not unmarshal response")
}
return nil
}
// ProcessMiddlewareResponseFields processes fields of an endpoint-specific container according to field tags.
func ProcessMiddlewareResponseFields(responseContainer interface{}) ErrorJson {
if err := processField(responseContainer, []fieldProcessor{
{
tag: "hex",
f: base64ToHexProcessor,
},
{
tag: "enum",
f: enumToLowercaseProcessor,
},
{
tag: "time",
f: timeToUnixProcessor,
},
}); err != nil {
return InternalServerErrorWithMessage(err, "could not process response data")
}
return nil
}
// SerializeMiddlewareResponseIntoJson serializes the endpoint-specific response struct into a JSON representation.
func SerializeMiddlewareResponseIntoJson(responseContainer interface{}) (jsonResponse []byte, errJson ErrorJson) {
j, err := json.Marshal(responseContainer)
if err != nil {
return nil, InternalServerErrorWithMessage(err, "could not marshal response")
}
return j, nil
}
// WriteMiddlewareResponseHeadersAndBody populates headers and the body of the final response.
func WriteMiddlewareResponseHeadersAndBody(grpcResp *http.Response, responseJson []byte, w http.ResponseWriter) ErrorJson {
var statusCodeHeader string
for h, vs := range grpcResp.Header {
// We don't want to expose any gRPC metadata in the HTTP response, so we skip forwarding metadata headers.
if strings.HasPrefix(h, "Grpc-Metadata") {
if h == "Grpc-Metadata-"+grpc.HttpCodeMetadataKey {
statusCodeHeader = vs[0]
}
} else {
for _, v := range vs {
w.Header().Set(h, v)
}
}
}
if !GrpcResponseIsEmpty(responseJson) {
w.Header().Set("Content-Length", strconv.Itoa(len(responseJson)))
if statusCodeHeader != "" {
code, err := strconv.Atoi(statusCodeHeader)
if err != nil {
return InternalServerErrorWithMessage(err, "could not parse status code")
}
w.WriteHeader(code)
} else {
w.WriteHeader(grpcResp.StatusCode)
}
if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(responseJson))); err != nil {
return InternalServerErrorWithMessage(err, "could not write response message")
}
} else {
w.Header().Set("Content-Length", "0")
w.WriteHeader(grpcResp.StatusCode)
}
return nil
}
// WriteError writes the error by manipulating headers and the body of the final response.
func WriteError(w http.ResponseWriter, errJson ErrorJson, responseHeader http.Header) {
// Include custom error in the error JSON.
hasCustomError := false
if responseHeader != nil {
customError, ok := responseHeader["Grpc-Metadata-"+grpc.CustomErrorMetadataKey]
if ok {
hasCustomError = true
// Assume header has only one value and read the 0 index.
if err := json.Unmarshal([]byte(customError[0]), errJson); err != nil {
log.WithError(err).Error("Could not unmarshal custom error message")
return
}
}
}
var j []byte
if hasCustomError {
var err error
j, err = json.Marshal(errJson)
if err != nil {
log.WithError(err).Error("Could not marshal error message")
return
}
} else {
var err error
// We marshal the response body into a DefaultErrorJson if the custom error is not present.
// This is because the ErrorJson argument is the endpoint's error definition, which may contain custom fields.
// In such a scenario marshaling the endpoint's error would populate the resulting JSON
// with these fields even if they are not present in the gRPC header.
d := &DefaultErrorJson{
Message: errJson.Msg(),
Code: errJson.StatusCode(),
}
j, err = json.Marshal(d)
if err != nil {
log.WithError(err).Error("Could not marshal error message")
return
}
}
w.Header().Set("Content-Length", strconv.Itoa(len(j)))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(errJson.StatusCode())
if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(j))); err != nil {
log.WithError(err).Error("Could not write error message")
}
}
// Cleanup performs final cleanup on the initial response from grpc-gateway.
func Cleanup(grpcResponseBody io.ReadCloser) ErrorJson {
if err := grpcResponseBody.Close(); err != nil {
return InternalServerErrorWithMessage(err, "could not close response body")
}
return nil
}

View File

@@ -1,409 +0,0 @@
package apimiddleware
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/prysmaticlabs/prysm/api/grpc"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/sirupsen/logrus/hooks/test"
)
type testRequestContainer struct {
TestString string
TestHexString string `hex:"true"`
}
func defaultRequestContainer() *testRequestContainer {
return &testRequestContainer{
TestString: "test string",
TestHexString: "0x666F6F", // hex encoding of "foo"
}
}
type testResponseContainer struct {
TestString string
TestHex string `hex:"true"`
TestEnum string `enum:"true"`
TestTime string `time:"true"`
}
func defaultResponseContainer() *testResponseContainer {
return &testResponseContainer{
TestString: "test string",
TestHex: "Zm9v", // base64 encoding of "foo"
TestEnum: "Test Enum",
TestTime: "2006-01-02T15:04:05Z",
}
}
type testErrorJson struct {
Message string
Code int
CustomField string
}
// StatusCode returns the error's underlying error code.
func (e *testErrorJson) StatusCode() int {
return e.Code
}
// Msg returns the error's underlying message.
func (e *testErrorJson) Msg() string {
return e.Message
}
// SetCode sets the error's underlying error code.
func (e *testErrorJson) SetCode(code int) {
e.Code = code
}
// SetMsg sets the error's underlying message.
func (e *testErrorJson) SetMsg(msg string) {
e.Message = msg
}
func TestDeserializeRequestBodyIntoContainer(t *testing.T) {
t.Run("ok", func(t *testing.T) {
var bodyJson bytes.Buffer
err := json.NewEncoder(&bodyJson).Encode(defaultRequestContainer())
require.NoError(t, err)
container := &testRequestContainer{}
errJson := DeserializeRequestBodyIntoContainer(&bodyJson, container)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "test string", container.TestString)
})
t.Run("error", func(t *testing.T) {
var bodyJson bytes.Buffer
bodyJson.Write([]byte("foo"))
errJson := DeserializeRequestBodyIntoContainer(&bodyJson, &testRequestContainer{})
require.NotNil(t, errJson)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode request body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
t.Run("unknown field", func(t *testing.T) {
var bodyJson bytes.Buffer
bodyJson.Write([]byte("{\"foo\":\"foo\"}"))
errJson := DeserializeRequestBodyIntoContainer(&bodyJson, &testRequestContainer{})
require.NotNil(t, errJson)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode request body"))
assert.Equal(t, http.StatusBadRequest, errJson.StatusCode())
})
}
func TestProcessRequestContainerFields(t *testing.T) {
t.Run("ok", func(t *testing.T) {
container := defaultRequestContainer()
errJson := ProcessRequestContainerFields(container)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "Zm9v", container.TestHexString)
})
t.Run("error", func(t *testing.T) {
errJson := ProcessRequestContainerFields("foo")
require.NotNil(t, errJson)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not process request data"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestSetRequestBodyToRequestContainer(t *testing.T) {
var body bytes.Buffer
request := httptest.NewRequest("GET", "http://foo.example", &body)
errJson := SetRequestBodyToRequestContainer(defaultRequestContainer(), request)
require.Equal(t, true, errJson == nil)
container := &testRequestContainer{}
require.NoError(t, json.NewDecoder(request.Body).Decode(container))
assert.Equal(t, "test string", container.TestString)
contentLengthHeader, ok := request.Header["Content-Length"]
require.Equal(t, true, ok)
require.Equal(t, 1, len(contentLengthHeader), "wrong number of header values")
assert.Equal(t, "55", contentLengthHeader[0])
assert.Equal(t, int64(55), request.ContentLength)
}
func TestPrepareRequestForProxying(t *testing.T) {
middleware := &ApiProxyMiddleware{
GatewayAddress: "http://gateway.example",
}
// We will set some params to make the request more interesting.
endpoint := Endpoint{
Path: "/{url_param}",
RequestURLLiterals: []string{"url_param"},
RequestQueryParams: []QueryParam{{Name: "query_param"}},
}
var body bytes.Buffer
request := httptest.NewRequest("GET", "http://foo.example?query_param=bar", &body)
errJson := middleware.PrepareRequestForProxying(endpoint, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "http", request.URL.Scheme)
assert.Equal(t, middleware.GatewayAddress, request.URL.Host)
assert.Equal(t, "", request.RequestURI)
}
func TestReadGrpcResponseBody(t *testing.T) {
var b bytes.Buffer
b.Write([]byte("foo"))
body, jsonErr := ReadGrpcResponseBody(&b)
require.Equal(t, true, jsonErr == nil)
assert.Equal(t, "foo", string(body))
}
func TestHandleGrpcResponseError(t *testing.T) {
response := &http.Response{
StatusCode: 400,
Header: http.Header{
"Foo": []string{"foo"},
"Bar": []string{"bar"},
},
}
writer := httptest.NewRecorder()
errJson := &testErrorJson{
Message: "foo",
Code: 400,
}
b, err := json.Marshal(errJson)
require.NoError(t, err)
hasError, e := HandleGrpcResponseError(errJson, response, b, writer)
require.Equal(t, true, e == nil)
assert.Equal(t, true, hasError)
v, ok := writer.Header()["Foo"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "foo", v[0])
v, ok = writer.Header()["Bar"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "bar", v[0])
assert.Equal(t, 400, errJson.StatusCode())
}
func TestGrpcResponseIsEmpty(t *testing.T) {
t.Run("nil", func(t *testing.T) {
assert.Equal(t, true, GrpcResponseIsEmpty(nil))
})
t.Run("empty_slice", func(t *testing.T) {
assert.Equal(t, true, GrpcResponseIsEmpty(make([]byte, 0)))
})
t.Run("empty_brackets", func(t *testing.T) {
assert.Equal(t, true, GrpcResponseIsEmpty([]byte("{}")))
})
t.Run("non_empty", func(t *testing.T) {
assert.Equal(t, false, GrpcResponseIsEmpty([]byte("{\"foo\":\"bar\"})")))
})
}
func TestDeserializeGrpcResponseBodyIntoContainer(t *testing.T) {
t.Run("ok", func(t *testing.T) {
body, err := json.Marshal(defaultRequestContainer())
require.NoError(t, err)
container := &testRequestContainer{}
errJson := DeserializeGrpcResponseBodyIntoContainer(body, container)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "test string", container.TestString)
})
t.Run("error", func(t *testing.T) {
var bodyJson bytes.Buffer
bodyJson.Write([]byte("foo"))
errJson := DeserializeGrpcResponseBodyIntoContainer(bodyJson.Bytes(), &testRequestContainer{})
require.NotNil(t, errJson)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not unmarshal response"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestProcessMiddlewareResponseFields(t *testing.T) {
t.Run("Ok", func(t *testing.T) {
container := defaultResponseContainer()
errJson := ProcessMiddlewareResponseFields(container)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "0x666f6f", container.TestHex)
assert.Equal(t, "test enum", container.TestEnum)
assert.Equal(t, "1136214245", container.TestTime)
})
t.Run("error", func(t *testing.T) {
errJson := ProcessMiddlewareResponseFields("foo")
require.NotNil(t, errJson)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not process response data"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestSerializeMiddlewareResponseIntoJson(t *testing.T) {
container := defaultResponseContainer()
j, errJson := SerializeMiddlewareResponseIntoJson(container)
assert.Equal(t, true, errJson == nil)
cToDeserialize := &testResponseContainer{}
require.NoError(t, json.Unmarshal(j, cToDeserialize))
assert.Equal(t, "test string", cToDeserialize.TestString)
}
func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
t.Run("GET", func(t *testing.T) {
response := &http.Response{
Header: http.Header{
"Foo": []string{"foo"},
"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"204"},
},
}
container := defaultResponseContainer()
responseJson, err := json.Marshal(container)
require.NoError(t, err)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
require.Equal(t, true, errJson == nil)
v, ok := writer.Header()["Foo"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "foo", v[0])
v, ok = writer.Header()["Content-Length"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "102", v[0])
assert.Equal(t, 204, writer.Code)
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
})
t.Run("GET_no_grpc_status_code_header", func(t *testing.T) {
response := &http.Response{
Header: http.Header{},
StatusCode: 204,
}
container := defaultResponseContainer()
responseJson, err := json.Marshal(container)
require.NoError(t, err)
writer := httptest.NewRecorder()
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
require.Equal(t, true, errJson == nil)
assert.Equal(t, 204, writer.Code)
})
t.Run("GET_invalid_status_code", func(t *testing.T) {
response := &http.Response{
Header: http.Header{},
}
// Set invalid status code.
response.Header["Grpc-Metadata-"+grpc.HttpCodeMetadataKey] = []string{"invalid"}
container := defaultResponseContainer()
responseJson, err := json.Marshal(container)
require.NoError(t, err)
writer := httptest.NewRecorder()
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
require.Equal(t, false, errJson == nil)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not parse status code"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
t.Run("POST", func(t *testing.T) {
response := &http.Response{
Header: http.Header{},
StatusCode: 204,
}
container := defaultResponseContainer()
responseJson, err := json.Marshal(container)
require.NoError(t, err)
writer := httptest.NewRecorder()
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
require.Equal(t, true, errJson == nil)
assert.Equal(t, 204, writer.Code)
})
t.Run("POST_with_response_body", func(t *testing.T) {
response := &http.Response{
Header: http.Header{},
StatusCode: 204,
}
container := defaultResponseContainer()
responseJson, err := json.Marshal(container)
require.NoError(t, err)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
require.Equal(t, true, errJson == nil)
assert.Equal(t, 204, writer.Code)
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
})
t.Run("POST_with_empty_json_body", func(t *testing.T) {
response := &http.Response{
Header: http.Header{},
StatusCode: 204,
}
responseJson, err := json.Marshal(struct{}{})
require.NoError(t, err)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
require.Equal(t, true, errJson == nil)
assert.Equal(t, 204, writer.Code)
assert.DeepEqual(t, []byte(nil), writer.Body.Bytes())
assert.Equal(t, "0", writer.Header()["Content-Length"][0])
})
}
func TestWriteError(t *testing.T) {
t.Run("ok", func(t *testing.T) {
responseHeader := http.Header{
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"{\"CustomField\":\"bar\"}"},
}
errJson := &testErrorJson{
Message: "foo",
Code: 500,
}
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
WriteError(writer, errJson, responseHeader)
v, ok := writer.Header()["Content-Length"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "48", v[0])
v, ok = writer.Header()["Content-Type"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "application/json", v[0])
assert.Equal(t, 500, writer.Code)
eDeserialize := &testErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), eDeserialize))
assert.Equal(t, "foo", eDeserialize.Message)
assert.Equal(t, 500, eDeserialize.Code)
assert.Equal(t, "bar", eDeserialize.CustomField)
})
t.Run("invalid_custom_error_header", func(t *testing.T) {
logHook := test.NewGlobal()
responseHeader := http.Header{
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"invalid"},
}
WriteError(httptest.NewRecorder(), &testErrorJson{}, responseHeader)
assert.LogsContain(t, logHook, "Could not unmarshal custom error message")
})
}
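Taken together, these tests cover the full response path of the API middleware. Below is a minimal sketch of how the helpers chain together outside the test harness; the container type, handler name, and error handling are illustrative assumptions rather than part of the diff.
package example
import (
	"net/http"
	"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
)
// exampleContainer is a hypothetical response container; real endpoints define
// their own containers with the json/hex/enum/time tags the middleware expects.
type exampleContainer struct {
	TestString string `json:"test_string"`
}
// handleGrpcResponse sketches the deserialize -> process -> serialize -> write
// chain that the tests above exercise. grpcBody is assumed to be the raw body
// returned by grpc-gateway for this endpoint.
func handleGrpcResponse(w http.ResponseWriter, resp *http.Response, grpcBody []byte) {
	container := &exampleContainer{}
	if errJson := apimiddleware.DeserializeGrpcResponseBodyIntoContainer(grpcBody, container); errJson != nil {
		apimiddleware.WriteError(w, errJson, resp.Header)
		return
	}
	if errJson := apimiddleware.ProcessMiddlewareResponseFields(container); errJson != nil {
		apimiddleware.WriteError(w, errJson, resp.Header)
		return
	}
	j, errJson := apimiddleware.SerializeMiddlewareResponseIntoJson(container)
	if errJson != nil {
		apimiddleware.WriteError(w, errJson, resp.Header)
		return
	}
	if errJson := apimiddleware.WriteMiddlewareResponseHeadersAndBody(resp, j, w); errJson != nil {
		apimiddleware.WriteError(w, errJson, resp.Header)
	}
}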

View File

@@ -1,62 +0,0 @@
package apimiddleware
import (
"net/http"
"github.com/pkg/errors"
)
// ---------------
// Error handling.
// ---------------
// ErrorJson describes common functionality of all JSON error representations.
type ErrorJson interface {
StatusCode() int
SetCode(code int)
Msg() string
SetMsg(msg string)
}
// DefaultErrorJson is a JSON representation of a simple error value, containing only a message and an error code.
type DefaultErrorJson struct {
Message string `json:"message"`
Code int `json:"code"`
}
// InternalServerErrorWithMessage returns a DefaultErrorJson with 500 code and a custom message.
func InternalServerErrorWithMessage(err error, message string) *DefaultErrorJson {
e := errors.Wrapf(err, message)
return &DefaultErrorJson{
Message: e.Error(),
Code: http.StatusInternalServerError,
}
}
// InternalServerError returns a DefaultErrorJson with 500 code.
func InternalServerError(err error) *DefaultErrorJson {
return &DefaultErrorJson{
Message: err.Error(),
Code: http.StatusInternalServerError,
}
}
// StatusCode returns the error's underlying error code.
func (e *DefaultErrorJson) StatusCode() int {
return e.Code
}
// Msg returns the error's underlying message.
func (e *DefaultErrorJson) Msg() string {
return e.Message
}
// SetCode sets the error's underlying error code.
func (e *DefaultErrorJson) SetCode(code int) {
e.Code = code
}
// SetMsg sets the error's underlying message.
func (e *DefaultErrorJson) SetMsg(msg string) {
e.Message = msg
}
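For context, a hedged sketch of extending DefaultErrorJson with an extra field while still satisfying ErrorJson; the CustomField name mirrors the test fixture earlier in this diff and is otherwise hypothetical.
package example
import (
	"net/http"
	"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
)
// customErrorJson embeds DefaultErrorJson, so it inherits the ErrorJson
// behaviour (StatusCode/SetCode/Msg/SetMsg) while carrying extra data.
type customErrorJson struct {
	apimiddleware.DefaultErrorJson
	CustomField string `json:"custom_field"`
}
// Compile-time check that the promoted methods satisfy the interface.
var _ apimiddleware.ErrorJson = (*customErrorJson)(nil)
// newBadRequest builds a 400 error value using the embedded setters.
func newBadRequest(msg string) *customErrorJson {
	e := &customErrorJson{}
	e.SetCode(http.StatusBadRequest)
	e.SetMsg(msg)
	return e
}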

View File

@@ -1,281 +0,0 @@
// Package gateway defines a grpc-gateway server that serves HTTP-JSON traffic and acts a proxy between HTTP and gRPC.
package gateway
import (
"context"
"fmt"
"net"
"net/http"
"path"
"strings"
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/runtime"
"github.com/rs/cors"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
)
var _ runtime.Service = (*Gateway)(nil)
// PbMux serves grpc-gateway requests for selected patterns using registered protobuf handlers.
type PbMux struct {
Registrations []PbHandlerRegistration // Protobuf registrations to be registered in Mux.
Patterns []string // URL patterns that will be handled by Mux.
Mux *gwruntime.ServeMux // The router that will be used for grpc-gateway requests.
}
// PbHandlerRegistration is a function that registers a protobuf handler.
type PbHandlerRegistration func(context.Context, *gwruntime.ServeMux, *grpc.ClientConn) error
// MuxHandler is a function that implements the mux handler functionality.
type MuxHandler func(http.Handler, http.ResponseWriter, *http.Request)
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
type Gateway struct {
conn *grpc.ClientConn
pbHandlers []*PbMux
muxHandler MuxHandler
maxCallRecvMsgSize uint64
router *mux.Router
server *http.Server
cancel context.CancelFunc
remoteCert string
gatewayAddr string
apiMiddlewareEndpointFactory apimiddleware.EndpointFactory
ctx context.Context
startFailure error
remoteAddr string
allowedOrigins []string
}
// New returns a new instance of the Gateway.
func New(
ctx context.Context,
pbHandlers []*PbMux,
muxHandler MuxHandler,
remoteAddr,
gatewayAddress string,
) *Gateway {
g := &Gateway{
pbHandlers: pbHandlers,
muxHandler: muxHandler,
router: mux.NewRouter(),
gatewayAddr: gatewayAddress,
ctx: ctx,
remoteAddr: remoteAddr,
allowedOrigins: []string{},
}
return g
}
// WithRouter allows adding a custom mux router to the gateway.
func (g *Gateway) WithRouter(r *mux.Router) *Gateway {
g.router = r
return g
}
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
func (g *Gateway) WithAllowedOrigins(origins []string) *Gateway {
g.allowedOrigins = origins
return g
}
// WithRemoteCert allows adding a custom certificate to the gateway.
func (g *Gateway) WithRemoteCert(cert string) *Gateway {
g.remoteCert = cert
return g
}
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
func (g *Gateway) WithMaxCallRecvMsgSize(size uint64) *Gateway {
g.maxCallRecvMsgSize = size
return g
}
// WithApiMiddleware allows adding API Middleware proxy to the gateway.
func (g *Gateway) WithApiMiddleware(endpointFactory apimiddleware.EndpointFactory) *Gateway {
g.apiMiddlewareEndpointFactory = endpointFactory
return g
}
// Start the gateway service.
func (g *Gateway) Start() {
ctx, cancel := context.WithCancel(g.ctx)
g.cancel = cancel
conn, err := g.dial(ctx, "tcp", g.remoteAddr)
if err != nil {
log.WithError(err).Error("Failed to connect to gRPC server")
g.startFailure = err
return
}
g.conn = conn
for _, h := range g.pbHandlers {
for _, r := range h.Registrations {
if err := r(ctx, h.Mux, g.conn); err != nil {
log.WithError(err).Error("Failed to register handler")
g.startFailure = err
return
}
}
for _, p := range h.Patterns {
g.router.PathPrefix(p).Handler(h.Mux)
}
}
corsMux := g.corsMiddleware(g.router)
if g.muxHandler != nil {
g.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
g.muxHandler(corsMux, w, r)
})
}
if g.apiMiddlewareEndpointFactory != nil && !g.apiMiddlewareEndpointFactory.IsNil() {
go g.registerApiMiddleware()
}
g.server = &http.Server{
Addr: g.gatewayAddr,
Handler: g.router,
}
go func() {
log.WithField("address", g.gatewayAddr).Info("Starting gRPC gateway")
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
log.WithError(err).Error("Failed to start gRPC gateway")
g.startFailure = err
return
}
}()
}
// Status of grpc gateway. Returns an error if this service is unhealthy.
func (g *Gateway) Status() error {
if g.startFailure != nil {
return g.startFailure
}
if s := g.conn.GetState(); s != connectivity.Ready {
return fmt.Errorf("grpc server is %s", s)
}
return nil
}
// Stop the gateway with a graceful shutdown.
func (g *Gateway) Stop() error {
if g.server != nil {
shutdownCtx, shutdownCancel := context.WithTimeout(g.ctx, 2*time.Second)
defer shutdownCancel()
if err := g.server.Shutdown(shutdownCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
log.Warn("Existing connections terminated")
} else {
log.WithError(err).Error("Failed to gracefully shut down server")
}
}
}
if g.cancel != nil {
g.cancel()
}
return nil
}
func (g *Gateway) corsMiddleware(h http.Handler) http.Handler {
c := cors.New(cors.Options{
AllowedOrigins: g.allowedOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodOptions},
AllowCredentials: true,
MaxAge: 600,
AllowedHeaders: []string{"*"},
})
return c.Handler(h)
}
const swaggerDir = "proto/prysm/v1alpha1/"
// SwaggerServer returns swagger specification files located under "/swagger/"
func SwaggerServer() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !strings.HasSuffix(r.URL.Path, ".swagger.json") {
log.Debugf("Not found: %s", r.URL.Path)
http.NotFound(w, r)
return
}
log.Debugf("Serving %s\n", r.URL.Path)
p := strings.TrimPrefix(r.URL.Path, "/swagger/")
p = path.Join(swaggerDir, p)
http.ServeFile(w, r, p)
}
}
// dial the gRPC server.
func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientConn, error) {
switch network {
case "tcp":
return g.dialTCP(ctx, addr)
case "unix":
return g.dialUnix(ctx, addr)
default:
return nil, fmt.Errorf("unsupported network type %q", network)
}
}
// dialTCP creates a client connection via TCP.
// "addr" must be a valid TCP address with a port number.
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
security := grpc.WithInsecure()
if len(g.remoteCert) > 0 {
creds, err := credentials.NewClientTLSFromFile(g.remoteCert, "")
if err != nil {
return nil, err
}
security = grpc.WithTransportCredentials(creds)
}
opts := []grpc.DialOption{
security,
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}
// dialUnix creates a client connection via a unix domain socket.
// "addr" must be a valid path to the socket.
func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn, error) {
d := func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", addr, timeout)
}
f := func(ctx context.Context, addr string) (net.Conn, error) {
if deadline, ok := ctx.Deadline(); ok {
return d(addr, time.Until(deadline))
}
return d(addr, 0)
}
opts := []grpc.DialOption{
grpc.WithInsecure(),
grpc.WithContextDialer(f),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}
func (g *Gateway) registerApiMiddleware() {
proxy := &apimiddleware.ApiProxyMiddleware{
GatewayAddress: g.gatewayAddr,
EndpointCreator: g.apiMiddlewareEndpointFactory,
}
log.Info("Starting API middleware")
proxy.Run(g.router)
}
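To make the builder-style options above concrete, here is a hedged sketch of wiring up a Gateway at startup; the addresses, message size limit, and muxHandler body are placeholder assumptions, and the github.com/prysmaticlabs/prysm/api/gateway import path is inferred from the apimiddleware path shown above.
package example
import (
	"context"
	"net/http"
	"github.com/prysmaticlabs/prysm/api/gateway"
)
// startGateway builds a Gateway pointing at a local gRPC server and serves
// HTTP-JSON on a separate port. pbMuxes would come from the generated
// grpc-gateway registration functions.
func startGateway(ctx context.Context, pbMuxes []*gateway.PbMux) *gateway.Gateway {
	muxHandler := func(h http.Handler, w http.ResponseWriter, r *http.Request) {
		// Forward everything to the CORS-wrapped router.
		h.ServeHTTP(w, r)
	}
	g := gateway.New(
		ctx,
		pbMuxes,
		muxHandler,
		"127.0.0.1:4000", // remote gRPC address (placeholder)
		"127.0.0.1:3500", // HTTP gateway listen address (placeholder)
	).
		WithAllowedOrigins([]string{"*"}).
		WithMaxCallRecvMsgSize(1 << 22) // arbitrary example limit (~4 MiB)
	g.Start()
	return g
}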

View File

@@ -1,120 +0,0 @@
package gateway
import (
"context"
"flag"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)
type mockEndpointFactory struct {
}
func (*mockEndpointFactory) Paths() []string {
return []string{}
}
func (*mockEndpointFactory) Create(_ string) (*apimiddleware.Endpoint, error) {
return nil, nil
}
func (*mockEndpointFactory) IsNil() bool {
return false
}
func TestGateway_Customized(t *testing.T) {
r := mux.NewRouter()
cert := "cert"
origins := []string{"origin"}
size := uint64(100)
endpointFactory := &mockEndpointFactory{}
g := New(
context.Background(),
[]*PbMux{},
func(handler http.Handler, writer http.ResponseWriter, request *http.Request) {
},
"",
"",
).WithRouter(r).
WithRemoteCert(cert).
WithAllowedOrigins(origins).
WithMaxCallRecvMsgSize(size).
WithApiMiddleware(endpointFactory)
assert.Equal(t, r, g.router)
assert.Equal(t, cert, g.remoteCert)
require.Equal(t, 1, len(g.allowedOrigins))
assert.Equal(t, origins[0], g.allowedOrigins[0])
assert.Equal(t, size, g.maxCallRecvMsgSize)
assert.Equal(t, endpointFactory, g.apiMiddlewareEndpointFactory)
}
func TestGateway_StartStop(t *testing.T) {
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
rpcHost := ctx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
g := New(
ctx.Context,
[]*PbMux{},
func(handler http.Handler, writer http.ResponseWriter, request *http.Request) {
},
selfAddress,
gatewayAddress,
)
g.Start()
go func() {
require.LogsContain(t, hook, "Starting gRPC gateway")
require.LogsDoNotContain(t, hook, "Starting API middleware")
}()
err := g.Stop()
require.NoError(t, err)
}
func TestGateway_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
rpcHost := ctx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
g := New(
ctx.Context,
[]*PbMux{},
/* muxHandler */ nil,
selfAddress,
gatewayAddress,
)
writer := httptest.NewRecorder()
g.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
assert.Equal(t, http.StatusNotFound, writer.Code)
}

View File

@@ -1,8 +0,0 @@
package grpc
// CustomErrorMetadataKey is the name of the metadata key storing additional error information.
// Metadata value is expected to be a byte-encoded JSON object.
const CustomErrorMetadataKey = "Custom-Error"
// HttpCodeMetadataKey is the key to use when setting custom HTTP status codes in gRPC metadata.
const HttpCodeMetadataKey = "X-Http-Code"
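A hedged sketch of how a gRPC handler can use the X-Http-Code key to override the HTTP status returned through the gateway; grpc.SetHeader and metadata.Pairs are standard google.golang.org/grpc calls, and the local constant simply mirrors HttpCodeMetadataKey above.
package example
import (
	"context"
	"strconv"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)
// httpCodeMetadataKey mirrors the HttpCodeMetadataKey constant defined above.
const httpCodeMetadataKey = "X-Http-Code"
// setHTTPCode attaches an X-Http-Code header so that grpc-gateway (and the
// API middleware) surface the given status code instead of a default 200.
func setHTTPCode(ctx context.Context, code int) error {
	md := metadata.Pairs(httpCodeMetadataKey, strconv.Itoa(code))
	return grpc.SetHeader(ctx, md)
}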

View File

@@ -1,9 +1,9 @@
# Prysmatic Labs Beacon Chain Implementation
This is the main project folder for the beacon chain implementation of Ethereum written in Go by [Prysmatic Labs](https://prysmaticlabs.com).
This is the main project folder for the beacon chain implementation of eth2 written in Go by [Prysmatic Labs](https://prysmaticlabs.com).
You can also read our main [README](https://github.com/prysmaticlabs/prysm/blob/master/README.md) and join our active chat room on Discord.
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
Also, read the official beacon chain [specification](https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.
Also, read the official beacon chain [specification](https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.

View File

@@ -1,16 +1,15 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_library(
name = "go_default_library",
srcs = [
"chain_info.go",
"head.go",
"head_sync_committee_info.go",
"info.go",
"init_sync_process_block.go",
"log.go",
"metrics.go",
"options.go",
"process_attestation.go",
"process_attestation_helpers.go",
"process_block.go",
@@ -23,23 +22,16 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
"//testing/fuzz:__pkg__",
"//testing/slasher/simulator:__pkg__",
"//fuzz:__pkg__",
],
deps = [
"//async:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
@@ -49,21 +41,23 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/interface:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/copyutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/interfaces:go_default_library",
"//shared/mputil:go_default_library",
"//shared/params:go_default_library",
"//shared/slotutil:go_default_library",
"//shared/timeutils:go_default_library",
"//shared/traceutil:go_default_library",
"@com_github_emicklei_dot//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -89,11 +83,9 @@ go_test(
"blockchain_test.go",
"chain_info_test.go",
"checktags_test.go",
"head_sync_committee_info_test.go",
"head_test.go",
"info_test.go",
"init_test.go",
"log_test.go",
"metrics_test.go",
"process_attestation_test.go",
"process_block_test.go",
@@ -105,25 +97,26 @@ go_test(
embed = [":go_default_library"],
gotags = ["develop"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state/stateV0:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//proto/beacon/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
@@ -156,23 +149,23 @@ go_test(
race = "on",
tags = ["race_on"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",

View File

@@ -7,11 +7,13 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/copyutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
@@ -22,18 +24,15 @@ type ChainInfoFetcher interface {
FinalizationFetcher
GenesisFetcher
CanonicalFetcher
ForkFetcher
TimeFetcher
HeadDomainFetcher
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
// TimeFetcher retrieves the Eth2 data that's related to time.
type TimeFetcher interface {
GenesisTime() time.Time
CurrentSlot() types.Slot
}
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
// GenesisFetcher retrieves the eth2 data related to its genesis.
type GenesisFetcher interface {
GenesisValidatorRoot() [32]byte
}
@@ -43,23 +42,19 @@ type GenesisFetcher interface {
type HeadFetcher interface {
HeadSlot() types.Slot
HeadRoot(ctx context.Context) ([]byte, error)
HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error)
HeadState(ctx context.Context) (state.BeaconState, error)
HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
HeadState(ctx context.Context) (iface.BeaconState, error)
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
HeadGenesisValidatorRoot() [32]byte
HeadETH1Data() *ethpb.Eth1Data
HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error)
ProtoArrayStore() *protoarray.Store
ChainHeads() ([][32]byte, []types.Slot)
HeadSyncCommitteeFetcher
HeadDomainFetcher
}
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
type ForkFetcher interface {
CurrentFork() *ethpb.Fork
CurrentFork() *pb.Fork
}
// CanonicalFetcher retrieves the current chain's canonical information.
@@ -82,7 +77,7 @@ func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return ethpb.CopyCheckpoint(s.finalizedCheckpt)
return copyutil.CopyCheckpoint(s.finalizedCheckpt)
}
// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
@@ -91,7 +86,7 @@ func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return ethpb.CopyCheckpoint(s.justifiedCheckpt)
return copyutil.CopyCheckpoint(s.justifiedCheckpt)
}
// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
@@ -100,7 +95,7 @@ func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return ethpb.CopyCheckpoint(s.prevJustifiedCheckpt)
return copyutil.CopyCheckpoint(s.prevJustifiedCheckpt)
}
// HeadSlot returns the slot of the head of the chain.
@@ -144,7 +139,7 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
// HeadBlock returns the head block of the chain.
// If the head is nil from service struct,
// it will attempt to get the head block from DB.
func (s *Service) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
func (s *Service) HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -158,7 +153,7 @@ func (s *Service) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error
// HeadState returns the head state of the chain.
// If the head is nil from service struct,
// it will attempt to get the head state from DB.
func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
func (s *Service) HeadState(ctx context.Context) (iface.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.HeadState")
defer span.End()
s.headLock.RLock()
@@ -182,7 +177,7 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch)
if !s.hasHeadState() {
return []types.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
return helpers.ActiveValidatorIndices(s.headState(ctx), epoch)
}
// HeadSeed returns the seed from the head view of a given epoch.
@@ -243,12 +238,12 @@ func (s *Service) GenesisValidatorRoot() [32]byte {
}
// CurrentFork retrieves the latest fork information of the beacon chain.
func (s *Service) CurrentFork() *ethpb.Fork {
func (s *Service) CurrentFork() *pb.Fork {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return &ethpb.Fork{
return &pb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
}
@@ -288,27 +283,3 @@ func (s *Service) ChainHeads() ([][32]byte, []types.Slot) {
return headsRoots, headsSlots
}
// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
func (s *Service) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return 0, false
}
return s.headState(ctx).ValidatorIndexByPubkey(pubKey)
}
// HeadValidatorIndexToPublicKey returns the pubkey of the validator `index` in current head state.
func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.ValidatorIndex) ([48]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [48]byte{}, nil
}
v, err := s.headValidatorAtIndex(index)
if err != nil {
return [48]byte{}, err
}
return v.PublicKey(), nil
}
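As a usage illustration, a hedged sketch of consuming the fetcher interfaces defined above from another service; the function and its logging are illustrative only.
package example
import (
	"context"
	"fmt"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
)
// logHead shows a typical read-only use of the HeadFetcher interface.
func logHead(ctx context.Context, f blockchain.HeadFetcher) error {
	root, err := f.HeadRoot(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("head slot=%d root=%#x\n", f.HeadSlot(), root)
	return nil
}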

View File

@@ -6,15 +6,15 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestHeadSlot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB},
cfg: &Config{BeaconDB: beaconDB},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
@@ -25,7 +25,7 @@ func TestHeadSlot_DataRace(t *testing.T) {
func TestHeadRoot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
go func() {
@@ -38,8 +38,8 @@ func TestHeadRoot_DataRace(t *testing.T) {
func TestHeadBlock_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: interfaces.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
@@ -51,7 +51,7 @@ func TestHeadBlock_DataRace(t *testing.T) {
func TestHeadState_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))

View File

@@ -9,14 +9,15 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateV0"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"google.golang.org/protobuf/proto"
)
@@ -104,7 +105,7 @@ func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
s, err := stateV0.InitializeFromProto(&pb.BeaconState{})
require.NoError(t, err)
c.head = &head{slot: 100, state: s}
assert.Equal(t, types.Slot(100), c.HeadSlot())
@@ -120,13 +121,13 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
func TestHeadRoot_UseDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
c := &Service{cfg: &config{BeaconDB: beaconDB}}
c := &Service{cfg: &Config{BeaconDB: beaconDB}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
@@ -134,12 +135,12 @@ func TestHeadRoot_UseDB(t *testing.T) {
}
func TestHeadBlock_CanRetrieve(t *testing.T) {
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.Slot = 1
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
s, err := stateV0.InitializeFromProto(&pb.BeaconState{})
require.NoError(t, err)
c := &Service{}
c.head = &head{block: wrapper.WrappedPhase0SignedBeaconBlock(b), state: s}
c.head = &head{block: interfaces.WrappedPhase0SignedBeaconBlock(b), state: s}
recevied, err := c.HeadBlock(context.Background())
require.NoError(t, err)
@@ -147,7 +148,7 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
}
func TestHeadState_CanRetrieve(t *testing.T) {
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
s, err := stateV0.InitializeFromProto(&pb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
@@ -163,8 +164,8 @@ func TestGenesisTime_CanRetrieve(t *testing.T) {
}
func TestCurrentFork_CanRetrieve(t *testing.T) {
f := &ethpb.Fork{Epoch: 999}
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Fork: f})
f := &pb.Fork{Epoch: 999}
s, err := stateV0.InitializeFromProto(&pb.BeaconState{Fork: f})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
@@ -174,7 +175,7 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
}
func TestCurrentFork_NilHeadSTate(t *testing.T) {
f := &ethpb.Fork{
f := &pb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
}
@@ -189,7 +190,7 @@ func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
c := &Service{}
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
s, err := v1.InitializeFromProto(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
s, err := stateV0.InitializeFromProto(&pb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
require.NoError(t, err)
c.head = &head{state: s}
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
@@ -203,7 +204,7 @@ func TestHeadETH1Data_Nil(t *testing.T) {
func TestHeadETH1Data_CanRetrieve(t *testing.T) {
d := &ethpb.Eth1Data{DepositCount: 999}
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Eth1Data: d})
s, err := stateV0.InitializeFromProto(&pb.BeaconState{Eth1Data: d})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
@@ -217,11 +218,11 @@ func TestIsCanonical_Ok(t *testing.T) {
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
blk := util.NewBeaconBlock()
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 0
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(blk)))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
can, err := c.IsCanonical(ctx, root)
require.NoError(t, err)
@@ -233,7 +234,7 @@ func TestIsCanonical_Ok(t *testing.T) {
}
func TestService_HeadValidatorsIndices(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
s, _ := testutil.DeterministicGenesisState(t, 10)
c := &Service{}
c.head = &head{}
@@ -248,7 +249,7 @@ func TestService_HeadValidatorsIndices(t *testing.T) {
}
func TestService_HeadSeed(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
s, _ := testutil.DeterministicGenesisState(t, 1)
c := &Service{}
seed, err := helpers.Seed(s, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
@@ -265,7 +266,7 @@ func TestService_HeadSeed(t *testing.T) {
}
func TestService_HeadGenesisValidatorRoot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
s, _ := testutil.DeterministicGenesisState(t, 1)
c := &Service{}
c.head = &head{}
@@ -278,14 +279,14 @@ func TestService_HeadGenesisValidatorRoot(t *testing.T) {
}
func TestService_ProtoArrayStore(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
p := c.ProtoArrayStore()
require.Equal(t, 0, int(p.FinalizedEpoch()))
}
func TestService_ChainHeads(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 0, 0))
@@ -296,61 +297,3 @@ func TestService_ChainHeads(t *testing.T) {
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
}
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
c := &Service{}
c.head = &head{state: s}
_, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
require.Equal(t, false, e)
v, err := s.ValidatorAtIndex(0)
require.NoError(t, err)
i, e := c.HeadPublicKeyToValidatorIndex(context.Background(), bytesutil.ToBytes48(v.PublicKey))
require.Equal(t, true, e)
require.Equal(t, types.ValidatorIndex(0), i)
}
func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
c := &Service{}
c.head = nil
idx, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
require.Equal(t, false, e)
require.Equal(t, types.ValidatorIndex(0), idx)
c.head = &head{state: nil}
i, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
require.Equal(t, false, e)
require.Equal(t, types.ValidatorIndex(0), i)
}
func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
c := &Service{}
c.head = &head{state: s}
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
require.NoError(t, err)
v, err := s.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes48(v.PublicKey), p)
}
func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
c := &Service{}
c.head = nil
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, [48]byte{}, p)
c.head = &head{state: nil}
p, err = c.HeadValidatorIndexToPublicKey(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, [48]byte{}, p)
}

View File

@@ -10,25 +10,23 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// This defines the current chain service's view of head.
type head struct {
slot types.Slot // current head slot.
root [32]byte // current head root.
block block.SignedBeaconBlock // current head block.
state state.BeaconState // current head state.
slot types.Slot // current head slot.
root [32]byte // current head root.
block interfaces.SignedBeaconBlock // current head block.
state iface.BeaconState // current head state.
}
// Determines the head from the fork choice service and saves its new data
@@ -107,8 +105,8 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
if err != nil {
return err
}
if err := helpers.BeaconBlockIsNil(newHeadBlock); err != nil {
return err
if newHeadBlock == nil || newHeadBlock.IsNil() || newHeadBlock.Block().IsNil() {
return errors.New("cannot save nil head block")
}
// Get the new head state from cached state or DB.
@@ -131,7 +129,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
}).Debug("Chain reorg occurred")
absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
absoluteSlotDifference := slotutil.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
@@ -141,14 +139,10 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
NewHeadBlock: headRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: slots.ToEpoch(newHeadSlot),
Epoch: helpers.SlotToEpoch(newHeadSlot),
},
})
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(r)); err != nil {
return err
}
reorgCount.Inc()
}
@@ -174,8 +168,8 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
// checkpoints as anchors to resume sync, so the head no longer needs to be saved on a per-slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b block.SignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
if err := helpers.BeaconBlockIsNil(b); err != nil {
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.SignedBeaconBlock, r [32]byte, hs iface.BeaconState) error {
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return err
}
cachedHeadRoot, err := s.HeadRoot(ctx)
@@ -191,7 +185,7 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b block.SignedBeaconBlock, r
}
// This sets head view object which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
func (s *Service) setHead(root [32]byte, block interfaces.SignedBeaconBlock, state iface.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -207,7 +201,7 @@ func (s *Service) setHead(root [32]byte, block block.SignedBeaconBlock, state st
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.SignedBeaconBlock, state iface.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -240,14 +234,14 @@ func (s *Service) headRoot() [32]byte {
// This returns the head block.
// It does a full copy on head block for immutability.
// This is a lock free version.
func (s *Service) headBlock() block.SignedBeaconBlock {
func (s *Service) headBlock() interfaces.SignedBeaconBlock {
return s.head.block.Copy()
}
// This returns the head state.
// It does a full copy on head state for immutability.
// This is a lock free version.
func (s *Service) headState(ctx context.Context) state.BeaconState {
func (s *Service) headState(ctx context.Context) iface.BeaconState {
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
defer span.End()
@@ -260,13 +254,6 @@ func (s *Service) headGenesisValidatorRoot() [32]byte {
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}
// This returns the validator referenced by the provided index in
// the head state.
// This is a lock free version.
func (s *Service) headValidatorAtIndex(index types.ValidatorIndex) (state.ReadOnlyValidator, error) {
return s.head.state.ValidatorAtIndexReadOnly(index)
}
// Returns true if head state exists.
// This is the lock free version.
func (s *Service) hasHeadState() bool {
@@ -281,7 +268,7 @@ func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot
s.clearInitSyncBlocks()
var justifiedState state.BeaconState
var justifiedState iface.BeaconState
var err error
if justifiedRoot == s.genesisRoot {
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
@@ -298,10 +285,10 @@ func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot
return errors.New("justified state can't be nil")
}
epoch := time.CurrentEpoch(justifiedState)
epoch := helpers.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := justifiedState.ReadFromEveryValidator(func(idx int, val iface.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
@@ -328,7 +315,7 @@ func (s *Service) getJustifiedBalances() []uint64 {
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(
newHeadSlot types.Slot,
newHeadState state.BeaconState,
newHeadState iface.BeaconState,
newHeadStateRoot,
newHeadRoot []byte,
) error {
@@ -336,15 +323,15 @@ func (s *Service) notifyNewHeadEvent(
currentDutyDependentRoot := s.genesisRoot[:]
var previousDutyEpoch types.Epoch
currentDutyEpoch := slots.ToEpoch(newHeadSlot)
currentDutyEpoch := helpers.SlotToEpoch(newHeadSlot)
if currentDutyEpoch > 0 {
previousDutyEpoch = currentDutyEpoch.Sub(1)
}
currentDutySlot, err := slots.EpochStart(currentDutyEpoch)
currentDutySlot, err := helpers.StartSlot(currentDutyEpoch)
if err != nil {
return errors.Wrap(err, "could not get duty slot")
}
previousDutySlot, err := slots.EpochStart(previousDutyEpoch)
previousDutySlot, err := helpers.StartSlot(previousDutyEpoch)
if err != nil {
return errors.Wrap(err, "could not get duty slot")
}
@@ -366,47 +353,10 @@ func (s *Service) notifyNewHeadEvent(
Slot: newHeadSlot,
Block: newHeadRoot,
State: newHeadStateRoot,
EpochTransition: slots.IsEpochStart(newHeadSlot),
EpochTransition: helpers.IsEpochEnd(newHeadSlot),
PreviousDutyDependentRoot: previousDutyDependentRoot,
CurrentDutyDependentRoot: currentDutyDependentRoot,
},
})
return nil
}
// This saves the attestations inside the beacon block with respect to root `orphanedRoot` back into the
// attestation pool. It also filters out attestations that are more than one epoch old as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
if !features.Get().CorrectlyInsertOrphanedAtts {
return nil
}
orphanedBlk, err := s.cfg.BeaconDB.Block(ctx, orphanedRoot)
if err != nil {
return err
}
if orphanedBlk == nil || orphanedBlk.IsNil() {
return errors.New("orphaned block can't be nil")
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// Skip attestations that are more than one epoch old.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}
return nil
}
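The orphaned-attestation path above only re-pools attestations from the most recent epoch; below is a minimal standalone sketch of that age check (the helper name is hypothetical).
package example
import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/config/params"
)
// tooOldForPool mirrors the filter in saveOrphanedAtts: an attestation is
// skipped once its slot is more than SLOTS_PER_EPOCH behind the current slot.
func tooOldForPool(attSlot, currentSlot types.Slot) bool {
	return attSlot+params.BeaconConfig().SlotsPerEpoch < currentSlot
}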

View File

@@ -1,174 +0,0 @@
package blockchain
import (
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/async"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)
// Initialize the state cache for sync committees.
var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return a caller's sync committee indices and public keys with respect to the current head state.
type HeadSyncCommitteeFetcher interface {
HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error)
}
// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
// The head sync committee domain functions return a caller's domain data with respect to the slot and head state.
type HeadDomainFetcher interface {
HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
}
// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
}
// HeadSyncSelectionProofDomain returns the sync committee selection proof domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
}
// HeadSyncContributionProofDomain returns the sync committee contribution and proof domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
}
// HeadSyncCommitteeIndices returns the sync committee index position using the head state. The input `slot` is taken into consideration
// because a validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at an epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD, the validator will be considered to be using the next period's sync committee.
//
// Spec definition:
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
// This means that when assigned to an epoch sync committee signatures must be produced and broadcast for slots on range
// [compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)
// rather than for the range
// [compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)
func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
nextSlotEpoch := slots.ToEpoch(slot + 1)
currentEpoch := slots.ToEpoch(slot)
switch {
case slots.SyncCommitteePeriod(nextSlotEpoch) == slots.SyncCommitteePeriod(currentEpoch):
return s.headCurrentSyncCommitteeIndices(ctx, index, slot)
// At a sync committee period boundary, the validator should sample the next period's sync committee.
case slots.SyncCommitteePeriod(nextSlotEpoch) == slots.SyncCommitteePeriod(currentEpoch)+1:
return s.headNextSyncCommitteeIndices(ctx, index, slot)
default:
// Impossible condition.
return nil, errors.New("could get calculate sync subcommittee based on the period")
}
}
// headCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
return helpers.CurrentPeriodSyncSubcommitteeIndices(headState, index)
}
// headNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
return helpers.NextPeriodSyncSubcommitteeIndices(headState, index)
}
// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
nextSlotEpoch := slots.ToEpoch(headState.Slot() + 1)
currEpoch := slots.ToEpoch(headState.Slot())
var syncCommittee *ethpb.SyncCommittee
if currEpoch == nextSlotEpoch || slots.SyncCommitteePeriod(currEpoch) == slots.SyncCommitteePeriod(nextSlotEpoch) {
syncCommittee, err = headState.CurrentSyncCommittee()
if err != nil {
return nil, err
}
} else {
syncCommittee, err = headState.NextSyncCommittee()
if err != nil {
return nil, err
}
}
return altair.SyncSubCommitteePubkeys(syncCommittee, committeeIndex)
}
// returns calculated domain using input `domain` and `slot`.
func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, domain [4]byte) ([]byte, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
}
// returns the head state advanced up to `slot`. It utilizes the `syncCommitteeHeadStateCache` by retrieving with `slot` as the key.
// On a cache miss, it processes the head state up to `slot` and fills the cache with `slot` as the key.
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot) (state.BeaconState, error) {
var headState state.BeaconState
var err error
mLock := async.NewMultilock(fmt.Sprintf("%s-%d", "syncHeadState", slot))
mLock.Lock()
defer mLock.Unlock()
// If a head state already exists for the requested slot, we don't need to process slots.
cachedState, err := syncCommitteeHeadStateCache.Get(slot)
switch {
case err == nil:
syncHeadStateHit.Inc()
headState = cachedState
return headState, nil
case errors.Is(err, cache.ErrNotFound):
headState, err = s.HeadState(ctx)
if err != nil {
return nil, err
}
if headState == nil || headState.IsNil() {
return nil, errors.New("nil state")
}
if slot > headState.Slot() {
headState, err = transition.ProcessSlots(ctx, headState, slot)
if err != nil {
return nil, err
}
}
syncHeadStateMiss.Inc()
err = syncCommitteeHeadStateCache.Put(slot, headState)
return headState, err
default:
// In the event we encounter any other error,
// we return it.
return nil, err
}
}
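
getSyncCommitteeHeadState above is a cache-aside pattern behind a per-slot lock: concurrent callers for the same slot serialize, the first miss pays for ProcessSlots, and later callers are served from the cache. The following is a stripped-down sketch of the same shape using only the standard library; the cache and the state computation are stand-ins rather than the Prysm types.

package main

import (
	"fmt"
	"sync"
)

// slotCache is a minimal stand-in for syncCommitteeHeadStateCache.
type slotCache struct {
	mu    sync.Mutex
	items map[uint64]string
}

func (c *slotCache) get(slot uint64) (string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.items[slot]
	return v, ok
}

func (c *slotCache) put(slot uint64, v string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[slot] = v
}

var (
	cache = &slotCache{items: make(map[uint64]string)}
	locks sync.Map // one mutex per slot, mirroring the per-slot multilock
)

func headStateForSlot(slot uint64) string {
	m, _ := locks.LoadOrStore(slot, &sync.Mutex{})
	mu := m.(*sync.Mutex)
	mu.Lock()
	defer mu.Unlock()

	if v, ok := cache.get(slot); ok {
		return v // cache hit: no slot processing needed
	}
	v := fmt.Sprintf("state advanced to slot %d", slot) // stand-in for ProcessSlots
	cache.put(slot, v)
	return v
}

func main() {
	fmt.Println(headStateForSlot(42))
	fmt.Println(headStateForSlot(42)) // second call is served from the cache
}
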

View File

@@ -1,176 +0,0 @@
package blockchain
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
func TestService_headSyncCommitteeFetcher_Errors(t *testing.T) {
beaconDB := dbtest.SetupDB(t)
c := &Service{
cfg: &config{
StateGen: stategen.New(beaconDB),
},
}
c.head = &head{}
_, err := c.headCurrentSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.headNextSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(0), types.CommitteeIndex(0))
require.ErrorContains(t, "nil state", err)
}
func TestService_HeadDomainFetcher_Errors(t *testing.T) {
beaconDB := dbtest.SetupDB(t)
c := &Service{
cfg: &config{
StateGen: stategen.New(beaconDB),
},
}
c.head = &head{}
_, err := c.HeadSyncCommitteeDomain(context.Background(), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.HeadSyncSelectionProofDomain(context.Background(), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.HeadSyncSelectionProofDomain(context.Background(), types.Slot(0))
require.ErrorContains(t, "nil state", err)
}
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Current period
slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// Current period where the slot is 2 slots before crossing EPOCHS_PER_SYNC_COMMITTEE_PERIOD
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
require.DeepEqual(t, a, b)
// Next period where the slot is 1 slot before crossing EPOCHS_PER_SYNC_COMMITTEE_PERIOD
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
require.DeepNotEqual(t, a, b)
}
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Process slots up to `EpochsPerSyncCommitteePeriod` so that `ProcessSyncCommitteeUpdates` can run.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
require.Equal(t, 0, len(indices))
}
func TestService_headNextSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Process slots up to `EpochsPerSyncCommitteePeriod` so that `ProcessSyncCommitteeUpdates` can run.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should not be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
require.NotEqual(t, 0, len(indices))
}
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Process slots up to 2 * `EpochsPerSyncCommitteePeriod` so that `ProcessSyncCommitteeUpdates` runs twice.
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(slot), 0)
require.NoError(t, err)
// Any subcommittee should match the subcommittee size.
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
require.Equal(t, int(subCommitteeSize), len(pubkeys))
}
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
require.NoError(t, err)
require.DeepEqual(t, wanted, d)
}
func TestService_HeadSyncContributionProofDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
require.NoError(t, err)
require.DeepEqual(t, wanted, d)
}
func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
require.NoError(t, err)
require.DeepEqual(t, wanted, d)
}
func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
c := syncCommitteeHeadStateCache
t.Cleanup(func() {
syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
})
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
require.NoError(t, beaconState.SetSlot(100))
cachedState, err := c.Get(101)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
require.Equal(t, nil, cachedState)
require.NoError(t, c.Put(101, beaconState))
cachedState, err = c.Get(101)
require.NoError(t, err)
require.DeepEqual(t, beaconState, cachedState)
}

View File

@@ -4,20 +4,18 @@ import (
"bytes"
"context"
"testing"
"time"
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -38,30 +36,31 @@ func TestSaveHead_Different(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
util.NewBeaconBlock()
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
util.NewBeaconBlock(),
)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
oldRoot := [32]byte{'A'}
service.head = &head{
slot: 0,
root: oldRoot,
block: oldBlock,
slot: 0,
root: oldRoot,
block: interfaces.WrappedPhase0SignedBeaconBlock(
&ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Slot: 0,
StateRoot: make([]byte, 32),
},
},
),
}
newHeadSignedBlock := util.NewBeaconBlock()
newHeadSignedBlock := testutil.NewBeaconBlock()
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState, err := util.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
@@ -80,31 +79,33 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
util.NewBeaconBlock(),
)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
oldRoot := [32]byte{'A'}
service.head = &head{
slot: 0,
root: oldRoot,
block: oldBlock,
slot: 0,
root: oldRoot,
block: interfaces.WrappedPhase0SignedBeaconBlock(
&ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Slot: 0,
StateRoot: make([]byte, 32),
},
},
),
}
reorgChainParent := [32]byte{'B'}
newHeadSignedBlock := util.NewBeaconBlock()
newHeadSignedBlock := testutil.NewBeaconBlock()
newHeadSignedBlock.Block.Slot = 1
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
newHeadBlock := newHeadSignedBlock.Block
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState, err := util.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
@@ -124,9 +125,9 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
state, _ := util.DeterministicGenesisState(t, 100)
state, _ := testutil.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
@@ -136,8 +137,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
b := util.NewBeaconBlock()
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
b := testutil.NewBeaconBlock()
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(b)))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -150,10 +151,10 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
func Test_notifyNewHeadEvent(t *testing.T) {
t.Run("genesis_state_root", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
bState, _ := testutil.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
srv := &Service{
cfg: &config{
cfg: &Config{
StateNotifier: notifier,
},
genesisRoot: [32]byte{1},
@@ -178,18 +179,18 @@ func Test_notifyNewHeadEvent(t *testing.T) {
require.DeepSSZEqual(t, wanted, eventHead)
})
t.Run("non_genesis_values", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
bState, _ := testutil.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
genesisRoot := [32]byte{1}
srv := &Service{
cfg: &config{
cfg: &Config{
StateNotifier: notifier,
},
genesisRoot: genesisRoot,
}
epoch1Start, err := slots.EpochStart(1)
epoch1Start, err := helpers.StartSlot(1)
require.NoError(t, err)
epoch2Start, err := slots.EpochStart(1)
epoch2Start, err := helpers.StartSlot(1)
require.NoError(t, err)
require.NoError(t, bState.SetSlot(epoch1Start))
@@ -206,62 +207,10 @@ func Test_notifyNewHeadEvent(t *testing.T) {
Slot: epoch2Start,
Block: newHeadRoot[:],
State: newHeadStateRoot[:],
EpochTransition: true,
EpochTransition: false,
PreviousDutyDependentRoot: genesisRoot[:],
CurrentDutyDependentRoot: make([]byte, 32),
}
require.DeepSSZEqual(t, wanted, eventHead)
})
}
func TestSaveOrphanedAtts(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyInsertOrphanedAtts: true,
})
defer resetCfg()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now()
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
savedAtts := service.cfg.AttPool.AggregatedAttestations()
atts := b.Block.Body.Attestations
require.DeepSSZEqual(t, atts, savedAtts)
}
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyInsertOrphanedAtts: true,
})
defer resetCfg()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
savedAtts := service.cfg.AttPool.AggregatedAttestations()
atts := b.Block.Body.Attestations
require.DeepNotSSZEqual(t, atts, savedAtts)
}

View File

@@ -6,7 +6,7 @@ import (
"net/http"
"github.com/emicklei/dot"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/shared/params"
)
const template = `<html>

View File

@@ -9,11 +9,11 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestService_TreeHandler(t *testing.T) {
@@ -22,10 +22,10 @@ func TestService_TreeHandler(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
headState, err := util.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
@@ -34,12 +34,11 @@ func TestService_TreeHandler(t *testing.T) {
),
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx)
s, err := NewService(ctx, cfg)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState)
s.setHead([32]byte{'a'}, interfaces.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()), headState)
rr := httptest.NewRecorder()
handler := http.HandlerFunc(s.TreeHandler)

View File

@@ -1,11 +1,11 @@
package blockchain
import (
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/interfaces"
)
// This saves a beacon block to the initial sync blocks cache.
func (s *Service) saveInitSyncBlock(r [32]byte, b block.SignedBeaconBlock) {
func (s *Service) saveInitSyncBlock(r [32]byte, b interfaces.SignedBeaconBlock) {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks[r] = b
@@ -22,7 +22,7 @@ func (s *Service) hasInitSyncBlock(r [32]byte) bool {
// This retrieves a beacon block from the initial sync blocks cache using the root of
// the block.
func (s *Service) getInitSyncBlock(r [32]byte) block.SignedBeaconBlock {
func (s *Service) getInitSyncBlock(r [32]byte) interfaces.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
b := s.initSyncBlocks[r]
@@ -31,11 +31,11 @@ func (s *Service) getInitSyncBlock(r [32]byte) block.SignedBeaconBlock {
// This retrieves all the beacon blocks from the initial sync blocks cache, the returned
// blocks are unordered.
func (s *Service) getInitSyncBlocks() []block.SignedBeaconBlock {
func (s *Service) getInitSyncBlocks() []interfaces.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
blks := make([]block.SignedBeaconBlock, 0, len(s.initSyncBlocks))
blks := make([]interfaces.SignedBeaconBlock, 0, len(s.initSyncBlocks))
for _, b := range s.initSyncBlocks {
blks = append(blks, b)
}
@@ -46,5 +46,5 @@ func (s *Service) getInitSyncBlocks() []block.SignedBeaconBlock {
func (s *Service) clearInitSyncBlocks() {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks = make(map[[32]byte]block.SignedBeaconBlock)
s.initSyncBlocks = make(map[[32]byte]interfaces.SignedBeaconBlock)
}
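
The init-sync block cache above is a read-write-mutex-guarded map keyed by 32-byte block roots. The same shape in isolation looks as follows, with a placeholder value type standing in for the signed block interface.

package main

import (
	"fmt"
	"sync"
)

// blockCache mirrors the init-sync block cache: a map keyed by 32-byte
// roots, guarded by a read-write mutex.
type blockCache struct {
	mu     sync.RWMutex
	blocks map[[32]byte]string // string stands in for a signed block
}

func newBlockCache() *blockCache {
	return &blockCache{blocks: make(map[[32]byte]string)}
}

func (c *blockCache) save(root [32]byte, blk string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.blocks[root] = blk
}

func (c *blockCache) get(root [32]byte) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	blk, ok := c.blocks[root]
	return blk, ok
}

func (c *blockCache) clear() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.blocks = make(map[[32]byte]string)
}

func main() {
	c := newBlockCache()
	c.save([32]byte{'a'}, "block at slot 1")
	if blk, ok := c.get([32]byte{'a'}); ok {
		fmt.Println(blk)
	}
	c.clear()
}
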

View File

@@ -1,7 +1,7 @@
package blockchain
import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/shared/params"
)
func init() {

View File

@@ -5,20 +5,19 @@ import (
"fmt"
"time"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/timeutils"
"github.com/sirupsen/logrus"
)
var log = logrus.WithField("prefix", "blockchain")
// logs state transition related data every slot.
func logStateTransitionData(b block.BeaconBlock) {
log := log.WithField("slot", b.Slot())
func logStateTransitionData(b interfaces.BeaconBlock) {
log := log.WithField("slot", b.Slot)
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
}
@@ -34,17 +33,11 @@ func logStateTransitionData(b block.BeaconBlock) {
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() == version.Altair {
agg, err := b.Body().SyncAggregate()
if err == nil {
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
}
log.Info("Finished applying state transition")
}
func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
startTime, err := slots.ToTime(genesisTime, block.Slot())
func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
startTime, err := helpers.SlotToTime(genesisTime, block.Slot())
if err != nil {
return err
}
@@ -52,16 +45,14 @@ func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"epoch": helpers.SlotToEpoch(block.Slot()),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
"version": version.String(block.Version()),
}).Info("Synced new block")
log.WithFields(logrus.Fields{
"slot": block.Slot,
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
"sinceSlotStartTime": timeutils.Now().Sub(startTime),
"chainServiceProcessedTime": timeutils.Now().Sub(receivedTime),
}).Debug("Sync new block times")
return nil
}
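
Both loggers above attach structured logrus fields rather than formatting strings, which is what produces the key=value output asserted by the log tests in the next file. A self-contained example of the same pattern follows; the field values are made up for illustration.

package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.WithField("prefix", "blockchain")

	// Fields are attached incrementally, like the per-block logger above.
	entry := log.WithField("slot", 12345)
	entry = entry.WithField("attestations", 64)
	entry.Info("Finished applying state transition")

	// Or attached all at once for the sync-status style log.
	log.WithFields(logrus.Fields{
		"slot":        12345,
		"slotInEpoch": 12345 % 32,
		"epoch":       12345 / 32,
	}).Info("Synced new block")
}
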

View File

@@ -3,63 +3,61 @@ package blockchain
import (
"testing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func Test_logStateTransitionData(t *testing.T) {
tests := []struct {
name string
b block.BeaconBlock
b *ethpb.BeaconBlock
want string
}{
{name: "empty block body",
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}),
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}},
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
},
{name: "has attestation",
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}}),
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}},
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has deposit",
b: wrapper.WrappedPhase0BeaconBlock(
&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}}}}),
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}}}},
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
},
{name: "has attester slashing",
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
AttesterSlashings: []*ethpb.AttesterSlashing{{}}}}),
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
AttesterSlashings: []*ethpb.AttesterSlashing{{}}}},
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
},
{name: "has proposer slashing",
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
ProposerSlashings: []*ethpb.ProposerSlashing{{}}}}),
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
ProposerSlashings: []*ethpb.ProposerSlashing{{}}}},
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
},
{name: "has exit",
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}},
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
},
{name: "has everything",
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}},
AttesterSlashings: []*ethpb.AttesterSlashing{{}},
ProposerSlashings: []*ethpb.ProposerSlashing{{}},
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}},
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
}
for _, tt := range tests {
hook := logTest.NewGlobal()
t.Run(tt.name, func(t *testing.T) {
logStateTransitionData(tt.b)
logStateTransitionData(interfaces.WrappedPhase0BeaconBlock(tt.b))
require.LogsContain(t, hook, tt.want)
})
}

View File

@@ -3,18 +3,15 @@ package blockchain
import (
"context"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
)
var (
@@ -63,10 +60,6 @@ var (
Name: "beacon_previous_justified_root",
Help: "Previous justified root of the processed state",
})
activeValidatorCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_current_active_validators",
Help: "Current total active validators",
})
validatorsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "validator_count",
Help: "The total number of validators",
@@ -83,10 +76,6 @@ var (
Name: "current_eth1_data_deposit_count",
Help: "The current eth1 deposit count in the last processed state eth1data field.",
})
processedDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_processed_deposits_total",
Help: "Total number of deposits processed",
})
stateTrieReferences = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "field_references",
Help: "The number of states a particular field is shared with.",
@@ -108,13 +97,9 @@ var (
Help: "The total amount of ether, in gwei, that has been used in voting attestation head of previous epoch",
})
reorgCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_reorgs_total",
Name: "beacon_reorg_total",
Help: "Count the number of times beacon chain has a reorg",
})
saveOrphanedAttCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "saved_orphaned_att_total",
Help: "Count the number of times an orphaned attestation is saved",
})
attestationInclusionDelay = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "attestation_inclusion_delay_slots",
@@ -122,14 +107,6 @@ var (
Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
},
)
syncHeadStateMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_head_state_miss",
Help: "The number of sync head state requests that are not present in the cache.",
})
syncHeadStateHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_head_state_hit",
Help: "The number of sync head state requests that are present in the cache.",
})
)
// reportSlotMetrics reports slot related metrics.
@@ -144,7 +121,7 @@ func reportSlotMetrics(stateSlot, headSlot, clockSlot types.Slot, finalizedCheck
}
// reportEpochMetrics reports epoch related metrics.
func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconState) error {
func reportEpochMetrics(ctx context.Context, postState, headState iface.BeaconState) error {
currentEpoch := types.Epoch(postState.Slot() / params.BeaconConfig().SlotsPerEpoch)
// Validator instances
@@ -202,7 +179,6 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
activeBalance += exitingBalance + slashingBalance
activeEffectiveBalance += exitingEffectiveBalance + slashingEffectiveBalance
activeValidatorCount.Set(float64(activeInstances))
validatorsCount.WithLabelValues("Pending").Set(float64(pendingInstances))
validatorsCount.WithLabelValues("Active").Set(float64(activeInstances))
validatorsCount.WithLabelValues("Exiting").Set(float64(exitingInstances))
@@ -229,33 +205,15 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
beaconFinalizedEpoch.Set(float64(postState.FinalizedCheckpointEpoch()))
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(postState.FinalizedCheckpoint().Root)))
currentEth1DataDepositCount.Set(float64(postState.Eth1Data().DepositCount))
processedDepositsCount.Set(float64(postState.Eth1DepositIndex() + 1))
var b *precompute.Balance
var v []*precompute.Validator
var err error
switch headState.Version() {
case version.Phase0:
// Validator participation should be viewed on the canonical chain.
v, b, err = precompute.New(ctx, headState)
if err != nil {
return err
}
_, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
if err != nil {
return err
}
case version.Altair:
v, b, err = altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return err
}
_, b, err = altair.ProcessEpochParticipation(ctx, headState, b, v)
if err != nil {
return err
}
default:
return errors.Errorf("invalid state type provided: %T", headState.InnerStateUnsafe())
// Validator participation should be viewed on the canonical chain.
v, b, err := precompute.New(ctx, headState)
if err != nil {
return err
}
_, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
if err != nil {
return err
}
prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
@@ -270,7 +228,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
return nil
}
func reportAttestationInclusion(blk block.BeaconBlock) {
func reportAttestationInclusion(blk interfaces.BeaconBlock) {
for _, att := range blk.Body().Attestations() {
attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
}
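
The inclusion-delay metric above observes, for every attestation packed into a block, the distance in slots between the attested slot and the including block's slot. Here is a stand-alone sketch of that histogram pattern with the Prometheus client; the metric name is changed so it does not collide with the real one.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// inclusionDelay mirrors the attestation_inclusion_delay_slots histogram above:
// buckets are expressed in slots between the attested slot and the including block.
var inclusionDelay = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "example_attestation_inclusion_delay_slots",
	Help:    "Example histogram of attestation inclusion delay in slots.",
	Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
})

func main() {
	blockSlot, attSlot := uint64(105), uint64(103)
	inclusionDelay.Observe(float64(blockSlot - attSlot)) // records a delay of 2 slots
	fmt.Println("observed inclusion delay:", blockSlot-attSlot)
}
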

View File

@@ -4,15 +4,16 @@ import (
"context"
"testing"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
eth "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestReportEpochMetrics_BadHeadState(t *testing.T) {
s, err := util.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
h, err := util.NewBeaconState()
h, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, h.SetValidators(nil))
err = reportEpochMetrics(context.Background(), s, h)
@@ -20,22 +21,22 @@ func TestReportEpochMetrics_BadHeadState(t *testing.T) {
}
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
s, err := util.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
h, err := util.NewBeaconState()
h, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 0}))
require.NoError(t, h.AppendCurrentEpochAttestations(&pb.PendingAttestation{InclusionDelay: 0}))
err = reportEpochMetrics(context.Background(), s, h)
require.ErrorContains(t, "attestation with inclusion delay of 0", err)
}
func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) {
h, _ := util.DeterministicGenesisState(t, 1)
h, _ := testutil.DeterministicGenesisState(t, 1)
v, err := h.ValidatorAtIndex(0)
require.NoError(t, err)
v.Slashed = true
require.NoError(t, h.UpdateValidatorAtIndex(0, v))
require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 1, Data: util.HydrateAttestationData(&eth.AttestationData{})}))
require.NoError(t, h.AppendCurrentEpochAttestations(&pb.PendingAttestation{InclusionDelay: 1, Data: testutil.HydrateAttestationData(&eth.AttestationData{})}))
err = reportEpochMetrics(context.Background(), h, h)
require.ErrorContains(t, "slot 0 out of bounds", err)
}

View File

@@ -1,139 +0,0 @@
package blockchain
import (
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
type Option func(s *Service) error
// WithMaxGoroutines to control resource use of the blockchain service.
func WithMaxGoroutines(x int) Option {
return func(s *Service) error {
s.cfg.MaxRoutines = x
return nil
}
}
// WithWeakSubjectivityCheckpoint for checkpoint sync.
func WithWeakSubjectivityCheckpoint(c *ethpb.Checkpoint) Option {
return func(s *Service) error {
s.cfg.WeakSubjectivityCheckpt = c
return nil
}
}
// WithDatabase for head access.
func WithDatabase(beaconDB db.HeadAccessDatabase) Option {
return func(s *Service) error {
s.cfg.BeaconDB = beaconDB
return nil
}
}
// WithChainStartFetcher to retrieve information about genesis.
func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
return func(s *Service) error {
s.cfg.ChainStartFetcher = f
return nil
}
}
// WithDepositCache for deposit lifecycle after chain inclusion.
func WithDepositCache(c *depositcache.DepositCache) Option {
return func(s *Service) error {
s.cfg.DepositCache = c
return nil
}
}
// WithAttestationPool for attestation lifecycle after chain inclusion.
func WithAttestationPool(p attestations.Pool) Option {
return func(s *Service) error {
s.cfg.AttPool = p
return nil
}
}
// WithExitPool for exits lifecycle after chain inclusion.
func WithExitPool(p voluntaryexits.PoolManager) Option {
return func(s *Service) error {
s.cfg.ExitPool = p
return nil
}
}
// WithSlashingPool for slashings lifecycle after chain inclusion.
func WithSlashingPool(p slashings.PoolManager) Option {
return func(s *Service) error {
s.cfg.SlashingPool = p
return nil
}
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
return func(s *Service) error {
s.cfg.P2p = p
return nil
}
}
// WithStateNotifier to notify an event feed of state processing.
func WithStateNotifier(n statefeed.Notifier) Option {
return func(s *Service) error {
s.cfg.StateNotifier = n
return nil
}
}
// WithForkChoiceStore to update an optimized fork-choice representation.
func WithForkChoiceStore(f forkchoice.ForkChoicer) Option {
return func(s *Service) error {
s.cfg.ForkChoiceStore = f
return nil
}
}
// WithAttestationService for dealing with attestation lifecycles.
func WithAttestationService(srv *attestations.Service) Option {
return func(s *Service) error {
s.cfg.AttService = srv
return nil
}
}
// WithStateGen for managing state regeneration and replay.
func WithStateGen(g *stategen.State) Option {
return func(s *Service) error {
s.cfg.StateGen = g
return nil
}
}
// WithSlasherAttestationsFeed to forward attestations into slasher if enabled.
func WithSlasherAttestationsFeed(f *event.Feed) Option {
return func(s *Service) error {
s.cfg.SlasherAttestationsFeed = f
return nil
}
}
// WithFinalizedStateAtStartUp to store finalized state at start up.
func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return func(s *Service) error {
s.cfg.FinalizedStateAtStartUp = st
return nil
}
}
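
The options file above replaces a monolithic Config with functional options that each mutate the service's config and may return an error. Below is a hedged sketch of how such options are typically consumed, assuming a constructor that accepts variadic options; the real NewService signature is not shown in this excerpt.

package main

import "fmt"

// service and config are stand-ins for the blockchain Service and its config.
type config struct {
	maxRoutines int
}

type service struct {
	cfg *config
}

// option mirrors the Option type above: a function that mutates the service.
type option func(*service) error

func withMaxGoroutines(x int) option {
	return func(s *service) error {
		s.cfg.maxRoutines = x
		return nil
	}
}

// newService applies each option in order, returning the first error.
func newService(opts ...option) (*service, error) {
	s := &service{cfg: &config{}}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newService(withMaxGoroutines(20))
	if err != nil {
		panic(err)
	}
	fmt.Println(s.cfg.maxRoutines) // 20
}
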

View File

@@ -5,12 +5,12 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/copyutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/timeutils"
"go.opencensus.io/trace"
)
@@ -46,7 +46,7 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error
if err := helpers.ValidateSlotTargetEpoch(a.Data); err != nil {
return err
}
tgt := ethpb.CopyCheckpoint(a.Data.Target)
tgt := copyutil.CopyCheckpoint(a.Data.Target)
// Note that target root check is ignored here because it was performed in sync's validation pipeline:
// validate_aggregate_proof.go and validate_beacon_attestation.go
@@ -62,7 +62,7 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error
genesisTime := baseState.GenesisTime()
// Verify attestation target is from current epoch or previous epoch.
if err := s.verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
if err := s.verifyAttTargetEpoch(ctx, genesisTime, uint64(timeutils.Now().Unix()), tgt); err != nil {
return err
}
@@ -75,20 +75,20 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error
// validate_aggregate_proof.go and validate_beacon_attestation.go
// Verify attestations can only affect the fork choice of subsequent slots.
if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := helpers.VerifySlotTime(genesisTime, a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return err
}
// Use the target state to verify attesting indices are valid.
committee, err := helpers.BeaconCommitteeFromState(ctx, baseState, a.Data.Slot, a.Data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(baseState, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committee)
indexedAtt, err := attestationutil.ConvertToIndexed(ctx, a, committee)
if err != nil {
return err
}
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
if err := attestationutil.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
return err
}

View File

@@ -7,22 +7,22 @@ import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/async"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/mputil"
"github.com/prysmaticlabs/prysm/shared/params"
)
// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (iface.BeaconState, error) {
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
lock := async.NewMultilock(string(c.Root) + epochKey)
lock := mputil.NewMultilock(string(c.Root) + epochKey)
lock.Lock()
defer lock.Unlock()
cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
@@ -38,23 +38,38 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
}
epochStartSlot, err := slots.EpochStart(c.Epoch)
epochStartSlot, err := helpers.StartSlot(c.Epoch)
if err != nil {
return nil, err
}
if epochStartSlot > baseState.Slot() {
baseState, err = transition.ProcessSlots(ctx, baseState, epochStartSlot)
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
if featureconfig.Get().EnableNextSlotStateCache {
baseState, err = state.ProcessSlotsUsingNextSlotCache(ctx, baseState, c.Root, epochStartSlot)
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
}
} else {
baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
}
}
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return baseState, nil
}
// Sharing the same state across caches is perfectly fine here, the fetching
// of attestation prestate is by far the most accessed state fetching pattern in
// the beacon node. An extra state instance cached isn't an issue in the bigger
// picture.
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
// To avoid sharing the same state across checkpoint state cache and hot state cache,
// we don't add the state to check point cache.
has, err := s.cfg.StateGen.HasStateInCache(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, err
}
if !has {
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
}
return baseState, nil
@@ -63,7 +78,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := types.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
currentEpoch := slots.ToEpoch(currentSlot)
currentEpoch := helpers.SlotToEpoch(currentSlot)
var prevEpoch types.Epoch
// Prevents previous epoch under flow
if currentEpoch > 1 {
@@ -87,7 +102,7 @@ func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.Attestation
if (b == nil || b.IsNil()) && s.hasInitSyncBlock(r) {
b = s.getInitSyncBlock(r)
}
if err := helpers.BeaconBlockIsNil(b); err != nil {
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return err
}
if b.Block().Slot() > data.Slot {

View File

@@ -5,63 +5,63 @@ import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/prysmaticlabs/prysm/shared/timeutils"
)
func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
BlkWithOutState := util.NewBeaconBlock()
BlkWithOutState := testutil.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
require.NoError(t, err)
BlkWithStateBadAtt := util.NewBeaconBlock()
BlkWithStateBadAtt := testutil.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
s, err := util.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
BlkWithValidState := util.NewBeaconBlock()
BlkWithValidState := testutil.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
s, err = util.NewBeaconState()
s, err = testutil.NewBeaconState()
require.NoError(t, err)
err = s.SetFork(&ethpb.Fork{
err = s.SetFork(&pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
@@ -76,17 +76,17 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}{
{
name: "attestation's data slot not aligned with target vote",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
a: testutil.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
a: testutil.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
a: testutil.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}}),
wantedErr: "target epoch 100 does not match current epoch",
},
@@ -131,22 +131,21 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(timeutils.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
att, err := testutil.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
copied, err = state.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
@@ -157,15 +156,14 @@ func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
s, err := util.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
err = s.SetFinalizedCheckpoint(&ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)})
require.NoError(t, err)
@@ -188,7 +186,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r = bytesutil.ToBytes32([]byte{'A'})
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
s1, err := service.getAttPreState(ctx, cp1)
require.NoError(t, err)
@@ -196,7 +194,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
s2, err := service.getAttPreState(ctx, cp2)
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
@@ -220,7 +218,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, 32)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
s3, err := service.getAttPreState(ctx, cp3)
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
@@ -230,16 +228,15 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
epoch := types.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)
baseState, _ := testutil.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), 32)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
returned, err := service.getAttPreState(ctx, checkpoint)
@@ -255,9 +252,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)
s, err := helpers.StartSlot(newCheckpoint.Epoch)
require.NoError(t, err)
baseState, err = transition.ProcessSlots(ctx, baseState, s)
baseState, err = state.ProcessSlots(ctx, baseState, s)
require.NoError(t, err)
assert.Equal(t, returned.Slot(), baseState.Slot(), "Incorrectly returned base state")
@@ -270,10 +267,9 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)}))
@@ -283,10 +279,9 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Epoch: 1}))
@@ -296,10 +291,9 @@ func TestAttEpoch_NotMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
err = service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)})
@@ -310,12 +304,11 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
d := util.HydrateAttestationData(&ethpb.AttestationData{})
d := testutil.HydrateAttestationData(&ethpb.AttestationData{})
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
}
@@ -323,14 +316,13 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.Slot = 2
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b)))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
@@ -342,14 +334,13 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.Slot = 2
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b)))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
@@ -361,23 +352,22 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b32)))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}
b33 := util.NewBeaconBlock()
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b33)))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -389,23 +379,22 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b32)))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1, Root: r32[:]}
b33 := util.NewBeaconBlock()
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b33)))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -417,19 +406,18 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1, Root: r32[:]}
b33 := util.NewBeaconBlock()
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
r33, err := b33.Block.HashTreeRoot()


@@ -9,19 +9,17 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
@@ -85,11 +83,12 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, blockRoot [32]byte) error {
func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if err := helpers.BeaconBlockIsNil(signed); err != nil {
return err
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
return errors.New("nil block")
}
b := signed.Block()
@@ -98,7 +97,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return err
}
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
postState, err := state.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return err
}
@@ -107,90 +106,53 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
go func() {
// Using a different context to prevent timeouts as this operation can be expensive
// and we want to avoid affecting the critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
tracing.AnnotateError(span, err)
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
tracing.AnnotateError(span, err)
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
}
}()
}
// Update justified check point.
currJustifiedEpoch := s.justifiedCheckpt.Epoch
if postState.CurrentJustifiedCheckpoint().Epoch > currJustifiedEpoch {
if err := s.updateJustified(ctx, postState); err != nil {
return err
}
}
newFinalized := postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch
if newFinalized {
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
}
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.WithError(err).Warn("Could not update head")
}
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
return err
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: signed.Block().Slot(),
BlockRoot: blockRoot,
SignedBlock: signed,
Verified: true,
},
})
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
if features.Get().EnableNextSlotStateCache {
if featureconfig.Get().EnableNextSlotStateCache {
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
if err := state.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
}
// Save justified check point to db.
if postState.CurrentJustifiedCheckpoint().Epoch > currJustifiedEpoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint()); err != nil {
// Update justified check point.
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
if err := s.updateJustified(ctx, postState); err != nil {
return err
}
}
newFinalized := postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch
if featureconfig.Get().UpdateHeadTimely {
if newFinalized {
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
}
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.WithError(err).Warn("Could not update head")
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: signed.Block().Slot(),
BlockRoot: blockRoot,
SignedBlock: signed,
Verified: true,
},
})
}
// Update finalized check point.
if newFinalized {
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
@@ -200,6 +162,11 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}
if !featureconfig.Get().UpdateHeadTimely {
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
}
go func() {
// Send an event regarding the new finalized checkpoint over a common event feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
@@ -228,7 +195,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return s.handleEpochBoundary(ctx, postState)
}
func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock,
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -236,8 +203,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
if len(blks) == 0 || len(blockRoots) == 0 {
return nil, nil, errors.New("no blocks provided")
}
if err := helpers.BeaconBlockIsNil(blks[0]); err != nil {
return nil, nil, err
if blks[0] == nil || blks[0].IsNil() || blks[0].Block().IsNil() {
return nil, nil, errors.New("nil block")
}
b := blks[0].Block()
@@ -261,14 +228,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
Messages: [][32]byte{},
}
var set *bls.SignatureSet
boundaries := make(map[[32]byte]state.BeaconState)
boundaries := make(map[[32]byte]iface.BeaconState)
for i, b := range blks {
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
set, preState, err = state.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return nil, nil, err
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
if helpers.IsEpochStart(preState.Slot()) {
boundaries[blockRoots[i]] = preState.Copy()
if err := s.handleEpochBoundary(ctx, preState); err != nil {
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
@@ -304,7 +271,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
// handles a block after the block's batch has been verified, where we can save blocks,
// their state summaries, and split them off to their respective hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interfaces.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
b := signed.Block()
@@ -312,7 +279,7 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
return err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: signed.Block().Slot(),
Root: blockRoot[:],
}); err != nil {
@@ -338,28 +305,23 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.
if err := s.updateFinalized(ctx, fCheckpoint); err != nil {
return err
}
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = fCheckpoint
}
return nil
}
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
func (s *Service) handleEpochBoundary(ctx context.Context, postState iface.BeaconState) error {
if postState.Slot()+1 == s.nextEpochBoundarySlot {
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(postState, coreTime.NextEpoch(postState)); err != nil {
if err := helpers.UpdateCommitteeCache(postState, helpers.NextEpoch(postState)); err != nil {
return err
}
copied := postState.Copy()
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
copied, err := state.ProcessSlots(ctx, copied, copied.Slot()+1)
if err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
if err := helpers.UpdateProposerIndicesInCache(copied); err != nil {
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
@@ -367,17 +329,17 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
return err
}
var err error
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
s.nextEpochBoundarySlot, err = helpers.StartSlot(helpers.NextEpoch(postState))
if err != nil {
return err
}
// Update caches at epoch boundary slot.
// The following updates have a shortcut to return nil cheaply if already fulfilled during boundary slot - 1.
if err := helpers.UpdateCommitteeCache(postState, coreTime.CurrentEpoch(postState)); err != nil {
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
if err := helpers.UpdateProposerIndicesInCache(postState); err != nil {
return err
}
}
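// A minimal, self-contained sketch of the branch structure handleEpochBoundary
// uses above: one path warms caches one slot before the epoch boundary, the
// other fires at or after the boundary and recomputes the next boundary slot.
// The constant and helper names below are illustrative assumptions, not the
// package's API; mainnet's 32 slots per epoch is assumed.
package main

import "fmt"

const slotsPerEpoch = 32

// nextEpochStartSlot returns the first slot of the epoch after the one containing slot.
func nextEpochStartSlot(slot uint64) uint64 {
	return (slot/slotsPerEpoch + 1) * slotsPerEpoch
}

// classify mirrors the two branches: boundary-minus-one cache warm-up versus
// at-or-past-boundary bookkeeping that moves the next boundary forward.
func classify(slot, nextBoundary uint64) string {
	switch {
	case slot+1 == nextBoundary:
		return "warm committee/proposer caches for the next epoch"
	case slot >= nextBoundary:
		return fmt.Sprintf("epoch summary work; next boundary becomes slot %d", nextEpochStartSlot(slot))
	default:
		return "nothing to do"
	}
}

func main() {
	fmt.Println(classify(31, 32)) // one slot before the boundary
	fmt.Println(classify(32, 32)) // at the boundary
	fmt.Println(classify(20, 32)) // mid-epoch
}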
@@ -387,11 +349,8 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// This feeds the block and the block's attestations into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk block.BeaconBlock, root [32]byte,
st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
defer span.End()
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte,
st iface.BeaconState) error {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
@@ -399,11 +358,11 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
}
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(st, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
}
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
indices, err := attestationutil.AttestingIndices(a.AggregationBits, committee)
if err != nil {
return err
}
@@ -412,7 +371,7 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
return nil
}
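// A hedged sketch of what the AttestingIndices helper used above computes:
// every set bit in the aggregation bitfield selects the validator at the same
// position in the committee. A plain bool slice stands in for the real
// bitfield type, and the function name here is illustrative only.
package main

import "fmt"

func attestingIndices(bits []bool, committee []uint64) []uint64 {
	var indices []uint64
	for i, set := range bits {
		if set && i < len(committee) {
			indices = append(indices, committee[i])
		}
	}
	return indices
}

func main() {
	committee := []uint64{11, 42, 7, 99}
	// Bits 0 and 2 are set, so validators 11 and 7 attested.
	fmt.Println(attestingIndices([]bool{true, false, true, false}, committee)) // [11 7]
}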
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.BeaconBlock,
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock,
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
@@ -429,7 +388,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.Be
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st iface.BeaconState, initSync bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
@@ -445,33 +404,3 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
}
return nil
}
// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
// meaning the block `b` is part of the canonical chain.
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b block.SignedBeaconBlock) error {
if !features.Get().CorrectlyPruneCanonicalAtts {
return nil
}
canonical, err := s.IsCanonical(ctx, r)
if err != nil {
return err
}
if !canonical {
return nil
}
atts := b.Block().Body().Attestations()
for _, att := range atts {
if helpers.IsAggregated(att) {
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
}
}
return nil
}


@@ -8,26 +8,26 @@ import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"go.opencensus.io/trace"
)
// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() types.Slot {
return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
return helpers.CurrentSlot(uint64(s.genesisTime.Unix()))
}
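// A minimal sketch (not the library implementation) of the arithmetic
// CurrentSlot relies on: seconds elapsed since genesis divided by the
// seconds-per-slot constant, clamped to slot 0 when genesis lies in the
// future, which is the behaviour TestCurrentSlot_HandlesOverflow later in
// this diff expects. The names and the mainnet 12s slot time are assumptions.
package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12

func currentSlotSketch(genesis, now time.Time) uint64 {
	if now.Before(genesis) {
		return 0 // genesis has not happened yet; avoid underflow
	}
	return uint64(now.Sub(genesis).Seconds()) / secondsPerSlot
}

func main() {
	genesis := time.Now().Add(-25 * time.Second)
	fmt.Println(currentSlotSketch(genesis, time.Now())) // 25s after genesis -> slot 2
}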
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state from the DB. It verifies the pre state's validity and that the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b block.BeaconBlock) (state.BeaconState, error) {
func (s *Service) getBlockPreState(ctx context.Context, b interfaces.BeaconBlock) (iface.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
defer span.End()
@@ -45,7 +45,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b block.BeaconBlock) (st
}
// Verify block slot time is not from the future.
if err := slots.VerifyTime(preState.GenesisTime(), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, err
}
@@ -58,7 +58,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b block.BeaconBlock) (st
}
// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b block.BeaconBlock) error {
func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
defer span.End()
@@ -113,7 +113,7 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
err := fmt.Errorf("block %#x is not a descendent of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)
traceutil.AnnotateError(span, err)
return err
}
return nil
@@ -121,8 +121,8 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b block.BeaconBlock) error {
finalizedSlot, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
finalizedSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err != nil {
return err
}
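// A hedged sketch of the check verifyBlkFinalizedSlot performs: the start slot
// of the finalized epoch is epoch * slots-per-epoch, and a block at or below
// that slot is rejected. Helper names are illustrative; mainnet's 32 slots per
// epoch is assumed.
package main

import (
	"errors"
	"fmt"
)

const slotsPerEpoch = 32

func epochStartSlot(epoch uint64) uint64 { return epoch * slotsPerEpoch }

func checkNotFinalized(blockSlot, finalizedEpoch uint64) error {
	if blockSlot <= epochStartSlot(finalizedEpoch) {
		return errors.New("block slot is not after the finalized checkpoint slot")
	}
	return nil
}

func main() {
	fmt.Println(checkNotFinalized(65, 2)) // epoch 2 starts at slot 64, so slot 65 passes: <nil>
	fmt.Println(checkNotFinalized(64, 2)) // equal to the finalized slot: rejected
}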
@@ -135,52 +135,60 @@ func (s *Service) verifyBlkFinalizedSlot(b block.BeaconBlock) error {
// shouldUpdateCurrentJustified prevents the bouncing attack by only updating conflicting justified
// checkpoints in the fork choice if we are in the early slots of the epoch.
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
//
// Spec code:
// def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
// """
// To address the bouncing attack, only update conflicting justified
// checkpoints in the fork choice if in the early slots of the epoch.
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
//
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
// """
// if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
// return True
//
// justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
// if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root:
// return False
//
// return True
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.shouldUpdateCurrentJustified")
defer span.End()
if slots.SinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
if helpers.SlotsSinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
return true, nil
}
var newJustifiedBlockSigned interfaces.SignedBeaconBlock
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
var err error
if s.hasInitSyncBlock(justifiedRoot) {
newJustifiedBlockSigned = s.getInitSyncBlock(justifiedRoot)
} else {
newJustifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, justifiedRoot)
if err != nil {
return false, err
}
}
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.IsNil() || newJustifiedBlockSigned.Block().IsNil() {
return false, errors.New("nil new justified block")
}
jSlot, err := slots.EpochStart(s.justifiedCheckpt.Epoch)
newJustifiedBlock := newJustifiedBlockSigned.Block()
jSlot, err := helpers.StartSlot(s.justifiedCheckpt.Epoch)
if err != nil {
return false, err
}
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
b, err := s.ancestor(ctx, justifiedRoot[:], jSlot)
if newJustifiedBlock.Slot() <= jSlot {
return false, nil
}
var justifiedBlockSigned interfaces.SignedBeaconBlock
cachedJustifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if s.hasInitSyncBlock(cachedJustifiedRoot) {
justifiedBlockSigned = s.getInitSyncBlock(cachedJustifiedRoot)
} else {
justifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, cachedJustifiedRoot)
if err != nil {
return false, err
}
}
if justifiedBlockSigned == nil || justifiedBlockSigned.IsNil() || justifiedBlockSigned.Block().IsNil() {
return false, errors.New("nil justified block")
}
justifiedBlock := justifiedBlockSigned.Block()
b, err := s.ancestor(ctx, justifiedRoot[:], justifiedBlock.Slot())
if err != nil {
return false, err
}
if !bytes.Equal(b, s.justifiedCheckpt.Root) {
return false, nil
}
return true, nil
}
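// A minimal sketch of the first gate in should_update_justified_checkpoint
// quoted in the comment above: within the first few slots of an epoch a
// conflicting justified checkpoint may be adopted immediately, otherwise the
// update is deferred. The constants are the mainnet defaults and the function
// name is an illustrative assumption.
package main

import "fmt"

const (
	slotsPerEpoch              = 32
	safeSlotsToUpdateJustified = 8
)

func withinSafeSlots(currentSlot uint64) bool {
	return currentSlot%slotsPerEpoch < safeSlotsToUpdateJustified
}

func main() {
	fmt.Println(withinSafeSlots(64 + 3))  // early in the epoch: true
	fmt.Println(withinSafeSlots(64 + 20)) // past the safe window: false
}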
func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateJustified")
defer span.End()
func (s *Service) updateJustified(ctx context.Context, state iface.ReadOnlyBeaconState) error {
cpt := state.CurrentJustifiedCheckpoint()
if cpt.Epoch > s.bestJustifiedCheckpt.Epoch {
s.bestJustifiedCheckpt = cpt
@@ -198,7 +206,7 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
}
}
return nil
return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
// This caches the input checkpoint as justified for the service struct. It rotates the current justified to the previous justified,
@@ -215,9 +223,6 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
}
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateFinalized")
defer span.End()
// Blocks need to be saved so that we can retrieve finalized block from
// DB when migrating states.
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
@@ -228,6 +233,10 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
if err := s.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, cp); err != nil {
return err
}
if !featureconfig.Get().UpdateHeadTimely {
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = cp
}
fRoot := bytesutil.ToBytes32(cp.Root)
if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
@@ -325,16 +334,16 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.BeaconState) error {
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state iface.BeaconState) error {
// Update justified if it's different than the one cached in the store.
if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
if !attestationutil.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
}
// Update justified if store justified is not in chain with finalized check point.
finalizedSlot, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
finalizedSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err != nil {
return err
}
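// A hedged sketch of the spec rule implemented by finalizedImpliesNewJustified:
// if the ancestor of the store's justified root at the finalized slot is not
// the finalized root, the store falls back to the state's current justified
// checkpoint. ancestorAt and the checkpoint struct are simplified stand-ins,
// not the service's real types or lookups.
package main

import (
	"bytes"
	"fmt"
)

type checkpoint struct {
	Epoch uint64
	Root  []byte
}

// ancestorAt stands in for the fork-choice/DB ancestor lookup.
func ancestorAt(root []byte, slot uint64, parents map[string][]byte) []byte {
	_ = slot // a real lookup would walk back to this slot
	return parents[string(root)]
}

func reconcile(justified, finalized, stateJustified checkpoint, parents map[string][]byte) checkpoint {
	finalizedSlot := finalized.Epoch * 32 // start slot of the finalized epoch (mainnet)
	if !bytes.Equal(ancestorAt(justified.Root, finalizedSlot, parents), finalized.Root) {
		return stateJustified // justified root is off the finalized chain; replace it
	}
	return justified
}

func main() {
	parents := map[string][]byte{"J": []byte("X")} // J descends from X, not from the finalized root F
	out := reconcile(checkpoint{Epoch: 2, Root: []byte("J")}, checkpoint{Epoch: 1, Root: []byte("F")}, checkpoint{Epoch: 2, Root: []byte("S")}, parents)
	fmt.Printf("%s\n", out.Root) // S
}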
@@ -355,15 +364,15 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
// This retrieves missing blocks from the DB (i.e. the blocks that couldn't be received over sync) and inserts them into the fork choice store.
// This is useful for the block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.BeaconBlock,
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.BeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]block.BeaconBlock, 0)
pendingNodes := make([]interfaces.BeaconBlock, 0)
pendingRoots := make([][32]byte, 0)
parentRoot := bytesutil.ToBytes32(blk.ParentRoot())
slot := blk.Slot()
// Fork choice only matters from the last finalized slot onward.
fSlot, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
fSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err != nil {
return err
}
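// A sketch, under stated assumptions, of the walk fillInForkChoiceMissingBlocks
// performs: starting from the new block's parent, follow parent roots until a
// block already known to fork choice or the finalized slot is reached, and
// collect the chain so it can be handed to fork choice. The blk type and maps
// below are simplified stand-ins for the service's DB and protoarray store.
package main

import "fmt"

type blk struct {
	slot   uint64
	root   string
	parent string
}

func collectMissing(start blk, byRoot map[string]blk, inForkChoice map[string]bool, finalizedSlot uint64) []blk {
	var pending []blk
	cur, ok := byRoot[start.parent]
	for ok && cur.slot > finalizedSlot && !inForkChoice[cur.root] {
		pending = append(pending, cur)
		cur, ok = byRoot[cur.parent]
	}
	return pending
}

func main() {
	byRoot := map[string]blk{
		"a": {slot: 64, root: "a", parent: "genesis"},
		"b": {slot: 65, root: "b", parent: "a"},
	}
	missing := collectMissing(blk{slot: 66, root: "c", parent: "b"}, byRoot, map[string]bool{}, 63)
	fmt.Println(len(missing)) // 2: blocks at slots 65 and 64 would be back-filled
}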


@@ -13,77 +13,75 @@ import (
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateV0"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/time"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/prysmaticlabs/prysm/shared/timeutils"
)
func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
assert.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := util.NewBeaconBlock()
random := testutil.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(random)))
assert.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(random)))
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
randomParentRoot2 := roots[1]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
tests := []struct {
name string
blk *ethpb.SignedBeaconBlock
s state.BeaconState
s iface.BeaconState
time uint64
wantErrString string
}{
{
name: "parent block root does not have a state",
blk: util.NewBeaconBlock(),
blk: testutil.NewBeaconBlock(),
s: st.Copy(),
wantErrString: "could not reconstruct parent state",
},
{
name: "block is from the future",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot2
b.Block.Slot = params.BeaconConfig().FarFutureSlot
return b
@@ -94,7 +92,7 @@ func TestStore_OnBlock(t *testing.T) {
{
name: "could not get finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot[:]
return b
}(),
@@ -104,7 +102,7 @@ func TestStore_OnBlock(t *testing.T) {
{
name: "same slot as finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.Slot = 0
b.Block.ParentRoot = randomParentRoot2
return b
@@ -124,7 +122,7 @@ func TestStore_OnBlock(t *testing.T) {
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
err = service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.blk), root)
err = service.onBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(tt.blk), root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
@@ -134,44 +132,43 @@ func TestStore_OnBlockBatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
assert.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
service.saveInitSyncBlock(gRoot, interfaces.WrappedPhase0SignedBeaconBlock(genesis))
st, keys := util.DeterministicGenesisState(t, 64)
st, keys := testutil.DeterministicGenesisState(t, 64)
bState := st.Copy()
var blks []block.SignedBeaconBlock
var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
var firstState iface.BeaconState
for i := 1; i < 10; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
bState, err = state.ExecuteStateTransition(ctx, bState, interfaces.WrappedPhase0SignedBeaconBlock(b))
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
service.saveInitSyncBlock(root, interfaces.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, interfaces.WrappedPhase0SignedBeaconBlock(b))
blkRoots = append(blkRoots, root)
}
@@ -190,26 +187,25 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
params.UseMinimalConfig()
defer params.UseMainnetConfig()
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
service.genesisTime = time.Now()
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: make([]byte, 32)})
require.NoError(t, err)
assert.Equal(t, true, update, "Should be able to update justified")
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk := testutil.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
newJustifiedBlk := util.NewBeaconBlock()
newJustifiedBlk := testutil.NewBeaconBlock()
newJustifiedBlk.Block.Slot = 1
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -225,22 +221,19 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
params.UseMinimalConfig()
defer params.UseMainnetConfig()
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk := testutil.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
newJustifiedBlk := util.NewBeaconBlock()
newJustifiedBlk := testutil.NewBeaconBlock()
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -255,95 +248,92 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
s, err := stateV0.InitializeFromProto(&pb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
assert.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
service.saveInitSyncBlock(gRoot, interfaces.WrappedPhase0SignedBeaconBlock(genesis))
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.Slot = 1
b.Block.ParentRoot = gRoot[:]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, wrapper.WrappedPhase0BeaconBlock(b.Block)))
require.NoError(t, service.verifyBlkPreState(ctx, interfaces.WrappedPhase0BeaconBlock(b.Block)))
}
func TestCachedPreState_CanGetFromDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
assert.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
service.saveInitSyncBlock(gRoot, interfaces.WrappedPhase0SignedBeaconBlock(genesis))
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.Slot = 1
service.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
err = service.verifyBlkPreState(ctx, wrapper.WrappedPhase0BeaconBlock(b.Block))
err = service.verifyBlkPreState(ctx, interfaces.WrappedPhase0BeaconBlock(b.Block))
wanted := "could not reconstruct parent state"
assert.ErrorContains(t, wanted, err)
b.Block.ParentRoot = gRoot[:]
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1})
s, err := stateV0.InitializeFromProto(&pb.BeaconState{Slot: 1})
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b).Block()))
require.NoError(t, service.verifyBlkPreState(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b).Block()))
}
func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
signedBlock := util.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
signedBlock := testutil.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(signedBlock)))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
st, err := util.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, st.Copy(), r))
// Could update
s, err := util.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: r[:]}))
require.NoError(t, service.updateJustified(context.Background(), s))
@@ -361,32 +351,31 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
block := testutil.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -400,32 +389,31 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
block := testutil.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -442,44 +430,43 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
assert.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st, err := util.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
// Define a tree branch, slot 63 <- 64 <- 65
b63 := util.NewBeaconBlock()
b63 := testutil.NewBeaconBlock()
b63.Block.Slot = 63
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b63)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b63)))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := util.NewBeaconBlock()
b64 := testutil.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b64)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b64)))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := util.NewBeaconBlock()
b65 := testutil.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b65)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b65)))
beaconState, _ := util.DeterministicGenesisState(t, 32)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b65).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(b65).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
@@ -496,70 +483,70 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
// (B1, and B3 are all from the same slots)
func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
genesisRoot = bytesutil.PadTo(genesisRoot, 32)
b0 := util.NewBeaconBlock()
b0 := testutil.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, err
}
b1 := util.NewBeaconBlock()
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, err
}
b3 := util.NewBeaconBlock()
b3 := testutil.NewBeaconBlock()
b3.Block.Slot = 3
b3.Block.ParentRoot = r0[:]
r3, err := b3.Block.HashTreeRoot()
if err != nil {
return nil, err
}
b4 := util.NewBeaconBlock()
b4 := testutil.NewBeaconBlock()
b4.Block.Slot = 4
b4.Block.ParentRoot = r3[:]
r4, err := b4.Block.HashTreeRoot()
if err != nil {
return nil, err
}
b5 := util.NewBeaconBlock()
b5 := testutil.NewBeaconBlock()
b5.Block.Slot = 5
b5.Block.ParentRoot = r4[:]
r5, err := b5.Block.HashTreeRoot()
if err != nil {
return nil, err
}
b6 := util.NewBeaconBlock()
b6 := testutil.NewBeaconBlock()
b6.Block.Slot = 6
b6.Block.ParentRoot = r4[:]
r6, err := b6.Block.HashTreeRoot()
if err != nil {
return nil, err
}
b7 := util.NewBeaconBlock()
b7 := testutil.NewBeaconBlock()
b7.Block.Slot = 7
b7.Block.ParentRoot = r5[:]
r7, err := b7.Block.HashTreeRoot()
if err != nil {
return nil, err
}
b8 := util.NewBeaconBlock()
b8 := testutil.NewBeaconBlock()
b8.Block.Slot = 8
b8.Block.ParentRoot = r6[:]
r8, err := b8.Block.HashTreeRoot()
if err != nil {
return nil, err
}
st, err := util.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
for _, b := range []*ethpb.SignedBeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
beaconBlock := util.NewBeaconBlock()
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
if err := beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)); err != nil {
if err := beaconDB.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(beaconBlock)); err != nil {
return nil, err
}
if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
@@ -579,14 +566,14 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
}
func TestCurrentSlot_HandlesOverflow(t *testing.T) {
svc := Service{genesisTime: prysmTime.Now().Add(1 * time.Hour)}
svc := Service{genesisTime: timeutils.Now().Add(1 * time.Hour)}
slot := svc.CurrentSlot()
require.Equal(t, types.Slot(0), slot, "Unexpected slot")
}
func TestAncestorByDB_CtxErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
service, err := NewService(ctx)
service, err := NewService(ctx, &Config{})
require.NoError(t, err)
cancel()
@@ -598,31 +585,30 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b1 := util.NewBeaconBlock()
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b100 := util.NewBeaconBlock()
b100 := testutil.NewBeaconBlock()
b100.Block.Slot = 100
b100.Block.ParentRoot = r1[:]
r100, err := b100.Block.HashTreeRoot()
require.NoError(t, err)
b200 := util.NewBeaconBlock()
b200 := testutil.NewBeaconBlock()
b200.Block.Slot = 200
b200.Block.ParentRoot = r100[:]
r200, err := b200.Block.HashTreeRoot()
require.NoError(t, err)
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
beaconBlock := util.NewBeaconBlock()
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)))
require.NoError(t, beaconDB.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(beaconBlock)))
}
// Slots 100 to 200 are skip slots. Requesting root at 150 will yield root at 100. The last physical block.
@@ -642,28 +628,27 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
func TestAncestor_CanUseForkchoice(t *testing.T) {
ctx := context.Background()
cfg := &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b1 := util.NewBeaconBlock()
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b100 := util.NewBeaconBlock()
b100 := testutil.NewBeaconBlock()
b100.Block.Slot = 100
b100.Block.ParentRoot = r1[:]
r100, err := b100.Block.HashTreeRoot()
require.NoError(t, err)
b200 := util.NewBeaconBlock()
b200 := testutil.NewBeaconBlock()
b200.Block.Slot = 200
b200.Block.ParentRoot = r100[:]
r200, err := b200.Block.HashTreeRoot()
require.NoError(t, err)
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
beaconBlock := util.NewBeaconBlock()
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
@@ -682,31 +667,30 @@ func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b1 := util.NewBeaconBlock()
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b100 := util.NewBeaconBlock()
b100 := testutil.NewBeaconBlock()
b100.Block.Slot = 100
b100.Block.ParentRoot = r1[:]
r100, err := b100.Block.HashTreeRoot()
require.NoError(t, err)
b200 := util.NewBeaconBlock()
b200 := testutil.NewBeaconBlock()
b200.Block.Slot = 200
b200.Block.ParentRoot = r100[:]
r200, err := b200.Block.HashTreeRoot()
require.NoError(t, err)
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
beaconBlock := util.NewBeaconBlock()
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
require.NoError(t, beaconDB.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
}
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
@@ -720,10 +704,9 @@ func TestAncestor_CanUseDB(t *testing.T) {
func TestEnsureRootNotZeroHashes(t *testing.T) {
ctx := context.Background()
cfg := &config{}
service, err := NewService(ctx)
cfg := &Config{}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
service.genesisRoot = [32]byte{'a'}
r := service.ensureRootNotZeros(params.BeaconConfig().ZeroHash)
@@ -773,41 +756,40 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
},
}
for _, test := range tests {
beaconState, err := util.NewBeaconState()
beaconState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
service, err := NewService(ctx)
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
require.NoError(t, err)
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service.justifiedCheckpt = test.args.cachedCheckPoint
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState, err := util.NewBeaconState()
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, bytesutil.ToBytes32(test.want.Root)))
if test.args.diffFinalizedCheckPoint {
b1 := util.NewBeaconBlock()
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b100 := util.NewBeaconBlock()
b100 := testutil.NewBeaconBlock()
b100.Block.Slot = 100
b100.Block.ParentRoot = r1[:]
r100, err := b100.Block.HashTreeRoot()
require.NoError(t, err)
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100} {
beaconBlock := util.NewBeaconBlock()
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(beaconBlock)))
}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: []byte{'c'}, Epoch: 1}
service.justifiedCheckpt.Root = r100[:]
}
require.NoError(t, service.finalizedImpliesNewJustified(ctx, beaconState))
assert.Equal(t, true, attestation.CheckPointIsEqual(test.want, service.justifiedCheckpt), "Did not get wanted check point")
assert.Equal(t, true, attestationutil.CheckPointIsEqual(test.want, service.justifiedCheckpt), "Did not get wanted check point")
}
}
@@ -815,18 +797,18 @@ func TestVerifyBlkDescendant(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b)))
b1 := util.NewBeaconBlock()
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b1)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b1)))
type args struct {
parentRoot [32]byte
@@ -869,9 +851,8 @@ func TestVerifyBlkDescendant(t *testing.T) {
},
}
for _, tt := range tests {
service, err := NewService(ctx)
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
require.NoError(t, err)
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: tt.args.finalizedRoot[:],
}
@@ -887,18 +868,17 @@ func TestVerifyBlkDescendant(t *testing.T) {
func TestUpdateJustifiedInitSync(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
gBlk := util.NewBeaconBlock()
gBlk := testutil.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(gBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(gBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: gRoot[:]}))
beaconState, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: gRoot[:]}))
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.genesisRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
@@ -916,27 +896,25 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
ctx := context.Background()
cfg := &config{}
service, err := NewService(ctx)
cfg := &Config{}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
s, err := util.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(1))
service.head = &head{state: (*v1.BeaconState)(nil)}
service.head = &head{state: (*stateV0.BeaconState)(nil)}
require.ErrorContains(t, "failed to initialize precompute: nil inner state", service.handleEpochBoundary(ctx, s))
}
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
ctx := context.Background()
cfg := &config{}
service, err := NewService(ctx)
cfg := &Config{}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
s, _ := util.DeterministicGenesisState(t, 1024)
s, _ := testutil.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.handleEpochBoundary(ctx, s))
@@ -948,18 +926,17 @@ func TestOnBlock_CanFinalize(t *testing.T) {
beaconDB := testDB.SetupDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
StateNotifier: &mock.MockStateNotifier{},
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
gs, keys := util.DeterministicGenesisState(t, 32)
gs, keys := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -969,24 +946,16 @@ func TestOnBlock_CanFinalize(t *testing.T) {
testState := gs.Copy()
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
blk, err := testutil.GenerateFullBlock(testState, keys, testutil.DefaultBlockGenConfig(), i)
require.NoError(t, err)
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r))
require.NoError(t, service.onBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(blk), r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
require.Equal(t, types.Epoch(3), service.CurrentJustifiedCheckpt().Epoch)
require.Equal(t, types.Epoch(2), service.FinalizedCheckpt().Epoch)
// The update should persist in DB.
j, err := service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
require.NoError(t, err)
require.Equal(t, j.Epoch, service.CurrentJustifiedCheckpt().Epoch)
f, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
require.NoError(t, err)
require.Equal(t, f.Epoch, service.FinalizedCheckpt().Epoch)
}
func TestInsertFinalizedDeposits(t *testing.T) {
@@ -994,17 +963,16 @@ func TestInsertFinalizedDeposits(t *testing.T) {
beaconDB := testDB.SetupDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
gs, _ := util.DeterministicGenesisState(t, 32)
gs, _ := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -1032,49 +1000,3 @@ func TestInsertFinalizedDeposits(t *testing.T) {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
}
func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyPruneCanonicalAtts: true,
})
defer resetCfg()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, r))
atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyPruneCanonicalAtts: true,
})
defer resetCfg()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
}

View File

@@ -9,25 +9,19 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error)
}
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
type AttestationReceiver interface {
AttestationStateFetcher
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
AttestationPreState(ctx context.Context, att *ethpb.Attestation) (iface.BeaconState, error)
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
}
@@ -48,21 +42,21 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
return nil
}
// AttestationTargetState returns the pre state of attestation.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
ss, err := slots.EpochStart(target.Epoch)
// AttestationPreState returns the pre state of attestation.
func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (iface.BeaconState, error) {
ss, err := helpers.StartSlot(att.Data.Target.Epoch)
if err != nil {
return nil, err
}
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
if err := helpers.ValidateSlotClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
return s.getAttPreState(ctx, target)
return s.getAttPreState(ctx, att.Data.Target)
}
// VerifyLmdFfgConsistency verifies that attestation's LMD and FFG votes are consistency to each other.
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
targetSlot, err := slots.EpochStart(a.Data.Target.Epoch)
targetSlot, err := helpers.StartSlot(a.Data.Target.Epoch)
if err != nil {
return err
}
@@ -87,7 +81,7 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
}
f := s.FinalizedCheckpt()
ss, err := slots.EpochStart(f.Epoch)
ss, err := helpers.StartSlot(f.Epoch)
if err != nil {
return err
}
@@ -119,7 +113,7 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- stru
log.Warn("Genesis time received, now available to process attestations")
}
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
st := slotutil.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
@@ -144,9 +138,9 @@ func (s *Service) processAttestations(ctx context.Context) {
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
// https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.Data.Slot + 1
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := helpers.VerifySlotTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
continue
}

View File

@@ -7,27 +7,21 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/prysmaticlabs/prysm/shared/timeutils"
logTest "github.com/sirupsen/logrus/hooks/test"
)
var (
_ = AttestationReceiver(&Service{})
_ = AttestationStateFetcher(&Service{})
)
func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
helpers.ClearCache()
beaconDB := testDB.SetupDB(t)
@@ -35,8 +29,8 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()
e := types.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := chainService.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
e := types.Epoch(helpers.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := chainService.AttestationPreState(context.Background(), &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Epoch: e}}})
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}
@@ -44,25 +38,24 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b32)))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := util.NewBeaconBlock()
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b33)))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
wanted := "FFG and LMD votes are not consistent"
a := util.NewAttestation()
a := testutil.NewAttestation()
a.Data.Target.Epoch = 1
a.Data.Target.Root = []byte{'a'}
a.Data.BeaconBlockRoot = r33[:]
@@ -73,24 +66,23 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b32)))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := util.NewBeaconBlock()
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b33)))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
a := util.NewAttestation()
a := testutil.NewAttestation()
a.Data.Target.Epoch = 1
a.Data.Target.Root = r32[:]
a.Data.BeaconBlockRoot = r33[:]
@@ -103,24 +95,23 @@ func TestProcessAttestations_Ok(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
AttPool: attestations.NewPool(),
}
service, err := NewService(ctx)
service, err := NewService(ctx, cfg)
service.genesisTime = timeutils.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
service.cfg = cfg
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(timeutils.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
atts, err := testutil.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
copied, err = state.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
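TestAttestationCheckPtState_FarFutureSlot above expects the error substring "exceeds max allowed value relative to the local clock" when the attestation's target epoch maps to a slot far beyond what the wall clock allows. The sketch below shows the shape of that guard, assuming a 12-second slot and an illustrative buffer; `validateSlotClock` and `maxSlotBuffer` are placeholders, not the real slots.ValidateClock helper or MaxSlotBuffer constant.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

const (
	secondsPerSlot = 12
	// maxSlotBuffer stands in for the real MaxSlotBuffer constant: how far
	// past the current wall-clock slot a requested slot may be before it is
	// rejected. The value here is illustrative.
	maxSlotBuffer = 4 * 32
)

// validateSlotClock rejects slots that lie too far beyond the slot implied by
// the local clock, mirroring the failure the far-future test asserts.
func validateSlotClock(slot uint64, genesis, now time.Time) error {
	currentSlot := uint64(now.Sub(genesis).Seconds()) / secondsPerSlot
	if slot > currentSlot+maxSlotBuffer {
		return errors.New("exceeds max allowed value relative to the local clock")
	}
	return nil
}

func main() {
	genesis := time.Now().Add(-10 * secondsPerSlot * time.Second)
	fmt.Println(validateSlotClock(5, genesis, time.Now()))         // <nil>: within reach of the clock
	fmt.Println(validateSlotClock(1_000_000, genesis, time.Now())) // rejected as far-future
}
```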

View File

@@ -7,10 +7,11 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/timeutils"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"go.opencensus.io/trace"
)
@@ -19,8 +20,8 @@ var epochsSinceFinalitySaveHotStateDB = types.Epoch(100)
// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBeaconBlock, blkRoots [][32]byte) error
ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.SignedBeaconBlock, blkRoots [][32]byte) error
HasInitSyncBlock(root [32]byte) bool
}
@@ -29,19 +30,36 @@ type BlockReceiver interface {
// 1. Validate block, apply state transition and update check points
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, blockRoot [32]byte) error {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
receivedTime := time.Now()
receivedTime := timeutils.Now()
blockCopy := block.Copy()
// Apply state transition on the new block.
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
traceutil.AnnotateError(span, err)
return err
}
// Update and save head block after fork choice.
if !featureconfig.Get().UpdateHeadTimely {
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.WithError(err).Warn("Could not update head")
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block().Slot(),
BlockRoot: blockRoot,
SignedBlock: blockCopy,
Verified: true,
},
})
}
// Handle post block operations such as attestations and exits.
if err := s.handlePostBlockOperations(blockCopy.Block()); err != nil {
return err
@@ -68,7 +86,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block block.SignedBeaconBloc
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBeaconBlock, blkRoots [][32]byte) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.SignedBeaconBlock, blkRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
@@ -76,14 +94,14 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
traceutil.AnnotateError(span, err)
return err
}
for i, b := range blocks {
blockCopy := b.Copy()
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
traceutil.AnnotateError(span, err)
return err
}
// Send notification of the processed block to the state feed.
@@ -101,10 +119,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
}
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, s.finalizedCheckpt.Epoch); err != nil {
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
// log.Fatalf will prevent defer from being called
span.End()
// Exit run time if the node failed to verify weak subjectivity checkpoint.
@@ -119,7 +134,7 @@ func (s *Service) HasInitSyncBlock(root [32]byte) bool {
return s.hasInitSyncBlock(root)
}
func (s *Service) handlePostBlockOperations(b block.BeaconBlock) error {
func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
// Delete the processed block attestations from attestation pool.
if err := s.deletePoolAtts(b.Body().Attestations()); err != nil {
return err
@@ -145,7 +160,7 @@ func (s *Service) handlePostBlockOperations(b block.BeaconBlock) error {
// This checks whether it's time to start saving hot state to DB.
// It's time when there have been `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
currentEpoch := helpers.SlotToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` from underflowing.
var sinceFinality types.Epoch
if currentEpoch > s.finalizedCheckpt.Epoch {
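The checkSaveHotStateDB hunk above explains when the node starts persisting hot states to the database: once `epochsSinceFinalitySaveHotStateDB` epochs (declared as 100 earlier in this file's diff) have passed without finality, computed with a guard so the subtraction cannot underflow; TestCheckSaveHotStateDB_Overflow in the next file exercises exactly that guard with a huge finalized epoch. A minimal sketch of the decision, with the threshold comparison assumed since the hunk is cut off here:

```go
package main

import "fmt"

// epochsSinceFinalitySaveHotStateDB mirrors the threshold named in the diff:
// after this many epochs without finality, hot states are saved to the DB
// instead of being kept only in memory.
const epochsSinceFinalitySaveHotStateDB = 100

// shouldSaveHotStateToDB applies the same underflow guard as the diff: epochs
// since finality is only computed when the current epoch is ahead of the
// finalized epoch, otherwise it stays zero.
func shouldSaveHotStateToDB(currentEpoch, finalizedEpoch uint64) bool {
	var sinceFinality uint64
	if currentEpoch > finalizedEpoch {
		sinceFinality = currentEpoch - finalizedEpoch
	}
	return sinceFinality >= epochsSinceFinalitySaveHotStateDB
}

func main() {
	fmt.Println(shouldSaveHotStateToDB(50, 48))   // false: chain is finalizing normally
	fmt.Println(shouldSaveHotStateToDB(250, 100)) // true: 150 epochs of non-finality
}
```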

View File

@@ -13,23 +13,22 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestService_ReceiveBlock(t *testing.T) {
ctx := context.Background()
genesis, keys := util.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
blk, err := util.GenerateFullBlock(genesis, keys, conf, slot)
genesis, keys := testutil.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
assert.NoError(t, err)
return blk
}
@@ -49,7 +48,7 @@ func TestService_ReceiveBlock(t *testing.T) {
{
name: "applies block with state transition",
args: args{
block: genFullBlock(t, util.DefaultBlockGenConfig(), 2 /*slot*/),
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if hs := s.head.state.Slot(); hs != 2 {
@@ -64,7 +63,7 @@ func TestService_ReceiveBlock(t *testing.T) {
name: "saves attestations to pool",
args: args{
block: genFullBlock(t,
&util.BlockGenConfig{
&testutil.BlockGenConfig{
NumProposerSlashings: 0,
NumAttesterSlashings: 0,
NumAttestations: 2,
@@ -84,7 +83,7 @@ func TestService_ReceiveBlock(t *testing.T) {
{
name: "updates exit pool",
args: args{
block: genFullBlock(t, &util.BlockGenConfig{
block: genFullBlock(t, &testutil.BlockGenConfig{
NumProposerSlashings: 0,
NumAttesterSlashings: 0,
NumAttestations: 0,
@@ -108,7 +107,7 @@ func TestService_ReceiveBlock(t *testing.T) {
{
name: "notifies block processed on state feed",
args: args{
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
@@ -124,7 +123,7 @@ func TestService_ReceiveBlock(t *testing.T) {
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
@@ -136,9 +135,8 @@ func TestService_ReceiveBlock(t *testing.T) {
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx)
s, err := NewService(ctx, cfg)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -147,7 +145,7 @@ func TestService_ReceiveBlock(t *testing.T) {
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
err = s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block), root)
err = s.ReceiveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(tt.args.block), root)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
@@ -160,13 +158,13 @@ func TestService_ReceiveBlock(t *testing.T) {
func TestService_ReceiveBlockUpdateHead(t *testing.T) {
ctx := context.Background()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
genesis, keys := testutil.DeterministicGenesisState(t, 64)
b, err := testutil.GenerateFullBlock(genesis, keys, testutil.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
@@ -178,9 +176,8 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx)
s, err := NewService(ctx, cfg)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -192,7 +189,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
require.NoError(t, s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b), root))
require.NoError(t, s.ReceiveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(b), root))
wg.Done()
}()
wg.Wait()
@@ -206,9 +203,9 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
func TestService_ReceiveBlockBatch(t *testing.T) {
ctx := context.Background()
genesis, keys := util.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
blk, err := util.GenerateFullBlock(genesis, keys, conf, slot)
genesis, keys := testutil.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
assert.NoError(t, err)
return blk
}
@@ -225,7 +222,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
{
name: "applies block with state transition",
args: args{
block: genFullBlock(t, util.DefaultBlockGenConfig(), 2 /*slot*/),
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
},
check: func(t *testing.T, s *Service) {
assert.Equal(t, types.Slot(2), s.head.state.Slot(), "Incorrect head state slot")
@@ -235,7 +232,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
{
name: "notifies block processed on state feed",
args: args{
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
@@ -250,7 +247,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
require.NoError(t, err)
cfg := &config{
cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
@@ -260,9 +257,8 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx)
s, err := NewService(ctx, cfg)
require.NoError(t, err)
s.cfg = cfg
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
@@ -273,7 +269,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
blks := []block.SignedBeaconBlock{wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block)}
blks := []interfaces.SignedBeaconBlock{interfaces.WrappedPhase0SignedBeaconBlock(tt.args.block)}
roots := [][32]byte{root}
err = s.ReceiveBlockBatch(ctx, blks, roots)
if tt.wantedErr != "" {
@@ -287,14 +283,13 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}
func TestService_HasInitSyncBlock(t *testing.T) {
s, err := NewService(context.Background())
s, err := NewService(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
require.NoError(t, err)
s.cfg = &config{StateNotifier: &blockchainTesting.MockStateNotifier{}}
r := [32]byte{'a'}
if s.HasInitSyncBlock(r) {
t.Error("Should not have block")
}
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()))
s.saveInitSyncBlock(r, interfaces.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()))
if !s.HasInitSyncBlock(r) {
t.Error("Should have block")
}
@@ -303,9 +298,8 @@ func TestService_HasInitSyncBlock(t *testing.T) {
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background())
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.finalizedCheckpt = &ethpb.Checkpoint{}
@@ -317,9 +311,8 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background())
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
s.finalizedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
@@ -331,9 +324,8 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background())
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
s.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 10000000}
s.genesisTime = time.Now()
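Several cases above ("notifies block processed on state feed") assert only that the MockStateNotifier recorded at least one event after ReceiveBlock or ReceiveBlockBatch ran. The sketch below models that record-and-assert pattern with a plain mutex-guarded slice; `blockProcessedEvent` and `recordingNotifier` are illustrative types, not the real statefeed types or the MockStateNotifier implementation.

```go
package main

import (
	"fmt"
	"sync"
)

// blockProcessedEvent is a simplified stand-in for the BlockProcessed event
// that ReceiveBlock sends on the state feed: the slot and root of the block.
type blockProcessedEvent struct {
	slot uint64
	root [32]byte
}

// recordingNotifier models what MockStateNotifier{RecordEvents: true} gives
// the tests: every event sent through it is kept so assertions can count them.
type recordingNotifier struct {
	mu     sync.Mutex
	events []blockProcessedEvent
}

func (n *recordingNotifier) send(e blockProcessedEvent) {
	n.mu.Lock()
	defer n.mu.Unlock()
	n.events = append(n.events, e)
}

func (n *recordingNotifier) receivedEvents() []blockProcessedEvent {
	n.mu.Lock()
	defer n.mu.Unlock()
	return append([]blockProcessedEvent(nil), n.events...)
}

func main() {
	n := &recordingNotifier{}
	// After importing a block, the chain service notifies subscribers.
	n.send(blockProcessedEvent{slot: 1, root: [32]byte{'a'}})

	// The tests assert that at least one such event was recorded.
	if recvd := len(n.receivedEvents()); recvd < 1 {
		fmt.Printf("received %d state notifications, expected at least 1\n", recvd)
	} else {
		fmt.Println("block processed notifications observed:", recvd)
	}
}
```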

View File

@@ -1,5 +1,6 @@
// Package blockchain defines the life-cycle of the blockchain at the core of
// Ethereum, including processing of new blocks and attestations using proof of stake.
// eth2, including processing of new blocks and attestations using casper
// proof of stake.
package blockchain
import (
@@ -11,13 +12,12 @@ import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
@@ -26,14 +26,15 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/copyutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -45,7 +46,7 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
cfg *Config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
@@ -60,15 +61,15 @@ type Service struct {
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]block.SignedBeaconBlock
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
wsVerified bool
}
// config options for the service.
type config struct {
// Config options for the service.
type Config struct {
BeaconBlockBuf int
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
@@ -80,42 +81,56 @@ type config struct {
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
OpsService *attestations.Service
StateGen *stategen.State
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
FinalizedStateAtStartUp state.BeaconState
}
// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
srv := &Service{
return &Service{
cfg: cfg,
ctx: ctx,
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
justifiedBalances: make([]uint64, 0),
cfg: &config{},
}
for _, opt := range opts {
if err := opt(srv); err != nil {
return nil, err
}
}
var err error
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
return nil, err
}
return srv, nil
}, nil
}
// Start a blockchain service's main event loop.
func (s *Service) Start() {
beaconState := s.cfg.FinalizedStateAtStartUp
// For running initial sync with state cache, in an event of restart, we use
// last finalized check point as start point to sync instead of head
// state. This is because we no longer save state every slot during sync.
cp, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
r := bytesutil.ToBytes32(cp.Root)
// Before the first finalized epoch, in the current epoch,
// the finalized root is defined as zero hashes instead of genesis root hash.
// We want to use genesis root to retrieve for state.
if r == params.BeaconConfig().ZeroHash {
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(s.ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if genesisBlock != nil && !genesisBlock.IsNil() {
r, err = genesisBlock.Block().HashTreeRoot()
if err != nil {
log.Fatalf("Could not tree hash genesis block: %v", err)
}
}
}
beaconState, err := s.cfg.StateGen.StateByRoot(s.ctx, r)
if err != nil {
log.Fatalf("Could not fetch beacon state by root: %v", err)
}
// Make sure that attestation processor is subscribed and ready for state initializing event.
attestationProcessorSubscribed := make(chan struct{}, 1)
@@ -124,7 +139,7 @@ func (s *Service) Start() {
if beaconState != nil && !beaconState.IsNil() {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
s.cfg.AttService.SetGenesisTime(beaconState.GenesisTime())
s.cfg.OpsService.SetGenesisTime(beaconState.GenesisTime())
if err := s.initializeChainInfo(s.ctx); err != nil {
log.Fatalf("Could not set up chain info: %v", err)
}
@@ -138,7 +153,7 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slots.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)
justifiedCheckpoint, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
@@ -150,17 +165,17 @@ func (s *Service) Start() {
}
// Resume fork choice.
s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.justifiedCheckpt = copyutil.CopyCheckpoint(justifiedCheckpoint)
if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
log.Fatalf("Could not cache justified state balances: %v", err)
}
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
s.prevFinalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
s.prevJustifiedCheckpt = copyutil.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = copyutil.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = copyutil.CopyCheckpoint(finalizedCheckpoint)
s.prevFinalizedCheckpt = copyutil.CopyCheckpoint(finalizedCheckpoint)
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)
ss, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
ss, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err != nil {
log.Fatalf("Could not get start slot of finalized epoch: %v", err)
}
@@ -175,11 +190,9 @@ func (s *Service) Start() {
}
}
// not attempting to save initial sync blocks here, because there shouldn't be any until
// after the statefeed.Initialized event is fired (below)
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, s.finalizedCheckpt.Epoch); err != nil {
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
// Exit run time if the node failed to verify weak subjectivity checkpoint.
log.Fatalf("could not verify initial checkpoint provided for chain sync, with err=: %v", err)
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
@@ -240,7 +253,7 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
go slotutil.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
// We send out a state initialized event to the rest of the services
// running in the beacon node.
@@ -259,14 +272,14 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
func (s *Service) initializeBeaconChain(
ctx context.Context,
genesisTime time.Time,
preGenesisState state.BeaconState,
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
preGenesisState iface.BeaconState,
eth1data *ethpb.Eth1Data) (iface.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
defer span.End()
s.genesisTime = genesisTime
unixTime := uint64(genesisTime.Unix())
genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
genesisState, err := state.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
if err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}
@@ -284,11 +297,11 @@ func (s *Service) initializeBeaconChain(
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
if err := helpers.UpdateProposerIndicesInCache(genesisState); err != nil {
return nil, err
}
s.cfg.AttService.SetGenesisTime(genesisState.GenesisTime())
s.cfg.OpsService.SetGenesisTime(genesisState.GenesisTime())
return genesisState, nil
}
@@ -320,7 +333,7 @@ func (s *Service) Status() error {
}
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
func (s *Service) saveGenesisData(ctx context.Context, genesisState state.BeaconState) error {
func (s *Service) saveGenesisData(ctx context.Context, genesisState iface.BeaconState) error {
if err := s.cfg.BeaconDB.SaveGenesisData(ctx, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis data")
}
@@ -339,14 +352,14 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
// Finalized checkpoint at genesis is a zero hash.
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.justifiedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)
if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
return err
}
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.prevFinalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.prevJustifiedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)
s.prevFinalizedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
genesisBlk.Block().Slot(),
@@ -368,8 +381,8 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not get genesis block from db")
}
if err := helpers.BeaconBlockIsNil(genesisBlock); err != nil {
return err
if genesisBlock == nil || genesisBlock.IsNil() {
return errors.New("no genesis block in db")
}
genesisBlkRoot, err := genesisBlock.Block().HashTreeRoot()
if err != nil {
@@ -387,9 +400,9 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
return errors.New("no finalized epoch in the database")
}
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
var finalizedState state.BeaconState
var finalizedState iface.BeaconState
finalizedState, err = s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
finalizedState, err = s.cfg.StateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
@@ -399,7 +412,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not retrieve head block")
}
headEpoch := slots.ToEpoch(headBlock.Block().Slot())
headEpoch := helpers.SlotToEpoch(headBlock.Block().Slot())
var epochsSinceFinality types.Epoch
if headEpoch > finalized.Epoch {
epochsSinceFinality = headEpoch - finalized.Epoch
@@ -411,7 +424,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not hash head block")
}
finalizedState, err := s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
finalizedState, err := s.cfg.StateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
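One side of this service.go diff constructs the service with `NewService(ctx, opts ...Option)` and applies each option in a loop, while the other passes a single `*Config`; the same options style appears in the powchain setup further down (`powchain.WithDatabase`, `powchain.WithHttpEndpoints`). The sketch below shows the general functional-options shape under assumed names (`newService`, `withDatabase`, `withMaxRoutines`); it is not Prysm's actual option set.

```go
package main

import (
	"errors"
	"fmt"
)

// config and service are stripped-down stand-ins for the blockchain service
// and its unexported config struct from the diff above.
type config struct {
	maxRoutines int
	dbPath      string
}

type service struct {
	cfg *config
}

// Option mirrors the `opts ...Option` shape one side of this diff uses: a
// function that mutates the service being built and may fail.
type Option func(*service) error

func withMaxRoutines(n int) Option {
	return func(s *service) error {
		if n <= 0 {
			return errors.New("max routines must be positive")
		}
		s.cfg.maxRoutines = n
		return nil
	}
}

func withDatabase(path string) Option {
	return func(s *service) error {
		s.cfg.dbPath = path
		return nil
	}
}

// newService builds the service with defaults, then applies each option in
// order and returns the first error encountered, the same flow as the
// NewService(ctx, opts...) variant in the diff.
func newService(opts ...Option) (*service, error) {
	srv := &service{cfg: &config{maxRoutines: 10}}
	for _, opt := range opts {
		if err := opt(srv); err != nil {
			return nil, err
		}
	}
	return srv, nil
}

func main() {
	s, err := newService(withDatabase("/tmp/beacondb"), withMaxRoutines(20))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *s.cfg)
}
```

The trade-off visible in the test churn above is that options keep the constructor signature stable as fields are added, at the cost of tests assigning `service.cfg` directly when they need to reach internals.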

View File

@@ -6,7 +6,7 @@ import (
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/sirupsen/logrus"
)
@@ -18,7 +18,7 @@ func init() {
func TestChainService_SaveHead_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB},
cfg: &Config{BeaconDB: beaconDB},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))

View File

@@ -8,31 +8,31 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateV0"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)
@@ -63,11 +63,6 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
return nil
}
func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
@@ -75,40 +70,39 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
ctx := context.Background()
var web3Service *powchain.Service
var err error
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
bState, _ := testutil.DeterministicGenesisState(t, 10)
pbState, err := stateV0.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
err = beaconDB.SavePowchainData(ctx, &ethpb.ETH1ChainData{
err = beaconDB.SavePowchainData(ctx, &protodb.ETH1ChainData{
BeaconState: pbState,
Trie: &ethpb.SparseMerkleTrie{},
CurrentEth1Data: &ethpb.LatestETH1Data{
Trie: &protodb.SparseMerkleTrie{},
CurrentEth1Data: &protodb.LatestETH1Data{
BlockHash: make([]byte, 32),
},
ChainstartData: &ethpb.ChainStartData{
ChainstartData: &protodb.ChainStartData{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
},
DepositContainers: []*ethpb.DepositContainer{},
DepositContainers: []*protodb.DepositContainer{},
})
require.NoError(t, err)
web3Service, err = powchain.NewService(
ctx,
powchain.WithDatabase(beaconDB),
powchain.WithHttpEndpoints([]string{endpoint}),
powchain.WithDepositContractAddress(common.Address{}),
)
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
BeaconDB: beaconDB,
HttpEndpoints: []string{endpoint},
DepositContract: common.Address{},
})
require.NoError(t, err, "Unable to set up web3 service")
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
opsService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &config{
cfg := &Config{
BeaconBlockBuf: 0,
BeaconDB: beaconDB,
DepositCache: depositCache,
@@ -118,15 +112,14 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
AttPool: attestations.NewPool(),
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
AttService: attService,
OpsService: opsService,
}
// Save a state in stategen for the purposes of testing a service stop / shutdown.
require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
chainService, err := NewService(ctx)
chainService, err := NewService(ctx, cfg)
require.NoError(t, err, "Unable to setup chain service")
chainService.cfg = cfg
chainService.genesisTime = time.Unix(1, 0) // non-zero time
return chainService
@@ -139,11 +132,11 @@ func TestChainStartStop_Initialized(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
genesisBlk := util.NewBeaconBlock()
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := util.NewBeaconState()
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
@@ -151,7 +144,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
chainService.cfg.FinalizedStateAtStartUp = s
// Test the start function.
chainService.Start()
@@ -169,16 +162,16 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
genesisBlk := util.NewBeaconBlock()
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := util.NewBeaconState()
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
chainService.cfg.FinalizedStateAtStartUp = s
// Test the start function.
chainService.Start()
@@ -199,12 +192,12 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
// Set up 10 deposits pre chain start for validators to register
count := uint64(10)
deposits, _, err := util.DeterministicDepositsAndKeys(count)
deposits, _, err := testutil.DeterministicDepositsAndKeys(count)
require.NoError(t, err)
trie, _, err := util.DepositTrieFromDeposits(deposits)
trie, _, err := testutil.DepositTrieFromDeposits(deposits)
require.NoError(t, err)
hashTreeRoot := trie.HashTreeRoot()
genState, err := transition.EmptyGenesisState()
genState, err := state.EmptyGenesisState()
require.NoError(t, err)
err = genState.SetEth1Data(&ethpb.Eth1Data{
DepositRoot: hashTreeRoot[:],
@@ -238,18 +231,18 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
genesisBlk := util.NewBeaconBlock()
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := util.NewBeaconState()
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
chainService.cfg.FinalizedStateAtStartUp = s
// Test the start function.
chainService.Start()
@@ -264,17 +257,17 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesis := testutil.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesis)))
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
headBlock := testutil.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState, err := util.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
@@ -282,10 +275,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
c.cfg.FinalizedStateAtStartUp = headState
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(headBlock)))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: helpers.SlotToEpoch(finalizedSlot), Root: headRoot[:]}))
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
@@ -306,17 +298,17 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesis := testutil.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesis)))
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
headBlock := testutil.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState, err := util.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
@@ -324,8 +316,8 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(headBlock)))
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
require.NoError(t, err)
@@ -348,28 +340,28 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesisBlock := util.NewBeaconBlock()
genesisBlock := testutil.NewBeaconBlock()
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(genesisBlock)))
finalizedBlock := util.NewBeaconBlock()
finalizedBlock := testutil.NewBeaconBlock()
finalizedBlock.Block.Slot = finalizedSlot
finalizedBlock.Block.ParentRoot = genesisRoot[:]
finalizedRoot, err := finalizedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(finalizedBlock)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(finalizedBlock)))
// Set head slot close to the finalization point, no head sync is triggered.
headBlock := util.NewBeaconBlock()
headBlock := testutil.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot + params.BeaconConfig().SlotsPerEpoch*5
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(headBlock)))
headState, err := util.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
@@ -378,12 +370,12 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: slots.ToEpoch(finalizedBlock.Block.Slot),
Epoch: helpers.SlotToEpoch(finalizedBlock.Block.Slot),
Root: finalizedRoot[:],
}))
c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
c.cfg.FinalizedStateAtStartUp = headState
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
require.NoError(t, err)
@@ -395,12 +387,12 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
assert.LogsDoNotContain(t, hook, "Regenerating state from the last checkpoint at slot")
// Set head slot far beyond the finalization point, head sync should be triggered.
headBlock = util.NewBeaconBlock()
headBlock = testutil.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot + params.BeaconConfig().SlotsPerEpoch*headSyncMinEpochsAfterCheckpoint
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err = headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
require.NoError(t, beaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(headBlock)))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
@@ -420,16 +412,16 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
blk := util.NewBeaconBlock()
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 1
r, err := blk.HashTreeRoot()
require.NoError(t, err)
newState, err := util.NewBeaconState()
newState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, interfaces.WrappedPhase0SignedBeaconBlock(blk), r, newState))
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
require.NoError(t, err)
@@ -442,15 +434,15 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
}
block := util.NewBeaconBlock()
block := testutil.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
beaconState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, interfaces.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -460,15 +452,15 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
ctx: ctx,
cancel: cancel,
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
}
block := util.NewBeaconBlock()
block := testutil.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
require.NoError(t, err)
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(block))
s.saveInitSyncBlock(r, interfaces.WrappedPhase0SignedBeaconBlock(block))
require.NoError(t, s.Stop())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}
@@ -491,10 +483,10 @@ func BenchmarkHasBlockDB(b *testing.B) {
beaconDB := testDB.SetupDB(b)
ctx := context.Background()
s := &Service{
cfg: &config{BeaconDB: beaconDB},
cfg: &Config{BeaconDB: beaconDB},
}
block := util.NewBeaconBlock()
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
block := testutil.NewBeaconBlock()
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, interfaces.WrappedPhase0SignedBeaconBlock(block)))
r, err := block.Block.HashTreeRoot()
require.NoError(b, err)
@@ -508,16 +500,16 @@ func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := block.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
bs := &pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := stateV0.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, interfaces.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -7,11 +7,9 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing",
visibility = [
"//beacon-chain:__subpackages__",
"//testing:__subpackages__",
"//testing/fuzz:__pkg__",
"//fuzz:__pkg__",
],
deps = [
"//async/event:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
@@ -20,12 +18,14 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//beacon-chain/state/interface:go_default_library",
"//beacon-chain/state/stateV0:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/interfaces:go_default_library",
"//shared/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
@@ -19,29 +18,31 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateV0"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
)
// ChainService defines the mock interface for testing
type ChainService struct {
State state.BeaconState
State iface.BeaconState
Root []byte
Block block.SignedBeaconBlock
Block interfaces.SignedBeaconBlock
FinalizedCheckPoint *ethpb.Checkpoint
CurrentJustifiedCheckPoint *ethpb.Checkpoint
PreviousJustifiedCheckPoint *ethpb.Checkpoint
BlocksReceived []block.SignedBeaconBlock
BlocksReceived []interfaces.SignedBeaconBlock
Balance *precompute.Balance
Genesis time.Time
ValidatorsRoot [32]byte
CanonicalRoots map[[32]byte]bool
Fork *ethpb.Fork
Fork *pb.Fork
ETH1Data *ethpb.Eth1Data
DB db.Database
stateNotifier statefeed.Notifier
@@ -51,13 +52,6 @@ type ChainService struct {
ForkChoiceStore *protoarray.Store
VerifyBlkDescendantErr error
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
SyncCommitteeIndices []types.CommitteeIndex
SyncCommitteeDomain []byte
SyncSelectionProofDomain []byte
SyncContributionProofDomain []byte
PublicKey [48]byte
SyncCommitteePubkeys [][]byte
InitSyncBlockRoots map[[32]byte]bool
}
// StateNotifier mocks the same method in the chain service.
@@ -156,9 +150,9 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
}
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interfaces.SignedBeaconBlock, _ [32]byte) error {
if s.State == nil {
s.State = &v1.BeaconState{}
s.State = &stateV0.BeaconState{}
}
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
@@ -183,9 +177,9 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock, _ [][32]byte) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock, _ [][32]byte) error {
if s.State == nil {
s.State = &v1.BeaconState{}
s.State = &stateV0.BeaconState{}
}
for _, block := range blks {
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
@@ -212,9 +206,9 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.Signe
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, _ [32]byte) error {
if s.State == nil {
s.State = &v1.BeaconState{}
s.State = &stateV0.BeaconState{}
}
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
@@ -255,17 +249,17 @@ func (s *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
}
// HeadBlock mocks HeadBlock method in chain service.
func (s *ChainService) HeadBlock(context.Context) (block.SignedBeaconBlock, error) {
func (s *ChainService) HeadBlock(context.Context) (interfaces.SignedBeaconBlock, error) {
return s.Block, nil
}
// HeadState mocks HeadState method in chain service.
func (s *ChainService) HeadState(context.Context) (state.BeaconState, error) {
func (s *ChainService) HeadState(context.Context) (iface.BeaconState, error) {
return s.State, nil
}
// CurrentFork mocks the CurrentFork method in chain service.
func (s *ChainService) CurrentFork() *ethpb.Fork {
func (s *ChainService) CurrentFork() *pb.Fork {
return s.Fork
}
@@ -294,17 +288,17 @@ func (s *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attest
return nil
}
// AttestationTargetState mocks AttestationTargetState method in chain service.
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.BeaconState, error) {
// AttestationPreState mocks AttestationPreState method in chain service.
func (s *ChainService) AttestationPreState(_ context.Context, _ *ethpb.Attestation) (iface.BeaconState, error) {
return s.State, nil
}
// HeadValidatorsIndices mocks the same method in the chain service.
func (s *ChainService) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
func (s *ChainService) HeadValidatorsIndices(_ context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
if s.State == nil {
return []types.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(ctx, s.State, epoch)
return helpers.ActiveValidatorIndices(s.State, epoch)
}
// HeadSeed mocks the same method in the chain service.
@@ -361,11 +355,8 @@ func (s *ChainService) IsCanonical(_ context.Context, r [32]byte) (bool, error)
}
// HasInitSyncBlock mocks the same method in the chain service.
func (s *ChainService) HasInitSyncBlock(rt [32]byte) bool {
if s.InitSyncBlockRoots == nil {
return false
}
return s.InitSyncBlockRoots[rt]
func (s *ChainService) HasInitSyncBlock(_ [32]byte) bool {
return false
}
// HeadGenesisValidatorRoot mocks HeadGenesisValidatorRoot method in chain service.
@@ -402,38 +393,3 @@ func (s *ChainService) ChainHeads() ([][32]byte, []types.Slot) {
},
[]types.Slot{0, 1}
}
// HeadPublicKeyToValidatorIndex mocks HeadPublicKeyToValidatorIndex and always returns 0 and true.
func (s *ChainService) HeadPublicKeyToValidatorIndex(_ context.Context, _ [48]byte) (types.ValidatorIndex, bool) {
return 0, true
}
// HeadValidatorIndexToPublicKey mocks HeadValidatorIndexToPublicKey and always returns the configured PublicKey and a nil error.
func (s *ChainService) HeadValidatorIndexToPublicKey(_ context.Context, _ types.ValidatorIndex) ([48]byte, error) {
return s.PublicKey, nil
}
// HeadSyncCommitteeIndices mocks HeadSyncCommitteeIndices and always returns the configured SyncCommitteeIndices.
func (s *ChainService) HeadSyncCommitteeIndices(_ context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
return s.SyncCommitteeIndices, nil
}
// HeadSyncCommitteePubKeys mocks HeadSyncCommitteePubKeys and always returns the configured SyncCommitteePubkeys.
func (s *ChainService) HeadSyncCommitteePubKeys(_ context.Context, _ types.Slot, _ types.CommitteeIndex) ([][]byte, error) {
return s.SyncCommitteePubkeys, nil
}
// HeadSyncCommitteeDomain mocks HeadSyncCommitteeDomain and always returns the configured SyncCommitteeDomain.
func (s *ChainService) HeadSyncCommitteeDomain(_ context.Context, _ types.Slot) ([]byte, error) {
return s.SyncCommitteeDomain, nil
}
// HeadSyncSelectionProofDomain mocks HeadSyncSelectionProofDomain and always returns the configured SyncSelectionProofDomain.
func (s *ChainService) HeadSyncSelectionProofDomain(_ context.Context, _ types.Slot) ([]byte, error) {
return s.SyncSelectionProofDomain, nil
}
// HeadSyncContributionProofDomain mocks HeadSyncContributionProofDomain and always returns the configured SyncContributionProofDomain.
func (s *ChainService) HeadSyncContributionProofDomain(_ context.Context, _ types.Slot) ([]byte, error) {
return s.SyncContributionProofDomain, nil
}
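For reference, a hedged sketch of how this mock is typically consumed by tests elsewhere in the tree. The import alias mockChain and the component constructor are hypothetical; only fields declared on ChainService above are set.

	chain := &mockChain.ChainService{
		State:               headState,   // a beacon state prepared by the test
		Root:                headRoot[:], // expected parent root for received blocks
		FinalizedCheckPoint: &ethpb.Checkpoint{Root: headRoot[:]},
	}
	// The mock satisfies the chain-info interfaces required by the component under test.
	srv := newComponentUnderTest(t, chain) // hypothetical constructor
	_ = srv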

View File

@@ -4,94 +4,57 @@ import (
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
var errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
var errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
type weakSubjectivityDB interface {
HasBlock(ctx context.Context, blockRoot [32]byte) bool
BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error)
}
type WeakSubjectivityVerifier struct {
enabled bool
verified bool
root [32]byte
epoch types.Epoch
slot types.Slot
db weakSubjectivityDB
}
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
// TODO(7342): Weak subjectivity checks are currently optional. When we require the flag to be specified
// per 7342, a nil checkpoint, zero-root or zero-epoch should all fail validation
// and return an error instead of creating a WeakSubjectivityVerifier that permits any chain history.
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
return &WeakSubjectivityVerifier{
enabled: false,
}, nil
// VerifyWeakSubjectivityRoot verifies the weak subjectivity root in the service struct.
// Reference design: https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/weak-subjectivity.md#weak-subjectivity-sync-procedure
func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
// TODO(7342): Remove the following to fully use weak subjectivity in production.
if s.cfg.WeakSubjectivityCheckpt == nil || len(s.cfg.WeakSubjectivityCheckpt.Root) == 0 || s.cfg.WeakSubjectivityCheckpt.Epoch == 0 {
return nil
}
startSlot, err := slots.EpochStart(wsc.Epoch)
// Do nothing if the weak subjectivity has previously been verified,
// or weak subjectivity epoch is higher than last finalized epoch.
if s.wsVerified {
return nil
}
if s.cfg.WeakSubjectivityCheckpt.Epoch > s.finalizedCheckpt.Epoch {
return nil
}
r := bytesutil.ToBytes32(s.cfg.WeakSubjectivityCheckpt.Root)
log.Infof("Performing weak subjectivity check for root %#x in epoch %d", r, s.cfg.WeakSubjectivityCheckpt.Epoch)
// Save initial sync cached blocks to DB.
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
// A node should have the weak subjectivity block in the DB.
if !s.cfg.BeaconDB.HasBlock(ctx, r) {
return fmt.Errorf("node does not have root in DB: %#x", r)
}
startSlot, err := helpers.StartSlot(s.cfg.WeakSubjectivityCheckpt.Epoch)
if err != nil {
return nil, err
return err
}
return &WeakSubjectivityVerifier{
enabled: true,
verified: false,
root: bytesutil.ToBytes32(wsc.Root),
epoch: wsc.Epoch,
db: db,
slot: startSlot,
}, nil
}
// VerifyWeakSubjectivity verifies the weak subjectivity root in the service struct.
// Reference design: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/weak-subjectivity.md#weak-subjectivity-sync-procedure
func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, finalizedEpoch types.Epoch) error {
if v.verified || !v.enabled {
return nil
}
// Two conditions are described in the specs:
// IF epoch_number > store.finalized_checkpoint.epoch,
// then ASSERT during block sync that block with root block_root
// is in the sync path at epoch epoch_number. Emit descriptive critical error if this assert fails,
// then exit client process.
// we do not handle this case ^, because we can only check blocks that have been processed / are currently
// in line for finalization; we don't have the ability to look ahead. so we only satisfy the following:
// IF epoch_number <= store.finalized_checkpoint.epoch,
// then ASSERT that the block in the canonical chain at epoch epoch_number has root block_root.
// Emit descriptive critical error if this assert fails, then exit client process.
if v.epoch > finalizedEpoch {
return nil
}
log.Infof("Performing weak subjectivity check for root %#x in epoch %d", v.root, v.epoch)
if !v.db.HasBlock(ctx, v.root) {
return errors.Wrap(errWSBlockNotFound, fmt.Sprintf("missing root %#x", v.root))
}
filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(v.slot + params.BeaconConfig().SlotsPerEpoch)
// A node should have the weak subjectivity block that corresponds to the correct epoch in the DB.
roots, err := v.db.BlockRoots(ctx, filter)
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(startSlot + params.BeaconConfig().SlotsPerEpoch)
roots, err := s.cfg.BeaconDB.BlockRoots(ctx, filter)
if err != nil {
return errors.Wrap(err, "error while retrieving block roots to verify weak subjectivity")
return err
}
for _, root := range roots {
if v.root == root {
if r == root {
log.Info("Weak subjectivity check has passed")
v.verified = true
s.wsVerified = true
return nil
}
}
return errors.Wrap(errWSBlockNotFoundInEpoch, fmt.Sprintf("root=%#x, epoch=%d", v.root, v.epoch))
return fmt.Errorf("node does not have root in db corresponding to epoch: %#x %d", r, s.cfg.WeakSubjectivityCheckpt.Epoch)
}
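For context, a hedged sketch of how the verifier introduced above can be wired in. The wrapper below is hypothetical and not part of this diff; it only calls the constructor and method shown here, and it would have to live in the blockchain package because weakSubjectivityDB is unexported.

// verifyWSOnFinalize is a hypothetical helper: build the verifier once from the
// operator-supplied checkpoint, then re-run the check whenever finality advances.
// VerifyWeakSubjectivity is a no-op until the finalized epoch reaches the
// checkpoint epoch, after which it asserts that the canonical block at that
// epoch matches the configured root.
func verifyWSOnFinalize(ctx context.Context, db weakSubjectivityDB, wsCheckpt *ethpb.Checkpoint, finalizedEpoch types.Epoch) error {
	v, err := NewWeakSubjectivityVerifier(wsCheckpt, db)
	if err != nil {
		return err
	}
	return v.VerifyWeakSubjectivity(ctx, finalizedEpoch)
}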

View File

@@ -4,77 +4,78 @@ import (
"context"
"testing"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.Slot = 32
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(b)))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
wsVerified bool
wantErr error
wantErr bool
checkpt *ethpb.Checkpoint
finalizedEpoch types.Epoch
errString string
name string
}{
{
name: "nil root and epoch",
name: "nil root and epoch",
wantErr: false,
},
{
name: "already verified",
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 2,
wsVerified: true,
wantErr: false,
},
{
name: "not yet to verify, ws epoch higher than finalized epoch",
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 1,
wantErr: false,
},
{
name: "can't find the block in DB",
checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: 1},
finalizedEpoch: 3,
wantErr: errWSBlockNotFound,
wantErr: true,
errString: "node does not have root in DB",
},
{
name: "can't find the block corresponds to ws epoch in DB",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 2}, // Root belongs in epoch 1.
finalizedEpoch: 3,
wantErr: errWSBlockNotFoundInEpoch,
wantErr: true,
errString: "node does not have root in db corresponding to epoch",
},
{
name: "can verify and pass",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 1},
finalizedEpoch: 3,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
cfg: &Config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
wsVerified: tt.wsVerified,
finalizedCheckpt: &ethpb.Checkpoint{Epoch: tt.finalizedEpoch},
wsVerifier: wv,
}
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.finalizedCheckpt.Epoch)
if tt.wantErr == nil {
require.NoError(t, err)
} else {
require.Equal(t, true, errors.Is(err, tt.wantErr))
if err := s.VerifyWeakSubjectivityRoot(context.Background()); (err != nil) != tt.wantErr {
require.ErrorContains(t, tt.errString, err)
}
})
}

View File

@@ -1,4 +1,5 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
# gazelle:exclude committee_disabled.go
# gazelle:exclude proposer_indices_disabled.go
@@ -10,47 +11,34 @@ go_library(
"committees.go",
"common.go",
"doc.go",
"error.go",
"proposer_indices_type.go",
"skip_slot_cache.go",
"subnet_ids.go",
"sync_committee_head_state.go",
"sync_subnet_ids.go",
] + select({
"//testing/fuzz:fuzzing_enabled": [
"active_balance_disabled.go",
"//fuzz:fuzzing_enabled": [
"committee_disabled.go",
"proposer_indices_disabled.go",
"sync_committee_disabled.go",
],
"//conditions:default": [
"active_balance.go",
"committee.go",
"proposer_indices.go",
"sync_committee.go",
],
}),
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/fuzz:__pkg__",
"//fuzz:__pkg__",
"//tools:__subpackages__",
],
deps = [
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//cache/lru:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//container/slice:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//beacon-chain/state/interface:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/copyutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
@@ -63,7 +51,6 @@ go_test(
name = "go_default_test",
size = "small",
srcs = [
"active_balance_test.go",
"attestation_data_test.go",
"cache_test.go",
"checkpoint_state_test.go",
@@ -72,22 +59,17 @@ go_test(
"proposer_indices_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",
"sync_committee_head_state_test.go",
"sync_committee_test.go",
"sync_subnet_ids_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//beacon-chain/state/interface:go_default_library",
"//beacon-chain/state/stateV0:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",

View File

@@ -1,119 +0,0 @@
// +build !libfuzzer
package cache
import (
"encoding/binary"
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethTypes "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
)
var (
// maxBalanceCacheSize defines the max number of active balances the cache can hold.
maxBalanceCacheSize = uint64(4)
// BalanceCacheMiss tracks the number of balance requests that aren't present in the cache.
balanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "total_effective_balance_cache_miss",
Help: "The number of get requests that aren't present in the cache.",
})
// BalanceCacheHit tracks the number of balance requests that are in the cache.
balanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "total_effective_balance_cache_hit",
Help: "The number of get requests that are present in the cache.",
})
)
// BalanceCache is a struct with 1 LRU cache for looking up balance by epoch.
type BalanceCache struct {
cache *lru.Cache
lock sync.RWMutex
}
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
return &BalanceCache{
cache: lruwrpr.New(int(maxBalanceCacheSize)),
}
}
// AddTotalEffectiveBalance adds a new total effective balance entry for the current balance of state `st` into the cache.
func (c *BalanceCache) AddTotalEffectiveBalance(st state.ReadOnlyBeaconState, balance uint64) error {
if !features.Get().EnableActiveBalanceCache {
return nil
}
key, err := balanceCacheKey(st)
if err != nil {
return err
}
c.lock.Lock()
defer c.lock.Unlock()
_ = c.cache.Add(key, balance)
return nil
}
// Get returns the current epoch's effective balance for state `st` in cache.
func (c *BalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
if !features.Get().EnableActiveBalanceCache {
return 0, ErrNotFound
}
key, err := balanceCacheKey(st)
if err != nil {
return 0, err
}
c.lock.RLock()
defer c.lock.RUnlock()
value, exists := c.cache.Get(key)
if !exists {
balanceCacheMiss.Inc()
return 0, ErrNotFound
}
balanceCacheHit.Inc()
return value.(uint64), nil
}
// Given input state `st`, balance key is constructed as:
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
if err != nil {
// impossible condition due to early division
return "", errors.Errorf("start slot calculation overflows: %v", err)
}
prevSlot := ethTypes.Slot(0)
if epochStartSlot > 1 {
prevSlot = epochStartSlot - 1
}
r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
// impossible condition because index is always constrained within state
return "", err
}
// Mix in current epoch
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
key := append(r, b...)
// Mix in validator count
b = make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
key = append(key, b...)
return string(key), nil
}

View File

@@ -1,31 +0,0 @@
// +build libfuzzer
package cache
import (
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// FakeBalanceCache is a fake struct with 1 LRU cache for looking up balance by epoch.
type FakeBalanceCache struct {
cache *lru.Cache
lock sync.RWMutex
}
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *FakeBalanceCache {
return &FakeBalanceCache{}
}
// AddTotalEffectiveBalance adds a new total effective balance entry for the current balance of state `st` into the cache.
func (c *FakeBalanceCache) AddTotalEffectiveBalance(st state.ReadOnlyBeaconState, balance uint64) error {
return nil
}
// Get returns the current epoch's effective balance for state `st` in cache.
func (c *FakeBalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
return 0, nil
}

View File

@@ -1,81 +0,0 @@
package cache
import (
"encoding/binary"
"math"
"testing"
types "github.com/prysmaticlabs/eth2-types"
state "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestBalanceCache_AddGetBalance(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableActiveBalanceCache: true,
})
defer resetCfg()
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(blockRoots); i++ {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(i))
blockRoots[i] = b
}
raw := &ethpb.BeaconState{
BlockRoots: blockRoots,
}
st, err := state.InitializeFromProto(raw)
require.NoError(t, err)
cache := NewEffectiveBalanceCache()
_, err = cache.Get(st)
require.ErrorContains(t, ErrNotFound.Error(), err)
b := uint64(100)
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
cachedB, err := cache.Get(st)
require.NoError(t, err)
require.Equal(t, b, cachedB)
require.NoError(t, st.SetSlot(1000))
_, err = cache.Get(st)
require.ErrorContains(t, ErrNotFound.Error(), err)
b = uint64(200)
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
cachedB, err = cache.Get(st)
require.NoError(t, err)
require.Equal(t, b, cachedB)
require.NoError(t, st.SetSlot(1000+params.BeaconConfig().SlotsPerHistoricalRoot))
_, err = cache.Get(st)
require.ErrorContains(t, ErrNotFound.Error(), err)
b = uint64(300)
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
cachedB, err = cache.Get(st)
require.NoError(t, err)
require.Equal(t, b, cachedB)
}
func TestBalanceCache_BalanceKey(t *testing.T) {
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(blockRoots); i++ {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(i))
blockRoots[i] = b
}
raw := &ethpb.BeaconState{
BlockRoots: blockRoots,
}
st, err := state.InitializeFromProto(raw)
require.NoError(t, err)
require.NoError(t, st.SetSlot(types.Slot(math.MaxUint64)))
_, err = balanceCacheKey(st)
require.NoError(t, err)
}

View File

@@ -10,7 +10,8 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/copyutil"
"k8s.io/client-go/tools/cache"
)
@@ -97,7 +98,7 @@ func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRe
if exists && item != nil && item.(*attestationReqResWrapper).res != nil {
attestationCacheHit.Inc()
return ethpb.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
return copyutil.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
}
attestationCacheMiss.Inc()
return nil, nil

View File

@@ -5,8 +5,8 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"google.golang.org/protobuf/proto"
)

View File

@@ -6,10 +6,9 @@ import (
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
"github.com/prysmaticlabs/prysm/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
)
var (
@@ -37,17 +36,21 @@ type CheckpointStateCache struct {
// NewCheckpointStateCache creates a new checkpoint state cache for storing/accessing processed state.
func NewCheckpointStateCache() *CheckpointStateCache {
cache, err := lru.New(maxCheckpointStateSize)
if err != nil {
panic(err)
}
return &CheckpointStateCache{
cache: lruwrpr.New(maxCheckpointStateSize),
cache: cache,
}
}
// StateByCheckpoint fetches state by checkpoint. Returns a reference to the
// cached state if it exists; otherwise returns nil, nil.
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.BeaconState, error) {
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (iface.BeaconState, error) {
c.lock.RLock()
defer c.lock.RUnlock()
h, err := hash.HashProto(cp)
h, err := hashutil.HashProto(cp)
if err != nil {
return nil, err
}
@@ -57,7 +60,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
if exists && item != nil {
checkpointStateHit.Inc()
// Copy here is unnecessary since the return will only be used to verify attestation signature.
return item.(state.BeaconState), nil
return item.(iface.BeaconState), nil
}
checkpointStateMiss.Inc()
@@ -66,10 +69,10 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
// AddCheckpointState adds a CheckpointState object to the cache. This method also trims the least
// recently added CheckpointState object if the cache size has reached the max cache size limit.
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.ReadOnlyBeaconState) error {
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s iface.ReadOnlyBeaconState) error {
c.lock.Lock()
defer c.lock.Unlock()
h, err := hash.HashProto(cp)
h, err := hashutil.HashProto(cp)
if err != nil {
return err
}

View File

@@ -4,13 +4,14 @@ import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateV0"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"google.golang.org/protobuf/proto"
)
@@ -18,48 +19,48 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
cache := NewCheckpointStateCache()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
st, err := stateV0.InitializeFromProto(&pb.BeaconState{
GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:],
Slot: 64,
})
require.NoError(t, err)
s, err := cache.StateByCheckpoint(cp1)
state, err := cache.StateByCheckpoint(cp1)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "Expected state not to exist in empty cache")
assert.Equal(t, iface.BeaconState(nil), state, "Expected state not to exist in empty cache")
require.NoError(t, cache.AddCheckpointState(cp1, st))
s, err = cache.StateByCheckpoint(cp1)
state, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)
pbState1, err := v1.ProtobufBeaconState(s.InnerStateUnsafe())
pbState1, err := stateV0.ProtobufBeaconState(state.InnerStateUnsafe())
require.NoError(t, err)
pbstate, err := v1.ProtobufBeaconState(st.InnerStateUnsafe())
pbState2, err := stateV0.ProtobufBeaconState(st.InnerStateUnsafe())
require.NoError(t, err)
if !proto.Equal(pbState1, pbstate) {
if !proto.Equal(pbState1, pbState2) {
t.Error("incorrectly cached state")
}
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
st2, err := v1.InitializeFromProto(&ethpb.BeaconState{
st2, err := stateV0.InitializeFromProto(&pb.BeaconState{
Slot: 128,
})
require.NoError(t, err)
require.NoError(t, cache.AddCheckpointState(cp2, st2))
s, err = cache.StateByCheckpoint(cp2)
state, err = cache.StateByCheckpoint(cp2)
require.NoError(t, err)
assert.DeepEqual(t, st2.CloneInnerState(), s.CloneInnerState(), "incorrectly cached state")
assert.DeepEqual(t, st2.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
s, err = cache.StateByCheckpoint(cp1)
state, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)
assert.DeepEqual(t, st.CloneInnerState(), s.CloneInnerState(), "incorrectly cached state")
assert.DeepEqual(t, st.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
}
func TestCheckpointStateCache_MaxSize(t *testing.T) {
c := NewCheckpointStateCache()
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
st, err := stateV0.InitializeFromProto(&pb.BeaconState{
Slot: 0,
})
require.NoError(t, err)

View File

@@ -3,20 +3,15 @@
package cache
import (
"context"
"errors"
"math"
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/slice"
mathutil "github.com/prysmaticlabs/prysm/math"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
var (
@@ -40,7 +35,6 @@ var (
type CommitteeCache struct {
CommitteeCache *lru.Cache
lock sync.RWMutex
inProgress map[string]bool
}
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
@@ -54,18 +48,22 @@ func committeeKeyFn(obj interface{}) (string, error) {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
cCache, err := lru.New(int(maxCommitteesCacheSize))
// An error is only returned if the size of the cache is
// <= 0.
if err != nil {
panic(err)
}
return &CommitteeCache{
CommitteeCache: lruwrpr.New(int(maxCommitteesCacheSize)),
inProgress: make(map[string]bool),
CommitteeCache: cCache,
}
}
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
// represents one committee. Returns the list if it exists for the slot and committee index; otherwise returns nil, nil.
func (c *CommitteeCache) Committee(ctx context.Context, slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
if err := c.checkInProgress(ctx, seed); err != nil {
return nil, err
}
func (c *CommitteeCache) Committee(slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists := c.CommitteeCache.Get(key(seed))
if exists {
@@ -85,10 +83,7 @@ func (c *CommitteeCache) Committee(ctx context.Context, slot types.Slot, seed [3
committeeCountPerSlot = item.CommitteeCount / uint64(params.BeaconConfig().SlotsPerEpoch)
}
indexOffSet, err := mathutil.Add64(uint64(index), uint64(slot.ModSlot(params.BeaconConfig().SlotsPerEpoch).Mul(committeeCountPerSlot)))
if err != nil {
return nil, err
}
indexOffSet := uint64(index) + uint64(slot.ModSlot(params.BeaconConfig().SlotsPerEpoch).Mul(committeeCountPerSlot))
start, end := startEndIndices(item, indexOffSet)
if end > uint64(len(item.ShuffledIndices)) || end < start {
@@ -112,10 +107,9 @@ func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error
}
// ActiveIndices returns the active indices of a given seed stored in cache.
func (c *CommitteeCache) ActiveIndices(ctx context.Context, seed [32]byte) ([]types.ValidatorIndex, error) {
if err := c.checkInProgress(ctx, seed); err != nil {
return nil, err
}
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists := c.CommitteeCache.Get(key(seed))
if exists {
@@ -134,11 +128,9 @@ func (c *CommitteeCache) ActiveIndices(ctx context.Context, seed [32]byte) ([]ty
}
// ActiveIndicesCount returns the active indices count of a given seed stored in cache.
func (c *CommitteeCache) ActiveIndicesCount(ctx context.Context, seed [32]byte) (int, error) {
if err := c.checkInProgress(ctx, seed); err != nil {
return 0, err
}
func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists := c.CommitteeCache.Get(key(seed))
if exists {
CommitteeCacheHit.Inc()
@@ -161,65 +153,17 @@ func (c *CommitteeCache) HasEntry(seed string) bool {
return ok
}
// MarkInProgress marks a request as in progress so that any other identical requests will block on
// Get until MarkNotInProgress is called.
func (c *CommitteeCache) MarkInProgress(seed [32]byte) error {
c.lock.Lock()
defer c.lock.Unlock()
s := key(seed)
if c.inProgress[s] {
return ErrAlreadyInProgress
}
c.inProgress[s] = true
return nil
}
// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *CommitteeCache) MarkNotInProgress(seed [32]byte) error {
c.lock.Lock()
defer c.lock.Unlock()
s := key(seed)
delete(c.inProgress, s)
return nil
}
func startEndIndices(c *Committees, index uint64) (uint64, uint64) {
validatorCount := uint64(len(c.ShuffledIndices))
start := slice.SplitOffset(validatorCount, c.CommitteeCount, index)
end := slice.SplitOffset(validatorCount, c.CommitteeCount, index+1)
start := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index)
end := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index+1)
return start, end
}
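For illustration, the slicing above can be reproduced with a small self-contained sketch. splitOffset below is a local stand-in for the SplitOffset helper (the spec's (listSize * index) / chunks split formula), and the shuffled indices and committee count are made-up values:

package main

import "fmt"

// splitOffset mirrors the (listSize*index)/chunks formula used by SplitOffset;
// it is a local stand-in so the sketch is self-contained.
func splitOffset(listSize, chunks, index uint64) uint64 {
	return listSize * index / chunks
}

func main() {
	// Hypothetical cached entry: 8 shuffled validator indices split into 4 committees.
	shuffled := []uint64{11, 7, 3, 9, 14, 2, 5, 8}
	committeeCount := uint64(4)

	// Committee #1 of this entry spans indices [start, end).
	start := splitOffset(uint64(len(shuffled)), committeeCount, 1)
	end := splitOffset(uint64(len(shuffled)), committeeCount, 2)
	fmt.Println(shuffled[start:end]) // [3 9]
}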
// Using seed as source for key to handle reorgs in the same epoch.
// The seed is derived from state's array of randao mixes and epoch value
// hashed together. This avoids collisions across different validator sets. Spec definition:
// https://github.com/ethereum/consensus-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#get_seed
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#get_seed
func key(seed [32]byte) string {
return string(seed[:])
}
func (c *CommitteeCache) checkInProgress(ctx context.Context, seed [32]byte) error {
delay := minDelay
// Another identical request may be in progress already. Let's wait until
// any in progress request resolves or our timeout is exceeded.
for {
if ctx.Err() != nil {
return ctx.Err()
}
c.lock.RLock()
if !c.inProgress[key(seed)] {
c.lock.RUnlock()
break
}
c.lock.RUnlock()
// This increasing backoff reduces CPU usage while waiting
// for the in-progress flag to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = math.Min(delay, maxDelay)
}
return nil
}
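The MarkInProgress / MarkNotInProgress / checkInProgress trio above is a request-deduplication guard: the first caller marks a seed as in progress and clears it when done, while concurrent callers spin with an increasing backoff until the flag clears or their context expires. Below is a minimal self-contained sketch of that pattern; the guard type, key strings, and delay constants are invented for illustration and do not mirror Prysm's exact minDelay/maxDelay values:

package main

import (
	"context"
	"fmt"
	"math"
	"sync"
	"time"
)

type guard struct {
	mu         sync.RWMutex
	inProgress map[string]bool
}

func (g *guard) mark(k string) bool {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.inProgress[k] {
		return false // an identical request is already being computed
	}
	g.inProgress[k] = true
	return true
}

func (g *guard) unmark(k string) {
	g.mu.Lock()
	delete(g.inProgress, k)
	g.mu.Unlock()
}

// wait blocks with an increasing backoff until the key is no longer marked
// in progress or the context is cancelled, mirroring checkInProgress above.
func (g *guard) wait(ctx context.Context, k string) error {
	delay := float64(10 * time.Microsecond) // hypothetical starting delay
	for {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		g.mu.RLock()
		busy := g.inProgress[k]
		g.mu.RUnlock()
		if !busy {
			return nil
		}
		time.Sleep(time.Duration(delay))
		delay = math.Min(delay*2, float64(5*time.Millisecond)) // hypothetical cap
	}
}

func main() {
	g := &guard{inProgress: map[string]bool{}}
	if g.mark("seed-A") {
		go func() { time.Sleep(time.Millisecond); g.unmark("seed-A") }()
	}
	fmt.Println(g.wait(context.Background(), "seed-A")) // <nil> once the work completes
}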

View File

@@ -3,11 +3,7 @@
// This file is used in fuzzer builds to bypass global committee caches.
package cache
import (
"context"
types "github.com/prysmaticlabs/eth2-types"
)
import types "github.com/prysmaticlabs/eth2-types"
// FakeCommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
type FakeCommitteeCache struct {
@@ -20,7 +16,7 @@ func NewCommitteesCache() *FakeCommitteeCache {
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
// represents one committee. Returns true if the list exists for the given slot and committee index. Otherwise returns false, nil.
func (c *FakeCommitteeCache) Committee(ctx context.Context, slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
func (c *FakeCommitteeCache) Committee(slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
return nil, nil
}
@@ -36,17 +32,12 @@ func (c *FakeCommitteeCache) AddProposerIndicesList(seed [32]byte, indices []typ
}
// ActiveIndices returns the active indices of a given seed stored in cache.
func (c *FakeCommitteeCache) ActiveIndices(ctx context.Context, seed [32]byte) ([]types.ValidatorIndex, error) {
func (c *FakeCommitteeCache) ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
return nil, nil
}
// ActiveIndicesCount returns the active indices count of a given seed stored in cache.
func (c *FakeCommitteeCache) ActiveIndicesCount(ctx context.Context, seed [32]byte) (int, error) {
return 0, nil
}
// ActiveBalance returns the active balance of a given seed stored in cache.
func (c *FakeCommitteeCache) ActiveBalance(seed [32]byte) (uint64, error) {
func (c *FakeCommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
return 0, nil
}
@@ -59,13 +50,3 @@ func (c *FakeCommitteeCache) ProposerIndices(seed [32]byte) ([]types.ValidatorIn
func (c *FakeCommitteeCache) HasEntry(string) bool {
return false
}
// MarkInProgress is a stub.
func (c *FakeCommitteeCache) MarkInProgress(seed [32]byte) error {
return nil
}
// MarkNotInProgress is a stub.
func (c *FakeCommitteeCache) MarkNotInProgress(seed [32]byte) error {
return nil
}

View File

@@ -1,12 +1,11 @@
package cache
import (
"context"
"testing"
fuzz "github.com/google/gofuzz"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestCommitteeKeyFuzz_OK(t *testing.T) {
@@ -29,7 +28,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(c))
_, err := cache.Committee(context.Background(), 0, c.Seed, 0)
_, err := cache.Committee(0, c.Seed, 0)
require.NoError(t, err)
}
@@ -45,7 +44,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(c))
indices, err := cache.ActiveIndices(context.Background(), c.Seed)
indices, err := cache.ActiveIndices(c.Seed)
require.NoError(t, err)
assert.DeepEqual(t, c.SortedIndices, indices)
}

View File

@@ -1,17 +1,16 @@
package cache
import (
"context"
"math"
"sort"
"strconv"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestCommitteeKeyFn_OK(t *testing.T) {
@@ -42,7 +41,7 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
slot := params.BeaconConfig().SlotsPerEpoch
committeeIndex := types.CommitteeIndex(1)
indices, err := cache.Committee(context.Background(), slot, item.Seed, committeeIndex)
indices, err := cache.Committee(slot, item.Seed, committeeIndex)
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee not to exist in empty cache")
@@ -50,7 +49,7 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
require.NoError(t, cache.AddCommitteeShuffledList(item))
wantedIndex := types.CommitteeIndex(0)
indices, err = cache.Committee(context.Background(), slot, item.Seed, wantedIndex)
indices, err = cache.Committee(slot, item.Seed, wantedIndex)
require.NoError(t, err)
start, end := startEndIndices(item, uint64(wantedIndex))
@@ -61,7 +60,7 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
cache := NewCommitteesCache()
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
indices, err := cache.ActiveIndices(context.Background(), item.Seed)
indices, err := cache.ActiveIndices(item.Seed)
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee not to exist in empty cache")
@@ -69,7 +68,7 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
require.NoError(t, cache.AddCommitteeShuffledList(item))
indices, err = cache.ActiveIndices(context.Background(), item.Seed)
indices, err = cache.ActiveIndices(item.Seed)
require.NoError(t, err)
assert.DeepEqual(t, item.SortedIndices, indices)
}
@@ -78,13 +77,13 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
cache := NewCommitteesCache()
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
count, err := cache.ActiveIndicesCount(item.Seed)
require.NoError(t, err)
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
require.NoError(t, cache.AddCommitteeShuffledList(item))
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
count, err = cache.ActiveIndicesCount(item.Seed)
require.NoError(t, err)
assert.Equal(t, len(item.SortedIndices), count)
}
@@ -128,6 +127,6 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
assert.NoError(t, err)
_ = cache.CommitteeCache.Add(key, comms)
_, err = cache.Committee(context.Background(), 0, seed, math.MaxUint64) // Overflow!
_, err = cache.Committee(0, seed, math.MaxUint64) // Overflow!
require.NotNil(t, err, "Did not fail as expected")
}

View File

@@ -1,7 +1,7 @@
package cache
import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/shared/params"
"k8s.io/client-go/tools/cache"
)

View File

@@ -1,4 +1,5 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_library(
name = "go_default_library",
@@ -10,11 +11,12 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/beacon/db:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -31,12 +33,13 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//proto/beacon/db:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

View File

@@ -15,11 +15,11 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/trieutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -43,7 +43,7 @@ type DepositFetcher interface {
// FinalizedDeposits stores the trie of deposits that have been included
// in the beacon state up to the latest finalized checkpoint.
type FinalizedDeposits struct {
Deposits *trie.SparseMerkleTrie
Deposits *trieutil.SparseMerkleTrie
MerkleTrieIndex int64
}
@@ -59,7 +59,7 @@ type DepositCache struct {
// New instantiates a new deposit cache
func New() (*DepositCache, error) {
finalizedDepositsTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
finalizedDepositsTrie, err := trieutil.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
@@ -136,10 +136,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
log.WithError(err).Error("Could not hash deposit data. Finalized deposit cache not updated.")
return
}
if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
log.WithError(err).Error("Could not insert deposit hash")
return
}
depositTrie.Insert(depHash[:], insertIndex)
insertIndex++
}
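The insertion loop above hashes each pending deposit's data and inserts it into the finalized-deposits trie at the next index, aborting (in the error-checked variant) on the first failed insert. The sketch below shows the same shape against a hypothetical inserter interface and a plain SHA-256 hash, rather than the real SparseMerkleTrie and deposit-data hash-tree-root:

package main

import (
	"crypto/sha256"
	"fmt"
)

// inserter is a hypothetical stand-in for the deposit trie used above; the
// real cache inserts into a SparseMerkleTrie of fixed depth.
type inserter interface {
	Insert(item []byte, index int) error
}

type fakeTrie struct{ leaves [][]byte }

func (f *fakeTrie) Insert(item []byte, index int) error {
	if index != len(f.leaves) {
		return fmt.Errorf("unexpected index %d", index)
	}
	f.leaves = append(f.leaves, item)
	return nil
}

// appendDeposits mirrors the insertion loop: hash each deposit's data and
// insert it at the next free index, stopping on the first error.
func appendDeposits(t inserter, depositData [][]byte, insertIndex int) (int, error) {
	for _, d := range depositData {
		h := sha256.Sum256(d) // stand-in for the real deposit-data hash-tree-root
		if err := t.Insert(h[:], insertIndex); err != nil {
			return insertIndex, err
		}
		insertIndex++
	}
	return insertIndex, nil
}

func main() {
	ft := &fakeTrie{}
	next, err := appendDeposits(ft, [][]byte{[]byte("dep0"), []byte("dep1")}, 0)
	fmt.Println(next, err, len(ft.leaves)) // 2 <nil> 2
}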

View File

@@ -7,13 +7,13 @@ import (
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/prysmaticlabs/prysm/shared/trieutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -429,7 +429,7 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
trie, err := trieutil.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}
@@ -487,7 +487,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
trie, err := trieutil.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}

View File

@@ -7,9 +7,9 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/crypto/hash"
dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -99,7 +99,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
return
}
depRoot, err := hash.HashProto(d)
depRoot, err := hashutil.HashProto(d)
if err != nil {
log.Errorf("Could not remove deposit %v", err)
return
@@ -110,7 +110,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
idx := -1
for i, ctnr := range dc.pendingDeposits {
hash, err := hash.HashProto(ctnr.Deposit)
hash, err := hashutil.HashProto(ctnr.Deposit)
if err != nil {
log.Errorf("Could not hash deposit %v", err)
continue

View File

@@ -5,10 +5,10 @@ import (
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"google.golang.org/protobuf/proto"
)

View File

@@ -1,5 +1,5 @@
// Package cache includes all important caches for the runtime
// of an Ethereum Beacon Node, ensuring the node does not spend
// of an eth2 beacon node, ensuring the node does not spend
// resources computing duplicate operations such as committee
// calculations for validators during the same epoch, etc.
package cache

View File

@@ -1,15 +0,0 @@
package cache
import "errors"
var (
// ErrNilValueProvided for when we try to put a nil value in a cache.
ErrNilValueProvided = errors.New("nil value provided on Put()")
// ErrIncorrectType for when the state is of the incorrect type.
ErrIncorrectType = errors.New("incorrect state type provided")
// ErrNotFound for cache fetches that return a nil value.
ErrNotFound = errors.New("not found in cache")
// ErrNonExistingSyncCommitteeKey when sync committee key (root) does not exist in cache.
ErrNonExistingSyncCommitteeKey = errors.New("does not exist sync committee key")
errNotSyncCommitteeIndexPosition = errors.New("not syncCommitteeIndexPosition struct")
)

View File

@@ -5,9 +5,9 @@ import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestProposerKeyFn_OK(t *testing.T) {

View File

@@ -9,8 +9,7 @@ import (
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
"go.opencensus.io/trace"
)
@@ -26,7 +25,7 @@ var (
})
)
// SkipSlotCache is used to store the cached results of processing skip slots in transition.ProcessSlots.
// SkipSlotCache is used to store the cached results of processing skip slots in state.ProcessSlots.
type SkipSlotCache struct {
cache *lru.Cache
lock sync.RWMutex
@@ -36,8 +35,12 @@ type SkipSlotCache struct {
// NewSkipSlotCache initializes the map and underlying cache.
func NewSkipSlotCache() *SkipSlotCache {
cache, err := lru.New(8)
if err != nil {
panic(err)
}
return &SkipSlotCache{
cache: lruwrpr.New(8),
cache: cache,
inProgress: make(map[[32]byte]bool),
}
}
@@ -54,7 +57,7 @@ func (c *SkipSlotCache) Disable() {
// Get waits for any in progress calculation to complete before returning a
// cached response, if any.
func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState, error) {
func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (iface.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "skipSlotCache.Get")
defer span.End()
if c.disabled {
@@ -94,7 +97,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
if exists && item != nil {
skipSlotCacheHit.Inc()
span.AddAttributes(trace.BoolAttribute("hit", true))
return item.(state.BeaconState).Copy(), nil
return item.(iface.BeaconState).Copy(), nil
}
skipSlotCacheMiss.Inc()
span.AddAttributes(trace.BoolAttribute("hit", false))
@@ -133,7 +136,7 @@ func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) error {
}
// Put the response in the cache.
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) error {
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state iface.BeaconState) error {
if c.disabled {
return nil
}

View File

@@ -5,11 +5,11 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateV0"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestSkipSlotCache_RoundTrip(t *testing.T) {
@@ -17,21 +17,21 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
c := cache.NewSkipSlotCache()
r := [32]byte{'a'}
s, err := c.Get(ctx, r)
state, err := c.Get(ctx, r)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "Empty cache returned an object")
assert.Equal(t, iface.BeaconState(nil), state, "Empty cache returned an object")
require.NoError(t, c.MarkInProgress(r))
s, err = v1.InitializeFromProto(&ethpb.BeaconState{
state, err = stateV0.InitializeFromProto(&pb.BeaconState{
Slot: 10,
})
require.NoError(t, err)
require.NoError(t, c.Put(ctx, r, s))
require.NoError(t, c.Put(ctx, r, state))
require.NoError(t, c.MarkNotInProgress(r))
res, err := c.Get(ctx, r)
require.NoError(t, err)
assert.DeepEqual(t, res.CloneInnerState(), s.CloneInnerState(), "Expected equal protos to return from cache")
assert.DeepEqual(t, res.CloneInnerState(), state.CloneInnerState(), "Expected equal protos to return from cache")
}

View File

@@ -7,9 +7,8 @@ import (
lru "github.com/hashicorp/golang-lru"
"github.com/patrickmn/go-cache"
types "github.com/prysmaticlabs/eth2-types"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/slice"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
type subnetIDs struct {
@@ -28,8 +27,14 @@ func newSubnetIDs() *subnetIDs {
// Given that a node can calculate committee assignments for the current and next epoch,
// the max size is set to two epochs' length.
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
attesterCache := lruwrpr.New(cacheSize)
aggregatorCache := lruwrpr.New(cacheSize)
attesterCache, err := lru.New(cacheSize)
if err != nil {
panic(err)
}
aggregatorCache, err := lru.New(cacheSize)
if err != nil {
panic(err)
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
subLength := epochDuration * time.Duration(params.BeaconConfig().EpochsPerRandomSubnetSubscription)
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
@@ -44,7 +49,7 @@ func (s *subnetIDs) AddAttesterSubnetID(slot types.Slot, subnetID uint64) {
ids := []uint64{subnetID}
val, exists := s.attester.Get(slot)
if exists {
ids = slice.UnionUint64(append(val.([]uint64), ids...))
ids = sliceutil.UnionUint64(append(val.([]uint64), ids...))
}
s.attester.Add(slot, ids)
}
@@ -72,7 +77,7 @@ func (s *subnetIDs) AddAggregatorSubnetID(slot types.Slot, subnetID uint64) {
ids := []uint64{subnetID}
val, exists := s.aggregator.Get(slot)
if exists {
ids = slice.UnionUint64(append(val.([]uint64), ids...))
ids = sliceutil.UnionUint64(append(val.([]uint64), ids...))
}
s.aggregator.Add(slot, ids)
}
@@ -117,7 +122,7 @@ func (s *subnetIDs) GetAllSubnets() []uint64 {
}
committees = append(committees, v.Object.([]uint64)...)
}
return slice.SetUint64(committees)
return sliceutil.SetUint64(committees)
}
// AddPersistentCommittee adds the relevant committee for that particular validator along with its

View File

@@ -4,8 +4,8 @@ import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestSubnetIDsCache_RoundTrip(t *testing.T) {

View File

@@ -1,187 +0,0 @@
// +build !libfuzzer
package cache
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"k8s.io/client-go/tools/cache"
)
var (
maxSyncCommitteeSize = uint64(3) // Allows 3 forks to happen around `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` boundary.
// SyncCommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
SyncCommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_committee_index_cache_miss_total",
Help: "The number of committee requests that aren't present in the sync committee index cache.",
})
// SyncCommitteeCacheHit tracks the number of committee requests that are in the cache.
SyncCommitteeCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_committee_index_cache_hit_total",
Help: "The number of committee requests that are present in the sync committee index cache.",
})
)
// SyncCommitteeCache utilizes a FIFO cache to store validator positions within the sync committee.
// It is safe for concurrent reads and writes.
type SyncCommitteeCache struct {
cache *cache.FIFO
lock sync.RWMutex
}
// Index position of all validators in sync committee where `currentSyncCommitteeRoot` is the
// key and `vIndexToPositionMap` is the value. Inside `vIndexToPositionMap`, validator positions
// are cached where key is the validator index and the value is the `positionInCommittee` struct.
type syncCommitteeIndexPosition struct {
currentSyncCommitteeRoot [32]byte
vIndexToPositionMap map[types.ValidatorIndex]*positionInCommittee
}
// Index positions of an individual validator in the current period and next period sync committees.
type positionInCommittee struct {
currentPeriod []types.CommitteeIndex
nextPeriod []types.CommitteeIndex
}
// NewSyncCommittee initializes and returns a new SyncCommitteeCache.
func NewSyncCommittee() *SyncCommitteeCache {
return &SyncCommitteeCache{
cache: cache.NewFIFO(keyFn),
}
}
// CurrentPeriodIndexPosition returns the current period index positions of a validator index with respect to the
// sync committee. If the input validator index has no assignment, an empty list will be returned.
// If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
// Manual checking of state for index position in state is recommended when `ErrNonExistingSyncCommitteeKey` is returned.
func (s *SyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
s.lock.RLock()
defer s.lock.RUnlock()
pos, err := s.idxPositionInCommittee(root, valIdx)
if err != nil {
return nil, err
}
if pos == nil {
return []types.CommitteeIndex{}, nil
}
return pos.currentPeriod, nil
}
// NextPeriodIndexPosition returns next period index position of a validator index in respect with sync committee.
// If the input validator index has no assignment, an empty list will be returned.
// If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
// Manual checking of state for index position in state is recommended when `ErrNonExistingSyncCommitteeKey` is returned.
func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
s.lock.RLock()
defer s.lock.RUnlock()
pos, err := s.idxPositionInCommittee(root, valIdx)
if err != nil {
return nil, err
}
if pos == nil {
return []types.CommitteeIndex{}, nil
}
return pos.nextPeriod, nil
}
// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
// of a validator index to its position(s) in the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
root [32]byte, valIdx types.ValidatorIndex,
) (*positionInCommittee, error) {
obj, exists, err := s.cache.GetByKey(key(root))
if err != nil {
return nil, err
}
if !exists {
SyncCommitteeCacheMiss.Inc()
return nil, ErrNonExistingSyncCommitteeKey
}
item, ok := obj.(*syncCommitteeIndexPosition)
if !ok {
return nil, errNotSyncCommitteeIndexPosition
}
idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
if !ok {
SyncCommitteeCacheMiss.Inc()
return nil, nil
}
SyncCommitteeCacheHit.Inc()
return idxInCommittee, nil
}
// UpdatePositionsInCommittee updates the cached validator positions in the sync committee with respect to
// the current epoch and next epoch. This should be called when `current_sync_committee` and `next_sync_committee`
// change and that happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`.
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconStateAltair) error {
csc, err := st.CurrentSyncCommittee()
if err != nil {
return err
}
positionsMap := make(map[types.ValidatorIndex]*positionInCommittee)
for i, pubkey := range csc.Pubkeys {
p := bytesutil.ToBytes48(pubkey)
validatorIndex, ok := st.ValidatorIndexByPubkey(p)
if !ok {
continue
}
if _, ok := positionsMap[validatorIndex]; !ok {
m := &positionInCommittee{currentPeriod: []types.CommitteeIndex{types.CommitteeIndex(i)}, nextPeriod: []types.CommitteeIndex{}}
positionsMap[validatorIndex] = m
} else {
positionsMap[validatorIndex].currentPeriod = append(positionsMap[validatorIndex].currentPeriod, types.CommitteeIndex(i))
}
}
nsc, err := st.NextSyncCommittee()
if err != nil {
return err
}
for i, pubkey := range nsc.Pubkeys {
p := bytesutil.ToBytes48(pubkey)
validatorIndex, ok := st.ValidatorIndexByPubkey(p)
if !ok {
continue
}
if _, ok := positionsMap[validatorIndex]; !ok {
m := &positionInCommittee{nextPeriod: []types.CommitteeIndex{types.CommitteeIndex(i)}, currentPeriod: []types.CommitteeIndex{}}
positionsMap[validatorIndex] = m
} else {
positionsMap[validatorIndex].nextPeriod = append(positionsMap[validatorIndex].nextPeriod, types.CommitteeIndex(i))
}
}
s.lock.Lock()
defer s.lock.Unlock()
if err := s.cache.Add(&syncCommitteeIndexPosition{
currentSyncCommitteeRoot: syncCommitteeBoundaryRoot,
vIndexToPositionMap: positionsMap,
}); err != nil {
return err
}
trim(s.cache, maxSyncCommitteeSize)
return nil
}
// Given the `syncCommitteeIndexPosition` object, this returns the key of the object.
// The key is the `currentSyncCommitteeRoot` within the field.
// Error gets returned if input does not comply with `currentSyncCommitteeRoot` object.
func keyFn(obj interface{}) (string, error) {
info, ok := obj.(*syncCommitteeIndexPosition)
if !ok {
return "", errNotSyncCommitteeIndexPosition
}
return string(info.currentSyncCommitteeRoot[:]), nil
}
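UpdatePositionsInCommittee effectively inverts the committee's pubkey list: each position in the list is recorded against the validator that owns the pubkey, so a validator appearing several times accumulates several committee positions. A tiny sketch of that inversion, using validator indices directly in place of pubkeys (the values are hypothetical and match the shape of the tests further below):

package main

import "fmt"

// positions maps each validator index to the committee positions it occupies,
// mirroring how UpdatePositionsInCommittee walks the committee's pubkey list.
// Validator indices replace pubkeys here to keep the sketch self-contained.
func positions(committee []uint64) map[uint64][]uint64 {
	m := make(map[uint64][]uint64)
	for pos, valIdx := range committee {
		m[valIdx] = append(m[valIdx], uint64(pos))
	}
	return m
}

func main() {
	// Hypothetical current sync committee: validator 2 appears three times.
	current := []uint64{1, 2, 3, 2, 2}
	fmt.Println(positions(current)) // map[1:[0] 2:[1 3 4] 3:[2]]
}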

View File

@@ -1,32 +0,0 @@
// +build libfuzzer
package cache
import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// FakeSyncCommitteeCache is a fake `SyncCommitteeCache` to satisfy fuzzing.
type FakeSyncCommitteeCache struct {
}
// NewSyncCommittee initializes and returns a new SyncCommitteeCache.
func NewSyncCommittee() *FakeSyncCommitteeCache {
return &FakeSyncCommitteeCache{}
}
// CurrentEpochIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
return nil, nil
}
// NextEpochIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
return nil, nil
}
// UpdatePositionsInCommittee -- fake.
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconStateAltair) error {
return nil
}

View File

@@ -1,55 +0,0 @@
package cache
import (
"sync"
lru "github.com/hashicorp/golang-lru"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
)
// SyncCommitteeHeadStateCache for the latest head state requested by a sync committee participant.
type SyncCommitteeHeadStateCache struct {
cache *lru.Cache
lock sync.RWMutex
}
// NewSyncCommitteeHeadState initializes a LRU cache for `SyncCommitteeHeadState` with size of 1.
func NewSyncCommitteeHeadState() *SyncCommitteeHeadStateCache {
c := lruwrpr.New(1) // only need size of 1 to avoid redundant state copies, hashing, and slot processing.
return &SyncCommitteeHeadStateCache{cache: c}
}
// Put `slot` as key and `state` as value onto the cache.
func (c *SyncCommitteeHeadStateCache) Put(slot types.Slot, st state.BeaconState) error {
c.lock.Lock()
defer c.lock.Unlock()
// Make sure that the provided state is non nil
// and is of the correct type.
if st == nil || st.IsNil() {
return ErrNilValueProvided
}
_, ok := st.(*stateAltair.BeaconState)
if !ok {
return ErrIncorrectType
}
c.cache.Add(slot, st)
return nil
}
// Get `state` using `slot` as key. Return nil if nothing is found.
func (c *SyncCommitteeHeadStateCache) Get(slot types.Slot) (state.BeaconState, error) {
c.lock.RLock()
defer c.lock.RUnlock()
val, exists := c.cache.Get(slot)
if !exists {
return nil, ErrNotFound
}
st, ok := val.(*stateAltair.BeaconState)
if !ok {
return nil, ErrIncorrectType
}
return st, nil
}

View File

@@ -1,102 +0,0 @@
package cache
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestSyncCommitteeHeadState(t *testing.T) {
beaconState, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
phase0State, err := v1.InitializeFromProto(&ethpb.BeaconState{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
type put struct {
slot types.Slot
state state.BeaconState
}
tests := []struct {
name string
key types.Slot
put *put
want state.BeaconState
wantErr bool
wantPutErr bool
}{
{
name: "putting error in",
key: types.Slot(1),
put: &put{
slot: types.Slot(1),
state: nil,
},
wantPutErr: true,
wantErr: true,
},
{
name: "putting invalid state in",
key: types.Slot(1),
put: &put{
slot: types.Slot(1),
state: phase0State,
},
wantPutErr: true,
wantErr: true,
},
{
name: "not found when empty cache",
key: types.Slot(1),
wantErr: true,
},
{
name: "not found when non-existent key in non-empty cache",
key: types.Slot(2),
put: &put{
slot: types.Slot(1),
state: beaconState,
},
wantErr: true,
},
{
name: "found with key",
key: types.Slot(1),
put: &put{
slot: types.Slot(1),
state: beaconState,
},
want: beaconState,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewSyncCommitteeHeadState()
if tt.put != nil {
err := c.Put(tt.put.slot, tt.put.state)
if (err != nil) != tt.wantPutErr {
t.Fatalf("Put() error = %v, wantErr %v", err, tt.wantErr)
}
}
got, err := c.Get(tt.key)
if (err != nil) != tt.wantErr {
t.Fatalf("Get() error = %v, wantErr %v", err, tt.wantErr)
}
require.DeepEqual(t, tt.want, got)
})
}
}

View File

@@ -1,222 +0,0 @@
package cache_test
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
numValidators := 101
deterministicState, _ := util.DeterministicGenesisStateAltair(t, uint64(numValidators))
pubKeys := make([][]byte, deterministicState.NumValidators())
for i, val := range deterministicState.Validators() {
pubKeys[i] = val.PublicKey
}
tests := []struct {
name string
currentSyncCommittee *ethpb.SyncCommittee
nextSyncCommittee *ethpb.SyncCommittee
currentSyncMap map[types.ValidatorIndex][]types.CommitteeIndex
nextSyncMap map[types.ValidatorIndex][]types.CommitteeIndex
}{
{
name: "only current epoch",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
}),
nextSyncCommittee: convertToCommittee([][]byte{}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
3: {2},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {},
2: {},
3: {},
},
},
{
name: "only next epoch",
currentSyncCommittee: convertToCommittee([][]byte{}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {},
2: {},
3: {},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
3: {2},
},
},
{
name: "some current epoch and some next epoch",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[1],
pubKeys[2],
pubKeys[3],
pubKeys[2],
pubKeys[2],
}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[7],
pubKeys[6],
pubKeys[5],
pubKeys[4],
pubKeys[7],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
3: {2},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
7: {0, 4},
6: {1},
5: {2},
4: {3},
},
},
{
name: "some current epoch and some next epoch duplicated across",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[1],
pubKeys[2],
pubKeys[3],
pubKeys[2],
pubKeys[2],
}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[2],
pubKeys[1],
pubKeys[3],
pubKeys[2],
pubKeys[1],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
3: {2},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {1, 4},
2: {0, 3},
3: {2},
},
},
{
name: "all duplicated",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
100: {0, 1, 2, 3},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
100: {0, 1, 2, 3},
},
},
{
name: "unknown keys",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, uint64(numValidators))
require.NoError(t, s.SetCurrentSyncCommittee(tt.currentSyncCommittee))
require.NoError(t, s.SetNextSyncCommittee(tt.nextSyncCommittee))
cache := cache.NewSyncCommittee()
r := [32]byte{'a'}
require.NoError(t, cache.UpdatePositionsInCommittee(r, s))
for key, indices := range tt.currentSyncMap {
pos, err := cache.CurrentPeriodIndexPosition(r, key)
require.NoError(t, err)
require.DeepEqual(t, indices, pos)
}
for key, indices := range tt.nextSyncMap {
pos, err := cache.NextPeriodIndexPosition(r, key)
require.NoError(t, err)
require.DeepEqual(t, indices, pos)
}
})
}
}
func TestSyncCommitteeCache_RootDoesNotExist(t *testing.T) {
c := cache.NewSyncCommittee()
_, err := c.CurrentPeriodIndexPosition([32]byte{}, 0)
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
}
func TestSyncCommitteeCache_CanRotate(t *testing.T) {
c := cache.NewSyncCommittee()
s, _ := util.DeterministicGenesisStateAltair(t, 64)
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{1}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'a'}, s))
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{2}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'b'}, s))
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{3}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'c'}, s))
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{4}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'d'}, s))
_, err := c.CurrentPeriodIndexPosition([32]byte{'a'}, 0)
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
_, err = c.CurrentPeriodIndexPosition([32]byte{'c'}, 0)
require.NoError(t, err)
}
func convertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee {
var pubKeys [][]byte
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
if i < uint64(len(inputKeys)) {
pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength))
} else {
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
}
}
return &ethpb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
}

View File

@@ -1,131 +0,0 @@
package cache
import (
"sync"
"time"
"github.com/patrickmn/go-cache"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/slice"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
type syncSubnetIDs struct {
sCommittee *cache.Cache
sCommiteeLock sync.RWMutex
}
// SyncSubnetIDs for sync committee participant.
var SyncSubnetIDs = newSyncSubnetIDs()
func newSyncSubnetIDs() *syncSubnetIDs {
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
// Set the default duration of a sync subnet index as the whole sync committee period.
subLength := epochDuration * time.Duration(params.BeaconConfig().EpochsPerSyncCommitteePeriod)
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
return &syncSubnetIDs{sCommittee: persistentCache}
}
// GetSyncCommitteeSubnets retrieves the sync committee subnet and expiration time of that validator's subscription.
func (s *syncSubnetIDs) GetSyncCommitteeSubnets(pubkey []byte, epoch types.Epoch) ([]uint64, types.Epoch, bool, time.Time) {
s.sCommiteeLock.RLock()
defer s.sCommiteeLock.RUnlock()
id, duration, ok := s.sCommittee.GetWithExpiration(keyBuilder(pubkey, epoch))
if !ok {
return []uint64{}, 0, ok, time.Time{}
}
// Retrieve indices from the cache.
idxs, ok := id.([]uint64)
if !ok {
return []uint64{}, 0, ok, time.Time{}
}
// If no committees are saved, we return nothing.
if len(idxs) <= 1 {
return []uint64{}, 0, ok, time.Time{}
}
// Index 0 was used to store validator's join epoch. We do not
// return it to the caller.
// Index 1 and beyond were used to store subnets.
return idxs[1:], types.Epoch(idxs[0]), ok, duration
}
// GetAllSubnets retrieves all the non-expired subscribed subnets of all the validators
// in the cache. This method also takes the epoch as an argument to only retrieve
// assignments for epochs that have happened.
func (s *syncSubnetIDs) GetAllSubnets(currEpoch types.Epoch) []uint64 {
s.sCommiteeLock.RLock()
defer s.sCommiteeLock.RUnlock()
itemsMap := s.sCommittee.Items()
var committees []uint64
for _, v := range itemsMap {
if v.Expired() {
continue
}
idxs, ok := v.Object.([]uint64)
if !ok {
continue
}
// We skip if we do not have a join
// epoch or any relevant committee indices.
if len(idxs) <= 1 {
continue
}
// Check if the subnet is valid in the current epoch. If our
// join epoch is still in the future we skip retrieving the
// relevant committee index.
if types.Epoch(idxs[0]) > currEpoch {
continue
}
// Ignore the first index as that represents the
// epoch of the validator's assignments.
committees = append(committees, idxs[1:]...)
}
return slice.SetUint64(committees)
}
// AddSyncCommitteeSubnets adds the relevant committee for that particular validator along with its
// expiration period. An Epoch argument here denotes the epoch from which the sync committee subnets
// will be active.
func (s *syncSubnetIDs) AddSyncCommitteeSubnets(pubkey []byte, epoch types.Epoch, comIndex []uint64, duration time.Duration) {
s.sCommiteeLock.Lock()
defer s.sCommiteeLock.Unlock()
subComCount := params.BeaconConfig().SyncCommitteeSubnetCount
// To join a sync committee subnet, select a random number of epochs before the end of the
// current sync committee period between 1 and SYNC_COMMITTEE_SUBNET_COUNT, inclusive.
// This smooths out the joining and exiting of subnets so that not everyone surges at the same time.
diff := (rand.NewGenerator().Uint64() % subComCount) + 1
joinEpoch, err := epoch.SafeSub(diff)
if err != nil {
// If we underflow here, we simply set the join epoch to 0.
joinEpoch = 0
}
// Store the join epoch at the first index here. The join epoch is a special
// value: it is the epoch at which a node is supposed to join the relevant subnets.
s.sCommittee.Set(keyBuilder(pubkey, epoch), append([]uint64{uint64(joinEpoch)}, comIndex...), duration)
}
// EmptyAllCaches empties out all the related caches and flushes any stored
// entries on them. This should only ever be used for testing; in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *syncSubnetIDs) EmptyAllCaches() {
// Clear the cache.
s.sCommiteeLock.Lock()
s.sCommittee.Flush()
s.sCommiteeLock.Unlock()
}
// build a key composed of both the pubkey and epoch here. The epoch
// here would be the 1st epoch of the sync committee period.
func keyBuilder(pubkey []byte, epoch types.Epoch) string {
epochBytes := bytesutil.Bytes8(uint64(epoch))
return string(append(pubkey, epochBytes...))
}
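The cached value above is a flat []uint64 whose first element is the join epoch and whose remaining elements are the subscribed subnets, keyed by the pubkey plus the period's first epoch. A self-contained sketch of that encode/decode layout follows; the little-endian epoch encoding in cacheKey is an assumption standing in for bytesutil.Bytes8:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeEntry mirrors the layout stored above: the join epoch at index 0,
// followed by the subscribed subnet indices.
func encodeEntry(joinEpoch uint64, subnets []uint64) []uint64 {
	return append([]uint64{joinEpoch}, subnets...)
}

// decodeEntry splits the stored slice back into join epoch and subnets,
// reporting false when no subnets were recorded (len <= 1), as GetSyncCommitteeSubnets does.
func decodeEntry(entry []uint64) (uint64, []uint64, bool) {
	if len(entry) <= 1 {
		return 0, nil, false
	}
	return entry[0], entry[1:], true
}

// cacheKey sketches keyBuilder: pubkey bytes followed by the epoch as 8 bytes
// (endianness assumed here for illustration).
func cacheKey(pubkey []byte, epoch uint64) string {
	var e [8]byte
	binary.LittleEndian.PutUint64(e[:], epoch)
	return string(append(pubkey, e[:]...))
}

func main() {
	entry := encodeEntry(97, []uint64{3, 1})
	join, subnets, ok := decodeEntry(entry)
	fmt.Println(join, subnets, ok)                      // 97 [3 1] true
	fmt.Println(len(cacheKey([]byte{0xaa, 0xbb}, 100))) // 10
}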

View File

@@ -1,57 +0,0 @@
package cache
import (
"testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
c := newSyncSubnetIDs()
for i := 0; i < 20; i++ {
pubkey := [48]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}
for i := uint64(0); i < 20; i++ {
pubkey := [48]byte{byte(i)}
idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
if !ok {
t.Errorf("Couldn't find entry in cache for pubkey %#x", pubkey)
continue
}
require.Equal(t, i, idxs[0])
}
coms := c.GetAllSubnets(100)
assert.Equal(t, 20, len(coms))
}
func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
c := newSyncSubnetIDs()
for i := 0; i < 20; i++ {
pubkey := [48]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}
coms := c.GetAllSubnets(50)
assert.Equal(t, 0, len(coms))
for i := uint64(0); i < 20; i++ {
pubkey := [48]byte{byte(i)}
_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
if !ok {
t.Errorf("Couldn't find entry in cache for pubkey %#x", pubkey)
continue
}
require.Equal(t, true, uint64(jEpoch) >= 100-params.BeaconConfig().SyncCommitteeSubnetCount)
}
coms = c.GetAllSubnets(99)
assert.Equal(t, 20, len(coms))
}

View File

@@ -1,91 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"attestation.go",
"block.go",
"deposit.go",
"epoch_precompute.go",
"epoch_spec.go",
"reward.go",
"sync_committee.go",
"transition.go",
"upgrade.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/altair",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
"//testing/util:__pkg__",
"//validator/client:__pkg__",
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"attestation_test.go",
"block_test.go",
"deposit_fuzz_test.go",
"deposit_test.go",
"epoch_precompute_test.go",
"epoch_spec_test.go",
"reward_test.go",
"sync_committee_test.go",
"transition_test.go",
"upgrade_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -1,328 +0,0 @@
package altair
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"go.opencensus.io/trace"
)
// ProcessAttestationsNoVerifySignature applies processing operations to a block's inner attestation
// records. The only difference from regular attestation processing is that attestation signatures are not verified.
func ProcessAttestationsNoVerifySignature(
ctx context.Context,
beaconState state.BeaconState,
b block.SignedBeaconBlock,
) (state.BeaconState, error) {
if err := helpers.BeaconBlockIsNil(b); err != nil {
return nil, err
}
body := b.Block().Body()
totalBalance, err := helpers.TotalActiveBalance(beaconState)
if err != nil {
return nil, err
}
for idx, attestation := range body.Attestations() {
beaconState, err = ProcessAttestationNoVerifySignature(ctx, beaconState, attestation, totalBalance)
if err != nil {
return nil, errors.Wrapf(err, "could not verify attestation at index %d in block", idx)
}
}
return beaconState, nil
}
// ProcessAttestationNoVerifySignature processes the attestation without verifying the attestation signature. This
// method is used to validate attestations whose signatures have already been verified or will be verified later.
func ProcessAttestationNoVerifySignature(
ctx context.Context,
beaconState state.BeaconStateAltair,
att *ethpb.Attestation,
totalBalance uint64,
) (state.BeaconStateAltair, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessAttestationNoVerifySignature")
defer span.End()
if err := blocks.VerifyAttestationNoVerifySignature(ctx, beaconState, att); err != nil {
return nil, err
}
delay, err := beaconState.Slot().SafeSubSlot(att.Data.Slot)
if err != nil {
return nil, fmt.Errorf("att slot %d can't be greater than state slot %d", att.Data.Slot, beaconState.Slot())
}
participatedFlags, err := AttestationParticipationFlagIndices(beaconState, att.Data, delay)
if err != nil {
return nil, err
}
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return nil, err
}
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
if err != nil {
return nil, err
}
return SetParticipationAndRewardProposer(ctx, beaconState, att.Data.Target.Epoch, indices, participatedFlags, totalBalance)
}
// SetParticipationAndRewardProposer retrieves and sets the epoch participation bits in state. Based on the epoch participation, it rewards
// the proposer in state.
//
// Spec code:
// # Update epoch participation flags
// if data.target.epoch == get_current_epoch(state):
// epoch_participation = state.current_epoch_participation
// else:
// epoch_participation = state.previous_epoch_participation
//
// proposer_reward_numerator = 0
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
// proposer_reward_numerator += get_base_reward(state, index) * weight
//
// # Reward proposer
// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
func SetParticipationAndRewardProposer(
ctx context.Context,
beaconState state.BeaconState,
targetEpoch types.Epoch,
indices []uint64,
participatedFlags map[uint8]bool, totalBalance uint64) (state.BeaconState, error) {
var epochParticipation []byte
currentEpoch := time.CurrentEpoch(beaconState)
var err error
if targetEpoch == currentEpoch {
epochParticipation, err = beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
} else {
epochParticipation, err = beaconState.PreviousEpochParticipation()
if err != nil {
return nil, err
}
}
proposerRewardNumerator, epochParticipation, err := EpochParticipation(beaconState, indices, epochParticipation, participatedFlags, totalBalance)
if err != nil {
return nil, err
}
if targetEpoch == currentEpoch {
if err := beaconState.SetCurrentParticipationBits(epochParticipation); err != nil {
return nil, err
}
} else {
if err := beaconState.SetPreviousParticipationBits(epochParticipation); err != nil {
return nil, err
}
}
if err := RewardProposer(ctx, beaconState, proposerRewardNumerator); err != nil {
return nil, err
}
return beaconState, nil
}
// HasValidatorFlag returns true if the flag at the given position has been set.
func HasValidatorFlag(flag, flagPosition uint8) (bool, error) {
if flagPosition > 7 {
return false, errors.New("flag position exceeds length")
}
return ((flag >> flagPosition) & 1) == 1, nil
}
// AddValidatorFlag adds a new validator flag to an existing one.
func AddValidatorFlag(flag, flagPosition uint8) (uint8, error) {
if flagPosition > 7 {
return flag, errors.New("flag position exceeds length")
}
return flag | (1 << flagPosition), nil
}
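// exampleTimelyFlags is an editor-added, illustrative sketch (not part of the original file)
// showing how AddValidatorFlag and HasValidatorFlag compose: starting from an empty
// participation byte, setting the timely source and target flags leaves the head flag unset.
func exampleTimelyFlags() (bool, error) {
	cfg := params.BeaconConfig()
	flags := uint8(0)
	var err error
	if flags, err = AddValidatorFlag(flags, cfg.TimelySourceFlagIndex); err != nil {
		return false, err
	}
	if flags, err = AddValidatorFlag(flags, cfg.TimelyTargetFlagIndex); err != nil {
		return false, err
	}
	// flags is now 0b011; the head bit is still clear.
	return HasValidatorFlag(flags, cfg.TimelyHeadFlagIndex) // expected: false, nil
}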
// EpochParticipation sets and returns the proposer reward numerator and epoch participation.
//
// Spec code:
// proposer_reward_numerator = 0
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
// proposer_reward_numerator += get_base_reward(state, index) * weight
func EpochParticipation(beaconState state.BeaconState, indices []uint64, epochParticipation []byte, participatedFlags map[uint8]bool, totalBalance uint64) (uint64, []byte, error) {
cfg := params.BeaconConfig()
sourceFlagIndex := cfg.TimelySourceFlagIndex
targetFlagIndex := cfg.TimelyTargetFlagIndex
headFlagIndex := cfg.TimelyHeadFlagIndex
proposerRewardNumerator := uint64(0)
for _, index := range indices {
if index >= uint64(len(epochParticipation)) {
return 0, nil, fmt.Errorf("index %d exceeds participation length %d", index, len(epochParticipation))
}
br, err := BaseRewardWithTotalBalance(beaconState, types.ValidatorIndex(index), totalBalance)
if err != nil {
return 0, nil, err
}
has, err := HasValidatorFlag(epochParticipation[index], sourceFlagIndex)
if err != nil {
return 0, nil, err
}
if participatedFlags[sourceFlagIndex] && !has {
epochParticipation[index], err = AddValidatorFlag(epochParticipation[index], sourceFlagIndex)
if err != nil {
return 0, nil, err
}
proposerRewardNumerator += br * cfg.TimelySourceWeight
}
has, err = HasValidatorFlag(epochParticipation[index], targetFlagIndex)
if err != nil {
return 0, nil, err
}
if participatedFlags[targetFlagIndex] && !has {
epochParticipation[index], err = AddValidatorFlag(epochParticipation[index], targetFlagIndex)
if err != nil {
return 0, nil, err
}
proposerRewardNumerator += br * cfg.TimelyTargetWeight
}
has, err = HasValidatorFlag(epochParticipation[index], headFlagIndex)
if err != nil {
return 0, nil, err
}
if participatedFlags[headFlagIndex] && !has {
epochParticipation[index], err = AddValidatorFlag(epochParticipation[index], headFlagIndex)
if err != nil {
return 0, nil, err
}
proposerRewardNumerator += br * cfg.TimelyHeadWeight
}
}
return proposerRewardNumerator, epochParticipation, nil
}
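// Editor's note (hedged, not part of the original file): with the mainnet weights
// (TimelySourceWeight = 14, TimelyTargetWeight = 26, TimelyHeadWeight = 14), a validator that
// newly earns the source and target flags contributes baseReward * (14 + 26) = baseReward * 40
// to the proposer reward numerator, and one that earns all three flags contributes
// baseReward * 54.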
// RewardProposer rewards the proposer by increasing the proposer's balance using the input reward numerator and the calculated reward denominator.
//
// Spec code:
// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
func RewardProposer(ctx context.Context, beaconState state.BeaconState, proposerRewardNumerator uint64) error {
cfg := params.BeaconConfig()
d := (cfg.WeightDenominator - cfg.ProposerWeight) * cfg.WeightDenominator / cfg.ProposerWeight
proposerReward := proposerRewardNumerator / d
i, err := helpers.BeaconProposerIndex(ctx, beaconState)
if err != nil {
return err
}
return helpers.IncreaseBalance(beaconState, i, proposerReward)
}
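// Editor's note (hedged, not part of the original file): with the mainnet constants
// WeightDenominator = 64 and ProposerWeight = 8, the denominator above is
// (64 - 8) * 64 / 8 = 448, so the proposer is credited proposerRewardNumerator / 448 Gwei.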
// AttestationParticipationFlagIndices retrieves a map of attestation scoring based on Altair's participation flag indices.
// This is used to facilitate attestation processing during the state transition and during the upgrade to the Altair state.
//
// Spec code:
// def get_attestation_participation_flag_indices(state: BeaconState,
// data: AttestationData,
// inclusion_delay: uint64) -> Sequence[int]:
// """
// Return the flag indices that are satisfied by an attestation.
// """
// if data.target.epoch == get_current_epoch(state):
// justified_checkpoint = state.current_justified_checkpoint
// else:
// justified_checkpoint = state.previous_justified_checkpoint
//
// # Matching roots
// is_matching_source = data.source == justified_checkpoint
// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
// assert is_matching_source
//
// participation_flag_indices = []
// if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH):
// participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
// if is_matching_target and inclusion_delay <= SLOTS_PER_EPOCH:
// participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
// if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY:
// participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
//
// return participation_flag_indices
func AttestationParticipationFlagIndices(beaconState state.BeaconStateAltair, data *ethpb.AttestationData, delay types.Slot) (map[uint8]bool, error) {
currEpoch := time.CurrentEpoch(beaconState)
var justifiedCheckpt *ethpb.Checkpoint
if data.Target.Epoch == currEpoch {
justifiedCheckpt = beaconState.CurrentJustifiedCheckpoint()
} else {
justifiedCheckpt = beaconState.PreviousJustifiedCheckpoint()
}
matchedSrc, matchedTgt, matchedHead, err := MatchingStatus(beaconState, data, justifiedCheckpt)
if err != nil {
return nil, err
}
if !matchedSrc {
return nil, errors.New("source epoch does not match")
}
participatedFlags := make(map[uint8]bool)
cfg := params.BeaconConfig()
sourceFlagIndex := cfg.TimelySourceFlagIndex
targetFlagIndex := cfg.TimelyTargetFlagIndex
headFlagIndex := cfg.TimelyHeadFlagIndex
slotsPerEpoch := cfg.SlotsPerEpoch
sqrtRootSlots := cfg.SqrRootSlotsPerEpoch
if matchedSrc && delay <= sqrtRootSlots {
participatedFlags[sourceFlagIndex] = true
}
matchedSrcTgt := matchedSrc && matchedTgt
if matchedSrcTgt && delay <= slotsPerEpoch {
participatedFlags[targetFlagIndex] = true
}
matchedSrcTgtHead := matchedHead && matchedSrcTgt
if matchedSrcTgtHead && delay == cfg.MinAttestationInclusionDelay {
participatedFlags[headFlagIndex] = true
}
return participatedFlags, nil
}
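// Editor's note (hedged, not part of the original file): on mainnet, SlotsPerEpoch is 32,
// its integer square root is 5 and MinAttestationInclusionDelay is 1, so a matching
// attestation earns the source flag when included within 5 slots, the target flag when
// included within 32 slots, and the head flag only when included in the very next slot
// (delay == 1).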
// MatchingStatus returns the matching statuses for the attestation data's source, target and head.
//
// Spec code:
// is_matching_source = data.source == justified_checkpoint
// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
func MatchingStatus(beaconState state.BeaconState, data *ethpb.AttestationData, cp *ethpb.Checkpoint) (matchedSrc, matchedTgt, matchedHead bool, err error) {
matchedSrc = attestation.CheckPointIsEqual(data.Source, cp)
r, err := helpers.BlockRoot(beaconState, data.Target.Epoch)
if err != nil {
return false, false, false, err
}
matchedTgt = bytes.Equal(r, data.Target.Root)
r, err = helpers.BlockRootAtSlot(beaconState, data.Slot)
if err != nil {
return false, false, false, err
}
matchedHead = bytes.Equal(r, data.BeaconBlockRoot)
return
}

View File

@@ -1,777 +0,0 @@
package altair_test
import (
"context"
"fmt"
"testing"
fuzz "github.com/google/gofuzz"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/math"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
attestations := []*ethpb.Attestation{
util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Slot: 5,
},
}),
}
b := util.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: attestations,
},
}
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
want := fmt.Sprintf(
"attestation slot %d + inclusion delay %d > state slot %d",
attestations[0].Data.Slot,
params.BeaconConfig().MinAttestationInclusionDelay,
beaconState.Slot(),
)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
}
func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
att := util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
Target: &ethpb.Checkpoint{Epoch: 0}}})
b := util.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: []*ethpb.Attestation{att},
},
}
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch*4 + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
pfc := beaconState.PreviousJustifiedCheckpoint()
pfc.Root = []byte("hello-world")
require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))
want := fmt.Sprintf(
"expected target epoch (%d) to be the previous epoch (%d) or the current epoch (%d)",
att.Data.Target.Epoch,
time.PrevEpoch(beaconState),
time.CurrentEpoch(beaconState),
)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
}
func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
attestations := []*ethpb.Attestation{
{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
},
AggregationBits: bitfield.Bitlist{0x09},
},
}
b := util.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: attestations,
},
}
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = []byte("hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
want := "source check point not equal to current justified checkpoint"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = time.CurrentEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
}
func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
attestations := []*ethpb.Attestation{
{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Slot: params.BeaconConfig().SlotsPerEpoch,
},
AggregationBits: aggBits,
},
}
b := util.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: attestations,
},
}
err := beaconState.SetSlot(beaconState.Slot() + 2*params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
pfc := beaconState.PreviousJustifiedCheckpoint()
pfc.Root = []byte("hello-world")
require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))
want := "source check point not equal to previous justified checkpoint"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = time.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Target.Epoch = time.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
}
func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
aggBits := bitfield.NewBitlist(4)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
Target: &ethpb.Checkpoint{Epoch: 0}},
AggregationBits: aggBits,
}
b := util.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: []*ethpb.Attestation{att},
},
}
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = []byte("hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, expected, err)
}
func TestProcessAttestations_OK(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Root: mockRoot[:]},
},
AggregationBits: aggBits,
})
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att.AggregationBits, committee)
require.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()
block := util.NewBeaconBlockAltair()
block.Block.Body.Attestations = []*ethpb.Attestation{att}
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.NoError(t, err)
}
func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, 64)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
aggBits := bitfield.NewBitlist(2)
aggBits.SetBitAt(0, true)
aggBits.SetBitAt(1, true)
r, err := helpers.BlockRootAtSlot(beaconState, 0)
require.NoError(t, err)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: r,
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
}
zeroSig := [96]byte{}
att.Signature = zeroSig[:]
ckp := beaconState.CurrentJustifiedCheckpoint()
copy(ckp.Root, make([]byte, 32))
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
b, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
beaconState, err = altair.ProcessAttestationNoVerifySignature(context.Background(), beaconState, att, b)
require.NoError(t, err)
p, err := beaconState.CurrentEpochParticipation()
require.NoError(t, err)
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
require.NoError(t, err)
for _, index := range indices {
has, err := altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyHeadFlagIndex)
require.NoError(t, err)
require.Equal(t, true, has)
has, err = altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelySourceFlagIndex)
require.NoError(t, err)
require.Equal(t, true, has)
has, err = altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyTargetFlagIndex)
require.NoError(t, err)
require.Equal(t, true, has)
}
}
func TestValidatorFlag_Has(t *testing.T) {
tests := []struct {
name string
set uint8
expected []uint8
}{
{name: "none",
set: 0,
expected: []uint8{},
},
{
name: "source",
set: 1,
expected: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
},
{
name: "target",
set: 2,
expected: []uint8{params.BeaconConfig().TimelyTargetFlagIndex},
},
{
name: "head",
set: 4,
expected: []uint8{params.BeaconConfig().TimelyHeadFlagIndex},
},
{
name: "source, target",
set: 3,
expected: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex},
},
{
name: "source, head",
set: 5,
expected: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
},
{
name: "target, head",
set: 6,
expected: []uint8{params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
},
{
name: "source, target, head",
set: 7,
expected: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for _, f := range tt.expected {
has, err := altair.HasValidatorFlag(tt.set, f)
require.NoError(t, err)
require.Equal(t, true, has)
}
})
}
}
func TestValidatorFlag_Has_ExceedsLength(t *testing.T) {
_, err := altair.HasValidatorFlag(0, 8)
require.ErrorContains(t, "flag position exceeds length", err)
}
func TestValidatorFlag_Add(t *testing.T) {
tests := []struct {
name string
set []uint8
expectedTrue []uint8
expectedFalse []uint8
}{
{name: "none",
set: []uint8{},
expectedTrue: []uint8{},
expectedFalse: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
},
{
name: "source",
set: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
expectedFalse: []uint8{params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
},
{
name: "source, target",
set: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex},
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex},
expectedFalse: []uint8{params.BeaconConfig().TimelyHeadFlagIndex},
},
{
name: "source, target, head",
set: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
expectedFalse: []uint8{},
},
}
var err error
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := uint8(0)
for _, f := range tt.set {
b, err = altair.AddValidatorFlag(b, f)
require.NoError(t, err)
}
for _, f := range tt.expectedFalse {
has, err := altair.HasValidatorFlag(b, f)
require.NoError(t, err)
require.Equal(t, false, has)
}
for _, f := range tt.expectedTrue {
has, err := altair.HasValidatorFlag(b, f)
require.NoError(t, err)
require.Equal(t, true, has)
}
})
}
}
func TestValidatorFlag_Add_ExceedsLength(t *testing.T) {
_, err := altair.AddValidatorFlag(0, 8)
require.ErrorContains(t, "flag position exceeds length", err)
}
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconStateAltair{}
b := &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
if b.Block == nil {
b.Block = &ethpb.BeaconBlockAltair{}
}
s, err := stateAltair.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
}
}
}
func TestSetParticipationAndRewardProposer(t *testing.T) {
cfg := params.BeaconConfig()
sourceFlagIndex := cfg.TimelySourceFlagIndex
targetFlagIndex := cfg.TimelyTargetFlagIndex
headFlagIndex := cfg.TimelyHeadFlagIndex
tests := []struct {
name string
indices []uint64
epochParticipation []byte
participatedFlags map[uint8]bool
epoch types.Epoch
wantedBalance uint64
wantedParticipation []byte
}{
{name: "none participated",
indices: []uint64{}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: false,
targetFlagIndex: false,
headFlagIndex: false,
},
wantedParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0},
wantedBalance: 32000000000,
},
{name: "some participated without flags",
indices: []uint64{0, 1, 2, 3}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: false,
targetFlagIndex: false,
headFlagIndex: false,
},
wantedParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0},
wantedBalance: 32000000000,
},
{name: "some participated with some flags",
indices: []uint64{0, 1, 2, 3}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: true,
headFlagIndex: false,
},
wantedParticipation: []byte{3, 3, 3, 3, 0, 0, 0, 0},
wantedBalance: 32000090342,
},
{name: "all participated with some flags",
indices: []uint64{0, 1, 2, 3, 4, 5, 6, 7}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: false,
headFlagIndex: false,
},
wantedParticipation: []byte{1, 1, 1, 1, 1, 1, 1, 1},
wantedBalance: 32000063240,
},
{name: "all participated with all flags",
indices: []uint64{0, 1, 2, 3, 4, 5, 6, 7}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: true,
headFlagIndex: true,
},
wantedParticipation: []byte{7, 7, 7, 7, 7, 7, 7, 7},
wantedBalance: 32000243925,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
currentEpoch := time.CurrentEpoch(beaconState)
if test.epoch == currentEpoch {
require.NoError(t, beaconState.SetCurrentParticipationBits(test.epochParticipation))
} else {
require.NoError(t, beaconState.SetPreviousParticipationBits(test.epochParticipation))
}
b, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
st, err := altair.SetParticipationAndRewardProposer(context.Background(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
require.NoError(t, err)
i, err := helpers.BeaconProposerIndex(context.Background(), st)
require.NoError(t, err)
b, err = beaconState.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, test.wantedBalance, b)
if test.epoch == currentEpoch {
p, err := beaconState.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, test.wantedParticipation, p)
} else {
p, err := beaconState.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, test.wantedParticipation, p)
}
})
}
}
func TestEpochParticipation(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
cfg := params.BeaconConfig()
sourceFlagIndex := cfg.TimelySourceFlagIndex
targetFlagIndex := cfg.TimelyTargetFlagIndex
headFlagIndex := cfg.TimelyHeadFlagIndex
tests := []struct {
name string
indices []uint64
epochParticipation []byte
participatedFlags map[uint8]bool
wantedNumerator uint64
wantedEpochParticipation []byte
}{
{name: "none participated",
indices: []uint64{}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: false,
targetFlagIndex: false,
headFlagIndex: false,
},
wantedNumerator: 0,
wantedEpochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0},
},
{name: "some participated without flags",
indices: []uint64{0, 1, 2, 3}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: false,
targetFlagIndex: false,
headFlagIndex: false,
},
wantedNumerator: 0,
wantedEpochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0},
},
{name: "some participated with some flags",
indices: []uint64{0, 1, 2, 3}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: true,
headFlagIndex: false,
},
wantedNumerator: 40473600,
wantedEpochParticipation: []byte{3, 3, 3, 3, 0, 0, 0, 0},
},
{name: "all participated with some flags",
indices: []uint64{0, 1, 2, 3, 4, 5, 6, 7}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: false,
headFlagIndex: false,
},
wantedNumerator: 28331520,
wantedEpochParticipation: []byte{1, 1, 1, 1, 1, 1, 1, 1},
},
{name: "all participated with all flags",
indices: []uint64{0, 1, 2, 3, 4, 5, 6, 7}, epochParticipation: []byte{0, 0, 0, 0, 0, 0, 0, 0}, participatedFlags: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: true,
headFlagIndex: true,
},
wantedNumerator: 109278720,
wantedEpochParticipation: []byte{7, 7, 7, 7, 7, 7, 7, 7},
},
}
for _, test := range tests {
b, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
n, p, err := altair.EpochParticipation(beaconState, test.indices, test.epochParticipation, test.participatedFlags, b)
require.NoError(t, err)
require.Equal(t, test.wantedNumerator, n)
require.DeepSSZEqual(t, test.wantedEpochParticipation, p)
}
}
func TestRewardProposer(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
tests := []struct {
rewardNumerator uint64
want uint64
}{
{rewardNumerator: 1, want: 32000000000},
{rewardNumerator: 10000, want: 32000000022},
{rewardNumerator: 1000000, want: 32000002254},
{rewardNumerator: 1000000000, want: 32002234396},
{rewardNumerator: 1000000000000, want: 34234377253},
}
for _, test := range tests {
require.NoError(t, altair.RewardProposer(context.Background(), beaconState, test.rewardNumerator))
i, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
require.NoError(t, err)
b, err := beaconState.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, test.want, b)
}
}
func TestAttestationParticipationFlagIndices(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
cfg := params.BeaconConfig()
sourceFlagIndex := cfg.TimelySourceFlagIndex
targetFlagIndex := cfg.TimelyTargetFlagIndex
headFlagIndex := cfg.TimelyHeadFlagIndex
tests := []struct {
name string
inputState state.BeaconState
inputData *ethpb.AttestationData
inputDelay types.Slot
participationIndices map[uint8]bool
}{
{
name: "none",
inputState: func() state.BeaconState {
return beaconState
}(),
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{},
},
inputDelay: params.BeaconConfig().SlotsPerEpoch,
participationIndices: map[uint8]bool{},
},
{
name: "participated source",
inputState: func() state.BeaconState {
return beaconState
}(),
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{},
},
inputDelay: types.Slot(math.IntegerSquareRoot(uint64(cfg.SlotsPerEpoch)) - 1),
participationIndices: map[uint8]bool{
sourceFlagIndex: true,
},
},
{
name: "participated source and target",
inputState: func() state.BeaconState {
return beaconState
}(),
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
},
inputDelay: types.Slot(math.IntegerSquareRoot(uint64(cfg.SlotsPerEpoch)) - 1),
participationIndices: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: true,
},
},
{
name: "participated source and target and head",
inputState: func() state.BeaconState {
return beaconState
}(),
inputData: &ethpb.AttestationData{
BeaconBlockRoot: params.BeaconConfig().ZeroHash[:],
Source: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
},
inputDelay: 1,
participationIndices: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: true,
headFlagIndex: true,
},
},
}
for _, test := range tests {
flagIndices, err := altair.AttestationParticipationFlagIndices(test.inputState, test.inputData, test.inputDelay)
require.NoError(t, err)
require.DeepEqual(t, test.participationIndices, flagIndices)
}
}
func TestMatchingStatus(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
tests := []struct {
name string
inputState state.BeaconState
inputData *ethpb.AttestationData
inputCheckpt *ethpb.Checkpoint
matchedSource bool
matchedTarget bool
matchedHead bool
}{
{
name: "non matched",
inputState: beaconState,
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
Target: &ethpb.Checkpoint{},
},
inputCheckpt: &ethpb.Checkpoint{},
},
{
name: "source matched",
inputState: beaconState,
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{},
Target: &ethpb.Checkpoint{},
},
inputCheckpt: &ethpb.Checkpoint{},
matchedSource: true,
},
{
name: "target matched",
inputState: beaconState,
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
Target: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
},
inputCheckpt: &ethpb.Checkpoint{},
matchedTarget: true,
},
{
name: "head matched",
inputState: beaconState,
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
Target: &ethpb.Checkpoint{},
BeaconBlockRoot: params.BeaconConfig().ZeroHash[:],
},
inputCheckpt: &ethpb.Checkpoint{},
matchedHead: true,
},
{
name: "everything matched",
inputState: beaconState,
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{},
Target: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
BeaconBlockRoot: params.BeaconConfig().ZeroHash[:],
},
inputCheckpt: &ethpb.Checkpoint{},
matchedSource: true,
matchedTarget: true,
matchedHead: true,
},
}
for _, test := range tests {
src, tgt, head, err := altair.MatchingStatus(test.inputState, test.inputData, test.inputCheckpt)
require.NoError(t, err)
require.Equal(t, test.matchedSource, src)
require.Equal(t, test.matchedTarget, tgt)
require.Equal(t, test.matchedHead, head)
}
}

View File

@@ -1,178 +0,0 @@
package altair
import (
"context"
"errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)
// ProcessSyncAggregate verifies sync committee aggregate signature signing over the previous slot block root.
//
// Spec code:
// def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None:
// # Verify sync committee aggregate signature signing over the previous slot block root
// committee_pubkeys = state.current_sync_committee.pubkeys
// participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit]
// previous_slot = max(state.slot, Slot(1)) - Slot(1)
// domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
// signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
// assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
//
// # Compute participant and proposer rewards
// total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
// total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments)
// max_participant_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH)
// participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE)
// proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT))
//
// # Apply participant and proposer rewards
// all_pubkeys = [v.pubkey for v in state.validators]
// committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
// for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits):
// if participation_bit:
// increase_balance(state, participant_index, participant_reward)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
// else:
// decrease_balance(state, participant_index, participant_reward)
func ProcessSyncAggregate(ctx context.Context, s state.BeaconStateAltair, sync *ethpb.SyncAggregate) (state.BeaconStateAltair, error) {
votedKeys, votedIndices, didntVoteIndices, err := FilterSyncCommitteeVotes(s, sync)
if err != nil {
return nil, err
}
if err := VerifySyncCommitteeSig(s, votedKeys, sync.SyncCommitteeSignature); err != nil {
return nil, err
}
return ApplySyncRewardsPenalties(ctx, s, votedIndices, didntVoteIndices)
}
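// Editor's note: a hedged usage sketch adapted from the package tests (the variable names are
// illustrative, not part of the original file). The caller assembles the aggregate from the
// committee bitvector and the aggregated BLS signature over the previous slot's block root:
//
//	sync := &ethpb.SyncAggregate{
//		SyncCommitteeBits:      syncBits,      // bitfield.Bitvector512
//		SyncCommitteeSignature: aggregatedSig, // aggregated BLS signature bytes
//	}
//	st, err = ProcessSyncAggregate(ctx, st, sync)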
// FilterSyncCommitteeVotes filters the validator public keys and indices for the ones that voted and didn't vote.
func FilterSyncCommitteeVotes(s state.BeaconStateAltair, sync *ethpb.SyncAggregate) (
votedKeys []bls.PublicKey,
votedIndices []types.ValidatorIndex,
didntVoteIndices []types.ValidatorIndex,
err error) {
currentSyncCommittee, err := s.CurrentSyncCommittee()
if err != nil {
return nil, nil, nil, err
}
if currentSyncCommittee == nil {
return nil, nil, nil, errors.New("nil current sync committee in state")
}
committeeKeys := currentSyncCommittee.Pubkeys
if sync.SyncCommitteeBits.Len() > uint64(len(committeeKeys)) {
return nil, nil, nil, errors.New("bits length exceeds committee length")
}
votedKeys = make([]bls.PublicKey, 0, len(committeeKeys))
votedIndices = make([]types.ValidatorIndex, 0, len(committeeKeys))
didntVoteIndices = make([]types.ValidatorIndex, 0) // No allocation. Expect most votes.
for i := uint64(0); i < sync.SyncCommitteeBits.Len(); i++ {
vIdx, exists := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[i]))
// Impossible scenario.
if !exists {
return nil, nil, nil, errors.New("validator public key does not exist in state")
}
if sync.SyncCommitteeBits.BitAt(i) {
pubKey, err := bls.PublicKeyFromBytes(committeeKeys[i])
if err != nil {
return nil, nil, nil, err
}
votedKeys = append(votedKeys, pubKey)
votedIndices = append(votedIndices, vIdx)
} else {
didntVoteIndices = append(didntVoteIndices, vIdx)
}
}
return
}
// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.
func VerifySyncCommitteeSig(s state.BeaconStateAltair, syncKeys []bls.PublicKey, syncSig []byte) error {
ps := slots.PrevSlot(s.Slot())
d, err := signing.Domain(s.Fork(), slots.ToEpoch(ps), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
if err != nil {
return err
}
pbr, err := helpers.BlockRootAtSlot(s, ps)
if err != nil {
return err
}
sszBytes := p2pType.SSZBytes(pbr)
r, err := signing.ComputeSigningRoot(&sszBytes, d)
if err != nil {
return err
}
sig, err := bls.SignatureFromBytes(syncSig)
if err != nil {
return err
}
if !sig.Eth2FastAggregateVerify(syncKeys, r) {
return errors.New("invalid sync committee signature")
}
return nil
}
// ApplySyncRewardsPenalties applies rewards and penalties for proposer and sync committee participants.
func ApplySyncRewardsPenalties(ctx context.Context, s state.BeaconStateAltair, votedIndices, didntVoteIndices []types.ValidatorIndex) (state.BeaconStateAltair, error) {
activeBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
return nil, err
}
proposerReward, participantReward, err := SyncRewards(activeBalance)
if err != nil {
return nil, err
}
// Apply sync committee rewards.
earnedProposerReward := uint64(0)
for _, index := range votedIndices {
if err := helpers.IncreaseBalance(s, index, participantReward); err != nil {
return nil, err
}
earnedProposerReward += proposerReward
}
// Apply proposer rewards.
proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
if err != nil {
return nil, err
}
if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
return nil, err
}
// Apply sync committee penalties.
for _, index := range didntVoteIndices {
if err := helpers.DecreaseBalance(s, index, participantReward); err != nil {
return nil, err
}
}
return s, nil
}
// SyncRewards returns the proposer reward and the sync participant reward given the total active balance in state.
func SyncRewards(activeBalance uint64) (proposerReward, participantReward uint64, err error) {
cfg := params.BeaconConfig()
totalActiveIncrements := activeBalance / cfg.EffectiveBalanceIncrement
baseRewardPerInc, err := BaseRewardPerIncrement(activeBalance)
if err != nil {
return 0, 0, err
}
totalBaseRewards := baseRewardPerInc * totalActiveIncrements
maxParticipantRewards := totalBaseRewards * cfg.SyncRewardWeight / cfg.WeightDenominator / uint64(cfg.SlotsPerEpoch)
participantReward = maxParticipantRewards / cfg.SyncCommitteeSize
proposerReward = participantReward * cfg.ProposerWeight / (cfg.WeightDenominator - cfg.ProposerWeight)
return
}
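// Editor's note (hedged, not part of the original file): with the mainnet constants
// (SyncRewardWeight = 2, WeightDenominator = 64, SlotsPerEpoch = 32, SyncCommitteeSize = 512,
// ProposerWeight = 8), the proposer reward reduces to participantReward * 8 / 56, i.e. one
// seventh of a participant's reward, which is consistent with the package tests (3 vs 21 for
// a 32 ETH active balance).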

View File

@@ -1,320 +0,0 @@
package altair_test
import (
"context"
"math"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xff
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
require.NoError(t, err)
ps := slots.PrevSlot(beaconState.Slot())
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
require.NoError(t, err)
sigs := make([]bls.Signature, len(indices))
for i, indice := range indices {
b := p2pType.SSZBytes(pbr)
sb, err := signing.ComputeDomainAndSign(beaconState, time.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
syncAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
SyncCommitteeSignature: aggregatedSig,
}
beaconState, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
// Use a non-sync committee index to compare profitability.
syncCommittee := make(map[types.ValidatorIndex]bool)
for _, index := range indices {
syncCommittee[index] = true
}
nonSyncIndex := types.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerCommittee + 1)
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxValidatorsPerCommittee; i++ {
if !syncCommittee[i] {
nonSyncIndex = i
break
}
}
// Sync committee members should be more profitable than non-sync-committee members
balances := beaconState.Balances()
require.Equal(t, true, balances[indices[0]] > balances[nonSyncIndex])
// Proposer should be more profitable than the rest of the sync committee
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
require.NoError(t, err)
require.Equal(t, true, balances[proposerIndex] > balances[indices[0]])
// Sync committee members should all have the same profits, except for the proposer
for i := 1; i < len(indices); i++ {
if proposerIndex == indices[i-1] || proposerIndex == indices[i] {
continue
}
require.Equal(t, balances[indices[i-1]], balances[indices[i]])
}
// The number of validators with an increased balance should equal the sync committee size
increased := uint64(0)
for _, balance := range balances {
if balance > params.BeaconConfig().MaxEffectiveBalance {
increased++
}
}
require.Equal(t, params.BeaconConfig().SyncCommitteeSize, increased)
}
func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xAA
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
require.NoError(t, err)
ps := slots.PrevSlot(beaconState.Slot())
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
require.NoError(t, err)
sigs := make([]bls.Signature, len(indices))
for i, indice := range indices {
b := p2pType.SSZBytes(pbr)
sb, err := signing.ComputeDomainAndSign(beaconState, time.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
syncAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
SyncCommitteeSignature: aggregatedSig,
}
_, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
require.ErrorContains(t, "invalid sync committee signature", err)
}
func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xAA
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
require.NoError(t, err)
ps := slots.PrevSlot(beaconState.Slot())
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
require.NoError(t, err)
sigs := make([]bls.Signature, 0, len(indices))
for i, indice := range indices {
if syncBits.BitAt(uint64(i)) {
b := p2pType.SSZBytes(pbr)
sb, err := signing.ComputeDomainAndSign(beaconState, time.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs = append(sigs, sig)
}
}
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
syncAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
SyncCommitteeSignature: aggregatedSig,
}
_, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
}
func TestProcessSyncCommittee_FilterSyncCommitteeVotes(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xAA
}
syncAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
}
votedKeys, votedIndices, didntVoteIndices, err := altair.FilterSyncCommitteeVotes(beaconState, syncAggregate)
require.NoError(t, err)
votedMap := make(map[[48]byte]bool)
for _, key := range votedKeys {
votedMap[bytesutil.ToBytes48(key.Marshal())] = true
}
require.Equal(t, int(syncBits.Len()/2), len(votedKeys))
require.Equal(t, int(syncBits.Len()/2), len(votedIndices))
require.Equal(t, int(syncBits.Len()/2), len(didntVoteIndices))
for i := 0; i < len(syncBits); i++ {
if syncBits.BitAt(uint64(i)) {
pk := beaconState.PubkeyAtIndex(votedIndices[i])
require.DeepEqual(t, true, votedMap[pk])
} else {
pk := beaconState.PubkeyAtIndex(didntVoteIndices[i])
require.DeepEqual(t, false, votedMap[pk])
}
}
}
func Test_VerifySyncCommitteeSig(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xff
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
require.NoError(t, err)
ps := slots.PrevSlot(beaconState.Slot())
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
require.NoError(t, err)
sigs := make([]bls.Signature, len(indices))
pks := make([]bls.PublicKey, len(indices))
for i, indice := range indices {
b := p2pType.SSZBytes(pbr)
sb, err := signing.ComputeDomainAndSign(beaconState, time.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
pks[i] = privKeys[indice].PublicKey()
}
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
blsKey, err := bls.RandKey()
require.NoError(t, err)
require.ErrorContains(t, "invalid sync committee signature", altair.VerifySyncCommitteeSig(beaconState, pks, blsKey.Sign([]byte{'m', 'e', 'o', 'w'}).Marshal()))
require.NoError(t, altair.VerifySyncCommitteeSig(beaconState, pks, aggregatedSig))
}
func Test_ApplySyncRewardsPenalties(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
beaconState, err := altair.ApplySyncRewardsPenalties(context.Background(), beaconState,
[]types.ValidatorIndex{0, 1}, // voted
[]types.ValidatorIndex{2, 3}) // didn't vote
require.NoError(t, err)
balances := beaconState.Balances()
require.Equal(t, uint64(32000000988), balances[0])
require.Equal(t, balances[0], balances[1])
require.Equal(t, uint64(31999999012), balances[2])
require.Equal(t, balances[2], balances[3])
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
require.NoError(t, err)
require.Equal(t, uint64(32000000282), balances[proposerIndex])
}
func Test_SyncRewards(t *testing.T) {
tests := []struct {
name string
activeBalance uint64
wantProposerReward uint64
wantParticipantReward uint64
errString string
}{
{
name: "active balance is 0",
activeBalance: 0,
wantProposerReward: 0,
wantParticipantReward: 0,
errString: "active balance can't be 0",
},
{
name: "active balance is 1",
activeBalance: 1,
wantProposerReward: 0,
wantParticipantReward: 0,
errString: "",
},
{
name: "active balance is 1eth",
activeBalance: params.BeaconConfig().EffectiveBalanceIncrement,
wantProposerReward: 0,
wantParticipantReward: 3,
errString: "",
},
{
name: "active balance is 32eth",
activeBalance: params.BeaconConfig().MaxEffectiveBalance,
wantProposerReward: 3,
wantParticipantReward: 21,
errString: "",
},
{
name: "active balance is 32eth * 1m validators",
activeBalance: params.BeaconConfig().MaxEffectiveBalance * 1e9,
wantProposerReward: 62780,
wantParticipantReward: 439463,
errString: "",
},
{
name: "active balance is max uint64",
activeBalance: math.MaxUint64,
wantProposerReward: 70368,
wantParticipantReward: 492581,
errString: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
proposerReward, participantReward, err := altair.SyncRewards(tt.activeBalance)
if (err != nil) && (tt.errString != "") {
require.ErrorContains(t, tt.errString, err)
return
}
require.Equal(t, tt.wantProposerReward, proposerReward)
require.Equal(t, tt.wantParticipantReward, participantReward)
})
}
}

View File

@@ -1,55 +0,0 @@
package altair
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// ProcessDeposits processes validator deposits for beacon state Altair.
func ProcessDeposits(
ctx context.Context,
beaconState state.BeaconStateAltair,
deposits []*ethpb.Deposit,
) (state.BeaconStateAltair, error) {
batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
if err != nil {
return nil, err
}
for _, deposit := range deposits {
if deposit == nil || deposit.Data == nil {
return nil, errors.New("got a nil deposit in block")
}
beaconState, err = ProcessDeposit(ctx, beaconState, deposit, batchVerified)
if err != nil {
return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
}
}
return beaconState, nil
}
// ProcessDeposit processes validator deposit for beacon state Altair.
func ProcessDeposit(ctx context.Context, beaconState state.BeaconStateAltair, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconStateAltair, error) {
beaconState, isNewValidator, err := blocks.ProcessDeposit(beaconState, deposit, verifySignature)
if err != nil {
return nil, err
}
if isNewValidator {
if err := beaconState.AppendInactivityScore(0); err != nil {
return nil, err
}
if err := beaconState.AppendPreviousParticipationBits(0); err != nil {
return nil, err
}
if err := beaconState.AppendCurrentParticipationBits(0); err != nil {
return nil, err
}
}
return beaconState, nil
}
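// Editor's note (hedged, not part of the original file): the Altair-specific behavior here is
// the trio of appends for a brand-new validator (an inactivity score of 0 plus zeroed
// previous and current participation bytes), which keeps those per-validator lists the same
// length as the validator registry. A minimal call, adapted from the package tests, where
// deps is an illustrative []*ethpb.Deposit:
//
//	newState, err := ProcessDeposits(ctx, beaconState, deps)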

View File

@@ -1,48 +0,0 @@
package altair_test
import (
"context"
"testing"
fuzz "github.com/google/gofuzz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestFuzzProcessDeposits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconStateAltair{}
deposits := make([]*ethpb.Deposit, 100)
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
for i := range deposits {
fuzzer.Fuzz(deposits[i])
}
s, err := stateAltair.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := altair.ProcessDeposits(ctx, s, deposits)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposits)
}
}
}
func TestFuzzProcessDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconStateAltair{}
deposit := &ethpb.Deposit{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := stateAltair.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := altair.ProcessDeposit(context.Background(), s, deposit, true)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
}
}

View File

@@ -1,245 +0,0 @@
package altair_test
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestProcessDeposits_SameValidatorMultipleDepositsSameBlock(t *testing.T) {
// Same validator created 3 valid deposits within the same block
dep, _, err := util.DeterministicDepositsAndKeysSameValidator(3)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(dep))
require.NoError(t, err)
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0], dep[1], dep[2]})
require.NoError(t, err, "Expected block deposits to process correctly")
require.Equal(t, 2, len(newState.Validators()), "Incorrect validator count")
}
func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
}
leaf, err := deposit.Data.HashTreeRoot()
require.NoError(t, err)
// We then create a merkle branch for the test.
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")
deposit.Proof = proof
beaconState, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: []byte{0},
BlockHash: []byte{1},
},
})
require.NoError(t, err)
want := "deposit root did not verify"
_, err = altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{deposit})
require.ErrorContains(t, want, err)
}
func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
dep, _, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(dep))
require.NoError(t, err)
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0]})
require.NoError(t, err, "Expected block deposits to process correctly")
if newState.Balances()[1] != dep[0].Data.Amount {
t.Errorf(
"Expected state validator balances index 0 to equal %d, received %d",
dep[0].Data.Amount,
newState.Balances()[1],
)
}
}
func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: sk.PublicKey().Marshal(),
Amount: 1000,
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
}
sr, err := signing.ComputeSigningRoot(deposit.Data, bytesutil.ToBytes(3, 32))
require.NoError(t, err)
sig := sk.Sign(sr[:])
deposit.Data.Signature = sig.Marshal()
leaf, err := deposit.Data.HashTreeRoot()
require.NoError(t, err)
// We then create a merkle branch for the test.
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")
deposit.Proof = proof
registry := []*ethpb.Validator{
{
PublicKey: []byte{1, 2, 3},
},
{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: []byte{1},
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
beaconState, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: registry,
Balances: balances,
Eth1Data: &ethpb.Eth1Data{
DepositRoot: root[:],
BlockHash: root[:],
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{deposit})
require.NoError(t, err, "Process deposit failed")
require.Equal(t, uint64(1000+50), newState.Balances()[1], "Expected balance at index 1 to be 1050")
}
func TestProcessDeposit_AddsNewValidatorDeposit(t *testing.T) {
// Similar to TestProcessDeposits_AddsNewValidatorDeposit except that this test directly calls ProcessDeposit
dep, _, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(dep))
require.NoError(t, err)
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposit(context.Background(), beaconState, dep[0], true)
require.NoError(t, err, "Process deposit failed")
require.Equal(t, 2, len(newState.Validators()), "Expected validator list to have length 2")
require.Equal(t, 2, len(newState.Balances()), "Expected validator balances list to have length 2")
if newState.Balances()[1] != dep[0].Data.Amount {
t.Errorf(
"Expected state validator balances index 1 to equal %d, received %d",
dep[0].Data.Amount,
newState.Balances()[1],
)
}
}
func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
// Same test settings as in TestProcessDeposit_AddsNewValidatorDeposit, except that we use an invalid signature
dep, _, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
dep[0].Data.Signature = make([]byte, 96)
trie, _, err := util.DepositTrieFromDeposits(dep)
require.NoError(t, err)
root := trie.HashTreeRoot()
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposit(context.Background(), beaconState, dep[0], true)
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
if newState.Eth1DepositIndex() != 1 {
t.Errorf(
"Expected Eth1DepositIndex to be increased by 1 after processing an invalid deposit, received change: %v",
newState.Eth1DepositIndex(),
)
}
if len(newState.Validators()) != 1 {
t.Errorf("Expected validator list to have length 1, received: %v", len(newState.Validators()))
}
if len(newState.Balances()) != 1 {
t.Errorf("Expected validator balances list to have length 1, received: %v", len(newState.Balances()))
}
if newState.Balances()[0] != 0 {
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
}
}

View File

@@ -1,341 +0,0 @@
package altair
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/math"
"go.opencensus.io/trace"
)
// InitializePrecomputeValidators precomputes each individual validator's attested balances and the total sum of all validators' attested balances for the epoch.
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconStateAltair) ([]*precompute.Validator, *precompute.Balance, error) {
_, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
defer span.End()
vals := make([]*precompute.Validator, beaconState.NumValidators())
bal := &precompute.Balance{}
prevEpoch := time.PrevEpoch(beaconState)
currentEpoch := time.CurrentEpoch(beaconState)
inactivityScores, err := beaconState.InactivityScores()
if err != nil {
return nil, nil, err
}
// This shouldn't happen with a correct beacon state, but we guard against it
// to avoid index-out-of-bounds panics.
if beaconState.NumValidators() != len(inactivityScores) {
return nil, nil, errors.New("num of validators is different than num of inactivity scores")
}
if err := beaconState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
// Set validator's balance, inactivity score and slashed/withdrawable status.
v := &precompute.Validator{
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
InactivityScore: inactivityScores[idx],
IsSlashed: val.Slashed(),
IsWithdrawableCurrentEpoch: currentEpoch >= val.WithdrawableEpoch(),
}
// Set validator's active status for current epoch.
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
v.IsActiveCurrentEpoch = true
bal.ActiveCurrentEpoch, err = math.Add64(bal.ActiveCurrentEpoch, val.EffectiveBalance())
if err != nil {
return err
}
}
// Set validator's active status for previous epoch.
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
v.IsActivePrevEpoch = true
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance())
if err != nil {
return err
}
}
vals[idx] = v
return nil
}); err != nil {
return nil, nil, errors.Wrap(err, "could not read every validator")
}
return vals, bal, nil
}
// ProcessInactivityScores of beacon chain. This updates the inactivity scores of the beacon chain and
// updates the precompute validator structs for later processing. The inactivity scores work as follows:
// For fully inactive validators and perfectly active validators, the effect is the same as before Altair.
// For a validator that is inactive while the chain fails to finalize, the inactivity score increases by a fixed amount each epoch, so the total loss after N epochs is proportional to N**2/2.
// For imperfectly active validators, the inactivity score's behavior is specified by this function:
// If a validator fails to submit an attestation with the correct target, their inactivity score goes up by 4.
// If they successfully submit an attestation with the correct source and target, their inactivity score drops by 1.
// If the chain has recently finalized, each validator's score drops by 16.
// An illustrative sketch of this update rule follows the function below.
func ProcessInactivityScores(
ctx context.Context,
beaconState state.BeaconState,
vals []*precompute.Validator,
) (state.BeaconState, []*precompute.Validator, error) {
_, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
defer span.End()
cfg := params.BeaconConfig()
if time.CurrentEpoch(beaconState) == cfg.GenesisEpoch {
return beaconState, vals, nil
}
inactivityScores, err := beaconState.InactivityScores()
if err != nil {
return nil, nil, err
}
bias := cfg.InactivityScoreBias
recoveryRate := cfg.InactivityScoreRecoveryRate
prevEpoch := time.PrevEpoch(beaconState)
finalizedEpoch := beaconState.FinalizedCheckpointEpoch()
for i, v := range vals {
if !precompute.EligibleForRewards(v) {
continue
}
if v.IsPrevEpochTargetAttester && !v.IsSlashed {
// Decrease inactivity score when validator gets target correct.
if v.InactivityScore > 0 {
v.InactivityScore -= 1
}
} else {
v.InactivityScore, err = math.Add64(v.InactivityScore, bias)
if err != nil {
return nil, nil, err
}
}
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
score := recoveryRate
// Prevents underflow below 0.
if score > v.InactivityScore {
score = v.InactivityScore
}
v.InactivityScore -= score
}
inactivityScores[i] = v.InactivityScore
}
if err := beaconState.SetInactivityScores(inactivityScores); err != nil {
return nil, nil, err
}
return beaconState, vals, nil
}
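// Illustrative, hypothetical sketch: a minimal, self-contained walk-through of the inactivity score
// rule documented above, assuming the mainnet defaults INACTIVITY_SCORE_BIAS = 4 and
// INACTIVITY_SCORE_RECOVERY_RATE = 16. The helper name exampleInactivityScoreUpdate is not Prysm API;
// it only mirrors the arithmetic in ProcessInactivityScores (the real code also guards additions with math.Add64).
func exampleInactivityScoreUpdate(score uint64, hitTarget, slashed, inLeak bool) uint64 {
	const (
		bias         = 4  // assumed INACTIVITY_SCORE_BIAS
		recoveryRate = 16 // assumed INACTIVITY_SCORE_RECOVERY_RATE
	)
	if hitTarget && !slashed {
		// Correct target vote: the score decays by 1, never dropping below zero.
		if score > 0 {
			score--
		}
	} else {
		// Missed target (or slashed): the score grows by the bias.
		score += bias
	}
	if !inLeak {
		// Chain is finalizing: apply the recovery rate, clamped to prevent underflow.
		r := uint64(recoveryRate)
		if r > score {
			r = score
		}
		score -= r
	}
	return score
}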
// ProcessEpochParticipation processes the epoch participation in state and updates each individual validator's precompute;
// it also tracks and updates the epoch attesting balances. A sketch of the underlying participation-flag check follows this function.
// Spec code:
// if epoch == get_current_epoch(state):
// epoch_participation = state.current_epoch_participation
// else:
// epoch_participation = state.previous_epoch_participation
// active_validator_indices = get_active_validator_indices(state, epoch)
// participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)]
// return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
func ProcessEpochParticipation(
ctx context.Context,
beaconState state.BeaconState,
bal *precompute.Balance,
vals []*precompute.Validator,
) ([]*precompute.Validator, *precompute.Balance, error) {
_, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
defer span.End()
cp, err := beaconState.CurrentEpochParticipation()
if err != nil {
return nil, nil, err
}
cfg := params.BeaconConfig()
targetIdx := cfg.TimelyTargetFlagIndex
sourceIdx := cfg.TimelySourceFlagIndex
headIdx := cfg.TimelyHeadFlagIndex
for i, b := range cp {
has, err := HasValidatorFlag(b, sourceIdx)
if err != nil {
return nil, nil, err
}
if has && vals[i].IsActiveCurrentEpoch {
vals[i].IsCurrentEpochAttester = true
}
has, err = HasValidatorFlag(b, targetIdx)
if err != nil {
return nil, nil, err
}
if has && vals[i].IsActiveCurrentEpoch {
vals[i].IsCurrentEpochAttester = true
vals[i].IsCurrentEpochTargetAttester = true
}
}
pp, err := beaconState.PreviousEpochParticipation()
if err != nil {
return nil, nil, err
}
for i, b := range pp {
has, err := HasValidatorFlag(b, sourceIdx)
if err != nil {
return nil, nil, err
}
if has && vals[i].IsActivePrevEpoch {
vals[i].IsPrevEpochAttester = true
vals[i].IsPrevEpochSourceAttester = true
}
has, err = HasValidatorFlag(b, targetIdx)
if err != nil {
return nil, nil, err
}
if has && vals[i].IsActivePrevEpoch {
vals[i].IsPrevEpochAttester = true
vals[i].IsPrevEpochTargetAttester = true
}
has, err = HasValidatorFlag(b, headIdx)
if err != nil {
return nil, nil, err
}
if has && vals[i].IsActivePrevEpoch {
vals[i].IsPrevEpochHeadAttester = true
}
}
bal = precompute.UpdateBalance(vals, bal, beaconState.Version())
return vals, bal, nil
}
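// Illustrative, hypothetical sketch: the loop above relies on HasValidatorFlag to read the per-validator
// participation byte, which packs one bit per flag (timely source, target and head at indices 0, 1 and 2
// in the Altair spec). The spec's has_flag check reduces to a plain bit test; exampleHasFlag is a
// stand-in for the real helper and omits its error return for brevity.
func exampleHasFlag(participation byte, flagIndex uint8) bool {
	flag := byte(1) << flagIndex
	return participation&flag == flag
}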
// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validators.
// This is an optimized version that passes in precomputed validator attesting records and total epoch balances.
// A rough sketch of how the precompute helpers in this file chain together follows this function.
func ProcessRewardsAndPenaltiesPrecompute(
beaconState state.BeaconStateAltair,
bal *precompute.Balance,
vals []*precompute.Validator,
) (state.BeaconStateAltair, error) {
// Don't process rewards and penalties in genesis epoch.
cfg := params.BeaconConfig()
if time.CurrentEpoch(beaconState) == cfg.GenesisEpoch {
return beaconState, nil
}
numOfVals := beaconState.NumValidators()
// Guard against out-of-bounds access when using the validator balance precompute.
if len(vals) != numOfVals || len(vals) != beaconState.BalancesLength() {
return beaconState, errors.New("validator registries not the same length as state's validator registries")
}
attsRewards, attsPenalties, err := AttestationsDelta(beaconState, bal, vals)
if err != nil {
return nil, errors.Wrap(err, "could not get attestation delta")
}
balances := beaconState.Balances()
for i := 0; i < numOfVals; i++ {
vals[i].BeforeEpochTransitionBalance = balances[i]
// Compute the post balance of the validator after accounting for the
// attester and proposer rewards and penalties.
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], attsRewards[i])
if err != nil {
return nil, err
}
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], attsPenalties[i])
vals[i].AfterEpochTransitionBalance = balances[i]
}
if err := beaconState.SetBalances(balances); err != nil {
return nil, errors.Wrap(err, "could not set validator balances")
}
return beaconState, nil
}
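// Illustrative, hypothetical sketch: roughly how the precompute helpers above could be chained during
// Altair epoch processing. The wrapper name exampleProcessEpochPrecompute is not Prysm API; the real
// ordering lives in the epoch-transition code, not in this file. The sketch assumes the state value
// satisfies both the state.BeaconState and state.BeaconStateAltair interfaces used by the individual
// helpers, and that the helpers mutate the state they are given, so their returned state can be ignored here.
func exampleProcessEpochPrecompute(ctx context.Context, st state.BeaconStateAltair) (state.BeaconStateAltair, error) {
	vals, bal, err := InitializePrecomputeValidators(ctx, st)
	if err != nil {
		return nil, err
	}
	if _, vals, err = ProcessInactivityScores(ctx, st, vals); err != nil {
		return nil, err
	}
	if vals, bal, err = ProcessEpochParticipation(ctx, st, bal, vals); err != nil {
		return nil, err
	}
	return ProcessRewardsAndPenaltiesPrecompute(st, bal, vals)
}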
// AttestationsDelta computes and returns the reward and penalty deltas for individual validators based on their
// voting records. A worked numeric sketch of this arithmetic follows attestationDelta below.
func AttestationsDelta(beaconState state.BeaconStateAltair, bal *precompute.Balance, vals []*precompute.Validator) (rewards, penalties []uint64, err error) {
numOfVals := beaconState.NumValidators()
rewards = make([]uint64, numOfVals)
penalties = make([]uint64, numOfVals)
cfg := params.BeaconConfig()
prevEpoch := time.PrevEpoch(beaconState)
finalizedEpoch := beaconState.FinalizedCheckpointEpoch()
increment := cfg.EffectiveBalanceIncrement
factor := cfg.BaseRewardFactor
baseRewardMultiplier := increment * factor / math.IntegerSquareRoot(bal.ActiveCurrentEpoch)
leak := helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch)
inactivityDenominator := cfg.InactivityScoreBias * cfg.InactivityPenaltyQuotientAltair
for i, v := range vals {
rewards[i], penalties[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
if err != nil {
return nil, nil, err
}
}
return rewards, penalties, nil
}
func attestationDelta(
bal *precompute.Balance,
val *precompute.Validator,
baseRewardMultiplier, inactivityDenominator uint64,
inactivityLeak bool) (reward, penalty uint64, err error) {
eligible := val.IsActivePrevEpoch || (val.IsSlashed && !val.IsWithdrawableCurrentEpoch)
// Per the spec, `ActiveCurrentEpoch` can't be 0 when processing the attestation delta.
if !eligible || bal.ActiveCurrentEpoch == 0 {
return 0, 0, nil
}
cfg := params.BeaconConfig()
increment := cfg.EffectiveBalanceIncrement
effectiveBalance := val.CurrentEpochEffectiveBalance
baseReward := (effectiveBalance / increment) * baseRewardMultiplier
activeIncrement := bal.ActiveCurrentEpoch / increment
weightDenominator := cfg.WeightDenominator
srcWeight := cfg.TimelySourceWeight
tgtWeight := cfg.TimelyTargetWeight
headWeight := cfg.TimelyHeadWeight
reward, penalty = uint64(0), uint64(0)
// Process source reward / penalty
if val.IsPrevEpochSourceAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * srcWeight * (bal.PrevEpochAttested / increment)
reward += n / (activeIncrement * weightDenominator)
}
} else {
penalty += baseReward * srcWeight / weightDenominator
}
// Process target reward / penalty
if val.IsPrevEpochTargetAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * tgtWeight * (bal.PrevEpochTargetAttested / increment)
reward += n / (activeIncrement * weightDenominator)
}
} else {
penalty += baseReward * tgtWeight / weightDenominator
}
// Process head reward / penalty
if val.IsPrevEpochHeadAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * headWeight * (bal.PrevEpochHeadAttested / increment)
reward += n / (activeIncrement * weightDenominator)
}
}
// Process finality delay penalty
// Apply an additional penalty to validators that did not attest to the correct target or were slashed.
if !val.IsPrevEpochTargetAttester || val.IsSlashed {
n, err := math.Mul64(effectiveBalance, val.InactivityScore)
if err != nil {
return 0, 0, err
}
penalty += n / inactivityDenominator
}
return reward, penalty, nil
}
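// Illustrative, hypothetical sketch: the reward arithmetic above with assumed mainnet-style constants
// (EFFECTIVE_BALANCE_INCREMENT = 10^9 Gwei, BASE_REWARD_FACTOR = 64, and source/target/head weights of
// 14/26/14 over WEIGHT_DENOMINATOR = 64). For a 32 ETH validator on a chain with 10,000,000 ETH of active
// stake, the base reward is (32e9/1e9) * (1e9*64/sqrt(1e16)) = 32*640 = 20,480 Gwei, and with full
// participation and no inactivity leak the per-epoch attestation reward is 20,480 * (14+26+14)/64 = 17,280 Gwei,
// i.e. exampleIdealAttestationReward(32_000_000_000, 10_000_000_000_000_000) == 17280. The helper is not
// Prysm API and only mirrors the formula for the ideal, fully-participating case.
func exampleIdealAttestationReward(effectiveBalanceGwei, activeBalanceGwei uint64) uint64 {
	const (
		increment        = uint64(1_000_000_000) // assumed EFFECTIVE_BALANCE_INCREMENT
		baseRewardFactor = uint64(64)            // assumed BASE_REWARD_FACTOR
		srcWeight        = uint64(14)            // assumed TIMELY_SOURCE_WEIGHT
		tgtWeight        = uint64(26)            // assumed TIMELY_TARGET_WEIGHT
		headWeight       = uint64(14)            // assumed TIMELY_HEAD_WEIGHT
		weightDenom      = uint64(64)            // assumed WEIGHT_DENOMINATOR
	)
	// Same shape as AttestationsDelta: increment * factor / integer_sqrt(total active balance).
	baseRewardMultiplier := increment * baseRewardFactor / math.IntegerSquareRoot(activeBalanceGwei)
	baseReward := (effectiveBalanceGwei / increment) * baseRewardMultiplier
	// With full participation the attested/active ratio cancels out, leaving baseReward * weight / denominator.
	return baseReward * (srcWeight + tgtWeight + headWeight) / weightDenom
}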

Some files were not shown because too many files have changed in this diff.