Compare commits


3 Commits

Author  SHA1        Message             Date
nisdas  cf0505b8db  potuz's suggestion  2023-06-19 22:19:56 +08:00
nisdas  3a9764d3af  potuz's suggestion  2023-06-19 20:01:13 +08:00
nisdas  d1d3edc7fe  time locks          2023-06-19 18:30:26 +08:00
393 changed files with 15665 additions and 38531 deletions

View File

@@ -3,7 +3,6 @@ import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# E2E run with debug gotag
@@ -15,7 +14,7 @@ coverage --define=coverage_enabled=1
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false --define blst_modern=true
build --define blst_disabled=false
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true
@@ -28,7 +27,30 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
build:release --compilation_mode=opt
build:release --stamp
# LLVM compiler for building C/C++ dependencies.
build:llvm --define compiler=llvm
build:llvm --copt -fno-sanitize=vptr,function
build:llvm --linkopt -fno-sanitize=vptr,function
# --incompatible_enable_cc_toolchain_resolution not needed after this issue is closed https://github.com/bazelbuild/bazel/issues/7260
build:llvm --incompatible_enable_cc_toolchain_resolution
build:asan --copt -fsanitize=address,undefined
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address,undefined
build:asan --copt -fno-sanitize=vptr,function
build:asan --linkopt -fno-sanitize=vptr,function
build:asan --copt -DADDRESS_SANITIZER=1
build:asan --copt -D__SANITIZE_ADDRESS__
build:asan --linkopt -ldl
build:llvm-asan --config=llvm
build:llvm-asan --config=asan
build:llvm-asan --linkopt -fuse-ld=ld.lld
build:fuzz --@io_bazel_rules_go//go/config:tags=fuzz
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --config=llvm
build:cgo_symbolizer --copt=-g
build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
@@ -37,13 +59,9 @@ build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
build:debug --toolchain_resolution_debug=".*"
build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s
# Set bazel gotag
build --define gotags=bazel
# Abseil requires c++14 or greater.
build --cxxopt=-std=c++20
build --host_cxxopt=-std=c++20

View File

@@ -1 +1 @@
6.3.2
6.1.0

View File

@@ -1,42 +0,0 @@
name: "fuzz"
on:
workflow_dispatch:
schedule:
- cron: "0 12 * * *"
permissions:
contents: write
pull-requests: write
jobs:
list:
runs-on: ubuntu-latest
timeout-minutes: 180
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.20'
- id: list
uses: shogo82148/actions-go-fuzz/list@v0
outputs:
fuzz-tests: ${{steps.list.outputs.fuzz-tests}}
fuzz:
runs-on: ubuntu-latest
timeout-minutes: 360
needs: list
strategy:
fail-fast: false
matrix:
include: ${{fromJson(needs.list.outputs.fuzz-tests)}}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.20'
- uses: shogo82148/actions-go-fuzz/run@v0
with:
packages: ${{ matrix.package }}
fuzz-regexp: ${{ matrix.func }}
fuzz-time: "20m"
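
For context, the workflow above enumerates native Go fuzz targets and runs each one; a minimal target of the kind it would discover might look like the following sketch (the reversal logic and all names are purely illustrative):

```go
package example

import "testing"

// FuzzReverse is a minimal native Go fuzz target of the kind the
// workflow enumerates; the property checked here is trivial on purpose.
func FuzzReverse(f *testing.F) {
	f.Add("hello") // seed corpus entry
	f.Fuzz(func(t *testing.T, s string) {
		r := []rune(s)
		for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
			r[i], r[j] = r[j], r[i]
		}
		if len(string(r)) == 0 && len(s) != 0 {
			t.Errorf("reversal lost input %q", s)
		}
	})
}
```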

View File

@@ -3,6 +3,7 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
prefix = "github.com/prysmaticlabs/prysm"
@@ -133,8 +134,8 @@ nogo(
# nogo checks that fail with coverage enabled.
":coverage_enabled": [],
"//conditions:default": [
"@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
"@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
],
}),
)

View File

@@ -17,34 +17,26 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
http_archive(
name = "hermetic_cc_toolchain",
sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
urls = [
"https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
],
name = "com_grail_bazel_toolchain",
sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
strip_prefix = "bazel-toolchain-83e69ba9e4b4fdad0d1d057fcb87addf77c281c9",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/83e69ba9e4b4fdad0d1d057fcb87addf77c281c9.tar.gz"],
)
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
zig_toolchains()
bazel_toolchain_dependencies()
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
register_toolchains(
"@zig_sdk//toolchain:linux_amd64_gnu.2.31",
"@zig_sdk//toolchain:linux_arm64_gnu.2.31",
# Hermetic cc toolchain is not yet supported on darwin. Sysroot needs to be provided.
# See https://github.com/uber/hermetic_cc_toolchain#osx-sysroot
# "@zig_sdk//toolchain:darwin_amd64",
# "@zig_sdk//toolchain:darwin_arm64",
# Windows builds are not supported yet.
# "@zig_sdk//toolchain:windows_amd64",
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "13.0.1",
)
load("@prysm//tools/cross-toolchain:darwin_cc_hack.bzl", "configure_nonhermetic_darwin")
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
configure_nonhermetic_darwin()
llvm_register_toolchains()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
@@ -67,10 +59,10 @@ bazel_skylib_workspace()
http_archive(
name = "bazel_gazelle",
sha256 = "29d5dafc2a5582995488c6735115d1d366fcd6a0fc2e2a153f02988706349825",
sha256 = "5982e5463f171da99e3bdaeff8c0f48283a7a5f396ec5282910b9e8a49c0dd7e",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
],
)
@@ -94,10 +86,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "bfc5ce70b9d1634ae54f4e7b495657a18a04e0d596785f672d35d5f505ab491a",
sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
],
)
@@ -172,7 +164,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.20.7",
go_version = "1.20.3",
nogo = "@//:nogo",
)
@@ -313,18 +305,39 @@ filegroup(
)
http_archive(
name = "com_github_bazelbuild_buildtools",
sha256 = "7a182df18df1debabd9e36ae07c8edfa1378b8424a04561b674d933b965372b3",
strip_prefix = "buildtools-f2aed9ee205d62d45c55cfabbfd26342f8526862",
url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
)
git_repository(
name = "com_google_protobuf",
sha256 = "4e176116949be52b0408dfd24f8925d1eb674a781ae242a75296b17a1c721395",
strip_prefix = "protobuf-23.3",
urls = [
"https://github.com/protocolbuffers/protobuf/archive/v23.3.tar.gz",
],
commit = "436bd7880e458532901c58f4d9d1ea23fa7edd52",
remote = "https://github.com/protocolbuffers/protobuf",
shallow_since = "1617835118 -0700",
)
# Group the sources of the library so that CMake rules have access to it
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
# External dependencies
http_archive(
name = "prysm_web_ui",
build_file_content = """
filegroup(
name = "site",
srcs = glob(["**/*"]),
visibility = ["//visibility:public"],
)
""",
sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
],
)
load("//:deps.bzl", "prysm_deps")
# gazelle:repository_macro deps.bzl%prysm_deps
@@ -356,6 +369,10 @@ load(
_cc_image_repos()
load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")
go_embed_data_dependencies()
load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")
gometalinter_dependencies()
@@ -364,6 +381,10 @@ load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
gazelle_dependencies()
load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")
buildifier_dependencies()
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
protobuf_deps()

View File

@@ -1,8 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["headers.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api",
visibility = ["//visibility:public"],
)

View File

@@ -5,7 +5,6 @@ import (
"fmt"
"path"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
@@ -110,8 +109,8 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", hexutil.Encode(sr[:])).
WithField("block_root", hexutil.Encode(br[:])).
WithField("state_root", sr).
WithField("block_root", br).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,
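
A small, self-contained sketch of what hexutil.Encode does to a 32-byte root, assuming go-ethereum's hexutil package (the root value is made up):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	var root [32]byte
	root[0] = 0xab
	fmt.Println(root)                    // [171 0 0 ... 0]: hard to read in logs
	fmt.Println(hexutil.Encode(root[:])) // 0xab0000...00: compact 0x-prefixed hex
}
```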

View File

@@ -128,7 +128,6 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
@@ -227,7 +226,6 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
@@ -401,7 +399,6 @@ func TestDownloadFinalizedData(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))

View File

@@ -13,7 +13,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/grpc:go_default_library",
"//encoding/bytesutil:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
@@ -33,7 +32,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//api/grpc:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
)
@@ -117,11 +116,7 @@ func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []
// Something went wrong, but the request completed, meaning we can write headers and the error message.
for h, vs := range resp.Header {
for _, v := range vs {
if strings.HasSuffix(h, api.VersionHeader) {
w.Header().Set(api.VersionHeader, v)
} else {
w.Header().Set(h, v)
}
w.Header().Set(h, v)
}
}
// Handle gRPC timeout.
@@ -192,11 +187,9 @@ func WriteMiddlewareResponseHeadersAndBody(grpcResp *http.Response, responseJson
var statusCodeHeader string
for h, vs := range grpcResp.Header {
// We don't want to expose any gRPC metadata in the HTTP response, so we skip forwarding metadata headers.
if strings.HasPrefix(h, grpc.MetadataPrefix) {
if h == grpc.WithPrefix(grpc.HttpCodeMetadataKey) {
if strings.HasPrefix(h, "Grpc-Metadata") {
if h == "Grpc-Metadata-"+grpc.HttpCodeMetadataKey {
statusCodeHeader = vs[0]
} else if strings.HasSuffix(h, api.VersionHeader) {
w.Header().Set(api.VersionHeader, vs[0])
}
} else {
for _, v := range vs {
@@ -230,7 +223,7 @@ func WriteError(w http.ResponseWriter, errJson ErrorJson, responseHeader http.He
// Include custom error in the error JSON.
hasCustomError := false
if responseHeader != nil {
customError, ok := responseHeader[grpc.WithPrefix(grpc.CustomErrorMetadataKey)]
customError, ok := responseHeader["Grpc-Metadata-"+grpc.CustomErrorMetadataKey]
if ok {
hasCustomError = true
// Assume header has only one value and read the 0 index.

View File

@@ -8,7 +8,6 @@ import (
"strings"
"testing"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -281,8 +280,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
response := &http.Response{
Header: http.Header{
"Foo": []string{"foo"},
grpc.WithPrefix(grpc.HttpCodeMetadataKey): []string{"204"},
grpc.WithPrefix(api.VersionHeader): []string{"capella"},
"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"204"},
},
}
container := defaultResponseContainer()
@@ -301,9 +299,6 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "224", v[0])
v, ok = writer.Header()["Eth-Consensus-Version"]
require.Equal(t, true, ok, "header not found")
assert.Equal(t, "capella", v[0])
assert.Equal(t, 204, writer.Code)
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
})
@@ -325,12 +320,11 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
t.Run("GET_invalid_status_code", func(t *testing.T) {
response := &http.Response{
Header: http.Header{"Grpc-Metadata-Eth-Consensus-Version": []string{"capella"}},
Header: http.Header{},
}
// Set invalid status code.
response.Header[grpc.WithPrefix(grpc.HttpCodeMetadataKey)] = []string{"invalid"}
response.Header[grpc.WithPrefix(api.VersionHeader)] = []string{"capella"}
response.Header["Grpc-Metadata-"+grpc.HttpCodeMetadataKey] = []string{"invalid"}
container := defaultResponseContainer()
responseJson, err := json.Marshal(container)
@@ -396,7 +390,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
func TestWriteError(t *testing.T) {
t.Run("ok", func(t *testing.T) {
responseHeader := http.Header{
grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"{\"CustomField\":\"bar\"}"},
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"{\"CustomField\":\"bar\"}"},
}
errJson := &testErrorJson{
Message: "foo",
@@ -426,7 +420,7 @@ func TestWriteError(t *testing.T) {
logHook := test.NewGlobal()
responseHeader := http.Header{
grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"invalid"},
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"invalid"},
}
WriteError(httptest.NewRecorder(), &testErrorJson{}, responseHeader)

View File

@@ -6,11 +6,3 @@ const CustomErrorMetadataKey = "Custom-Error"
// HttpCodeMetadataKey is the key to use when setting custom HTTP status codes in gRPC metadata.
const HttpCodeMetadataKey = "X-Http-Code"
// MetadataPrefix is the prefix for grpc headers on metadata
const MetadataPrefix = "Grpc-Metadata"
// WithPrefix creates a new string with grpc metadata prefix
func WithPrefix(value string) string {
return MetadataPrefix + "-" + value
}
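
A self-contained sketch of the helper above next to the literal spelling used elsewhere in this diff; both produce the same header key:

```go
package main

import "fmt"

// Mirrors the helper above; the literal concatenation used in the
// middleware hunks ("Grpc-Metadata-" + key) yields the same header name.
const MetadataPrefix = "Grpc-Metadata"

func WithPrefix(value string) string {
	return MetadataPrefix + "-" + value
}

func main() {
	fmt.Println(WithPrefix("X-Http-Code"))        // Grpc-Metadata-X-Http-Code
	fmt.Println("Grpc-Metadata-" + "X-Http-Code") // identical key
}
```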

View File

@@ -1,7 +0,0 @@
package api
const (
VersionHeader = "Eth-Consensus-Version"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
)

View File

@@ -88,7 +88,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -340,13 +340,7 @@ func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
}
s.headLock.RLock()
headRoot := s.head.root
headSlot := s.head.slot
headOptimistic := s.head.optimistic
s.headLock.RUnlock()
// we trust the head package for recent head slots, otherwise fall back to forkchoice
if headSlot+2 >= s.CurrentSlot() {
return headOptimistic, nil
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
@@ -387,7 +381,7 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// IsViableForkCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
@@ -499,13 +493,6 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
return ar[:], nil
}
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
}
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t

View File

@@ -422,12 +422,6 @@ func TestService_IsOptimistic(t *testing.T) {
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, primitives.Slot(0), c.CurrentSlot())
require.Equal(t, false, opt)
c.SetGenesisTime(time.Now().Add(-time.Second * time.Duration(4*params.BeaconConfig().SecondsPerSlot)))
opt, err = c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}
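
For the staleness math in this test: rewinding genesis by four slots makes CurrentSlot report 4 while the cached head stays at slot 0, so IsOptimistic falls back to forkchoice. A sketch of the slot computation, assuming 12-second slots (the helper below is illustrative, not the package's API):

```go
package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // mainnet value

// currentSlot derives the wall-clock slot from the genesis time.
func currentSlot(genesis time.Time) uint64 {
	if time.Now().Before(genesis) {
		return 0
	}
	return uint64(time.Since(genesis).Seconds()) / secondsPerSlot
}

func main() {
	genesis := time.Now().Add(-time.Second * time.Duration(4*secondsPerSlot))
	fmt.Println(currentSlot(genesis)) // 4: a head at slot 0 is now "stale"
}
```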

View File

@@ -41,15 +41,13 @@ var (
type invalidBlock struct {
invalidAncestorRoots [][32]byte
error
root [32]byte
lastValidHash [32]byte
root [32]byte
}
type invalidBlockError interface {
Error() string
InvalidAncestorRoots() [][32]byte
BlockRoot() [32]byte
LastValidHash() [32]byte
}
// BlockRoot returns the invalid block root.
@@ -57,11 +55,6 @@ func (e invalidBlock) BlockRoot() [32]byte {
return e.root
}
// LastValidHash returns the last valid hash root.
func (e invalidBlock) LastValidHash() [32]byte {
return e.lastValidHash
}
// InvalidAncestorRoots returns an optional list of invalid roots of the invalid block which lead up to the last valid root.
func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
return e.invalidAncestorRoots
@@ -79,19 +72,6 @@ func IsInvalidBlock(e error) bool {
return true
}
// InvalidBlockLVH returns the invalid block last valid hash root. If the error
// doesn't have a last valid hash, [32]byte{} is returned.
func InvalidBlockLVH(e error) [32]byte {
if e == nil {
return [32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [32]byte{}
}
return d.LastValidHash()
}
// InvalidBlockRoot returns the invalid block root. If the error
// doesn't have an invalid block root, [32]byte{} is returned.
func InvalidBlockRoot(e error) [32]byte {
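
A reduced sketch of the inspection pattern these helpers implement, with simplified stand-ins for the types above: behavior hinges on an interface assertion against the error value.

```go
package main

import (
	"errors"
	"fmt"
)

var ErrInvalidPayload = errors.New("invalid payload")

// invalidBlock is a simplified stand-in for the type above.
type invalidBlock struct {
	error
	root [32]byte
}

func (e invalidBlock) BlockRoot() [32]byte { return e.root }

type invalidBlockError interface {
	Error() string
	BlockRoot() [32]byte
}

// InvalidBlockRoot mirrors the helper above: assert the interface and
// fall back to the zero value when the error carries no block root.
func InvalidBlockRoot(e error) [32]byte {
	d, ok := e.(invalidBlockError)
	if !ok {
		return [32]byte{}
	}
	return d.BlockRoot()
}

func main() {
	err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'D'}}
	r := InvalidBlockRoot(err)
	fmt.Printf("%x\n", r[:1]) // 44 ('D')
}
```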

View File

@@ -182,24 +182,21 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// notifyNewPayload signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
// Execution payload is only supported in Bellatrix and beyond. Pre
// merge blocks are never optimistic
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
if blocks.IsPreBellatrixVersion(postStateVersion) {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
@@ -223,37 +220,35 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
}).Info("Called new payload with optimistic block")
return false, nil
case execution.ErrInvalidPayloadStatus:
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return false, invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
}
case execution.ErrInvalidBlockHashPayloadStatus:
newPayloadInvalidNodeCount.Inc()
return false, ErrInvalidBlockHashPayloadStatus
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
}
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return err
}
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
lastValidHash: lvh,
}
}
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
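
The switch above collapses execution engine responses into a (valid, error) pair; a reduced sketch of that shape, with placeholder sentinel errors standing in for the ones in the execution package:

```go
package main

import (
	"errors"
	"fmt"
)

// Placeholder sentinels; the real ones live in the execution package.
var (
	errAcceptedSyncing      = errors.New("payload accepted or syncing")
	errInvalidPayloadStatus = errors.New("invalid payload status")
	errUndefinedEngineError = errors.New("undefined execution engine error")
)

// newPayloadOutcome mirrors the control flow above: nil means the EL
// returned VALID; ACCEPTED/SYNCING yields an optimistic (false, nil);
// INVALID surfaces as an error; anything else is wrapped as undefined.
func newPayloadOutcome(engineErr error) (bool, error) {
	switch {
	case engineErr == nil:
		return true, nil
	case errors.Is(engineErr, errAcceptedSyncing):
		return false, nil
	case errors.Is(engineErr, errInvalidPayloadStatus):
		return false, errInvalidPayloadStatus
	default:
		return false, fmt.Errorf("%w: %v", errUndefinedEngineError, engineErr)
	}
}

func main() {
	valid, err := newPayloadOutcome(nil)
	fmt.Println(valid, err) // true <nil>
}
```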

View File

@@ -525,13 +525,11 @@ func Test_NotifyNewPayload(t *testing.T) {
{
name: "phase 0 post state",
postState: phase0State,
blk: altairBlk, // same as phase 0 for this test
isValidPayload: true,
},
{
name: "altair post state",
postState: altairState,
blk: altairBlk,
isValidPayload: true,
},
{
@@ -745,37 +743,6 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
require.Equal(t, true, validated)
}
func Test_reportInvalidBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
service, tr := minimalTestService(t)
ctx, _, fcs := tr.ctx, tr.db, tr.fcs
jcp := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'A'}, [32]byte{}, [32]byte{'a'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 1, [32]byte{'B'}, [32]byte{'A'}, [32]byte{'b'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'C'}, [32]byte{'B'}, [32]byte{'c'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'d'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
require.Equal(t, IsInvalidBlock(err), true)
require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
invalidRoots := InvalidAncestorRoots(err)
require.Equal(t, 3, len(invalidRoots))
require.Equal(t, [32]byte{'D'}, invalidRoots[0])
require.Equal(t, [32]byte{'C'}, invalidRoots[1])
require.Equal(t, [32]byte{'B'}, invalidRoots[2])
}
func Test_GetPayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

View File

@@ -47,11 +47,9 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
// This defines the current chain service's view of head.
type head struct {
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
slot primitives.Slot // the head block slot number
optimistic bool // optimistic status when saved head
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
}
// This saves head info to the local service cache, it also saves the
@@ -96,10 +94,6 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
return errors.Wrap(err, "could not get old head root")
}
oldHeadRoot := bytesutil.ToBytes32(r)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
log.WithError(err).Error("could not check if node is optimistically synced")
}
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
@@ -131,6 +125,10 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
reorgDistance.Observe(float64(dis))
reorgDepth.Observe(float64(dep))
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
@@ -152,14 +150,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
}
// Cache the new head info.
newHead := &head{
root: newHeadRoot,
block: headBlock,
state: headState,
optimistic: isOptimistic,
slot: headBlock.Block().Slot(),
}
if err := s.setHead(newHead); err != nil {
if err := s.setHead(newHeadRoot, headBlock, headState); err != nil {
return errors.Wrap(err, "could not set head")
}
@@ -182,7 +173,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState, optimistic bool) error {
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return err
}
@@ -198,28 +189,26 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
if err != nil {
return err
}
if err := s.setHeadInitialSync(r, bCp, hs, optimistic); err != nil {
if err := s.setHeadInitialSync(r, bCp, hs); err != nil {
return errors.Wrap(err, "could not set head")
}
return nil
}
// This sets head view object which is used to track the head slot, root, block, state and optimistic status
func (s *Service) setHead(newHead *head) error {
// This sets head view object which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
s.headLock.Lock()
defer s.headLock.Unlock()
// This does a full copy of the block and state.
bCp, err := newHead.block.Copy()
bCp, err := block.Copy()
if err != nil {
return err
}
s.head = &head{
root: newHead.root,
block: bCp,
state: newHead.state.Copy(),
optimistic: newHead.optimistic,
slot: newHead.slot,
root: root,
block: bCp,
state: state.Copy(),
}
return nil
}
@@ -227,7 +216,7 @@ func (s *Service) setHead(newHead *head) error {
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState, optimistic bool) error {
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -237,10 +226,9 @@ func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySig
return err
}
s.head = &head{
root: root,
block: bCp,
state: state,
optimistic: optimistic,
root: root,
block: bCp,
state: state,
}
return nil
}
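
A minimal sketch of the copy-under-lock discipline setHead and setHeadInitialSync follow, with stand-in types: take the write lock, deep-copy the inputs, then publish, so callers mutating their references can't race readers of the head.

```go
package main

import "sync"

type headState struct{ slot uint64 }

func (s *headState) Copy() *headState { c := *s; return &c }

type head struct {
	root  [32]byte
	state *headState
}

type service struct {
	headLock sync.RWMutex
	head     *head
}

// setHead publishes a new head under the write lock, copying the state
// before storing it.
func (s *service) setHead(root [32]byte, st *headState) {
	s.headLock.Lock()
	defer s.headLock.Unlock()
	s.head = &head{root: root, state: st.Copy()}
}

func main() {
	s := &service{}
	s.setHead([32]byte{'A'}, &headState{slot: 7})
}
```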

View File

@@ -172,15 +172,11 @@ var (
})
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "on_block_processing_milliseconds",
Help: "Total time in milliseconds to complete a call to postBlockProcess()",
Help: "Total time in milliseconds to complete a call to onBlock()",
})
stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "state_transition_processing_milliseconds",
Help: "Total time to call a state transition in validateStateTransition()",
})
chainServiceProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "chain_service_processing_milliseconds",
Help: "Total time to call a chain service in ReceiveBlock()",
Help: "Total time to call a state transition in onBlock()",
})
processAttsElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
@@ -250,45 +246,40 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
slashingBalance := uint64(0)
slashingEffectiveBalance := uint64(0)
for i := 0; i < postState.NumValidators(); i++ {
validator, err := postState.ValidatorAtIndexReadOnly(primitives.ValidatorIndex(i))
if err != nil {
log.WithError(err).Error("Could not load validator")
continue
}
for i, validator := range postState.Validators() {
bal, err := postState.BalanceAtIndex(primitives.ValidatorIndex(i))
if err != nil {
log.WithError(err).Error("Could not load validator balance")
continue
}
if validator.Slashed() {
if currentEpoch < validator.ExitEpoch() {
if validator.Slashed {
if currentEpoch < validator.ExitEpoch {
slashingInstances++
slashingBalance += bal
slashingEffectiveBalance += validator.EffectiveBalance()
slashingEffectiveBalance += validator.EffectiveBalance
} else {
slashedInstances++
}
continue
}
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
if currentEpoch < validator.ExitEpoch() {
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
if currentEpoch < validator.ExitEpoch {
exitingInstances++
exitingBalance += bal
exitingEffectiveBalance += validator.EffectiveBalance()
exitingEffectiveBalance += validator.EffectiveBalance
} else {
exitedInstances++
}
continue
}
if currentEpoch < validator.ActivationEpoch() {
if currentEpoch < validator.ActivationEpoch {
pendingInstances++
pendingBalance += bal
continue
}
activeInstances++
activeBalance += bal
activeEffectiveBalance += validator.EffectiveBalance()
activeEffectiveBalance += validator.EffectiveBalance
}
activeInstances += exitingInstances + slashingInstances
activeBalance += exitingBalance + slashingBalance
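
The summaries above follow a common timing pattern: register via promauto, then feed observed milliseconds after the measured call. A minimal sketch, assuming the standard Prometheus Go client:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// promauto registers the metric on the default registry at package init.
var onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "on_block_processing_milliseconds",
	Help: "Total time in milliseconds to complete a call to onBlock()",
})

// timeCall records the wall-clock duration of fn in milliseconds.
func timeCall(fn func() error) error {
	start := time.Now()
	err := fn()
	onBlockProcessingTime.Observe(float64(time.Since(start).Milliseconds()))
	return err
}

func main() {
	_ = timeCall(func() error { time.Sleep(5 * time.Millisecond); return nil })
}
```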

View File

@@ -22,6 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -39,11 +40,59 @@ const depositDeadline = 20 * time.Second
// This defines the size of the upper bound for the initial sync block cache.
var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// postBlockProcess is called when a gossip block is received. This function performs
// several duties, most importantly informing the engine if head was updated,
// saving the new head information to the blockchain package and
// handling attestations, slashings and similar included in the block.
func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, postState state.BeaconState, isValidPayload bool) error {
// onBlock is called when a gossip block is received. It runs regular state transition on the block.
// The block's signing root should be computed before calling this method to avoid redundant
// computation in this method and methods it calls into.
//
// Spec pseudocode definition:
//
// def on_block(store: Store, signed_block: ReadOnlySignedBeaconBlock) -> None:
// block = signed_block.message
// # Parent block must be known
// assert block.parent_root in store.block_states
// # Make a copy of the state to avoid mutability issues
// pre_state = copy(store.block_states[block.parent_root])
// # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
// assert get_current_slot(store) >= block.slot
//
// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// assert block.slot > finalized_slot
// # Check block is a descendant of the finalized block at the checkpoint finalized slot
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
//
// # Check the block is valid and compute the post-state
// state = pre_state.copy()
// state_transition(state, signed_block, True)
// # Add new block to the store
// store.blocks[hash_tree_root(block)] = block
// # Add new state for this block to the store
// store.block_states[hash_tree_root(block)] = state
//
// # Update justified checkpoint
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
// store.best_justified_checkpoint = state.current_justified_checkpoint
// if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
// store.justified_checkpoint = state.current_justified_checkpoint
//
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
//
// # Potentially update justified if different from store
// if store.justified_checkpoint != state.current_justified_checkpoint:
// # Update justified if new justified is later than store justified
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
// return
//
// # Update justified if store justified is not in chain with finalized checkpoint
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
@@ -52,7 +101,52 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
startTime := time.Now()
b := signed.Block()
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, postState, blockRoot); err != nil {
preState, err := s.getBlockPreState(ctx, b)
if err != nil {
return err
}
// Verify that the parent block is in forkchoice
if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
return ErrNotDescendantOfFinalized
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
return err
}
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
if err != nil {
return errors.Wrap(err, "could not validate new payload")
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
@@ -66,6 +160,34 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
}
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
go func() {
// Using a different context to prevent timeouts as this operation can be expensive
// and we want to avoid affecting the critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
tracing.AnnotateError(span, err)
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
tracing.AnnotateError(span, err)
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
}
}()
}
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
@@ -86,6 +208,18 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
} else {
// Updating next slot state cache can happen in the background. It shouldn't block the rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -95,12 +229,6 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
return err
}
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blockRoot)
if err != nil {
log.WithError(err).Debug("Could not check if block is optimistic")
optimistic = true
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
@@ -109,35 +237,57 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
BlockRoot: blockRoot,
SignedBlock: signed,
Verified: true,
Optimistic: optimistic,
},
})
defer reportAttestationInclusion(b)
if headRoot == blockRoot {
// Updating next slot state cache can happen in the background
// except in the epoch boundary in which case we lock to handle
// the shuffling and proposer caches updates.
// We handle these caches only on canonical
// blocks, otherwise this will be handled by lateBlockTasks
slot := postState.Slot()
if slots.IsEpochEnd(slot) {
if err := transition.UpdateNextSlotCache(ctx, blockRoot[:], postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil {
return errors.Wrap(err, "could not handle epoch boundary")
}
} else {
go func() {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Error("could not update next slot state cache")
}
}()
// Save justified check point to db.
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
}
}
// Save finalized check point to db and more.
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return err
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(finalized.Root)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
go func() {
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
ExecutionOptimistic: isOptimistic,
},
})
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
defer cancel()
if err := s.insertFinalizedDeposits(depCtx, finalized.Root); err != nil {
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()
}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
return err
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
@@ -259,7 +409,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return s.handleInvalidExecutionError(ctx, err, blockRoots[i], b.Block().ParentRoot())
return err
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -329,51 +479,70 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
}
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
return errors.Wrap(err, "could not update committee cache")
}
if err := helpers.UpdateProposerIndicesInCache(ctx, st, e); err != nil {
return errors.Wrap(err, "could not update proposer index cache")
}
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Could not update committee cache")
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
var err error
if postState.Slot()+1 == s.nextEpochBoundarySlot {
copied := postState.Copy()
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, copied.Slot()+1)
if err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Failed to cache next epoch proposers")
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
return err
}
}()
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
return err
}
// Update caches at epoch boundary slot.
// The following updates have shortcut to return nil cheaply if fulfilled during boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
return err
}
headSt, err := s.HeadState(ctx)
if err != nil {
return err
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
return err
}
}
return nil
}
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
// This feeds in the block to fork choice store. It allows fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, root [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockToForkchoiceStore")
defer span.End()
// return early if we are advancing to a past epoch
if slot < headState.Slot() {
return nil
if !s.cfg.ForkChoiceStore.HasNode(blk.ParentRoot()) {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
}
if !slots.IsEpochEnd(slot) {
return nil
}
copied := headState.Copy()
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, slot+1)
if err != nil {
return err
}
return s.updateEpochBoundaryCaches(ctx, copied)
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// This feeds in the attestations included in the block to fork choice store. It allows fork choice store
@@ -504,9 +673,8 @@ func (s *Service) runLateBlockTasks() {
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
// it also updates the next slot cache and the proposer index cache to deal with skipped slots.
// it also updates the next slot cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
currentSlot := s.CurrentSlot()
if s.CurrentSlot() == s.HeadSlot() {
return
}
@@ -514,10 +682,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
Type: statefeed.MissedSlot,
})
s.headLock.RLock()
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
@@ -528,9 +694,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists a proposer for the next slot, but we haven't called fcu w/ payload attribute yet.
@@ -565,10 +729,3 @@ func (s *Service) waitForSync() error {
return errors.New("context closed, exiting goroutine")
}
}
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
}
return err
}
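
A sketch of the background-update pattern this file uses repeatedly: detach from the caller's context and bound the asynchronous work with its own deadline, so a cancelled request doesn't abort the cache update (slotDeadline's value here is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const slotDeadline = 4 * time.Second // illustrative value

// updateInBackground ignores the parent context and bounds the async
// work with its own deadline; failures are logged, not propagated.
func updateInBackground(update func(ctx context.Context) error) {
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), slotDeadline)
		defer cancel()
		if err := update(ctx); err != nil {
			fmt.Println("could not update next slot state cache:", err)
		}
	}()
}

func main() {
	updateInBackground(func(ctx context.Context) error { return nil })
	time.Sleep(10 * time.Millisecond) // let the goroutine run in this demo
}
```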

View File

@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
)
@@ -210,44 +209,35 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
}
// inserts finalized deposits into our finalized deposit trie, needs to be
// called in the background
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
// inserts finalized deposits into our finalized deposit trie.
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
defer span.End()
startTime := time.Now()
// Update deposit cache.
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
if err != nil {
log.WithError(err).Error("could not fetch finalized state")
return
return errors.Wrap(err, "could not fetch finalized state")
}
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
log.WithError(err).Error("could not cast eth1 deposit index")
return
return errors.Wrap(err, "could not cast eth1 deposit index")
}
// The deposit index in the state is always the index of the next deposit
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
finalizedEth1DepIdx := eth1DepositIndex - 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx)); err != nil {
log.WithError(err).Error("could not insert finalized deposits")
return
eth1DepositIndex -= 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex)); err != nil {
return err
}
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
log.WithError(err).Error("could not prune deposit proofs")
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
// Prune deposits which have already been finalized, the below method prunes all pending deposits (non-inclusive) up
// to the provided eth1 deposit index.
s.cfg.DepositCache.PrunePendingDeposits(ctx, int64(eth1DepositIndex)) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
log.WithField("duration", time.Since(startTime).String()).Debug("Finalized deposit insertion completed")
return nil
}
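
A worked example of the index arithmetic above, with a made-up state value: the state's deposit index points at the next deposit to be included, so the last finalized deposit is one behind it.

```go
package main

import "fmt"

func main() {
	// Hypothetical state value: the index of the *next* deposit to include.
	eth1DepositIndex := uint64(17)
	finalizedEth1DepIdx := eth1DepositIndex - 1
	fmt.Println(finalizedEth1DepIdx) // 16: index handed to InsertFinalizedDeposits
}
```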
// This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling

View File

@@ -41,6 +41,103 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestStore_OnBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
ojc := &ethpb.Checkpoint{}
stfcs, root, err := prepareForkchoiceState(ctx, 0, validGenesisRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := util.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
util.SaveBlock(t, ctx, beaconDB, random)
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
randomParentRoot2 := roots[1]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
stfcs, root, err = prepareForkchoiceState(ctx, 2, bytesutil.ToBytes32(randomParentRoot2),
validGenesisRoot, [32]byte{'r'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
tests := []struct {
name string
blk *ethpb.SignedBeaconBlock
s state.BeaconState
time uint64
wantErrString string
}{
{
name: "parent block root does not have a state",
blk: util.NewBeaconBlock(),
s: st.Copy(),
wantErrString: "could not reconstruct parent state",
},
{
name: "block is from the future",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot2
b.Block.Slot = params.BeaconConfig().FarFutureSlot
return b
}(),
s: st.Copy(),
wantErrString: "is in the far distant future",
},
{
name: "could not get finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot[:]
b.Block.Slot = 2
return b
}(),
s: st.Copy(),
wantErrString: "not descendant of finalized checkpoint",
},
{
name: "same slot as finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.Slot = 0
b.Block.ParentRoot = randomParentRoot2
return b
}(),
s: st.Copy(),
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fRoot := bytesutil.ToBytes32(roots[0])
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(tt.blk)
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
}
func TestStore_OnBlockBatch(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
@@ -539,7 +636,8 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.handleEpochBoundary(ctx, s.Slot(), s, []byte{}))
require.NoError(t, service.handleEpochBoundary(ctx, s, []byte{}))
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
}
func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
@@ -559,20 +657,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, fcs.NewSlot(ctx, i))
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -607,20 +692,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -642,7 +714,8 @@ func TestOnBlock_CanFinalize(t *testing.T) {
func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
err := service.postBlockProcess(tr.ctx, nil, [32]byte{}, nil, true)
err := service.onBlock(tr.ctx, nil, [32]byte{})
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -656,11 +729,11 @@ func TestOnBlock_InvalidSignature(t *testing.T) {
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
err = service.onBlock(ctx, wsb, r)
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -684,13 +757,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -716,7 +783,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
@@ -725,45 +792,6 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}
}
func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
var zeroSig [96]byte
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))
assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
depositCache.InsertPendingDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
}
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
pendingDeps := depositCache.PendingContainers(ctx, nil)
for _, d := range pendingDeps {
assert.DeepEqual(t, true, d.Index >= 8, "Pending deposits were not pruned")
}
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
@@ -791,7 +819,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
// Insert 3 deposits before hand.
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2))
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
@@ -801,7 +829,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}
// Insert New Finalized State with higher deposit count.
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
fDeposits = depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
@@ -1103,35 +1131,19 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
var wg sync.WaitGroup
wg.Add(4)
go func() {
preState, err := service.getBlockPreState(ctx, wsb1.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb1)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb2.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb2)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb3.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb3)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb4.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb4)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
@@ -1199,13 +1211,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1218,12 +1224,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1237,12 +1238,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1259,12 +1255,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1287,12 +1278,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
@@ -1315,13 +1301,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1378,12 +1358,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1396,12 +1371,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1415,13 +1385,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1438,12 +1402,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1466,12 +1425,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
// not finish importing and it was never imported to forkchoice). Check
@@ -1494,12 +1448,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1557,13 +1506,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1576,13 +1519,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1596,12 +1533,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
err = service.onBlock(ctx, wsb, lastValidRoot)
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1623,12 +1555,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
invalidRoots[i-13], err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
err = service.postBlockProcess(ctx, wsb, invalidRoots[i-13], postState, false)
err = service.onBlock(ctx, wsb, invalidRoots[i-13])
require.NoError(t, err)
}
// Check that we have justified the second epoch
@@ -1649,12 +1576,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
@@ -1688,12 +1610,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, true))
require.NoError(t, service.onBlock(ctx, wsb, root))
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1711,12 +1628,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1736,13 +1648,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1793,12 +1699,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1811,12 +1712,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1830,12 +1726,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
err = service.onBlock(ctx, wsb, lastValidRoot)
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1856,18 +1747,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1886,11 +1766,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that the headroot/state are not in DB and restart the node
@@ -1972,12 +1848,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
st, err = service.HeadState(ctx)
require.NoError(t, err)

View File

@@ -62,7 +62,7 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
return err
}
if !bytes.Equal(a.Data.Target.Root, r) {
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.Data.BeaconBlockRoot, a.Data.Target.Root, r)
return errors.New("FFG and LMD votes are not consistent")
}
return nil
}
@@ -86,30 +86,23 @@ func (s *Service) spawnProcessAttestationsRoutine() {
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for the node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
pat := slots.NewSlotTickerWithOffset(s.genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
case <-pat.C():
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
case <-st.C():
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, s.CurrentSlot())
}
}
}()
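The rewritten routine above leans on a single ticker that fires twice per slot: once at the slot start to advance forkchoice, and once late in the slot to compute the head the next proposer should build on. A minimal sketch of that control flow, assuming a SlotInterval-style tick carrying the slot and an interval index; the real slots.NewSlotTickerWithIntervals API may differ in detail.

// Hypothetical two-interval slot ticker loop, mirroring the shape above.
type slotInterval struct {
	Slot     uint64
	Interval int // index into the intervals passed at construction
}

func processTicks(ctx context.Context, ticks <-chan slotInterval, newSlot func(uint64) error, updateHead func(uint64)) {
	for {
		select {
		case <-ctx.Done():
			return
		case si := <-ticks:
			if si.Interval > 0 {
				// Late-slot tick: attestations for this slot have arrived,
				// so compute the head that slot+1 proposers should build on.
				updateHead(si.Slot + 1)
			} else {
				// Slot-start tick: advance forkchoice to the new slot first.
				if err := newSlot(si.Slot); err != nil {
					return
				}
				updateHead(si.Slot)
			}
		}
	}
}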

View File

@@ -128,13 +128,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, tRoot))
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -184,13 +178,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, tRoot))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

View File

@@ -7,23 +7,15 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
)
// This defines how many epochs must pass since finality before the runtime begins saving hot states to the DB.
@@ -55,83 +47,14 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch
currentEpoch := coreTime.CurrentEpoch(preState)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
return nil
})
var isValidPayload bool
eg.Go(func() error {
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
return nil
})
if err := eg.Wait(); err != nil {
return err
}
// The rest of block processing takes a lock on forkchoice.
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
return errors.Wrap(err, "could not save post state info")
}
if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil {
// Apply state transition on the new block.
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
return err
}
if coreTime.CurrentEpoch(postState) > currentEpoch {
headSt, err := s.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
}
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}
newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
if err != nil {
return errors.Wrap(err, "could not update finalized checkpoint")
}
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(blockCopy, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
cancel()
}()
}
// If slasher is configured, forward the attestations in the block via an event feed for processing.
if features.Get().EnableSlasher {
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
}
// Handle post-block operations such as pruning exits and BLS messages if the incoming block is the head
if err := s.prunePostBlockOperationPools(ctx, blockCopy, blockRoot); err != nil {
@@ -163,8 +86,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
log.WithError(err).Error("Unable to log state transition data")
}
chainServiceProcessingTime.Observe(float64(time.Since(receivedTime).Milliseconds()))
return nil
}
@@ -185,13 +106,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
return err
}
lastBR := blkRoots[len(blkRoots)-1]
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(lastBR)
if err != nil {
lastSlot := blocks[len(blocks)-1].Block().Slot()
log.WithError(err).Errorf("Could not check if block is optimistic, Root: %#x, Slot: %d", lastBR, lastSlot)
optimistic = true
}
for i, b := range blocks {
blockCopy, err := b.Copy()
if err != nil {
@@ -205,7 +119,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
BlockRoot: blkRoots[i],
SignedBlock: blockCopy,
Verified: true,
Optimistic: optimistic,
},
})
@@ -313,109 +226,3 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}
// This performs the state transition function and returns the post-state, or an
// error if the block fails to satisfy the consensus rules.
func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
b := signed.Block()
// Verify that the parent block is in forkchoice
parentRoot := b.ParentRoot()
if !s.InForkchoice(parentRoot) {
return nil, ErrNotDescendantOfFinalized
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return nil, invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
return postState, nil
}
// updateJustificationOnBlock updates the justified checkpoint on DB if the
// incoming block has updated it on forkchoice.
func (s *Service) updateJustificationOnBlock(ctx context.Context, preState, postState state.BeaconState, preJustifiedEpoch primitives.Epoch) error {
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > preJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
}
}
return nil
}
// updateFinalizationOnBlock performs some duties when the incoming block
// changes the finalized checkpoint. It returns true when this has happened.
func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postState state.BeaconState, preFinalizedEpoch primitives.Epoch) (bool, error) {
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized.Epoch > preFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return true, err
}
return true, nil
}
return false, nil
}
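A worked reading of the two persistence conditions above, with made-up epoch numbers; the same shape applies to both the justified and finalized variants.

// Case 1: forkchoice advanced past what the store knew before this block.
//   preJustifiedEpoch = 4, forkchoice justified.Epoch = 5  -> 5 > 4, persist.
//
// Case 2: forkchoice already held epoch 5 before the block, so the first
// clause fails (5 > 5 is false), but the block's post-state catches up:
//   justified.Epoch == postStateJustifiedEpoch == 5,
//   preStateJustifiedEpoch = 4                  -> 5 == 5 && 5 > 4, persist.
//
// In every other combination the checkpoint on disk is already current and
// no DB write is issued.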
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called in the background.
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
isValidPayload = s.head.optimistic
}
s.headLock.RUnlock()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
ExecutionOptimistic: isValidPayload,
},
})
}
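For context, a hedged sketch of a consumer on the other end of this feed. The subscription plumbing is elided, the channel is assumed to be fed by StateFeed().Subscribe, and watchFinalized is an illustrative name; only the event types match the code above.

// Hypothetical consumer of the finalized-checkpoint events sent above.
func watchFinalized(events <-chan *feed.Event) {
	for ev := range events {
		if ev.Type != statefeed.FinalizedCheckpoint {
			continue
		}
		chkpt, ok := ev.Data.(*ethpbv1.EventFinalizedCheckpoint)
		if !ok {
			continue
		}
		log.Infof("new finalized checkpoint: epoch %d, optimistic=%v",
			chkpt.Epoch, chkpt.ExecutionOptimistic)
	}
}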
// sendBlockAttestationsToSlasher sends the incoming block's attestations to the slasher
func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySignedBeaconBlock, preState state.BeaconState) {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
}
}
// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
return isValidPayload, err
}
}
return isValidPayload, nil
}
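Throughout the test hunks above, a five-step sequence (getBlockPreState, validateStateTransition, savePostStateInfo, postBlockProcess, plus the justification/finalization updates) is collapsed into a single onBlock call. A hedged reconstruction of that presumed composition, pieced together from the removed call sites rather than from onBlock's actual body:

// Sketch only: the real onBlock also handles forkchoice locking, execution
// payload validation, and error wrapping, which are elided here.
func (s *Service) onBlockSketch(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, root [32]byte) error {
	preState, err := s.getBlockPreState(ctx, signed.Block())
	if err != nil {
		return err
	}
	postState, err := s.validateStateTransition(ctx, preState, signed)
	if err != nil {
		return err
	}
	if err := s.savePostStateInfo(ctx, root, signed, postState); err != nil {
		return err
	}
	// isValidPayload would come from validateExecutionOnBlock in the full flow.
	return s.postBlockProcess(ctx, signed, root, postState, false)
}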

View File

@@ -17,7 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
@@ -35,6 +34,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
@@ -45,21 +45,22 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot primitives.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
}
// config options for the service.
@@ -307,13 +308,7 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
if err := s.setHead(&head{
finalizedRoot,
finalizedBlock,
finalizedState,
finalizedBlock.Block().Slot(),
false,
}); err != nil {
if err := s.setHead(finalizedRoot, finalizedBlock, finalizedState); err != nil {
return errors.Wrap(err, "could not set head")
}
@@ -407,7 +402,7 @@ func (s *Service) initializeBeaconChain(
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState, coreTime.CurrentEpoch(genesisState)); err != nil {
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
return nil, err
}
@@ -445,13 +440,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
if err := s.setHead(&head{
genesisBlkRoot,
genesisBlk,
genesisState,
genesisBlk.Block().Slot(),
false,
}); err != nil {
if err := s.setHead(genesisBlkRoot, genesisBlk, genesisState); err != nil {
log.WithError(err).Fatal("Could not set head")
}
return nil

View File

@@ -357,7 +357,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState, false))
require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState))
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
require.NoError(t, err)
@@ -377,7 +377,9 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -451,7 +453,9 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := state_native.InitializeFromProtoPhase0(bs)
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -533,7 +533,7 @@ func (s *ChainService) GetProposerHead() [32]byte {
return [32]byte{}
}
// SetForkChoiceGenesisTime mocks the same method in the chain service
// SetForkchoiceGenesisTime mocks the same method in the chain service
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
if s.ForkChoiceStore != nil {
s.ForkChoiceStore.SetGenesisTime(timestamp)

View File

@@ -13,15 +13,6 @@ import (
"go.opencensus.io/trace"
)
// AttDelta contains the attestation rewards and penalties for a single validator.
type AttDelta struct {
HeadReward uint64
SourceReward uint64
SourcePenalty uint64
TargetReward uint64
TargetPenalty uint64
}
// InitializePrecomputeValidators precomputes each individual validator's attested balances and the total sum of all validators' attested balances for the epoch.
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
@@ -235,7 +226,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
return beaconState, errors.New("validator registries not the same length as state's validator registries")
}
attDeltas, err := AttestationsDelta(beaconState, bal, vals)
attsRewards, attsPenalties, err := AttestationsDelta(beaconState, bal, vals)
if err != nil {
return nil, errors.Wrap(err, "could not get attestation delta")
}
@@ -246,12 +237,11 @@ func ProcessRewardsAndPenaltiesPrecompute(
// Compute the post balance of the validator after accounting for the
// attester and proposer rewards and penalties.
delta := attDeltas[i]
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], delta.HeadReward+delta.SourceReward+delta.TargetReward)
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], attsRewards[i])
if err != nil {
return nil, err
}
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], delta.SourcePenalty+delta.TargetPenalty)
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], attsPenalties[i])
vals[i].AfterEpochTransitionBalance = balances[i]
}
@@ -265,8 +255,10 @@ func ProcessRewardsAndPenaltiesPrecompute(
// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
// voting records.
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) ([]*AttDelta, error) {
attDeltas := make([]*AttDelta, len(vals))
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) (rewards, penalties []uint64, err error) {
numOfVals := beaconState.NumValidators()
rewards = make([]uint64, numOfVals)
penalties = make([]uint64, numOfVals)
cfg := params.BeaconConfig()
prevEpoch := time.PrevEpoch(beaconState)
@@ -280,29 +272,29 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
bias := cfg.InactivityScoreBias
inactivityPenaltyQuotient, err := beaconState.InactivityPenaltyQuotient()
if err != nil {
return nil, err
return nil, nil, err
}
inactivityDenominator := bias * inactivityPenaltyQuotient
for i, v := range vals {
attDeltas[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
rewards[i], penalties[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
if err != nil {
return nil, err
return nil, nil, err
}
}
return attDeltas, nil
return rewards, penalties, nil
}
func attestationDelta(
bal *precompute.Balance,
val *precompute.Validator,
baseRewardMultiplier, inactivityDenominator uint64,
inactivityLeak bool) (*AttDelta, error) {
inactivityLeak bool) (reward, penalty uint64, err error) {
eligible := val.IsActivePrevEpoch || (val.IsSlashed && !val.IsWithdrawableCurrentEpoch)
// Per spec `ActiveCurrentEpoch` can't be 0 to process attestation delta.
if !eligible || bal.ActiveCurrentEpoch == 0 {
return &AttDelta{}, nil
return 0, 0, nil
}
cfg := params.BeaconConfig()
@@ -315,32 +307,32 @@ func attestationDelta(
srcWeight := cfg.TimelySourceWeight
tgtWeight := cfg.TimelyTargetWeight
headWeight := cfg.TimelyHeadWeight
attDelta := &AttDelta{}
reward, penalty = uint64(0), uint64(0)
// Process source reward / penalty
if val.IsPrevEpochSourceAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * srcWeight * (bal.PrevEpochAttested / increment)
attDelta.SourceReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
} else {
attDelta.SourcePenalty += baseReward * srcWeight / weightDenominator
penalty += baseReward * srcWeight / weightDenominator
}
// Process target reward / penalty
if val.IsPrevEpochTargetAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * tgtWeight * (bal.PrevEpochTargetAttested / increment)
attDelta.TargetReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
} else {
attDelta.TargetPenalty += baseReward * tgtWeight / weightDenominator
penalty += baseReward * tgtWeight / weightDenominator
}
// Process head reward / penalty
if val.IsPrevEpochHeadAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * headWeight * (bal.PrevEpochHeadAttested / increment)
attDelta.HeadReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
}
@@ -349,10 +341,10 @@ func attestationDelta(
if !val.IsPrevEpochTargetAttester || val.IsSlashed {
n, err := math.Mul64(effectiveBalance, val.InactivityScore)
if err != nil {
return &AttDelta{}, err
return 0, 0, err
}
attDelta.TargetPenalty += n / inactivityDenominator
penalty += n / inactivityDenominator
}
return attDelta, nil
return reward, penalty, nil
}
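To ground the formula, a back-of-the-envelope with made-up magnitudes; the Altair spec weights TimelySourceWeight = 14 and WeightDenominator = 64 are real, everything else here is illustrative.

// SourceReward = baseReward * srcWeight * (PrevEpochAttested / increment)
//                / (activeIncrement * weightDenominator)
//
// With baseReward = 28_000, srcWeight = 14,
// PrevEpochAttested/increment = 900 attested increments, and
// activeIncrement = 1_000 active increments:
//
//   28_000 * 14 * 900 / (1_000 * 64) = 352_800_000 / 64_000 = 5_512
//
// A validator that misses the source vote instead takes the flat penalty:
//
//   baseReward * srcWeight / weightDenominator = 392_000 / 64 = 6_125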

View File

@@ -213,16 +213,9 @@ func TestAttestationsDelta(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -251,16 +244,9 @@ func TestAttestationsDeltaBellatrix(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -299,15 +285,8 @@ func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
}
wanted := make([]uint64, s.NumValidators())
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
for i := range rewards {
wanted[i] += rewards[i]
}

View File

@@ -195,7 +195,6 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}
// ValidateSyncMessageTime validates a sync message to ensure that the provided slot is valid.
// Spec: [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot
func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(slot, uint64(genesisTime.Unix())); err != nil {
return err
@@ -224,12 +223,13 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
// Verify sync message slot is within the time range.
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
syncErr := fmt.Errorf(
"sync message time %v (message slot %d) not within allowable range of %v to %v (current slot %d)",
"sync message time %v (slot %d) not within allowable range of %v (slot %d) to %v (slot %d)",
messageTime,
slot,
lowerBound,
uint64(lowerBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
upperBound,
currentSlot,
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
)
// Wrap error message if sync message is too late.
if messageTime.Before(lowerBound) {

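A rough sketch of the window the error message above reports, assuming the usual slot arithmetic (slot start = genesis + slot * SecondsPerSlot) and a symmetric MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance; the exact bounds in ValidateSyncMessageTime may differ slightly.

// Illustrative only; parameter names follow the configs used above.
func allowedSyncMessageWindow(genesis time.Time, currentSlot, secondsPerSlot uint64, disparity time.Duration) (lower, upper time.Time) {
	slotStart := genesis.Add(time.Duration(currentSlot*secondsPerSlot) * time.Second)
	// A message for the current slot is accepted if its slot time falls
	// within the slot, padded by the clock disparity on both sides.
	lower = slotStart.Add(-disparity)
	upper = slotStart.Add(time.Duration(secondsPerSlot) * time.Second).Add(disparity)
	return lower, upper
}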
View File

@@ -311,7 +311,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 16,
genesisTime: prysmTime.Now().Add(-(15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)),
},
wantedErr: "(message slot 16) not within allowable range of",
wantedErr: "(slot 16) not within allowable range of",
},
{
name: "sync_message.slot == current_slot+CLOCK_DISPARITY",
@@ -327,7 +327,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 100,
genesisTime: prysmTime.Now().Add(-(100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + params.BeaconNetworkConfig().MaximumGossipClockDisparity + 1000*time.Millisecond),
},
wantedErr: "(message slot 100) not within allowable range of",
wantedErr: "(slot 100) not within allowable range of",
},
{
name: "sync_message.slot == current_slot-CLOCK_DISPARITY",
@@ -343,7 +343,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 101,
genesisTime: prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
},
wantedErr: "(message slot 101) not within allowable range of",
wantedErr: "(slot 101) not within allowable range of",
},
{
name: "sync_message.slot is well beyond current slot",

View File

@@ -50,8 +50,6 @@ func ProcessVoluntaryExits(
beaconState state.BeaconState,
exits []*ethpb.SignedVoluntaryExit,
) (state.BeaconState, error) {
maxExitEpoch, churn := v.ValidatorsMaxExitEpochAndChurn(beaconState)
var exitEpoch primitives.Epoch
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
@@ -63,15 +61,8 @@ func ProcessVoluntaryExits(
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorsRoot()); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
if err == nil {
if exitEpoch > maxExitEpoch {
maxExitEpoch = exitEpoch
churn = 1
} else if exitEpoch == maxExitEpoch {
churn++
}
} else if !errors.Is(err, v.ValidatorAlreadyExitedErr) {
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex)
if err != nil {
return nil, err
}
}
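The maxExitEpoch/churn bookkeeping threaded through the loop above avoids rescanning the validator set for every exit. A stripped-down sketch of just that bookkeeping, with initiateExit standing in for InitiateValidatorExit:

// trackExitChurn shows how the running maximum exit epoch and its churn
// count evolve as exits are queued.
func trackExitChurn(initiateExit func(maxExitEpoch, churn uint64) (uint64, error), exits int, maxExitEpoch, churn uint64) (uint64, uint64) {
	for i := 0; i < exits; i++ {
		exitEpoch, err := initiateExit(maxExitEpoch, churn)
		if err != nil {
			continue // the real code skips ValidatorAlreadyExitedErr and aborts on other errors
		}
		if exitEpoch > maxExitEpoch {
			maxExitEpoch = exitEpoch // the churn limit pushed this exit to a later epoch
			churn = 1
		} else if exitEpoch == maxExitEpoch {
			churn++ // another exit queued into the same epoch
		}
	}
	return maxExitEpoch, churn
}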

View File

@@ -110,11 +110,8 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
isActive := helpers.IsActiveValidator(validator, currentEpoch)
belowEjectionBalance := validator.EffectiveBalance <= ejectionBal
if isActive && belowEjectionBalance {
// It is fine to do a quadratic loop here since this should
// rarely happen
maxExitEpoch, churn := validators.ValidatorsMaxExitEpochAndChurn(state)
state, _, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx), maxExitEpoch, churn)
if err != nil && !errors.Is(err, validators.ValidatorAlreadyExitedErr) {
state, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx))
if err != nil {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}
}
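
The restored sweep exits any active validator whose effective balance has dropped to the ejection balance. A compact sketch of just that predicate, with a toy validator type and illustrative Gwei values (a 16 ETH ejection balance):

package main

import "fmt"

const farFutureEpoch = ^uint64(0)

type validator struct {
	activationEpoch  uint64
	exitEpoch        uint64
	effectiveBalance uint64 // in Gwei
}

// shouldEject mirrors the isActive && belowEjectionBalance check above.
func shouldEject(v validator, epoch, ejectionBalance uint64) bool {
	isActive := v.activationEpoch <= epoch && epoch < v.exitEpoch
	return isActive && v.effectiveBalance <= ejectionBalance
}

func main() {
	v := validator{activationEpoch: 0, exitEpoch: farFutureEpoch, effectiveBalance: 16_000_000_000}
	fmt.Println(shouldEject(v, 100, 16_000_000_000)) // true: at the ejection balance
}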

View File

@@ -39,8 +39,6 @@ type BlockProcessedData struct {
SignedBlock interfaces.ReadOnlySignedBeaconBlock
// Verified is true if the block's BLS contents have been verified.
Verified bool
// Optimistic is true if the block is optimistic.
Optimistic bool
}
// ChainStartedData is the data sent with ChainStarted events.

View File

@@ -295,58 +295,61 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]pri
}
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for the input epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, e primitives.Epoch) error {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
// list with committee index and epoch number. It caches the shuffled indices for the current and the next epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
for _, e := range []primitives.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as the shuffled indices. In the current spec,
// the sorted indices are required to retrieve the proposer index. They are also
// used as a fallback when signature verification fails.
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as the shuffled indices. In the current spec,
// the sorted indices are required to retrieve the proposer index. They are also
// used as a fallback when signature verification fails.
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
return nil
}
// UpdateProposerIndicesInCache updates the proposer indices entry of the committee cache.
// Input state is used to retrieve active validator indices.
// Input epoch is the epoch to retrieve proposer indices for.
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState) error {
// The cache uses the state root at the (current epoch - 1)'s slot as key. (e.g. for epoch 2, the key is root at slot 63)
// Which is the reason why we skip genesis epoch.
if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
if time.CurrentEpoch(state) <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
return nil
}
// Use the state root from (current_epoch - 1).
s, err := slots.EpochEnd(epoch - 1)
wantedEpoch := time.PrevEpoch(state)
s, err := slots.EpochEnd(wantedEpoch)
if err != nil {
return err
}
r, err := state.StateRootAtIndex(uint64(s % params.BeaconConfig().SlotsPerHistoricalRoot))
r, err := StateRootAtSlot(state, s)
if err != nil {
return err
}
@@ -363,11 +366,11 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
return nil
}
indices, err := ActiveValidatorIndices(ctx, state, epoch)
indices, err := ActiveValidatorIndices(ctx, state, time.CurrentEpoch(state))
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
proposerIndices, err := precomputeProposerIndices(state, indices)
if err != nil {
return err
}
@@ -429,10 +432,11 @@ func computeCommittee(
// This computes the proposer indices of the current epoch and returns a list of proposer indices,
// where the list index represents the slot number.
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex) ([]primitives.ValidatorIndex, error) {
hashFunc := hash.CustomSHA256Hasher()
proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)
e := time.CurrentEpoch(state)
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return nil, errors.Wrap(err, "could not generate seed")
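
Both versions of UpdateCommitteeCache key the cache by the attester-domain seed and skip recomputation on a hit; the restored version additionally walks epoch and epoch+1, returning early when an epoch's seed is already present. A schematic sketch of that seed-keyed pattern, with a plain map standing in for Prysm's committeeCache and hypothetical seedFor/shuffle callbacks:

package main

import "fmt"

type committees struct {
	shuffled []int
}

var cache = map[string]committees{}

// update fills the cache for epoch and epoch+1, skipping seeds already present.
func update(epoch uint64, seedFor func(uint64) string, shuffle func(uint64) []int) {
	for _, e := range []uint64{epoch, epoch + 1} {
		seed := seedFor(e)
		if _, ok := cache[seed]; ok {
			continue // already cached for this seed
		}
		cache[seed] = committees{shuffled: shuffle(e)}
	}
}

func main() {
	seedFor := func(e uint64) string { return fmt.Sprintf("seed-%d", e) }
	shuffle := func(e uint64) []int { return []int{int(e), int(e) + 1} }
	update(5, seedFor, shuffle)
	fmt.Println(len(cache)) // 2: entries for epochs 5 and 6
}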

View File

@@ -413,7 +413,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
require.NoError(t, err)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
epoch := primitives.Epoch(0)
epoch := primitives.Epoch(1)
idx := primitives.CommitteeIndex(1)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
@@ -423,40 +423,6 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
}
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)
for i := primitives.ValidatorIndex(0); uint64(i) < validatorCount; i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: 1,
}
indices[i] = i
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
e := time.CurrentEpoch(state)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
}
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
validators := make([]*ethpb.Validator, 300000)
for i := 0; i < len(validators); i++ {
@@ -673,7 +639,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
proposerIndices, err := precomputeProposerIndices(state, indices)
require.NoError(t, err)
var wantedProposerIndices []primitives.ValidatorIndex

View File

@@ -184,7 +184,7 @@ func innerShuffleList(input []primitives.ValidatorIndex, seed [32]byte, shuffle
for {
buf[seedSize] = r
ph := hashfunc(buf[:pivotViewSize])
pivot := binary.LittleEndian.Uint64(ph[:8]) % listSize
pivot := bytesutil.FromBytes8(ph[:8]) % listSize
mirror := (pivot + 1) >> 1
binary.LittleEndian.PutUint32(buf[pivotViewSize:], uint32(pivot>>8))
source := hashfunc(buf)
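
The one-line change above swaps binary.LittleEndian.Uint64 for bytesutil.FromBytes8; both appear to read the first 8 hash bytes as a little-endian uint64, so the pivot value is unchanged. A standalone sketch of the pivot derivation in the swap-or-not shuffle, with sha256 standing in for the configured hash function (the real code reuses a preallocated buffer rather than append):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// pivot derives the round pivot as hash(seed || round) interpreted as a
// little-endian uint64, reduced modulo the list size.
func pivot(seed [32]byte, round byte, listSize uint64) uint64 {
	buf := append(seed[:], round)
	h := sha256.Sum256(buf)
	return binary.LittleEndian.Uint64(h[:8]) % listSize
}

func main() {
	var seed [32]byte
	fmt.Println(pivot(seed, 0, 1000))
}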

View File

@@ -262,7 +262,7 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
if err := UpdateProposerIndicesInCache(ctx, state, time.CurrentEpoch(state)); err != nil {
if err := UpdateProposerIndicesInCache(ctx, state); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
}

View File

@@ -15,6 +15,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -13,34 +13,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
// ValidatorAlreadyExitedErr is an error raised when trying to process an exit of
// an already exited validator
var ValidatorAlreadyExitedErr = errors.New("validator already exited")
// ValidatorsMaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
// epoch and the number of validators exiting at that epoch
func ValidatorsMaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
e := val.ExitEpoch()
if e != farFutureEpoch {
if e > maxExitEpoch {
maxExitEpoch = e
churn = 1
} else if e == maxExitEpoch {
churn++
}
}
return nil
})
_ = err
return
}
// InitiateValidatorExit takes in a validator index and updates the
// validator with the correct voluntary exit parameters.
//
@@ -65,43 +42,73 @@ func ValidatorsMaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitive
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex, exitQueueEpoch primitives.Epoch, churn uint64) (state.BeaconState, primitives.Epoch, error) {
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitQueueEpoch {
exitQueueEpoch = exitableEpoch
churn = 0
}
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) (state.BeaconState, error) {
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, 0, err
return nil, err
}
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, validator.ExitEpoch, ValidatorAlreadyExitedErr
return s, nil
}
var exitEpochs []primitives.Epoch
err = s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch())
}
return nil
})
if err != nil {
return nil, err
}
exitEpochs = append(exitEpochs, helpers.ActivationExitEpoch(time.CurrentEpoch(s)))
// Obtain the exit queue epoch as the maximum number in the exit epochs array.
exitQueueEpoch := primitives.Epoch(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}
// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := uint64(0)
err = s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if val.ExitEpoch() == exitQueueEpoch {
var mErr error
exitQueueChurn, mErr = mathutil.Add64(exitQueueChurn, 1)
if mErr != nil {
return mErr
}
}
return nil
})
if err != nil {
return nil, err
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return nil, 0, errors.Wrap(err, "could not get active validator count")
return nil, errors.Wrap(err, "could not get active validator count")
}
currentChurn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
if err != nil {
return nil, 0, errors.Wrap(err, "could not get churn limit")
return nil, errors.Wrap(err, "could not get churn limit")
}
if churn >= currentChurn {
if exitQueueChurn >= churn {
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
if err != nil {
return nil, 0, err
return nil, err
}
}
validator.ExitEpoch = exitQueueEpoch
validator.WithdrawableEpoch, err = exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
if err != nil {
return nil, 0, err
return nil, err
}
if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
return nil, 0, err
return nil, err
}
return s, exitQueueEpoch, nil
return s, nil
}
// SlashValidator slashes the malicious validator's balance and awards
@@ -137,9 +144,8 @@ func SlashValidator(
slashedIdx primitives.ValidatorIndex,
penaltyQuotient uint64,
proposerRewardQuotient uint64) (state.BeaconState, error) {
maxExitEpoch, churn := ValidatorsMaxExitEpochAndChurn(s)
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
if err != nil && !errors.Is(err, ValidatorAlreadyExitedErr) {
s, err := InitiateValidatorExit(ctx, s, slashedIdx)
if err != nil {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
}
currentEpoch := slots.ToEpoch(s.Slot())
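
The restored loop implements the spec snippet quoted in the doc comment: the exit queue epoch is the maximum of all existing exit epochs and the earliest exitable epoch, bumped by one when the churn at that epoch is already full. A self-contained worked example of that computation (the epochs and churn limit are illustrative, not mainnet values):

package main

import "fmt"

// exitQueueEpoch returns the epoch at which a new exit is scheduled.
func exitQueueEpoch(exitEpochs []uint64, activationExitEpoch, churnLimit uint64) uint64 {
	queue := activationExitEpoch
	for _, e := range exitEpochs {
		if e > queue {
			queue = e
		}
	}
	churn := uint64(0)
	for _, e := range exitEpochs {
		if e == queue {
			churn++
		}
	}
	if churn >= churnLimit {
		queue++ // the queue is full at this epoch; exit one epoch later
	}
	return queue
}

func main() {
	// Three validators already exit at epoch 10 and the churn limit is 3,
	// so a fourth exit is pushed to epoch 11.
	fmt.Println(exitQueueEpoch([]uint64{10, 10, 10}, 5, 3)) // 11
}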

View File

@@ -48,9 +48,8 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, 0, 199, 1)
require.ErrorIs(t, err, ValidatorAlreadyExitedErr)
require.Equal(t, exitEpoch, epoch)
newState, err := InitiateValidatorExit(context.Background(), state, 0)
require.NoError(t, err)
v, err := newState.ValidatorAtIndex(0)
require.NoError(t, err)
assert.Equal(t, exitEpoch, v.ExitEpoch, "Already exited")
@@ -67,9 +66,8 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 1)
newState, err := InitiateValidatorExit(context.Background(), state, idx)
require.NoError(t, err)
require.Equal(t, exitedEpoch+2, epoch)
v, err := newState.ValidatorAtIndex(idx)
require.NoError(t, err)
assert.Equal(t, exitedEpoch+2, v.ExitEpoch, "Exit epoch was not the highest")
@@ -87,9 +85,8 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 4)
newState, err := InitiateValidatorExit(context.Background(), state, idx)
require.NoError(t, err)
require.Equal(t, exitedEpoch+3, epoch)
// Because of the exit queue overflow,
// the validator who initiated the exit has to wait one more epoch.
@@ -109,7 +106,7 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
_, _, err = InitiateValidatorExit(context.Background(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
_, err = InitiateValidatorExit(context.Background(), state, 1)
require.ErrorContains(t, "addition overflows", err)
}
@@ -340,78 +337,3 @@ func TestExitedValidatorIndices(t *testing.T) {
assert.DeepEqual(t, tt.wanted, exitedIndices)
}
}
func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
tests := []struct {
state *ethpb.BeaconState
wantedEpoch primitives.Epoch
wantedChurn uint64
}{
{
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
},
},
wantedEpoch: 0,
wantedChurn: 3,
},
{
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
},
},
wantedEpoch: 0,
wantedChurn: 0,
},
{
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 1,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 1,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
},
},
wantedEpoch: 1,
wantedChurn: 2,
},
}
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
epoch, churn := ValidatorsMaxExitEpochAndChurn(s)
require.Equal(t, tt.wantedEpoch, epoch)
require.Equal(t, tt.wantedChurn, churn)
}
}

View File

@@ -297,6 +297,9 @@ func (s *Service) ExchangeTransitionConfiguration(
}
func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
if !features.Get().EnableOptionalEngineMethods {
return nil, errors.New("optional engine methods not enabled")
}
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
defer span.End()
@@ -488,6 +491,9 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
// GetPayloadBodiesByHash returns the relevant payload bodies for the provided block hash.
func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHashes []common.Hash) ([]*pb.ExecutionPayloadBodyV1, error) {
if !features.Get().EnableOptionalEngineMethods {
return nil, errors.New("optional engine methods not enabled")
}
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByHashV1")
defer span.End()
@@ -507,6 +513,9 @@ func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHash
// GetPayloadBodiesByRange returns the relevant payload bodies for the provided range.
func (s *Service) GetPayloadBodiesByRange(ctx context.Context, start, count uint64) ([]*pb.ExecutionPayloadBodyV1, error) {
if !features.Get().EnableOptionalEngineMethods {
return nil, errors.New("optional engine methods not enabled")
}
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByRangeV1")
defer span.End()
@@ -551,7 +560,19 @@ func (s *Service) ReconstructFullBlock(
}
executionBlockHash := common.BytesToHash(header.BlockHash())
payload, err := s.retrievePayloadFromExecutionHash(ctx, executionBlockHash, header, blindedBlock.Version())
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */)
if err != nil {
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
}
if executionBlock == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
}
if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
return nil, EmptyBlockHash
}
executionBlock.Version = blindedBlock.Version()
payload, err := fullPayloadFromExecutionBlock(header, executionBlock)
if err != nil {
return nil, err
}
@@ -598,9 +619,32 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
executionHashes = append(executionHashes, executionBlockHash)
}
}
fullBlocks, err := s.retrievePayloadsFromExecutionHashes(ctx, executionHashes, validExecPayloads, blindedBlocks)
execBlocks, err := s.ExecutionBlocksByHashes(ctx, executionHashes, true /* with txs*/)
if err != nil {
return nil, err
return nil, fmt.Errorf("could not fetch execution blocks with txs by hash %#x: %v", executionHashes, err)
}
// For each valid payload, we reconstruct the full block by combining it with
// the corresponding blinded block.
fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks))
for sliceIdx, realIdx := range validExecPayloads {
b := execBlocks[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionHashes[sliceIdx])
}
header, err := blindedBlocks[realIdx].Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err := fullPayloadFromExecutionBlock(header, b)
if err != nil {
return nil, err
}
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload.Proto())
if err != nil {
return nil, err
}
fullBlocks[realIdx] = fullBlock
}
// For blocks that are pre-merge we simply reconstruct them via an empty
// execution payload.
@@ -616,95 +660,6 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
return fullBlocks, nil
}
func (s *Service) retrievePayloadFromExecutionHash(ctx context.Context, executionBlockHash common.Hash, header interfaces.ExecutionData, version int) (interfaces.ExecutionData, error) {
if features.Get().EnableOptionalEngineMethods {
pBodies, err := s.GetPayloadBodiesByHash(ctx, []common.Hash{executionBlockHash})
if err != nil {
return nil, fmt.Errorf("could not get payload body by hash %#x: %v", executionBlockHash, err)
}
if len(pBodies) != 1 {
return nil, errors.Errorf("could not retrieve the correct number of payload bodies: wanted 1 but got %d", len(pBodies))
}
bdy := pBodies[0]
return fullPayloadFromPayloadBody(header, bdy, version)
}
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */)
if err != nil {
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
}
if executionBlock == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
}
if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
return nil, EmptyBlockHash
}
executionBlock.Version = version
return fullPayloadFromExecutionBlock(header, executionBlock)
}
func (s *Service) retrievePayloadsFromExecutionHashes(
ctx context.Context,
executionHashes []common.Hash,
validExecPayloads []int,
blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks))
var execBlocks []*pb.ExecutionBlock
var payloadBodies []*pb.ExecutionPayloadBodyV1
var err error
if features.Get().EnableOptionalEngineMethods {
payloadBodies, err = s.GetPayloadBodiesByHash(ctx, executionHashes)
if err != nil {
return nil, fmt.Errorf("could not fetch payload bodies by hash %#x: %v", executionHashes, err)
}
} else {
execBlocks, err = s.ExecutionBlocksByHashes(ctx, executionHashes, true /* with txs*/)
if err != nil {
return nil, fmt.Errorf("could not fetch execution blocks with txs by hash %#x: %v", executionHashes, err)
}
}
// For each valid payload, we reconstruct the full block by combining it with
// the corresponding blinded block.
for sliceIdx, realIdx := range validExecPayloads {
var payload interfaces.ExecutionData
if features.Get().EnableOptionalEngineMethods {
b := payloadBodies[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil payload body for request by hash %#x", executionHashes[sliceIdx])
}
header, err := blindedBlocks[realIdx].Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err = fullPayloadFromPayloadBody(header, b, blindedBlocks[realIdx].Version())
if err != nil {
return nil, err
}
} else {
b := execBlocks[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionHashes[sliceIdx])
}
header, err := blindedBlocks[realIdx].Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err = fullPayloadFromExecutionBlock(header, b)
if err != nil {
return nil, err
}
}
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload.Proto())
if err != nil {
return nil, err
}
fullBlocks[realIdx] = fullBlock
}
return fullBlocks, nil
}
func fullPayloadFromExecutionBlock(
header interfaces.ExecutionData, block *pb.ExecutionBlock,
) (interfaces.ExecutionData, error) {
@@ -766,50 +721,6 @@ func fullPayloadFromExecutionBlock(
}, 0) // We can't get the block value and don't care about the block value for this instance
}
func fullPayloadFromPayloadBody(
header interfaces.ExecutionData, body *pb.ExecutionPayloadBodyV1, bVersion int,
) (interfaces.ExecutionData, error) {
if header.IsNil() || body == nil {
return nil, errors.New("execution block and header cannot be nil")
}
if bVersion == version.Bellatrix {
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
})
}
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}
// Handles errors received from the RPC server according to the specification.
func handleRPCError(err error) error {
if err == nil {
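
The reconstruction above relies on the fact that a blinded block's execution header already carries every payload field except the bulky ones; only the transactions (and, post-Capella, withdrawals) have to be fetched from the execution client and spliced back in. A reduced sketch of that splice with simplified placeholder types:

package main

import "fmt"

type header struct {
	ParentHash string
	StateRoot  string
	BlockHash  string
}

type payload struct {
	header
	Transactions [][]byte
}

// fullPayload combines the header fields from the blinded block with the
// transactions fetched from the execution client.
func fullPayload(h header, txs [][]byte) payload {
	return payload{header: h, Transactions: txs}
}

func main() {
	h := header{ParentHash: "0xaa", StateRoot: "0xbb", BlockHash: "0xcc"}
	p := fullPayload(h, [][]byte{{0x01}, {0x02}})
	fmt.Println(p.BlockHash, len(p.Transactions)) // 0xcc 2
}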

View File

@@ -39,7 +39,7 @@ func New() *ForkChoice {
b := make([]uint64, 0)
v := make([]Vote, 0)
return &ForkChoice{store: s, balances: b, votes: v}
return &ForkChoice{store: s, balances: b, votes: v, fcLock: new(fcLock)}
}
// NodeCount returns the current number of nodes in the Store.

View File

@@ -7,7 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
)
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [32]byte) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok {
@@ -16,7 +16,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, la
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
// return early if the parent is LVH
if node.payloadHash == lastValidHash {
if node.payloadHash == payloadHash {
return invalidRoots, nil
}
} else {
@@ -28,7 +28,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, la
}
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
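
setOptimisticToInvalid climbs parent pointers from the reported node until it reaches the ancestor whose payload hash matches the last valid hash. A toy version of that walk (the real Store also checks the context, handles missing nodes, and removes the collected subtree):

package main

import "fmt"

type node struct {
	root        string
	payloadHash string
	parent      *node
}

// invalidRoots collects roots from n upward, stopping before the ancestor
// whose payload hash equals lastValidHash.
func invalidRoots(n *node, lastValidHash string) []string {
	roots := []string{}
	for cur := n; cur != nil && cur.payloadHash != lastValidHash; cur = cur.parent {
		roots = append(roots, cur.root)
	}
	return roots
}

func main() {
	a := &node{root: "a", payloadHash: "ha"}            // last valid block
	b := &node{root: "b", payloadHash: "hb", parent: a} // invalid
	c := &node{root: "c", payloadHash: "hc", parent: b} // invalid
	fmt.Println(invalidRoots(c, "ha")) // [c b]
}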

View File

@@ -1,7 +1,11 @@
package doublylinkedtree
import (
"bytes"
"runtime/debug"
"runtime/pprof"
"sync"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
@@ -11,7 +15,7 @@ import (
// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
sync.RWMutex
*fcLock
store *Store
votes []Vote // tracks individual validator's last vote.
balances []uint64 // tracks individual validator's balances last accounted in votes.
@@ -68,3 +72,52 @@ type Vote struct {
nextRoot [fieldparams.RootLength]byte // next voting root.
nextEpoch primitives.Epoch // epoch of next voting period.
}
type fcLock struct {
lk sync.RWMutex
t time.Time
currChan chan int
}
func (f *fcLock) Lock() {
f.lk.Lock()
f.t = time.Now()
f.currChan = make(chan int)
go func(t time.Time, c chan int) {
tim := time.NewTimer(3 * time.Second)
select {
case <-c:
tim.Stop()
case <-tim.C:
tim.Stop()
pfile := pprof.Lookup("goroutine")
bf := bytes.NewBuffer([]byte{})
err := pfile.WriteTo(bf, 1)
_ = err
log.Warnf("FC lock is taking longer than 3 seconds with the complete stack of %s", bf.String())
}
}(time.Now(), f.currChan)
}
func (f *fcLock) Unlock() {
t := time.Since(f.t)
f.t = time.Time{}
close(f.currChan)
f.lk.Unlock()
if t > time.Second {
log.Warnf("FC lock is taking longer than 1 second: %s with the complete stack of %s", t.String(), string(debug.Stack()))
}
}
func (f *fcLock) RLock() {
t := time.Now()
f.lk.RLock()
dt := time.Since(t)
if dt > time.Second {
log.Warnf("FC Rlock is taking longer than 1 second: %s with stack %s", dt.String(), string(debug.Stack()))
}
}
func (f *fcLock) RUnlock() {
f.lk.RUnlock()
}
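
The fcLock added above wraps a mutex so that Lock arms a watchdog goroutine that dumps diagnostics if the lock is held past a threshold, and Unlock disarms it. A compact sketch of the same pattern with an illustrative 50ms threshold and a plain log line in place of a goroutine profile:

package main

import (
	"log"
	"sync"
	"time"
)

type timedLock struct {
	mu   sync.Mutex
	done chan struct{}
}

// Lock acquires the mutex and arms a watchdog for the critical section.
func (l *timedLock) Lock() {
	l.mu.Lock()
	l.done = make(chan struct{})
	go func(done chan struct{}) {
		select {
		case <-done: // released in time
		case <-time.After(50 * time.Millisecond):
			log.Println("lock held longer than 50ms")
		}
	}(l.done)
}

// Unlock disarms the watchdog and releases the mutex.
func (l *timedLock) Unlock() {
	close(l.done)
	l.mu.Unlock()
}

func main() {
	var l timedLock
	l.Lock()
	time.Sleep(100 * time.Millisecond) // long enough to trigger the watchdog
	l.Unlock()
}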

View File

@@ -158,7 +158,8 @@ func TestConfigureNetwork_ConfigFile(t *testing.T) {
return cmd.LoadFlagsFromConfig(cliCtx, comFlags)
},
Action: func(cliCtx *cli.Context) error {
require.Equal(t, true, cliCtx.IsSet(cmd.BootstrapNode.Name))
// TODO: the flag is currently not set due to https://github.com/urfave/cli/issues/1197
require.Equal(t, false, cliCtx.IsSet(cmd.BootstrapNode.Name))
require.Equal(t, strings.Join([]string{"node1", "node2"}, ","),
strings.Join(cliCtx.StringSlice(cmd.BootstrapNode.Name), ","))

View File

@@ -230,8 +230,8 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
log.Debugln("Registering Determinstic Genesis Service")
if err := beacon.registerDeterminsticGenesisService(); err != nil {
return nil, err
}
@@ -924,7 +924,7 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
return b.services.RegisterService(g)
}
func (b *BeaconNode) registerDeterministicGenesisService() error {
func (b *BeaconNode) registerDeterminsticGenesisService() error {
genesisTime := b.cliCtx.Uint64(flags.InteropGenesisTimeFlag.Name)
genesisValidators := b.cliCtx.Uint64(flags.InteropNumValidatorsFlag.Name)
@@ -987,8 +987,7 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
if cliCtx.Bool(flags.EnableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())
}
svc, err := builder.NewService(b.ctx, opts...)

View File

@@ -47,7 +47,6 @@ go_test(
deps = [
"//async:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",

View File

@@ -14,7 +14,6 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
@@ -40,8 +39,8 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -2,12 +2,9 @@ package kv
import (
"context"
"runtime"
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -26,11 +23,21 @@ func (c *AttCaches) AggregateUnaggregatedAttestations(ctx context.Context) error
if err != nil {
return err
}
return c.aggregateUnaggregatedAtts(ctx, unaggregatedAtts)
return c.aggregateUnaggregatedAttestations(ctx, unaggregatedAtts)
}
func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
// AggregateUnaggregatedAttestationsBySlotIndex aggregates the unaggregated attestations and saves
// newly aggregated attestations in the pool. Unaggregated attestations are filtered by slot and
// committee index.
func (c *AttCaches) AggregateUnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) error {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregateUnaggregatedAttestationsBySlotIndex")
defer span.End()
unaggregatedAtts := c.UnaggregatedAttestationsBySlotIndex(ctx, slot, committeeIndex)
return c.aggregateUnaggregatedAttestations(ctx, unaggregatedAtts)
}
func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAttestations")
defer span.End()
attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation, len(unaggregatedAtts))
@@ -45,32 +52,26 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
// Aggregate unaggregated attestations from the pool and save them in the pool.
// Track the unaggregated attestations that aren't able to aggregate.
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
if features.Get().AggregateParallel {
leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt)
} else {
for _, atts := range attsByDataRoot {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
for _, atts := range attsByDataRoot {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
if err != nil {
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
} else {
h, err := hashFn(aggregated)
if err != nil {
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
} else {
h, err := hashFn(aggregated)
if err != nil {
return err
}
leftOverUnaggregatedAtt[h] = true
return err
}
leftOverUnaggregatedAtt[h] = true
}
}
// Remove the unaggregated attestations from the pool that were successfully aggregated.
for _, att := range unaggregatedAtts {
h, err := hashFn(att)
@@ -87,58 +88,6 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
return nil
}
// aggregateParallel aggregates attestations in parallel for `atts`, saves them in the pool,
// and returns the unaggregated attestations that could not be aggregated.
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations.
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]*ethpb.Attestation, leftOver map[[32]byte]bool) map[[32]byte]bool {
var leftoverLock sync.Mutex
wg := sync.WaitGroup{}
n := runtime.GOMAXPROCS(0) // defaults to the value of runtime.NumCPU
ch := make(chan []*ethpb.Attestation, n)
wg.Add(n)
for i := 0; i < n; i++ {
go func() {
defer wg.Done()
for as := range ch {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(as)
if err != nil {
log.WithError(err).Error("could not aggregate unaggregated attestations")
continue
}
if aggregated == nil {
log.Error("nil aggregated attestation")
continue
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
continue
}
} else {
h, err := hashFn(aggregated)
if err != nil {
log.WithError(err).Error("could not hash attestation")
continue
}
leftoverLock.Lock()
leftOver[h] = true
leftoverLock.Unlock()
}
}
}()
}
for _, as := range atts {
ch <- as
}
close(ch)
wg.Wait()
return leftOver
}
// SaveAggregatedAttestation saves an aggregated attestation in cache.
func (c *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
@@ -216,7 +165,7 @@ func (c *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
defer span.End()
atts := make([]*ethpb.Attestation, 0)
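
The removed aggregateParallel is a standard fan-out: a channel of work items, GOMAXPROCS(0) worker goroutines, and a mutex around the shared leftover map. A generic, runnable sketch of that pattern with an illustrative work function in place of attestation aggregation:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// fanOut distributes jobs to GOMAXPROCS(0) workers and collects results into
// a mutex-protected set, mirroring the structure of aggregateParallel.
func fanOut(jobs [][]int, work func([]int) int) map[int]bool {
	results := make(map[int]bool)
	var mu sync.Mutex
	var wg sync.WaitGroup
	n := runtime.GOMAXPROCS(0) // defaults to runtime.NumCPU
	ch := make(chan []int, n)
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for job := range ch {
				r := work(job)
				mu.Lock()
				results[r] = true
				mu.Unlock()
			}
		}()
	}
	for _, j := range jobs {
		ch <- j
	}
	close(ch)
	wg.Wait()
	return results
}

func main() {
	sum := func(xs []int) int {
		t := 0
		for _, x := range xs {
			t += x
		}
		return t
	}
	fmt.Println(fanOut([][]int{{1, 2}, {3, 4}}, sum)) // map[3:true 7:true]
}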

View File

@@ -9,7 +9,7 @@ import (
"github.com/pkg/errors"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -18,11 +18,6 @@ import (
)
func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
cache := NewAttCaches()
priv, err := bls.RandKey()
require.NoError(t, err)
@@ -44,6 +39,61 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(context.Background(), 2, 0)), "Did not aggregate correctly")
}
func TestKV_Aggregated_AggregateUnaggregatedAttestationsBySlotIndex(t *testing.T) {
cache := NewAttCaches()
genData := func(slot primitives.Slot, committeeIndex primitives.CommitteeIndex) *ethpb.AttestationData {
return util.HydrateAttestationData(&ethpb.AttestationData{
Slot: slot,
CommitteeIndex: committeeIndex,
})
}
genSign := func() []byte {
priv, err := bls.RandKey()
require.NoError(t, err)
return priv.Sign([]byte{'a'}).Marshal()
}
atts := []*ethpb.Attestation{
// The first slot.
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1010}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(1, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(1, 3), Signature: genSign()},
// The second slot.
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(2, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1010}, Data: genData(2, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(2, 4), Signature: genSign()},
}
ctx := context.Background()
// Make sure that no error is produced if aggregation is requested on an empty unaggregated list.
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 1, 2))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 2, 3))
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)), "Did not aggregate correctly")
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 3)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 3)), "Did not aggregate correctly")
// Persist unaggregated attestations, and aggregate on per slot/committee index base.
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 1, 2))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 2, 3))
// Committee attestations at a slot should be aggregated.
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)))
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)), "Did not aggregate correctly")
// Committee attestations haven't been aggregated.
require.Equal(t, 2, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 3)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 3)), "Did not aggregate correctly")
// Committee at a second slot is aggregated.
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 3)))
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(ctx, 2, 3)), "Did not aggregate correctly")
// The second committee at second slot is not aggregated.
require.Equal(t, 1, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 4)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 2, 4)), "Did not aggregate correctly")
}
func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
tests := []struct {
name string

View File

@@ -15,6 +15,7 @@ import (
type Pool interface {
// For Aggregated attestations
AggregateUnaggregatedAttestations(ctx context.Context) error
AggregateUnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) error
SaveAggregatedAttestation(att *ethpb.Attestation) error
SaveAggregatedAttestations(atts []*ethpb.Attestation) error
AggregatedAttestations() []*ethpb.Attestation

View File

@@ -34,17 +34,14 @@ func (s *Service) prepareForkChoiceAtts() {
ticker := slots.NewSlotTickerWithIntervals(time.Unix(int64(s.genesisTime), 0), intervals[:])
for {
select {
case slotInterval := <-ticker.C():
case <-ticker.C():
t := time.Now()
if err := s.batchForkChoiceAtts(s.ctx); err != nil {
log.WithError(err).Error("Could not prepare attestations for fork choice")
}
switch slotInterval.Interval {
case 0:
duration := time.Since(t)
log.WithField("Duration", duration).Debug("aggregated unaggregated attestations")
batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
case 1:
if slots.TimeIntoSlot(s.genesisTime) < intervals[1] {
batchForkChoiceAttsT1.Observe(float64(time.Since(t).Milliseconds()))
} else if slots.TimeIntoSlot(s.genesisTime) < intervals[2] {
batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))
}
case <-s.ctx.Done():
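
The replacement code buckets the latency metric by how far into the slot the batching finished; TimeIntoSlot is essentially now minus the start of the current slot. A minimal sketch with a 12-second slot and illustrative interval boundaries:

package main

import (
	"fmt"
	"time"
)

const slotDuration = 12 * time.Second

// timeIntoSlot returns how far the wall clock is into the current slot.
func timeIntoSlot(genesis time.Time) time.Duration {
	return time.Since(genesis) % slotDuration
}

func main() {
	genesis := time.Now().Add(-25 * time.Second) // two slots plus one second ago
	into := timeIntoSlot(genesis)
	intervals := []time.Duration{4 * time.Second, 8 * time.Second}
	switch {
	case into < intervals[0]:
		fmt.Println("observe in the T1 bucket:", into)
	case into < intervals[1]:
		fmt.Println("observe in the T2 bucket:", into)
	default:
		fmt.Println("too late to observe:", into)
	}
}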

View File

@@ -7,7 +7,6 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -18,11 +17,6 @@ import (
)
func TestBatchAttestations_Multiple(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)

View File

@@ -108,31 +108,12 @@ func (s *Store) DeletePeerData(pid peer.ID) {
}
// SetTrustedPeers sets our desired trusted peer set.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) SetTrustedPeers(peers []peer.ID) {
for _, p := range peers {
s.trustedPeers[p] = true
}
}
// GetTrustedPeers gets our desired trusted peer ids.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) GetTrustedPeers() []peer.ID {
peers := []peer.ID{}
for p := range s.trustedPeers {
peers = append(peers, p)
}
return peers
}
// DeleteTrustedPeers removes peers from trusted peer set.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) DeleteTrustedPeers(peers []peer.ID) {
for _, p := range peers {
delete(s.trustedPeers, p)
}
}
// Peers returns map of peer data objects.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) Peers() map[peer.ID]*PeerData {

View File

@@ -96,16 +96,4 @@ func TestStore_TrustedPeers(t *testing.T) {
assert.Equal(t, true, store.IsTrustedPeer(pid1))
assert.Equal(t, true, store.IsTrustedPeer(pid2))
assert.Equal(t, true, store.IsTrustedPeer(pid3))
tPeers = store.GetTrustedPeers()
assert.Equal(t, 3, len(tPeers))
store.DeleteTrustedPeers(tPeers)
tPeers = store.GetTrustedPeers()
assert.Equal(t, 0, len(tPeers))
assert.Equal(t, false, store.IsTrustedPeer(pid1))
assert.Equal(t, false, store.IsTrustedPeer(pid2))
assert.Equal(t, false, store.IsTrustedPeer(pid3))
}

View File

@@ -560,9 +560,6 @@ func (p *Status) Prune() {
notBadPeer := func(pid peer.ID) bool {
return !p.isBad(pid)
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
}
type peerResp struct {
pid peer.ID
score float64
@@ -570,8 +567,7 @@ func (p *Status) Prune() {
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Should not prune a trusted peer; a peer becomes prunable only after it is removed from the trusted set.
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) && notTrustedPeer(pid) {
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
score: p.Scorers().ScoreNoLock(pid),
@@ -612,9 +608,6 @@ func (p *Status) deprecatedPrune() {
notBadPeer := func(peerData *peerdata.PeerData) bool {
return peerData.BadResponses < p.scorers.BadResponsesScorer().Params().Threshold
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
}
type peerResp struct {
pid peer.ID
badResp int
@@ -622,8 +615,7 @@ func (p *Status) deprecatedPrune() {
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Should not prune a trusted peer; a peer becomes prunable only after it is removed from the trusted set.
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
@@ -920,32 +912,6 @@ func (p *Status) SetTrustedPeers(peers []peer.ID) {
p.store.SetTrustedPeers(peers)
}
// GetTrustedPeers returns a list of all trusted peers' ids
func (p *Status) GetTrustedPeers() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
return p.store.GetTrustedPeers()
}
// DeleteTrustedPeers removes peers from trusted peer set
func (p *Status) DeleteTrustedPeers(peers []peer.ID) {
p.store.Lock()
defer p.store.Unlock()
p.store.DeleteTrustedPeers(peers)
}
// IsTrustedPeers returns if given peer is a Trusted peer
func (p *Status) IsTrustedPeers(pid peer.ID) bool {
p.store.RLock()
defer p.store.RUnlock()
return p.isTrustedPeers(pid)
}
// isTrustedPeers is the lock-free version of IsTrustedPeers.
func (p *Status) isTrustedPeers(pid peer.ID) bool {
return p.store.IsTrustedPeer(pid)
}
// this method assumes the store lock is acquired before
// executing the method.
func (p *Status) isfromBadIP(pid peer.ID) bool {

View File

@@ -802,11 +802,6 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
}
}
p.SetTrustedPeers(trustedPeers)
// Assert we have correct trusted peers
trustedPeers = p.GetTrustedPeers()
assert.Equal(t, 6, len(trustedPeers))
// Assert that all peers above the max limit are prunable.
peersToPrune = p.PeersToPrune()
assert.Equal(t, 16, len(peersToPrune))
@@ -817,34 +812,6 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
assert.NotEqual(t, pid.String(), tPid.String())
}
}
// Add more peers to check if trusted peers can be pruned after they are deleted from trusted peer set.
for i := 0; i < 9; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Delete trusted peers.
p.DeleteTrustedPeers(trustedPeers)
peersToPrune = p.PeersToPrune()
assert.Equal(t, 25, len(peersToPrune))
// Check that trusted peers are pruned.
for _, tPid := range trustedPeers {
pruned := false
for _, pid := range peersToPrune {
if pid.String() == tPid.String() {
pruned = true
}
}
assert.Equal(t, true, pruned)
}
// Assert that we have zero trusted peers
trustedPeers = p.GetTrustedPeers()
assert.Equal(t, 0, len(trustedPeers))
for _, pid := range peersToPrune {
dir, err := p.Direction(pid)
require.NoError(t, err)
@@ -854,8 +821,8 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
// Ensure it is in the descending order.
currScore := p.Scorers().Score(peersToPrune[0])
for _, pid := range peersToPrune {
score := p.Scorers().Score(pid)
assert.Equal(t, true, currScore <= score)
score := p.Scorers().BadResponsesScorer().Score(pid)
assert.Equal(t, true, currScore >= score)
currScore = score
}
}

View File

@@ -174,9 +174,9 @@ func (s *Service) Start() {
s.awaitStateInitialized()
s.isPreGenesis = false
var relayNodes []string
var peersToWatch []string
if s.cfg.RelayNodeAddr != "" {
relayNodes = append(relayNodes, s.cfg.RelayNodeAddr)
peersToWatch = append(peersToWatch, s.cfg.RelayNodeAddr)
if err := dialRelayNode(s.ctx, s.host, s.cfg.RelayNodeAddr); err != nil {
log.WithError(err).Errorf("Could not dial relay node")
}
@@ -213,7 +213,8 @@ func (s *Service) Start() {
// Set trusted peers for those that are provided as static addresses.
pids := peerIdsFromMultiAddrs(addrs)
s.peers.SetTrustedPeers(pids)
s.connectWithAllTrustedPeers(addrs)
peersToWatch = append(peersToWatch, s.cfg.StaticPeers...)
s.connectWithAllPeers(addrs)
}
// Initialize metadata according to the
// current epoch.
@@ -225,7 +226,7 @@ func (s *Service) Start() {
// Periodic functions.
async.RunEvery(s.ctx, params.BeaconNetworkConfig().TtfbTimeout, func() {
ensurePeerConnections(s.ctx, s.host, s.peers, relayNodes...)
ensurePeerConnections(s.ctx, s.host, peersToWatch...)
})
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
async.RunEvery(s.ctx, params.BeaconNetworkConfig().RespTimeout, s.updateMetrics)
@@ -398,24 +399,6 @@ func (s *Service) awaitStateInitialized() {
}
}
func (s *Service) connectWithAllTrustedPeers(multiAddrs []multiaddr.Multiaddr) {
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
if err != nil {
log.WithError(err).Error("Could not convert to peer address info's from multiaddresses")
return
}
for _, info := range addrInfos {
// add peer into peer status
s.peers.Add(nil, info.ID, info.Addrs[0], network.DirUnknown)
// make each dial non-blocking
go func(info peer.AddrInfo) {
if err := s.connectWithPeer(s.ctx, info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
}(info)
}
}
func (s *Service) connectWithAllPeers(multiAddrs []multiaddr.Multiaddr) {
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
if err != nil {

View File

@@ -5,52 +5,28 @@ import (
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
)
// ensurePeerConnections will attempt to reestablish a connection to each peer
// to which there are currently no open connections.
func ensurePeerConnections(ctx context.Context, h host.Host, peers *peers.Status, relayNodes ...string) {
// Reset peersToWatch every time, then add the relay nodes and the trusted peers.
var peersToWatch []*peer.AddrInfo
// add RelayNodes
for _, node := range relayNodes {
if node == "" {
func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
if len(peers) == 0 {
return
}
for _, p := range peers {
if p == "" {
continue
}
peerInfo, err := MakePeer(node)
peerInfo, err := MakePeer(p)
if err != nil {
log.WithError(err).Error("Could not make peer")
continue
}
peersToWatch = append(peersToWatch, peerInfo)
}
// add trusted peers
trustedPeers := peers.GetTrustedPeers()
for _, trustedPeer := range trustedPeers {
maddr, err := peers.Address(trustedPeer)
// avoid invalid trusted peers
if err != nil || maddr == nil {
log.WithField("peer", trustedPeers).WithError(err).Error("Could not get peer address")
continue
}
peerInfo := &peer.AddrInfo{ID: trustedPeer}
peerInfo.Addrs = []ma.Multiaddr{maddr}
peersToWatch = append(peersToWatch, peerInfo)
}
if len(peersToWatch) == 0 {
return
}
for _, p := range peersToWatch {
c := h.Network().ConnsToPeer(p.ID)
c := h.Network().ConnsToPeer(peerInfo.ID)
if len(c) == 0 {
if err := connectWithTimeout(ctx, h, p); err != nil {
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
if err := connectWithTimeout(ctx, h, peerInfo); err != nil {
log.WithField("peer", peerInfo.ID).WithField("addrs", peerInfo.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
continue
}
}
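
The restored ensurePeerConnections reduces to: for each watched address, if the host has no open connections to that peer, redial it. A schematic sketch with a narrow interface standing in for libp2p's host.Host and an assumed 10-second dial timeout:

package main

import (
	"context"
	"fmt"
	"time"
)

type host interface {
	ConnCount(id string) int
	Connect(ctx context.Context, id string) error
}

// ensureConnections redials every watched peer that currently has no
// open connections.
func ensureConnections(ctx context.Context, h host, peers ...string) {
	for _, p := range peers {
		if p == "" {
			continue
		}
		if h.ConnCount(p) > 0 {
			continue // already connected
		}
		cctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		if err := h.Connect(cctx, p); err != nil {
			fmt.Println("failed to reconnect to", p, err)
		}
		cancel()
	}
}

type fakeHost struct{}

func (fakeHost) ConnCount(string) int                  { return 0 }
func (fakeHost) Connect(context.Context, string) error { return nil }

func main() {
	ensureConnections(context.Background(), fakeHost{}, "peerA", "")
}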

View File

@@ -24,21 +24,17 @@ go_library(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/builder:go_default_library",
"//beacon-chain/rpc/eth/debug:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/node:go_default_library",
"//beacon-chain/rpc/eth/rewards:go_default_library",
"//beacon-chain/rpc/eth/validator:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/node:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/beacon:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/debug:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/rpc/prysm/validator:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen:go_default_library",

View File

@@ -12,18 +12,17 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/gateway/apimiddleware:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/eth/v2:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_r3labs_sse_v2//:go_default_library",
"@com_github_r3labs_sse//:go_default_library",
],
)
@@ -36,7 +35,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//api/gateway/apimiddleware:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
@@ -46,6 +44,6 @@ go_test(
"//testing/require:go_default_library",
"//time/slots:go_default_library",
"@com_github_gogo_protobuf//types:go_default_library",
"@com_github_r3labs_sse_v2//:go_default_library",
"@com_github_r3labs_sse//:go_default_library",
],
)

View File

@@ -12,12 +12,18 @@ import (
"strconv"
"strings"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/r3labs/sse/v2"
"github.com/r3labs/sse"
)
const (
versionHeader = "Eth-Consensus-Version"
grpcVersionHeader = "Grpc-metadata-Eth-Consensus-Version"
jsonMediaType = "application/json"
octetStreamMediaType = "application/octet-stream"
)
// match a number with optional decimals
@@ -217,7 +223,7 @@ func sszRequested(req *http.Request) (bool, error) {
for _, t := range types {
values := strings.Split(t, ";")
name := values[0]
if name != api.JsonMediaType && name != api.OctetStreamMediaType {
if name != jsonMediaType && name != octetStreamMediaType {
continue
}
// no params specified
@@ -242,7 +248,7 @@ func sszRequested(req *http.Request) (bool, error) {
}
}
return currentType == api.OctetStreamMediaType, nil
return currentType == octetStreamMediaType, nil
}
func sszPosted(req *http.Request) bool {
@@ -253,7 +259,7 @@ func sszPosted(req *http.Request) bool {
if len(ct) != 1 {
return false
}
return ct[0] == api.OctetStreamMediaType
return ct[0] == octetStreamMediaType
}
func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
@@ -272,10 +278,10 @@ func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint
}
func prepareCustomHeaders(req *http.Request) {
ver := req.Header.Get(api.VersionHeader)
ver := req.Header.Get(versionHeader)
if ver != "" {
req.Header.Del(api.VersionHeader)
req.Header.Add(grpc.WithPrefix(api.VersionHeader), ver)
req.Header.Del(versionHeader)
req.Header.Add(grpcVersionHeader, ver)
}
}
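
For context: the Grpc-Metadata- prefix follows the usual grpc-gateway convention, where incoming HTTP headers carrying that prefix are forwarded to the gRPC handler as metadata (with the prefix stripped). Renaming Eth-Consensus-Version this way is what lets the consensus version reach the backend.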
@@ -291,7 +297,7 @@ func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
}
req.Body = io.NopCloser(bytes.NewBuffer(data))
req.ContentLength = int64(len(data))
req.Header.Set("Content-Type", api.JsonMediaType)
req.Header.Set("Content-Type", jsonMediaType)
return nil
}
@@ -319,9 +325,9 @@ func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWrite
}
}
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", api.OctetStreamMediaType)
w.Header().Set("Content-Type", octetStreamMediaType)
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
w.Header().Set(api.VersionHeader, respVersion)
w.Header().Set(versionHeader, respVersion)
if statusCodeHeader != "" {
code, err := strconv.Atoi(statusCodeHeader)
if err != nil {

View File

@@ -11,13 +11,12 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/r3labs/sse/v2"
"github.com/r3labs/sse"
)
type testSSZResponseJson struct {
@@ -46,7 +45,7 @@ func (t testSSZResponseJson) SSZFinalized() bool {
func TestSSZRequested(t *testing.T) {
t.Run("ssz_requested", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{api.OctetStreamMediaType}
request.Header["Accept"] = []string{octetStreamMediaType}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
@@ -54,7 +53,7 @@ func TestSSZRequested(t *testing.T) {
t.Run("ssz_content_type_first", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s", api.OctetStreamMediaType, api.JsonMediaType)}
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s", octetStreamMediaType, jsonMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
@@ -62,7 +61,7 @@ func TestSSZRequested(t *testing.T) {
t.Run("ssz_content_type_preferred_1", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.9,%s", api.JsonMediaType, api.OctetStreamMediaType)}
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.9,%s", jsonMediaType, octetStreamMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
@@ -70,7 +69,7 @@ func TestSSZRequested(t *testing.T) {
t.Run("ssz_content_type_preferred_2", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.95,%s;q=0.9", api.OctetStreamMediaType, api.JsonMediaType)}
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.95,%s;q=0.9", octetStreamMediaType, jsonMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
@@ -78,7 +77,7 @@ func TestSSZRequested(t *testing.T) {
t.Run("other_content_type_preferred", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9", api.JsonMediaType, api.OctetStreamMediaType)}
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9", jsonMediaType, octetStreamMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
@@ -86,7 +85,7 @@ func TestSSZRequested(t *testing.T) {
t.Run("other_params", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9,otherparam=xyz", api.JsonMediaType, api.OctetStreamMediaType)}
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9,otherparam=xyz", jsonMediaType, octetStreamMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
@@ -154,7 +153,7 @@ func TestPreparePostedSszData(t *testing.T) {
preparePostedSSZData(request)
assert.Equal(t, int64(19), request.ContentLength)
assert.Equal(t, api.JsonMediaType, request.Header.Get("Content-Type"))
assert.Equal(t, jsonMediaType, request.Header.Get("Content-Type"))
}
func TestSerializeMiddlewareResponseIntoSSZ(t *testing.T) {
@@ -210,12 +209,12 @@ func TestWriteSSZResponseHeaderAndBody(t *testing.T) {
v, ok = writer.Header()["Content-Type"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, api.OctetStreamMediaType, v[0])
assert.Equal(t, octetStreamMediaType, v[0])
v, ok = writer.Header()["Content-Disposition"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "attachment; filename=test.ssz", v[0])
v, ok = writer.Header()[api.VersionHeader]
v, ok = writer.Header()[versionHeader]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "version", v[0])

View File

@@ -63,6 +63,51 @@ func wrapFeeRecipientsArray(
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Validator/registerValidator expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct.
func wrapSignedValidatorRegistrationsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SignedValidatorRegistrationsRequestJson); !ok {
return true, nil
}
registrations := make([]*SignedValidatorRegistrationJson, 0)
if err := json.NewDecoder(req.Body).Decode(&registrations); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SignedValidatorRegistrationsRequestJson{Registrations: registrations}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
return true, nil
}
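
For illustration, the array-wrapping hooks in this file all perform the same body rewrite; for this one the shape change looks like the following (field name taken from SignedValidatorRegistrationsRequestJson later in this diff; payload contents elided):

Body as posted by a client, per the beacon-API spec:
    [{"message": {...}, "signature": "0x..."}]
Body after wrapping, as forwarded to the gateway:
    {"registrations": [{"message": {...}, "signature": "0x..."}]}

The remaining hooks wrap into a "data" field instead, e.g. {"data": [...]}.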
// https://ethereum.github.io/beacon-apis/#/Beacon/submitPoolAttestations expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapAttestationsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitAttestationRequestJson); ok {
atts := make([]*AttestationJson, 0)
if err := json.NewDecoder(req.Body).Decode(&atts); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitAttestationRequestJson{Data: atts}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// Some endpoints e.g. https://ethereum.github.io/beacon-apis/#/Validator/getAttesterDuties expect posting a top-level array of validator indices.
// We make it more proto-friendly by wrapping it in a struct with an 'Index' field.
func wrapValidatorIndicesArray(
@@ -85,6 +130,72 @@ func wrapValidatorIndicesArray(
return true, nil
}
// https://ethereum.github.io/beacon-apis/#/Validator/publishAggregateAndProofs expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSignedAggregateAndProofArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitAggregateAndProofsRequestJson); ok {
data := make([]*SignedAggregateAttestationAndProofJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitAggregateAndProofsRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-apis/#/Validator/prepareBeaconCommitteeSubnet expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapBeaconCommitteeSubscriptionsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitBeaconCommitteeSubscriptionsRequestJson); ok {
data := make([]*BeaconCommitteeSubscribeJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitBeaconCommitteeSubscriptionsRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Validator/prepareSyncCommitteeSubnets expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSyncCommitteeSubscriptionsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitSyncCommitteeSubscriptionRequestJson); ok {
data := make([]*SyncCommitteeSubscriptionJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitSyncCommitteeSubscriptionRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolSyncCommitteeSignatures expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSyncCommitteeSignaturesArray(
@@ -107,6 +218,28 @@ func wrapSyncCommitteeSignaturesArray(
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Validator/publishContributionAndProofs expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSignedContributionAndProofsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitContributionAndProofsRequestJson); ok {
data := make([]*SignedContributionAndProofJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitContributionAndProofsRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
type phase0PublishBlockRequestJson struct {
Phase0Block *BeaconBlockJson `json:"phase0_block"`
Signature string `json:"signature" hex:"true"`

View File

@@ -19,6 +19,46 @@ import (
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
func TestWrapAttestationArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAttestationRequestJson{},
}
unwrappedAtts := []*AttestationJson{{AggregationBits: "1010"}}
unwrappedAttsJson, err := json.Marshal(unwrappedAtts)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedAttsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapAttestationsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedAtts := &SubmitAttestationRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedAtts))
require.Equal(t, 1, len(wrappedAtts.Data), "wrong number of wrapped items")
assert.Equal(t, "1010", wrappedAtts.Data[0].AggregationBits)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAttestationRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapAttestationsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapValidatorIndicesArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
@@ -100,6 +140,151 @@ func TestWrapBLSChangesArray(t *testing.T) {
})
}
func TestWrapSignedAggregateAndProofArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAggregateAndProofsRequestJson{},
}
unwrappedAggs := []*SignedAggregateAttestationAndProofJson{{Signature: "sig"}}
unwrappedAggsJson, err := json.Marshal(unwrappedAggs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedAggsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedAggregateAndProofArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedAggs := &SubmitAggregateAndProofsRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedAggs))
require.Equal(t, 1, len(wrappedAggs.Data), "wrong number of wrapped items")
assert.Equal(t, "sig", wrappedAggs.Data[0].Signature)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAggregateAndProofsRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedAggregateAndProofArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapBeaconCommitteeSubscriptionsArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitBeaconCommitteeSubscriptionsRequestJson{},
}
unwrappedSubs := []*BeaconCommitteeSubscribeJson{{
ValidatorIndex: "1",
CommitteeIndex: "1",
CommitteesAtSlot: "1",
Slot: "1",
IsAggregator: true,
}}
unwrappedSubsJson, err := json.Marshal(unwrappedSubs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedSubsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapBeaconCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedSubs := &SubmitBeaconCommitteeSubscriptionsRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedSubs))
require.Equal(t, 1, len(wrappedSubs.Data), "wrong number of wrapped items")
assert.Equal(t, "1", wrappedSubs.Data[0].ValidatorIndex)
assert.Equal(t, "1", wrappedSubs.Data[0].CommitteeIndex)
assert.Equal(t, "1", wrappedSubs.Data[0].CommitteesAtSlot)
assert.Equal(t, "1", wrappedSubs.Data[0].Slot)
assert.Equal(t, true, wrappedSubs.Data[0].IsAggregator)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitBeaconCommitteeSubscriptionsRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapBeaconCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapSyncCommitteeSubscriptionsArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitSyncCommitteeSubscriptionRequestJson{},
}
unwrappedSubs := []*SyncCommitteeSubscriptionJson{
{
ValidatorIndex: "1",
SyncCommitteeIndices: []string{"1", "2"},
UntilEpoch: "1",
},
{
ValidatorIndex: "2",
SyncCommitteeIndices: []string{"3", "4"},
UntilEpoch: "2",
},
}
unwrappedSubsJson, err := json.Marshal(unwrappedSubs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedSubsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSyncCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedSubs := &SubmitSyncCommitteeSubscriptionRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedSubs))
require.Equal(t, 2, len(wrappedSubs.Data), "wrong number of wrapped items")
assert.Equal(t, "1", wrappedSubs.Data[0].ValidatorIndex)
require.Equal(t, 2, len(wrappedSubs.Data[0].SyncCommitteeIndices), "wrong number of committee indices")
assert.Equal(t, "1", wrappedSubs.Data[0].SyncCommitteeIndices[0])
assert.Equal(t, "2", wrappedSubs.Data[0].SyncCommitteeIndices[1])
assert.Equal(t, "1", wrappedSubs.Data[0].UntilEpoch)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitSyncCommitteeSubscriptionRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSyncCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapSyncCommitteeSignaturesArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
@@ -148,6 +333,75 @@ func TestWrapSyncCommitteeSignaturesArray(t *testing.T) {
})
}
func TestWrapSignedContributionAndProofsArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitContributionAndProofsRequestJson{},
}
unwrapped := []*SignedContributionAndProofJson{
{
Message: &ContributionAndProofJson{
AggregatorIndex: "1",
Contribution: &SyncCommitteeContributionJson{
Slot: "1",
BeaconBlockRoot: "root",
SubcommitteeIndex: "1",
AggregationBits: "bits",
Signature: "sig",
},
SelectionProof: "proof",
},
Signature: "sig",
},
{
Message: &ContributionAndProofJson{},
Signature: "sig",
},
}
unwrappedJson, err := json.Marshal(unwrapped)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedContributionAndProofsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrapped := &SubmitContributionAndProofsRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrapped))
require.Equal(t, 2, len(wrapped.Data), "wrong number of wrapped items")
assert.Equal(t, "sig", wrapped.Data[0].Signature)
require.NotNil(t, wrapped.Data[0].Message)
msg := wrapped.Data[0].Message
assert.Equal(t, "1", msg.AggregatorIndex)
assert.Equal(t, "proof", msg.SelectionProof)
require.NotNil(t, msg.Contribution)
assert.Equal(t, "1", msg.Contribution.Slot)
assert.Equal(t, "root", msg.Contribution.BeaconBlockRoot)
assert.Equal(t, "1", msg.Contribution.SubcommitteeIndex)
assert.Equal(t, "bits", msg.Contribution.AggregationBits)
assert.Equal(t, "sig", msg.Contribution.Signature)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitContributionAndProofsRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedContributionAndProofsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestSetInitialPublishBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()

View File

@@ -32,10 +32,13 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/beacon/blinded_blocks",
"/eth/v1/beacon/blocks/{block_id}",
"/eth/v2/beacon/blocks/{block_id}",
"/eth/v1/beacon/blocks/{block_id}/root",
"/eth/v1/beacon/blocks/{block_id}/attestations",
"/eth/v1/beacon/blinded_blocks/{block_id}",
"/eth/v1/beacon/pool/attestations",
"/eth/v1/beacon/pool/attester_slashings",
"/eth/v1/beacon/pool/proposer_slashings",
"/eth/v1/beacon/pool/voluntary_exits",
"/eth/v1/beacon/pool/bls_to_execution_changes",
"/eth/v1/beacon/pool/sync_committees",
"/eth/v1/beacon/pool/bls_to_execution_changes",
@@ -45,6 +48,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/node/peers/{peer_id}",
"/eth/v1/node/peer_count",
"/eth/v1/node/version",
"/eth/v1/node/syncing",
"/eth/v1/node/health",
"/eth/v1/debug/beacon/states/{state_id}",
"/eth/v2/debug/beacon/states/{state_id}",
@@ -61,7 +65,15 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/validator/blocks/{slot}",
"/eth/v2/validator/blocks/{slot}",
"/eth/v1/validator/blinded_blocks/{slot}",
"/eth/v1/validator/attestation_data",
"/eth/v1/validator/aggregate_attestation",
"/eth/v1/validator/beacon_committee_subscriptions",
"/eth/v1/validator/sync_committee_subscriptions",
"/eth/v1/validator/aggregate_and_proofs",
"/eth/v1/validator/sync_committee_contribution",
"/eth/v1/validator/contribution_and_proofs",
"/eth/v1/validator/prepare_beacon_proposer",
"/eth/v1/validator/register_validator",
"/eth/v1/validator/liveness/{epoch}",
}
}
@@ -124,6 +136,8 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreSerializeMiddlewareResponseIntoJson: serializeV2Block,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZV2}
case "/eth/v1/beacon/blocks/{block_id}/root":
endpoint.GetResponse = &BlockRootResponseJson{}
case "/eth/v1/beacon/blocks/{block_id}/attestations":
endpoint.GetResponse = &BlockAttestationsResponseJson{}
case "/eth/v1/beacon/blinded_blocks/{block_id}":
@@ -132,12 +146,23 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreSerializeMiddlewareResponseIntoJson: serializeBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBlindedBeaconBlockSSZ}
case "/eth/v1/beacon/pool/attestations":
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
endpoint.GetResponse = &AttestationsPoolResponseJson{}
endpoint.PostRequest = &SubmitAttestationRequestJson{}
endpoint.Err = &IndexedVerificationFailureErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapAttestationsArray,
}
case "/eth/v1/beacon/pool/attester_slashings":
endpoint.PostRequest = &AttesterSlashingJson{}
endpoint.GetResponse = &AttesterSlashingsPoolResponseJson{}
case "/eth/v1/beacon/pool/proposer_slashings":
endpoint.PostRequest = &ProposerSlashingJson{}
endpoint.GetResponse = &ProposerSlashingsPoolResponseJson{}
case "/eth/v1/beacon/pool/voluntary_exits":
endpoint.PostRequest = &SignedVoluntaryExitJson{}
endpoint.GetResponse = &VoluntaryExitsPoolResponseJson{}
case "/eth/v1/beacon/pool/bls_to_execution_changes":
endpoint.PostRequest = &SubmitBLSToExecutionChangesRequest{}
endpoint.GetResponse = &BLSToExecutionChangesPoolResponseJson{}
@@ -165,6 +190,8 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.GetResponse = &PeerCountResponseJson{}
case "/eth/v1/node/version":
endpoint.GetResponse = &VersionResponseJson{}
case "/eth/v1/node/syncing":
endpoint.GetResponse = &SyncingResponseJson{}
case "/eth/v1/node/health":
// Use default endpoint
case "/eth/v1/debug/beacon/states/{state_id}":
@@ -233,11 +260,47 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
case "/eth/v1/validator/attestation_data":
endpoint.GetResponse = &ProduceAttestationDataResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
case "/eth/v1/validator/aggregate_attestation":
endpoint.GetResponse = &AggregateAttestationResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "attestation_data_root", Hex: true}, {Name: "slot"}}
case "/eth/v1/validator/beacon_committee_subscriptions":
endpoint.PostRequest = &SubmitBeaconCommitteeSubscriptionsRequestJson{}
endpoint.Err = &NodeSyncDetailsErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapBeaconCommitteeSubscriptionsArray,
}
case "/eth/v1/validator/sync_committee_subscriptions":
endpoint.PostRequest = &SubmitSyncCommitteeSubscriptionRequestJson{}
endpoint.Err = &NodeSyncDetailsErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSyncCommitteeSubscriptionsArray,
}
case "/eth/v1/validator/aggregate_and_proofs":
endpoint.PostRequest = &SubmitAggregateAndProofsRequestJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedAggregateAndProofArray,
}
case "/eth/v1/validator/sync_committee_contribution":
endpoint.GetResponse = &ProduceSyncCommitteeContributionResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "subcommittee_index"}, {Name: "beacon_block_root", Hex: true}}
case "/eth/v1/validator/contribution_and_proofs":
endpoint.PostRequest = &SubmitContributionAndProofsRequestJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedContributionAndProofsArray,
}
case "/eth/v1/validator/prepare_beacon_proposer":
endpoint.PostRequest = &FeeRecipientsRequestJSON{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapFeeRecipientsArray,
}
case "/eth/v1/validator/register_validator":
endpoint.PostRequest = &SignedValidatorRegistrationsRequestJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedValidatorRegistrationsArray,
}
case "/eth/v1/validator/liveness/{epoch}":
endpoint.PostRequest = &ValidatorIndicesJson{}
endpoint.PostResponse = &LivenessResponseJson{}

View File

@@ -4,7 +4,7 @@ import (
"strings"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
)
@@ -143,6 +143,14 @@ type BlockAttestationsResponseJson struct {
Finalized bool `json:"finalized"`
}
type AttestationsPoolResponseJson struct {
Data []*AttestationJson `json:"data"`
}
type SubmitAttestationRequestJson struct {
Data []*AttestationJson `json:"data"`
}
type AttesterSlashingsPoolResponseJson struct {
Data []*AttesterSlashingJson `json:"data"`
}
@@ -151,6 +159,10 @@ type ProposerSlashingsPoolResponseJson struct {
Data []*ProposerSlashingJson `json:"data"`
}
type VoluntaryExitsPoolResponseJson struct {
Data []*SignedVoluntaryExitJson `json:"data"`
}
type SubmitSyncCommitteeSignaturesRequestJson struct {
Data []*SyncCommitteeMessageJson `json:"data"`
}
@@ -187,7 +199,7 @@ type VersionResponseJson struct {
}
type SyncingResponseJson struct {
Data *shared.SyncDetails `json:"data"`
Data *helpers.SyncDetailsJson `json:"data"`
}
type BeaconStateResponseJson struct {
@@ -256,10 +268,18 @@ type ProduceBlindedBlockResponseJson struct {
Data *BlindedBeaconBlockContainerJson `json:"data"`
}
type ProduceAttestationDataResponseJson struct {
Data *AttestationDataJson `json:"data"`
}
type AggregateAttestationResponseJson struct {
Data *AttestationJson `json:"data"`
}
type SubmitBeaconCommitteeSubscriptionsRequestJson struct {
Data []*BeaconCommitteeSubscribeJson `json:"data"`
}
type BeaconCommitteeSubscribeJson struct {
ValidatorIndex string `json:"validator_index"`
CommitteeIndex string `json:"committee_index"`
@@ -268,10 +288,28 @@ type BeaconCommitteeSubscribeJson struct {
IsAggregator bool `json:"is_aggregator"`
}
type SubmitSyncCommitteeSubscriptionRequestJson struct {
Data []*SyncCommitteeSubscriptionJson `json:"data"`
}
type SyncCommitteeSubscriptionJson struct {
ValidatorIndex string `json:"validator_index"`
SyncCommitteeIndices []string `json:"sync_committee_indices"`
UntilEpoch string `json:"until_epoch"`
}
type SubmitAggregateAndProofsRequestJson struct {
Data []*SignedAggregateAttestationAndProofJson `json:"data"`
}
type ProduceSyncCommitteeContributionResponseJson struct {
Data *SyncCommitteeContributionJson `json:"data"`
}
type SubmitContributionAndProofsRequestJson struct {
Data []*SignedContributionAndProofJson `json:"data"`
}
type ForkChoiceNodeResponseJson struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root" hex:"true"`
@@ -966,6 +1004,22 @@ type SyncCommitteeContributionJson struct {
Signature string `json:"signature" hex:"true"`
}
type ValidatorRegistrationJson struct {
FeeRecipient string `json:"fee_recipient" hex:"true"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey string `json:"pubkey" hex:"true"`
}
type SignedValidatorRegistrationJson struct {
Message *ValidatorRegistrationJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedValidatorRegistrationsRequestJson struct {
Registrations []*SignedValidatorRegistrationJson `json:"registrations"`
}
type ForkChoiceNodeJson struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root" hex:"true"`
@@ -1165,7 +1219,7 @@ type SingleIndexedVerificationFailureJson struct {
type NodeSyncDetailsErrorJson struct {
apimiddleware.DefaultErrorJson
SyncDetails shared.SyncDetails `json:"sync_details"`
SyncDetails helpers.SyncDetailsJson `json:"sync_details"`
}
type EventErrorJson struct {

View File

@@ -1,43 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"errors.go",
"log.go",
"service.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -1,49 +0,0 @@
package core
import (
"net/http"
"google.golang.org/grpc/codes"
)
type ErrorReason uint8
const (
Internal = iota
Unavailable
BadRequest
// Add more errors as needed
)
type RpcError struct {
Err error
Reason ErrorReason
}
func ErrorReasonToGRPC(reason ErrorReason) codes.Code {
switch reason {
case Internal:
return codes.Internal
case Unavailable:
return codes.Unavailable
case BadRequest:
return codes.InvalidArgument
// Add more cases for other error reasons as needed
default:
return codes.Internal
}
}
func ErrorReasonToHTTP(reason ErrorReason) int {
switch reason {
case Internal:
return http.StatusInternalServerError
case Unavailable:
return http.StatusServiceUnavailable
case BadRequest:
return http.StatusBadRequest
// Add more cases for other error reasons as needed
default:
return http.StatusInternalServerError
}
}
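
A hypothetical caller-side sketch (writeRPCError and toStatusError are not part of this package, and net/http plus google.golang.org/grpc/status are assumed to be imported) showing how handlers are expected to consume these helpers:

// HTTP handlers: map the shared error onto a status code.
func writeRPCError(w http.ResponseWriter, rpcErr *RpcError) {
	http.Error(w, rpcErr.Err.Error(), ErrorReasonToHTTP(rpcErr.Reason))
}

// gRPC handlers: the same, via google.golang.org/grpc/status.
func toStatusError(rpcErr *RpcError) error {
	return status.Error(ErrorReasonToGRPC(rpcErr.Reason), rpcErr.Err.Error())
}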

View File

@@ -1,5 +0,0 @@
package core
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "rpc/core")

View File

@@ -1,22 +0,0 @@
package core
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)
type Service struct {
HeadFetcher blockchain.HeadFetcher
GenesisTimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
Broadcaster p2p.Broadcaster
SyncCommitteePool synccommittee.Pool
OperationNotifier opfeed.Notifier
AttestationCache *cache.AttestationCache
StateGen stategen.StateManager
}

View File

@@ -1,458 +0,0 @@
package core
import (
"bytes"
"context"
"fmt"
"sort"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// AggregateBroadcastFailedError represents an error scenario where
// broadcasting an aggregate selection proof failed.
type AggregateBroadcastFailedError struct {
err error
}
// NewAggregateBroadcastFailedError creates a new error instance.
func NewAggregateBroadcastFailedError(err error) AggregateBroadcastFailedError {
return AggregateBroadcastFailedError{
err: err,
}
}
// Error returns the underlying error message.
func (e *AggregateBroadcastFailedError) Error() string {
return fmt.Sprintf("could not broadcast signed aggregated attestation: %s", e.err.Error())
}
// ComputeValidatorPerformance reports the validator's latest balance along with other important metrics on
// rewards and penalties throughout its lifecycle in the beacon chain.
func (s *Service) ComputeValidatorPerformance(
ctx context.Context,
req *ethpb.ValidatorPerformanceRequest,
) (*ethpb.ValidatorPerformanceResponse, *RpcError) {
if s.SyncChecker.Syncing() {
return nil, &RpcError{Reason: Unavailable, Err: errors.New("Syncing to latest head, not ready to respond")}
}
headState, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get head state"), Reason: Internal}
}
currSlot := s.GenesisTimeFetcher.CurrentSlot()
if currSlot > headState.Slot() {
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get head root"), Reason: Internal}
}
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, currSlot)
if err != nil {
return nil, &RpcError{Err: errors.Wrapf(err, "could not process slots up to %d", currSlot), Reason: Internal}
}
}
var validatorSummary []*precompute.Validator
if headState.Version() == version.Phase0 {
vp, bp, err := precompute.New(ctx, headState)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
validatorSummary = vp
} else if headState.Version() >= version.Altair {
vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
vp, bp, err = altair.ProcessEpochParticipation(ctx, headState, bp, vp)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
headState, vp, err = altair.ProcessInactivityScores(ctx, headState, vp)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
headState, err = altair.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
validatorSummary = vp
} else {
return nil, &RpcError{Err: errors.Wrapf(err, "head state version %d not supported", headState.Version()), Reason: Internal}
}
responseCap := len(req.Indices) + len(req.PublicKeys)
validatorIndices := make([]primitives.ValidatorIndex, 0, responseCap)
missingValidators := make([][]byte, 0, responseCap)
filtered := map[primitives.ValidatorIndex]bool{} // Track filtered validators to prevent duplication in the response.
// Convert the list of validator public keys to validator indices and add to the indices set.
for _, pubKey := range req.PublicKeys {
// Skip empty public key.
if len(pubKey) == 0 {
continue
}
pubkeyBytes := bytesutil.ToBytes48(pubKey)
idx, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
if !ok {
// Validator index not found, track as missing.
missingValidators = append(missingValidators, pubKey)
continue
}
if !filtered[idx] {
validatorIndices = append(validatorIndices, idx)
filtered[idx] = true
}
}
// Add provided indices to the indices set.
for _, idx := range req.Indices {
if !filtered[idx] {
validatorIndices = append(validatorIndices, idx)
filtered[idx] = true
}
}
// Depending on the indices and public keys given, results might not be sorted.
sort.Slice(validatorIndices, func(i, j int) bool {
return validatorIndices[i] < validatorIndices[j]
})
currentEpoch := coreTime.CurrentEpoch(headState)
responseCap = len(validatorIndices)
pubKeys := make([][]byte, 0, responseCap)
beforeTransitionBalances := make([]uint64, 0, responseCap)
afterTransitionBalances := make([]uint64, 0, responseCap)
effectiveBalances := make([]uint64, 0, responseCap)
correctlyVotedSource := make([]bool, 0, responseCap)
correctlyVotedTarget := make([]bool, 0, responseCap)
correctlyVotedHead := make([]bool, 0, responseCap)
inactivityScores := make([]uint64, 0, responseCap)
// Append performance summaries.
// Also track missing validators using public keys.
for _, idx := range validatorIndices {
val, err := headState.ValidatorAtIndexReadOnly(idx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get validator"), Reason: Internal}
}
pubKey := val.PublicKey()
if uint64(idx) >= uint64(len(validatorSummary)) {
// Not listed in validator summary yet; treat it as missing.
missingValidators = append(missingValidators, pubKey[:])
continue
}
if !helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
// Inactive validator; treat it as missing.
missingValidators = append(missingValidators, pubKey[:])
continue
}
summary := validatorSummary[idx]
pubKeys = append(pubKeys, pubKey[:])
effectiveBalances = append(effectiveBalances, summary.CurrentEpochEffectiveBalance)
beforeTransitionBalances = append(beforeTransitionBalances, summary.BeforeEpochTransitionBalance)
afterTransitionBalances = append(afterTransitionBalances, summary.AfterEpochTransitionBalance)
correctlyVotedTarget = append(correctlyVotedTarget, summary.IsPrevEpochTargetAttester)
correctlyVotedHead = append(correctlyVotedHead, summary.IsPrevEpochHeadAttester)
if headState.Version() == version.Phase0 {
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
} else {
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
inactivityScores = append(inactivityScores, summary.InactivityScore)
}
}
return &ethpb.ValidatorPerformanceResponse{
PublicKeys: pubKeys,
CorrectlyVotedSource: correctlyVotedSource,
CorrectlyVotedTarget: correctlyVotedTarget, // In Altair, when this is true, the attestation was definitely included.
CorrectlyVotedHead: correctlyVotedHead,
CurrentEffectiveBalances: effectiveBalances,
BalancesBeforeEpochTransition: beforeTransitionBalances,
BalancesAfterEpochTransition: afterTransitionBalances,
MissingValidators: missingValidators,
InactivityScores: inactivityScores, // Only populated in Altair
}, nil
}
// SubmitSignedContributionAndProof is called by a sync committee aggregator
// to submit signed contribution and proof object.
func (s *Service) SubmitSignedContributionAndProof(
ctx context.Context,
req *ethpb.SignedContributionAndProof,
) *RpcError {
errs, ctx := errgroup.WithContext(ctx)
// Broadcast and save the contribution into the pool in parallel; a failure in one should not affect the other.
errs.Go(func() error {
return s.Broadcaster.Broadcast(ctx, req)
})
if err := s.SyncCommitteePool.SaveSyncCommitteeContribution(req.Message.Contribution); err != nil {
return &RpcError{Err: err, Reason: Internal}
}
// Wait for p2p broadcast to complete and return the first error (if any)
err := errs.Wait()
if err != nil {
return &RpcError{Err: err, Reason: Internal}
}
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.SyncCommitteeContributionReceived,
Data: &opfeed.SyncCommitteeContributionReceivedData{
Contribution: req,
},
})
return nil
}
// SubmitSignedAggregateSelectionProof verifies given aggregate and proofs and publishes them on appropriate gossipsub topic.
func (s *Service) SubmitSignedAggregateSelectionProof(
ctx context.Context,
req *ethpb.SignedAggregateSubmitRequest,
) *RpcError {
if req.SignedAggregateAndProof == nil || req.SignedAggregateAndProof.Message == nil ||
req.SignedAggregateAndProof.Message.Aggregate == nil || req.SignedAggregateAndProof.Message.Aggregate.Data == nil {
return &RpcError{Err: errors.New("signed aggregate request can't be nil"), Reason: BadRequest}
}
emptySig := make([]byte, fieldparams.BLSSignatureLength)
if bytes.Equal(req.SignedAggregateAndProof.Signature, emptySig) ||
bytes.Equal(req.SignedAggregateAndProof.Message.SelectionProof, emptySig) {
return &RpcError{Err: errors.New("signed signatures can't be zero hashes"), Reason: BadRequest}
}
// As a preventive measure, a beacon node shouldn't broadcast an attestation whose slot is out of range.
if err := helpers.ValidateAttestationTime(req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
s.GenesisTimeFetcher.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return &RpcError{Err: errors.New("attestation slot is no longer valid from current time"), Reason: BadRequest}
}
if err := s.Broadcaster.Broadcast(ctx, req.SignedAggregateAndProof); err != nil {
return &RpcError{Err: &AggregateBroadcastFailedError{err: err}, Reason: Internal}
}
log.WithFields(logrus.Fields{
"slot": req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
"committeeIndex": req.SignedAggregateAndProof.Message.Aggregate.Data.CommitteeIndex,
"validatorIndex": req.SignedAggregateAndProof.Message.AggregatorIndex,
"aggregatedCount": req.SignedAggregateAndProof.Message.Aggregate.AggregationBits.Count(),
}).Debug("Broadcasting aggregated attestation and proof")
return nil
}
// AggregatedSigAndAggregationBits returns the aggregated signature and aggregation bits
// associated with a particular set of sync committee messages.
func (s *Service) AggregatedSigAndAggregationBits(
ctx context.Context,
req *ethpb.AggregatedSigAndAggregationBitsRequest) ([]byte, []byte, error) {
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
sigs := make([][]byte, 0, subCommitteeSize)
bits := ethpb.NewSyncCommitteeAggregationBits()
for _, msg := range req.Msgs {
if bytes.Equal(req.BlockRoot, msg.BlockRoot) {
headSyncCommitteeIndices, err := s.HeadFetcher.HeadSyncCommitteeIndices(ctx, msg.ValidatorIndex, req.Slot)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get sync subcommittee index")
}
for _, index := range headSyncCommitteeIndices {
i := uint64(index)
subnetIndex := i / subCommitteeSize
indexMod := i % subCommitteeSize
if subnetIndex == req.SubnetId && !bits.BitAt(indexMod) {
bits.SetBitAt(indexMod, true)
sigs = append(sigs, msg.Signature)
}
}
}
}
aggregatedSig := make([]byte, 96)
aggregatedSig[0] = 0xC0
if len(sigs) != 0 {
uncompressedSigs, err := bls.MultipleSignaturesFromBytes(sigs)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not decompress signatures")
}
aggregatedSig = bls.AggregateSignatures(uncompressedSigs).Marshal()
}
return aggregatedSig, bits, nil
}
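
A quick worked example of the subnet arithmetic, assuming mainnet presets (SYNC_COMMITTEE_SIZE = 512, SYNC_COMMITTEE_SUBNET_COUNT = 4, so subCommitteeSize = 128). Note also that the default aggregatedSig beginning with 0xC0 is the compressed encoding of the BLS point at infinity, i.e. an empty aggregate:

package main

import "fmt"

func main() {
	subCommitteeSize := uint64(512 / 4) // 128 members per subnet
	i := uint64(300)                    // example head sync-committee index
	// Index 300 lands in subnet 2 at bit 44 of that subnet's bitfield.
	fmt.Println(i/subCommitteeSize, i%subCommitteeSize) // 2 44
}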
// AssignValidatorToSubnet checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
func AssignValidatorToSubnet(pubkey []byte, status validator.ValidatorStatus) {
if status != validator.Active {
return
}
assignValidatorToSubnet(pubkey)
}
// AssignValidatorToSubnetProto checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
//
// It has a Proto suffix because the status is a protobuf type.
func AssignValidatorToSubnetProto(pubkey []byte, status ethpb.ValidatorStatus) {
if status != ethpb.ValidatorStatus_ACTIVE && status != ethpb.ValidatorStatus_EXITING {
return
}
assignValidatorToSubnet(pubkey)
}
func assignValidatorToSubnet(pubkey []byte) {
_, ok, expTime := cache.SubnetIDs.GetPersistentSubnets(pubkey)
if ok && expTime.After(prysmTime.Now()) {
return
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
var assignedIdxs []uint64
randGen := rand.NewGenerator()
for i := uint64(0); i < params.BeaconConfig().RandomSubnetsPerValidator; i++ {
assignedIdx := randGen.Intn(int(params.BeaconNetworkConfig().AttestationSubnetCount))
assignedIdxs = append(assignedIdxs, uint64(assignedIdx))
}
assignedDuration := uint64(randGen.Intn(int(params.BeaconConfig().EpochsPerRandomSubnetSubscription)))
assignedDuration += params.BeaconConfig().EpochsPerRandomSubnetSubscription
totalDuration := epochDuration * time.Duration(assignedDuration)
cache.SubnetIDs.AddPersistentCommittee(pubkey, assignedIdxs, totalDuration*time.Second)
}
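
For scale, the subscription lifetime computed above works out as follows under mainnet presets (SLOTS_PER_EPOCH = 32, SECONDS_PER_SLOT = 12, EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 256; presets assumed here for illustration):

package main

import (
	"fmt"
	"time"
)

func main() {
	epoch := time.Duration(32*12) * time.Second // 6m24s per epoch
	// assignedDuration is drawn uniformly from [256, 511] epochs.
	fmt.Println(epoch*256, epoch*511) // 27h18m24s 54h30m24s
}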
// GetAttestationData requests that the beacon node produces attestation data for
// the requested committee index and slot based on the nodes current head.
func (s *Service) GetAttestationData(
ctx context.Context, req *ethpb.AttestationDataRequest,
) (*ethpb.AttestationData, *RpcError) {
if err := helpers.ValidateAttestationTime(
req.Slot,
s.GenesisTimeFetcher.GenesisTime(),
params.BeaconNetworkConfig().MaximumGossipClockDisparity,
); err != nil {
return nil, &RpcError{Reason: BadRequest, Err: errors.Errorf("invalid request: %v", err)}
}
res, err := s.AttestationCache.Get(ctx, req)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve data from attestation cache: %v", err)}
}
if res != nil {
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}
if err := s.AttestationCache.MarkInProgress(req); err != nil {
if errors.Is(err, cache.ErrAlreadyInProgress) {
res, err := s.AttestationCache.Get(ctx, req)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve data from attestation cache: %v", err)}
}
if res == nil {
return nil, &RpcError{Reason: Internal, Err: errors.New("a request was in progress and resolved to nil")}
}
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not mark attestation as in-progress: %v", err)}
}
defer func() {
if err := s.AttestationCache.MarkNotInProgress(req); err != nil {
log.WithError(err).Error("could not mark attestation as not-in-progress")
}
}()
headState, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve head state: %v", err)}
}
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve head root: %v", err)}
}
// In the case that we receive an attestation request after a newer state/block has been processed.
if headState.Slot() > req.Slot {
headRoot, err = helpers.BlockRootAtSlot(headState, req.Slot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get historical head root: %v", err)}
}
headState, err = s.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(headRoot))
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get historical head state: %v", err)}
}
}
if headState == nil || headState.IsNil() {
return nil, &RpcError{Reason: Internal, Err: errors.New("could not lookup parent state from head")}
}
if coreTime.CurrentEpoch(headState) < slots.ToEpoch(req.Slot) {
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, req.Slot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not process slots up to %d: %v", req.Slot, err)}
}
}
targetEpoch := coreTime.CurrentEpoch(headState)
epochStartSlot, err := slots.EpochStart(targetEpoch)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not calculate epoch start: %v", err)}
}
var targetRoot []byte
if epochStartSlot == headState.Slot() {
targetRoot = headRoot
} else {
targetRoot, err = helpers.BlockRootAtSlot(headState, epochStartSlot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get target block for slot %d: %v", epochStartSlot, err)}
}
if bytesutil.ToBytes32(targetRoot) == params.BeaconConfig().ZeroHash {
targetRoot = headRoot
}
}
res = &ethpb.AttestationData{
Slot: req.Slot,
CommitteeIndex: req.CommitteeIndex,
BeaconBlockRoot: headRoot,
Source: headState.CurrentJustifiedCheckpoint(),
Target: &ethpb.Checkpoint{
Epoch: targetEpoch,
Root: targetRoot,
},
}
if err := s.AttestationCache.Put(ctx, req, res); err != nil {
log.WithError(err).Error("could not store attestation data in cache")
}
return res, nil
}
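
The cache dance above (Get, MarkInProgress, deferred MarkNotInProgress) is a hand-rolled single-flight: concurrent requests for the same slot compute the attestation data once and share it, with only CommitteeIndex personalized per caller. The same shape, sketched with golang.org/x/sync/singleflight purely as an analogy (the code here uses its own AttestationCache, not this package):

package main

import (
	"fmt"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

func main() {
	// Concurrent callers with the same key share a single computation.
	v, _, shared := group.Do("attdata:slot=42", func() (interface{}, error) {
		return "attestation data for slot 42", nil
	})
	fmt.Println(v, shared)
}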

View File

@@ -7,7 +7,6 @@ go_library(
"blocks.go",
"config.go",
"handlers.go",
"handlers_pool.go",
"log.go",
"pool.go",
"server.go",
@@ -19,7 +18,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//api:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -38,7 +36,6 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -46,18 +43,16 @@ go_library(
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//network/http:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
@@ -67,13 +62,11 @@ go_library(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_go_playground_validator_v10//:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
@@ -88,7 +81,6 @@ go_test(
"blinded_blocks_test.go",
"blocks_test.go",
"config_test.go",
"handlers_pool_test.go",
"handlers_test.go",
"init_test.go",
"pool_test.go",
@@ -99,7 +91,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/signing:go_default_library",
@@ -107,7 +98,6 @@ go_test(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/blstoexec/mock:go_default_library",
@@ -115,7 +105,6 @@ go_test(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits/mock:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
@@ -127,14 +116,12 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//network/forks:go_default_library",
"//network/http:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
@@ -146,11 +133,8 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//mock:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",

View File

@@ -5,7 +5,6 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api"
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -18,9 +17,7 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"go.opencensus.io/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
@@ -41,9 +38,7 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
if err := grpc.SetHeader(ctx, metadata.Pairs(api.VersionHeader, version.String(blk.Version()))); err != nil {
return nil, status.Errorf(codes.Internal, "Could not set "+api.VersionHeader+" header: %v", err)
}
result, err := getBlindedBlockPhase0(blk)
if result != nil {
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
@@ -201,11 +196,11 @@ func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZCon
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+api.VersionHeader+" header")
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+versionHeader+" header")
}
ver := md.Get(api.VersionHeader)
ver := md.Get(versionHeader)
if len(ver) == 0 {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+api.VersionHeader+" header")
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+versionHeader+" header")
}
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
forkVer, err := schedule.VersionForName(ver[0])

View File

@@ -5,8 +5,6 @@ import (
"testing"
"github.com/golang/mock/gomock"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/v4/api"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
@@ -19,13 +17,11 @@ import (
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
func TestServer_GetBlindedBlock(t *testing.T) {
stream := &runtime.ServerTransportStream{}
ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
ctx := context.Background()
t.Run("Phase 0", func(t *testing.T) {
b := util.NewBeaconBlock()
@@ -325,7 +321,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "phase0")
md.Set(versionHeader, "phase0")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -346,7 +342,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "altair")
md.Set(versionHeader, "altair")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -367,7 +363,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "bellatrix")
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -385,7 +381,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "bellatrix")
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)
@@ -406,7 +402,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "capella")
md.Set(versionHeader, "capella")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -424,7 +420,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "capella")
md.Set(versionHeader, "capella")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)

View File

@@ -3,11 +3,11 @@ package beacon
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
@@ -17,6 +17,7 @@ import (
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/network/forks"
@@ -24,16 +25,16 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
const versionHeader = "eth-consensus-version"
var (
errNilBlock = errors.New("nil block")
)
@@ -252,11 +253,11 @@ func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer)
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
ver := md.Get(api.VersionHeader)
ver := md.Get(versionHeader)
if len(ver) == 0 {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
forkVer, err := schedule.VersionForName(ver[0])
@@ -423,9 +424,6 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
if err := grpc.SetHeader(ctx, metadata.Pairs(api.VersionHeader, version.String(blk.Version()))); err != nil {
return nil, status.Errorf(codes.Internal, "Could not set "+api.VersionHeader+" header: %v", err)
}
result, err = getBlockAltair(blk)
if result != nil {
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
@@ -511,6 +509,93 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
// GetBlockRoot retrieves the hashTreeRoot of the ReadOnlyBeaconBlock/BeaconBlockHeader.
func (bs *Server) GetBlockRoot(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockRootResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockRoot")
defer span.End()
var root []byte
var err error
switch string(req.BlockId) {
case "head":
root, err = bs.ChainInfoFetcher.HeadRoot(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head block: %v", err)
}
if root == nil {
return nil, status.Errorf(codes.NotFound, "No head root was found")
}
case "finalized":
finalized := bs.ChainInfoFetcher.FinalizedCheckpt()
root = finalized.Root
case "genesis":
blk, err := bs.BeaconDB.GenesisBlock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for genesis slot: %v", err)
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
return nil, status.Errorf(codes.NotFound, "Could not find genesis block: %v", err)
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Error(codes.Internal, "Could not hash genesis block")
}
root = blkRoot[:]
default:
if len(req.BlockId) == 32 {
blk, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(req.BlockId))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve block for block root %#x: %v", req.BlockId, err)
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
return nil, status.Errorf(codes.NotFound, "Could not find block: %v", err)
}
root = req.BlockId
} else {
slot, err := strconv.ParseUint(string(req.BlockId), 10, 64)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not parse block ID: %v", err)
}
hasRoots, roots, err := bs.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", slot, err)
}
if !hasRoots {
return nil, status.Error(codes.NotFound, "Could not find any blocks with given slot")
}
root = roots[0][:]
if len(roots) == 1 {
break
}
for _, blockRoot := range roots {
canonical, err := bs.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
if canonical {
root = blockRoot[:]
break
}
}
}
}
b32Root := bytesutil.ToBytes32(root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, b32Root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
return &ethpbv1.BlockRootResponse{
Data: &ethpbv1.BlockRootContainer{
Root: root,
},
ExecutionOptimistic: isOptimistic,
Finalized: bs.FinalizationFetcher.IsFinalized(ctx, b32Root),
}, nil
}
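The same lookup is exposed over the standard Beacon API as GET /eth/v1/beacon/blocks/{block_id}/root. A minimal client-side sketch; the endpoint path follows the Beacon API spec, while the localhost:3500 gateway address is an assumption about the node's configuration:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

type blockRootResponse struct {
    Data struct {
        Root string `json:"root"`
    } `json:"data"`
    ExecutionOptimistic bool `json:"execution_optimistic"`
    Finalized           bool `json:"finalized"`
}

func main() {
    // "head" can be swapped for "finalized", "genesis", a slot, or a 0x-prefixed root.
    resp, err := http.Get("http://localhost:3500/eth/v1/beacon/blocks/head/root")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    var out blockRootResponse
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        panic(err)
    }
    fmt.Println(out.Data.Root, out.ExecutionOptimistic, out.Finalized)
}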
// ListBlockAttestations retrieves the attestations included in the requested block.
func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockAttestationsResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.ListBlockAttestations")

View File

@@ -5,9 +5,7 @@ import (
"testing"
"github.com/golang/mock/gomock"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/api"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
@@ -26,7 +24,6 @@ import (
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
@@ -447,7 +444,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "phase0")
md.Set(versionHeader, "phase0")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -468,7 +465,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "altair")
md.Set(versionHeader, "altair")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -489,7 +486,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "bellatrix")
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -507,7 +504,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "bellatrix")
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)
@@ -528,7 +525,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "capella")
md.Set(versionHeader, "capella")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -546,7 +543,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(api.VersionHeader, "capella")
md.Set(versionHeader, "capella")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)
@@ -582,8 +579,8 @@ func TestServer_GetBlock(t *testing.T) {
}
func TestServer_GetBlockV2(t *testing.T) {
stream := &runtime.ServerTransportStream{}
ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
ctx := context.Background()
t.Run("Phase 0", func(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 123
@@ -905,6 +902,173 @@ func TestServer_GetBlockSSZV2(t *testing.T) {
})
}
func TestServer_GetBlockRoot(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
t.Run("get root", func(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainFetcher := &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
FinalizedRoots: map[[32]byte]bool{},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID []byte
want []byte
wantErr bool
}{
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical slot",
blockID: []byte("30"),
want: blkContainers[30].BlockRoot,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.BlockRoot,
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].BlockRoot,
},
{
name: "genesis",
blockID: []byte("genesis"),
want: root[:],
},
{
name: "genesis root",
blockID: root[:],
want: root[:],
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].BlockRoot,
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "slot",
blockID: []byte("40"),
want: blkContainers[40].BlockRoot,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)
assert.DeepEqual(t, tt.want, blockRootResp.Data.Root)
})
}
})
t.Run("execution optimistic", func(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainFetcher := &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
}
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),
})
require.NoError(t, err)
assert.Equal(t, true, blockRootResp.ExecutionOptimistic)
})
t.Run("finalized", func(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainFetcher := &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{
bytesutil.ToBytes32(blkContainers[32].BlockRoot): true,
bytesutil.ToBytes32(blkContainers[64].BlockRoot): false,
},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
}
t.Run("true", func(t *testing.T) {
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("32"),
})
require.NoError(t, err)
assert.Equal(t, true, blockRootResp.Finalized)
})
t.Run("false", func(t *testing.T) {
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("64"),
})
require.NoError(t, err)
assert.Equal(t, false, blockRootResp.Finalized)
})
})
}
func TestServer_ListBlockAttestations(t *testing.T) {
ctx := context.Background()

View File

@@ -51,8 +51,6 @@ func TestGetSpec(t *testing.T) {
config.BellatrixForkEpoch = 101
config.CapellaForkVersion = []byte("CapellaForkVersion")
config.CapellaForkEpoch = 103
config.DenebForkVersion = []byte("DenebForkVersion")
config.DenebForkEpoch = 105
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.GenesisDelay = 24
@@ -131,9 +129,6 @@ func TestGetSpec(t *testing.T) {
var dam [4]byte
copy(dam[:], []byte{'1', '0', '0', '0'})
config.DomainApplicationMask = dam
var dbs [4]byte
copy(dbs[:], []byte{'2', '0', '0', '0'})
config.DomainBlobSidecar = dbs
params.OverrideBeaconConfig(config)
@@ -141,7 +136,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 111, len(resp.Data))
assert.Equal(t, 108, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":
@@ -210,10 +205,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
case "CAPELLA_FORK_EPOCH":
assert.Equal(t, "103", v)
case "DENEB_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
case "DENEB_FORK_EPOCH":
assert.Equal(t, "105", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":
@@ -278,8 +269,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "51", v)
case "MAX_VOLUNTARY_EXITS":
assert.Equal(t, "52", v)
case "MAX_BLOBS_PER_BLOCK":
assert.Equal(t, "4", v)
case "TIMELY_HEAD_FLAG_INDEX":
assert.Equal(t, "0x35", v)
case "TIMELY_SOURCE_FLAG_INDEX":
@@ -346,8 +335,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x0a000000", v)
case "DOMAIN_APPLICATION_BUILDER":
assert.Equal(t, "0x00000001", v)
case "DOMAIN_BLOB_SIDECAR":
assert.Equal(t, "0x00000000", v)
case "TRANSITION_TOTAL_DIFFICULTY":
assert.Equal(t, "0", v)
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":

View File

@@ -7,26 +7,15 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/go-playground/validator/v10"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
"github.com/prysmaticlabs/prysm/v4/network"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
const (
@@ -46,153 +35,18 @@ const (
// a `SignedBeaconBlock`. The broadcast behaviour may be adjusted via the `broadcast_validation`
// query parameter.
func (bs *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request) {
if shared.IsSyncing(r.Context(), w, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher) {
return
}
isSSZ, err := http2.SszRequested(r)
if isSSZ && err == nil {
publishBlindedBlockV2SSZ(bs, w, r)
} else {
publishBlindedBlockV2(bs, w, r)
}
}
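The SSZ/JSON fork above hinges on http2.SszRequested. A rough sketch of what such a check can look like, assuming it keys off the request's Content-Type header (the real helper may differ, e.g. by also consulting Accept; treat this as illustrative only):

package sketch

import (
    "mime"
    "net/http"
)

// sszRequested is a hypothetical stand-in for http2.SszRequested: it reports
// whether the client sent an SSZ-encoded body rather than JSON.
func sszRequested(r *http.Request) (bool, error) {
    ct := r.Header.Get("Content-Type")
    if ct == "" {
        return false, nil
    }
    mediaType, _, err := mime.ParseMediaType(ct)
    if err != nil {
        return false, err
    }
    return mediaType == "application/octet-stream", nil
}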
func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not read request body: " + err.Error(),
Code: http.StatusInternalServerError,
}
http2.WriteError(w, errJson)
return
}
capellaBlock := &ethpbv2.SignedBlindedBeaconBlockCapella{}
if err := capellaBlock.UnmarshalSSZ(body); err == nil {
v1block, err := migration.BlindedCapellaToV1Alpha1SignedBlock(capellaBlock)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_BlindedCapella{
BlindedCapella: v1block,
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
bellatrixBlock := &ethpbv2.SignedBlindedBeaconBlockBellatrix{}
if err := bellatrixBlock.UnmarshalSSZ(body); err == nil {
v1block, err := migration.BlindedBellatrixToV1Alpha1SignedBlock(bellatrixBlock)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{
BlindedBellatrix: v1block,
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
if ok := bs.checkSync(r.Context(), w); !ok {
return
}
// Blinded blocks are not supported before the Bellatrix hard fork.
altairBlock := &ethpbv2.SignedBeaconBlockAltair{}
if err := altairBlock.UnmarshalSSZ(body); err == nil {
v1block, err := migration.AltairToV1Alpha1SignedBlock(altairBlock)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Altair{
Altair: v1block,
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
phase0Block := &ethpbv1.SignedBeaconBlock{}
if err := phase0Block.UnmarshalSSZ(body); err == nil {
v1block, err := migration.V1ToV1Alpha1SignedBlock(phase0Block)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Phase0{
Phase0: v1block,
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
errJson := &http2.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
}
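Each SSZ publisher above is the same cascade written out long-hand: attempt each fork's container type from newest to oldest and propose the first body that both unmarshals and converts. A generic restatement of that shape (a sketch, not code from the tree):

package sketch

import (
    "errors"

    eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// tryDecode attempts one fork's unmarshal-and-convert step.
type tryDecode func(body []byte) (*eth.GenericSignedBeaconBlock, error)

// decodeAny sketches the cascade: newest fork first, first success wins.
func decodeAny(body []byte, attempts []tryDecode) (*eth.GenericSignedBeaconBlock, error) {
    for _, try := range attempts {
        if blk, err := try(body); err == nil {
            return blk, nil
        }
    }
    return nil, errors.New("body does not represent a valid block type")
}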
func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
@@ -201,19 +55,19 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(capellaBlock); err == nil {
consensusBlock, err := capellaBlock.ToGeneric()
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -226,19 +80,19 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(bellatrixBlock); err == nil {
consensusBlock, err := bellatrixBlock.ToGeneric()
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -250,19 +104,19 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(altairBlock); err == nil {
consensusBlock, err := altairBlock.ToGeneric()
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -274,19 +128,19 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(phase0Block); err == nil {
consensusBlock, err := phase0Block.ToGeneric()
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -294,11 +148,11 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
}
}
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
}
// PublishBlockV2 instructs the beacon node to broadcast a newly signed beacon block to the beacon network,
@@ -310,180 +164,39 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
// successfully broadcast but failed integration. The broadcast behaviour may be adjusted via the
// `broadcast_validation` query parameter.
func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
if shared.IsSyncing(r.Context(), w, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher) {
if ok := bs.checkSync(r.Context(), w); !ok {
return
}
isSSZ, err := http2.SszRequested(r)
if isSSZ && err == nil {
publishBlockV2SSZ(bs, w, r)
} else {
publishBlockV2(bs, w, r)
}
}
func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
capellaBlock := &ethpbv2.SignedBeaconBlockCapella{}
if err := capellaBlock.UnmarshalSSZ(body); err == nil {
if err = validate.Struct(capellaBlock); err == nil {
v1block, err := migration.CapellaToV1Alpha1SignedBlock(capellaBlock)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Capella{
Capella: v1block,
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
}
bellatrixBlock := &ethpbv2.SignedBeaconBlockBellatrix{}
if err := bellatrixBlock.UnmarshalSSZ(body); err == nil {
if err = validate.Struct(bellatrixBlock); err == nil {
v1block, err := migration.BellatrixToV1Alpha1SignedBlock(bellatrixBlock)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Bellatrix{
Bellatrix: v1block,
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
}
altairBlock := &ethpbv2.SignedBeaconBlockAltair{}
if err := altairBlock.UnmarshalSSZ(body); err == nil {
if err = validate.Struct(altairBlock); err == nil {
v1block, err := migration.AltairToV1Alpha1SignedBlock(altairBlock)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Altair{
Altair: v1block,
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
}
phase0Block := &ethpbv1.SignedBeaconBlock{}
if err := phase0Block.UnmarshalSSZ(body); err == nil {
if err = validate.Struct(phase0Block); err == nil {
v1block, err := migration.V1ToV1Alpha1SignedBlock(phase0Block)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Phase0{
Phase0: v1block,
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
}
errJson := &http2.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
}
func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
http2.WriteError(w, errJson)
return
}
var capellaBlock *SignedBeaconBlockCapella
if err = unmarshalStrict(body, &capellaBlock); err == nil {
if err = validate.Struct(capellaBlock); err == nil {
consensusBlock, err := capellaBlock.ToGeneric()
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -495,19 +208,19 @@ func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(bellatrixBlock); err == nil {
consensusBlock, err := bellatrixBlock.ToGeneric()
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -519,19 +232,19 @@ func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(altairBlock); err == nil {
consensusBlock, err := altairBlock.ToGeneric()
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -543,19 +256,19 @@ func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(phase0Block); err == nil {
consensusBlock, err := phase0Block.ToGeneric()
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -563,21 +276,21 @@ func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
}
}
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
}
func (bs *Server) proposeBlock(ctx context.Context, w http.ResponseWriter, blk *eth.GenericSignedBeaconBlock) {
_, err := bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, blk)
if err != nil {
errJson := &http2.DefaultErrorJson{
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusInternalServerError,
}
http2.WriteError(w, errJson)
network.WriteError(w, errJson)
return
}
}
@@ -616,13 +329,8 @@ func (bs *Server) validateBroadcast(r *http.Request, blk *eth.GenericSignedBeaco
}
func (bs *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) error {
parentBlockRoot := blk.Block().ParentRoot()
parentBlock, err := bs.Blocker.Block(ctx, parentBlockRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent block")
}
parentStateRoot := parentBlock.Block().StateRoot()
parentState, err := bs.Stater.State(ctx, parentStateRoot[:])
parentRoot := blk.Block().ParentRoot()
parentState, err := bs.Stater.State(ctx, parentRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent state")
}
@@ -640,119 +348,28 @@ func (bs *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
return nil
}
// GetBlockRoot retrieves the root of a block.
func (bs *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
defer span.End()
var err error
var root []byte
blockID := mux.Vars(r)["block_id"]
if blockID == "" {
http2.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
return
}
switch blockID {
case "head":
root, err = bs.ChainInfoFetcher.HeadRoot(ctx)
if err != nil {
http2.HandleError(w, "Could not retrieve head root: "+err.Error(), http.StatusInternalServerError)
return
}
if root == nil {
http2.HandleError(w, "No head root was found", http.StatusNotFound)
return
}
case "finalized":
finalized := bs.ChainInfoFetcher.FinalizedCheckpt()
root = finalized.Root
case "genesis":
blk, err := bs.BeaconDB.GenesisBlock(ctx)
if err != nil {
http2.HandleError(w, "Could not retrieve genesis block: "+err.Error(), http.StatusInternalServerError)
return
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
http2.HandleError(w, "Could not find genesis block: "+err.Error(), http.StatusNotFound)
return
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
http2.HandleError(w, "Could not hash genesis block: "+err.Error(), http.StatusInternalServerError)
return
}
root = blkRoot[:]
default:
isHex := strings.HasPrefix(blockID, "0x")
if isHex {
blockIDBytes, err := hexutil.Decode(blockID)
if err != nil {
http2.HandleError(w, "Could not decode block ID into bytes: "+err.Error(), http.StatusBadRequest)
return
}
if len(blockIDBytes) != fieldparams.RootLength {
http2.HandleError(w, fmt.Sprintf("Block ID has length %d instead of %d", len(blockIDBytes), fieldparams.RootLength), http.StatusBadRequest)
return
}
blockID32 := bytesutil.ToBytes32(blockIDBytes)
blk, err := bs.BeaconDB.Block(ctx, blockID32)
if err != nil {
http2.HandleError(w, fmt.Sprintf("Could not retrieve block for block root %#x: %v", blockID, err), http.StatusInternalServerError)
return
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
http2.HandleError(w, "Could not find block: "+err.Error(), http.StatusNotFound)
return
}
root = blockIDBytes
} else {
slot, err := strconv.ParseUint(blockID, 10, 64)
if err != nil {
http2.HandleError(w, "Could not parse block ID: "+err.Error(), http.StatusBadRequest)
return
}
hasRoots, roots, err := bs.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
http2.HandleError(w, fmt.Sprintf("Could not retrieve blocks for slot %d: %v", slot, err), http.StatusInternalServerError)
return
}
if !hasRoots {
http2.HandleError(w, "Could not find any blocks with given slot", http.StatusNotFound)
return
}
root = roots[0][:]
if len(roots) == 1 {
break
}
for _, blockRoot := range roots {
canonical, err := bs.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
if err != nil {
http2.HandleError(w, "Could not determine if block root is canonical: "+err.Error(), http.StatusInternalServerError)
return
}
if canonical {
root = blockRoot[:]
break
}
}
}
}
b32Root := bytesutil.ToBytes32(root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, b32Root)
func (bs *Server) checkSync(ctx context.Context, w http.ResponseWriter) bool {
isSyncing, syncDetails, err := helpers.ValidateSyncHTTP(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher)
if err != nil {
http2.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
return
errJson := &network.DefaultErrorJson{
Message: "Could not check if node is syncing: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return false
}
response := &BlockRootResponse{
Data: &struct {
Root string `json:"root"`
}{
Root: hexutil.Encode(root),
},
ExecutionOptimistic: isOptimistic,
Finalized: bs.FinalizationFetcher.IsFinalized(ctx, b32Root),
if isSyncing {
msg := "Beacon node is currently syncing and not serving request on that endpoint"
details, err := json.Marshal(syncDetails)
if err == nil {
msg += " Details: " + string(details)
}
errJson := &network.DefaultErrorJson{
Message: msg,
Code: http.StatusServiceUnavailable,
}
network.WriteError(w, errJson)
return false
}
http2.WriteJson(w, response)
return true
}
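checkSync is meant to be called before any real work: it returns false after writing the error response itself, so callers only need an early return. A minimal sketch of the pattern (someHandler is hypothetical):

// A hypothetical handler showing the intended guard pattern; every real
// handler in this file opens the same way.
func (bs *Server) someHandler(w http.ResponseWriter, r *http.Request) {
    if ok := bs.checkSync(r.Context(), w); !ok {
        return // checkSync already wrote the 503 (syncing) or 500 (check failed) response
    }
    // ... serve the request against a synced head ...
}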

View File

@@ -1,258 +0,0 @@
package beacon
import (
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"github.com/go-playground/validator/v10"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
corehelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
)
// ListAttestations retrieves attestations known by the node but
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "beacon.ListAttestations")
defer span.End()
ok, rawSlot, slot := shared.UintFromQuery(w, r, "slot")
if !ok {
return
}
ok, rawCommitteeIndex, committeeIndex := shared.UintFromQuery(w, r, "committee_index")
if !ok {
return
}
attestations := s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
http2.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
}
attestations = append(attestations, unaggAtts...)
isEmptyReq := rawSlot == "" && rawCommitteeIndex == ""
if isEmptyReq {
allAtts := make([]*shared.Attestation, len(attestations))
for i, att := range attestations {
allAtts[i] = shared.AttestationFromConsensus(att)
}
http2.WriteJson(w, &ListAttestationsResponse{Data: allAtts})
return
}
bothDefined := rawSlot != "" && rawCommitteeIndex != ""
filteredAtts := make([]*shared.Attestation, 0, len(attestations))
for _, att := range attestations {
committeeIndexMatch := rawCommitteeIndex != "" && att.Data.CommitteeIndex == primitives.CommitteeIndex(committeeIndex)
slotMatch := rawSlot != "" && att.Data.Slot == primitives.Slot(slot)
shouldAppend := (bothDefined && committeeIndexMatch && slotMatch) || (!bothDefined && (committeeIndexMatch || slotMatch))
if shouldAppend {
filteredAtts = append(filteredAtts, shared.AttestationFromConsensus(att))
}
}
http2.WriteJson(w, &ListAttestationsResponse{Data: filteredAtts})
}
// SubmitAttestations submits an Attestation object to the node. If the attestation passes all validation
// constraints, the node MUST publish the attestation on an appropriate subnet.
func (s *Server) SubmitAttestations(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestations")
defer span.End()
var req SubmitAttestationsRequest
err := json.NewDecoder(r.Body).Decode(&req.Data)
switch {
case err == io.EOF:
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
case err != nil:
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
if len(req.Data) == 0 {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
validate := validator.New()
if err := validate.Struct(req); err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
var validAttestations []*ethpbalpha.Attestation
var attFailures []*shared.IndexedVerificationFailure
for i, sourceAtt := range req.Data {
att, err := sourceAtt.ToConsensus()
if err != nil {
attFailures = append(attFailures, &shared.IndexedVerificationFailure{
Index: i,
Message: "Could not convert request attestation to consensus attestation: " + err.Error(),
})
continue
}
if _, err = bls.SignatureFromBytes(att.Signature); err != nil {
attFailures = append(attFailures, &shared.IndexedVerificationFailure{
Index: i,
Message: "Incorrect attestation signature: " + err.Error(),
})
continue
}
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
// Note: we can't send aggregated attestations because we don't have the selection proof.
if !corehelpers.IsAggregated(att) {
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
},
})
}
validAttestations = append(validAttestations, att)
}
failedBroadcasts := make([]string, 0)
for i, att := range validAttestations {
// Determine subnet to broadcast attestation to
wantedEpoch := slots.ToEpoch(att.Data.Slot)
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
http2.HandleError(w, "Could not get head validator indices: "+err.Error(), http.StatusInternalServerError)
return
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.Data.CommitteeIndex, att.Data.Slot)
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, att); err != nil {
failedBroadcasts = append(failedBroadcasts, strconv.Itoa(i))
log.WithError(err).Errorf("could not broadcast attestation at index %d", i)
}
if corehelpers.IsAggregated(att) {
if err = s.AttestationsPool.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save unaggregated attestation")
}
}
}
if len(failedBroadcasts) > 0 {
http2.HandleError(
w,
fmt.Sprintf("Attestations at index %s could not be broadcasted", strings.Join(failedBroadcasts, ", ")),
http.StatusInternalServerError,
)
return
}
if len(attFailures) > 0 {
failuresErr := &shared.IndexedVerificationFailureError{
Code: http.StatusBadRequest,
Message: "One or more attestations failed validation",
Failures: attFailures,
}
http2.WriteError(w, failuresErr)
}
}
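ComputeSubnetFromCommitteeAndSlot used above implements the consensus spec's compute_subnet_for_attestation. Restated as a sketch; ATTESTATION_SUBNET_COUNT is 64 in the spec, and committeesPerSlot is derived from the active validator count, which is why the handler fetches the head validator indices first:

package sketch

// ATTESTATION_SUBNET_COUNT in the consensus spec.
const attestationSubnetCount = 64

// computeSubnetForAttestation sketches the spec rule behind
// ComputeSubnetFromCommitteeAndSlot.
func computeSubnetForAttestation(committeesPerSlot, committeeIndex, slot, slotsPerEpoch uint64) uint64 {
    slotsSinceEpochStart := slot % slotsPerEpoch
    committeesSinceEpochStart := committeesPerSlot * slotsSinceEpochStart
    return (committeesSinceEpochStart + committeeIndex) % attestationSubnetCount
}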
// ListVoluntaryExits retrieves voluntary exits known by the node but
// not necessarily incorporated into any block.
func (s *Server) ListVoluntaryExits(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "beacon.ListVoluntaryExits")
defer span.End()
sourceExits, err := s.VoluntaryExitsPool.PendingExits()
if err != nil {
http2.HandleError(w, "Could not get exits from the pool: "+err.Error(), http.StatusInternalServerError)
return
}
exits := make([]*shared.SignedVoluntaryExit, len(sourceExits))
for i, e := range sourceExits {
exits[i] = shared.SignedVoluntaryExitFromConsensus(e)
}
http2.WriteJson(w, &ListVoluntaryExitsResponse{Data: exits})
}
// SubmitVoluntaryExit submits a SignedVoluntaryExit object to the node's pool
// and, if it passes validation, the node MUST broadcast it to the network.
func (s *Server) SubmitVoluntaryExit(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitVoluntaryExit")
defer span.End()
var req shared.SignedVoluntaryExit
err := json.NewDecoder(r.Body).Decode(&req)
switch {
case err == io.EOF:
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
case err != nil:
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
validate := validator.New()
if err := validate.Struct(req); err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
exit, err := req.ToConsensus()
if err != nil {
http2.HandleError(w, "Could not convert request exit to consensus exit: "+err.Error(), http.StatusBadRequest)
return
}
headState, err := s.ChainInfoFetcher.HeadState(ctx)
if err != nil {
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}
epochStart, err := slots.EpochStart(exit.Exit.Epoch)
if err != nil {
http2.HandleError(w, "Could not get epoch start: "+err.Error(), http.StatusInternalServerError)
return
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, epochStart)
if err != nil {
http2.HandleError(w, "Could not process slots: "+err.Error(), http.StatusInternalServerError)
return
}
val, err := headState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
if outOfRangeErr, ok := err.(*state_native.ValidatorIndexOutOfRangeError); ok {
http2.HandleError(w, "Could not get exiting validator: "+outOfRangeErr.Error(), http.StatusBadRequest)
return
}
http2.HandleError(w, "Could not get validator: "+err.Error(), http.StatusInternalServerError)
return
}
if err = blocks.VerifyExitAndSignature(val, headState.Slot(), headState.Fork(), exit, headState.GenesisValidatorsRoot()); err != nil {
http2.HandleError(w, "Invalid exit: "+err.Error(), http.StatusBadRequest)
return
}
s.VoluntaryExitsPool.InsertVoluntaryExit(exit)
if err = s.Broadcaster.Broadcast(ctx, exit); err != nil {
http2.HandleError(w, "Could not broadcast exit: "+err.Error(), http.StatusInternalServerError)
return
}
}
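The ProcessSlotsIfPossible call above matters because the exit's validity checks depend on the current epoch and fork of the state, so the head state is fast-forwarded to the exit epoch's start slot before VerifyExitAndSignature runs. EpochStart itself is just a multiplication with an overflow guard; a sketch of slots.EpochStart under that assumption:

package sketch

import (
    "errors"
    "math"
)

// epochStart sketches slots.EpochStart: the first slot of an epoch,
// with an overflow guard.
func epochStart(epoch, slotsPerEpoch uint64) (uint64, error) {
    if slotsPerEpoch != 0 && epoch > math.MaxUint64/slotsPerEpoch {
        return 0, errors.New("slot overflow")
    }
    return epoch * slotsPerEpoch, nil
}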

View File

@@ -1,604 +0,0 @@
package beacon
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/go-bitfield"
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
ethpbv1alpha1 "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestListAttestations(t *testing.T) {
att1 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
}
att2 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
att3 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 2,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot3"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot3"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot3"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature3"), 96),
}
att4 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature4"), 96),
}
s := &Server{
AttestationsPool: attestations.NewPool(),
}
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]*ethpbv1alpha1.Attestation{att1, att2}))
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]*ethpbv1alpha1.Attestation{att3, att4}))
t.Run("empty request", func(t *testing.T) {
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 4, len(resp.Data))
})
t.Run("slot request", func(t *testing.T) {
url := "http://example.com?slot=2"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 2, len(resp.Data))
for _, a := range resp.Data {
assert.Equal(t, "2", a.Data.Slot)
}
})
t.Run("index request", func(t *testing.T) {
url := "http://example.com?committee_index=4"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 2, len(resp.Data))
for _, a := range resp.Data {
assert.Equal(t, "4", a.Data.CommitteeIndex)
}
})
t.Run("both slot + index request", func(t *testing.T) {
url := "http://example.com?slot=2&committee_index=4"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 1, len(resp.Data))
for _, a := range resp.Data {
assert.Equal(t, "2", a.Data.Slot)
assert.Equal(t, "4", a.Data.CommitteeIndex)
}
})
}
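// Illustrative sketch, not part of this change: driving the same filters from
// a client. The base URL is a placeholder for a node serving this handler;
// the handler reads only the slot and committee_index query parameters, and
// when both are present an attestation must match both to be returned.
func fetchPoolAttestations(baseURL, slot, committeeIndex string) (*ListAttestationsResponse, error) {
	resp, err := http.Get(fmt.Sprintf("%s?slot=%s&committee_index=%s", baseURL, slot, committeeIndex))
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	res := &ListAttestationsResponse{}
	if err = json.NewDecoder(resp.Body).Decode(res); err != nil {
		return nil, err
	}
	return res, nil
}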
func TestServer_SubmitAttestations(t *testing.T) {
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validators := []*ethpbv1alpha1.Validator{
{
PublicKey: keys[0].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = validators
state.Slot = 1
state.PreviousJustifiedCheckpoint = &ethpbv1alpha1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
}
return nil
})
require.NoError(t, err)
b := bitfield.NewBitlist(1)
b.SetBitAt(0, true)
chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
}
t.Run("single", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
s.Broadcaster = broadcaster
s.AttestationsPool = attestations.NewPool()
var body bytes.Buffer
_, err := body.WriteString(singleAtt)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled)
assert.Equal(t, 1, len(broadcaster.BroadcastAttestations))
assert.Equal(t, "0x03", hexutil.Encode(broadcaster.BroadcastAttestations[0].AggregationBits))
assert.Equal(t, "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15", hexutil.Encode(broadcaster.BroadcastAttestations[0].Signature))
assert.Equal(t, primitives.Slot(0), broadcaster.BroadcastAttestations[0].Data.Slot)
assert.Equal(t, primitives.CommitteeIndex(0), broadcaster.BroadcastAttestations[0].Data.CommitteeIndex)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].Data.BeaconBlockRoot))
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].Data.Source.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].Data.Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].Data.Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].Data.Target.Epoch)
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
s.Broadcaster = broadcaster
s.AttestationsPool = attestations.NewPool()
var body bytes.Buffer
_, err := body.WriteString(multipleAtts)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled)
assert.Equal(t, 2, len(broadcaster.BroadcastAttestations))
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("no body", func(t *testing.T) {
request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("empty", func(t *testing.T) {
var body bytes.Buffer
_, err := body.WriteString("[]")
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("invalid", func(t *testing.T) {
var body bytes.Buffer
_, err := body.WriteString(invalidAtt)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &apimiddleware.IndexedVerificationFailureErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
require.Equal(t, 1, len(e.Failures))
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
})
}
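// Illustrative sketch, not part of this change: the handler above reports
// failures in two shapes, both exercised by the subtests. Malformed or empty
// bodies come back as a plain DefaultErrorJson, while per-attestation
// verification failures come back indexed, so a caller can tell which item
// of the submitted array was rejected.
func firstSubmitFailure(body []byte) (string, error) {
	indexed := &apimiddleware.IndexedVerificationFailureErrorJson{}
	if err := json.Unmarshal(body, indexed); err == nil && len(indexed.Failures) > 0 {
		return indexed.Failures[0].Message, nil
	}
	plain := &http2.DefaultErrorJson{}
	if err := json.Unmarshal(body, plain); err != nil {
		return "", err
	}
	return plain.Message, nil
}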
func TestListVoluntaryExits(t *testing.T) {
exit1 := &ethpbv1alpha1.SignedVoluntaryExit{
Exit: &ethpbv1alpha1.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
}
exit2 := &ethpbv1alpha1.SignedVoluntaryExit{
Exit: &ethpbv1alpha1.VoluntaryExit{
Epoch: 2,
ValidatorIndex: 2,
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
s := &Server{
VoluntaryExitsPool: &mock.PoolMock{Exits: []*ethpbv1alpha1.SignedVoluntaryExit{exit1, exit2}},
}
request := httptest.NewRequest(http.MethodGet, "http://example.com", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListVoluntaryExits(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ListVoluntaryExitsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "0x7369676e6174757265310000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", resp.Data[0].Signature)
assert.Equal(t, "1", resp.Data[0].Message.Epoch)
assert.Equal(t, "1", resp.Data[0].Message.ValidatorIndex)
assert.Equal(t, "0x7369676e6174757265320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", resp.Data[1].Signature)
assert.Equal(t, "2", resp.Data[1].Message.Epoch)
assert.Equal(t, "2", resp.Data[1].Message.ValidatorIndex)
}
func TestSubmitVoluntaryExit(t *testing.T) {
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
t.Run("ok", func(t *testing.T) {
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[0].PublicKey().Marshal(),
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = []*ethpbv1alpha1.Validator{validator}
// Satisfy the minimum activity period required before exiting.
state.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod))
return nil
})
require.NoError(t, err)
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{},
Broadcaster: broadcaster,
}
var body bytes.Buffer
_, err = body.WriteString(exit1)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitVoluntaryExit(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
require.NoError(t, err)
pendingExits, err := s.VoluntaryExitsPool.PendingExits()
require.NoError(t, err)
require.Equal(t, 1, len(pendingExits))
assert.Equal(t, true, broadcaster.BroadcastCalled)
})
t.Run("across fork", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = params.BeaconConfig().ShardCommitteePeriod + 1
params.OverrideBeaconConfig(config)
bs, _ := util.DeterministicGenesisState(t, 1)
// Satisfy the minimum activity period required before exiting.
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod))))
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{},
Broadcaster: broadcaster,
}
var body bytes.Buffer
_, err := body.WriteString(exit2)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitVoluntaryExit(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
require.NoError(t, err)
pendingExits, err := s.VoluntaryExitsPool.PendingExits()
require.NoError(t, err)
require.Equal(t, 1, len(pendingExits))
assert.Equal(t, true, broadcaster.BroadcastCalled)
})
t.Run("no body", func(t *testing.T) {
request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s := &Server{}
s.SubmitVoluntaryExit(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("invalid", func(t *testing.T) {
var body bytes.Buffer
_, err := body.WriteString(invalidExit1)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s := &Server{}
s.SubmitVoluntaryExit(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
})
t.Run("wrong signature", func(t *testing.T) {
bs, _ := util.DeterministicGenesisState(t, 1)
s := &Server{ChainInfoFetcher: &blockchainmock.ChainService{State: bs}}
var body bytes.Buffer
_, err := body.WriteString(invalidExit2)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitVoluntaryExit(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "Invalid exit"))
})
t.Run("invalid validator index", func(t *testing.T) {
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[0].PublicKey().Marshal(),
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = []*ethpbv1alpha1.Validator{validator}
return nil
})
require.NoError(t, err)
s := &Server{ChainInfoFetcher: &blockchainmock.ChainService{State: bs}}
var body bytes.Buffer
_, err = body.WriteString(invalidExit3)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitVoluntaryExit(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "Could not get exiting validator"))
})
}
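// Illustrative sketch, not part of this change: both success cases above
// fast-forward the state to the first slot at which the validator has been
// active long enough to exit. With mainnet values (32 slots per epoch and a
// shard committee period of 256 epochs) this threshold is slot 8192.
func firstExitEligibleSlot() primitives.Slot {
	cfg := params.BeaconConfig()
	return cfg.SlotsPerEpoch.Mul(uint64(cfg.ShardCommitteePeriod))
}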
var (
singleAtt = `[
{
"aggregation_bits": "0x03",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
"index": "0",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "0",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"target": {
"epoch": "0",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
}
}
}
]`
multipleAtts = `[
{
"aggregation_bits": "0x03",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
"index": "0",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "0",
"root": "0x736f75726365726f6f7431000000000000000000000000000000000000000000"
},
"target": {
"epoch": "0",
"root": "0x746172676574726f6f7431000000000000000000000000000000000000000000"
}
}
},
{
"aggregation_bits": "0x03",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
"index": "0",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "0",
"root": "0x736f75726365726f6f7431000000000000000000000000000000000000000000"
},
"target": {
"epoch": "0",
"root": "0x746172676574726f6f7432000000000000000000000000000000000000000000"
}
}
}
]`
// signature is invalid
invalidAtt = `[
{
"aggregation_bits": "0x03",
"signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"data": {
"slot": "0",
"index": "0",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "0",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"target": {
"epoch": "0",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
}
}
}
]`
exit1 = `{
"message": {
"epoch": "0",
"validator_index": "0"
},
"signature": "0xaf20377dabe56887f72273806ea7f3bab3df464fe0178b2ec9bb83d891bf038671c222e2fa7fc0b3e83a0a86ecf235f6104f8130d9e3177cdf5391953fcebb9676f906f4e366b95cb4d734f48f7fc0f116c643519a58a3bb1f7501a1f64b87d2"
}`
exit2 = fmt.Sprintf(`{
"message": {
"epoch": "%d",
"validator_index": "0"
},
"signature": "0xa430330829331089c4381427217231c32c26ac551de410961002491257b1ef50c3d49a89fc920ac2f12f0a27a95ab9b811e49f04cb08020ff7dbe03bdb479f85614608c4e5d0108052497f4ae0148c0c2ef79c05adeaf74e6c003455f2cc5716"
}`, params.BeaconConfig().ShardCommitteePeriod+1)
// epoch is invalid
invalidExit1 = `{
"message": {
"epoch": "foo",
"validator_index": "0"
},
"signature": "0xaf20377dabe56887f72273806ea7f3bab3df464fe0178b2ec9bb83d891bf038671c222e2fa7fc0b3e83a0a86ecf235f6104f8130d9e3177cdf5391953fcebb9676f906f4e366b95cb4d734f48f7fc0f116c643519a58a3bb1f7501a1f64b87d2"
}`
// signature is wrong
invalidExit2 = `{
"message": {
"epoch": "0",
"validator_index": "0"
},
"signature": "0xa430330829331089c4381427217231c32c26ac551de410961002491257b1ef50c3d49a89fc920ac2f12f0a27a95ab9b811e49f04cb08020ff7dbe03bdb479f85614608c4e5d0108052497f4ae0148c0c2ef79c05adeaf74e6c003455f2cc5716"
}`
// non-existing validator index
invalidExit3 = `{
"message": {
"epoch": "0",
"validator_index": "99"
},
"signature": "0xa430330829331089c4381427217231c32c26ac551de410961002491257b1ef50c3d49a89fc920ac2f12f0a27a95ab9b811e49f04cb08020ff7dbe03bdb479f85614608c4e5d0108052497f4ae0148c0c2ef79c05adeaf74e6c003455f2cc5716"
}`
)

View File

@@ -2,33 +2,17 @@ package beacon
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/mock/gomock"
"github.com/gorilla/mux"
"github.com/pkg/errors"
testing2 "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/stretchr/testify/mock"
)
@@ -46,7 +30,7 @@ func TestPublishBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(phase0Block)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(phase0Block)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
@@ -63,7 +47,7 @@ func TestPublishBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(altairBlock)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(altairBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
@@ -80,7 +64,7 @@ func TestPublishBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
@@ -97,7 +81,7 @@ func TestPublishBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(capellaBlock)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(capellaBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
@@ -108,7 +92,7 @@ func TestPublishBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
@@ -124,7 +108,7 @@ func TestPublishBlockV2(t *testing.T) {
OptimisticModeFetcher: chainService,
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte("foo")))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
@@ -133,74 +117,9 @@ func TestPublishBlockV2(t *testing.T) {
})
}
func TestPublishBlockV2SSZ(t *testing.T) {
ctrl := gomock.NewController(t)
t.Run("Bellatrix", func(t *testing.T) {
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_Bellatrix)
return ok
}))
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
var bellablock SignedBeaconBlockBellatrix
err := json.Unmarshal([]byte(bellatrixBlock), &bellablock)
require.NoError(t, err)
genericBlock, err := bellablock.ToGeneric()
require.NoError(t, err)
sszvalue, err := genericBlock.GetBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
request.Header.Set("Accept", "application/octet-stream")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
})
t.Run("Capella", func(t *testing.T) {
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_Capella)
return ok
}))
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
var cblock SignedBeaconBlockCapella
err := json.Unmarshal([]byte(capellaBlock), &cblock)
require.NoError(t, err)
genericBlock, err := cblock.ToGeneric()
require.NoError(t, err)
sszvalue, err := genericBlock.GetCapella().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
request.Header.Set("Accept", "application/octet-stream")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
})
t.Run("invalid block", func(t *testing.T) {
server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Body does not represent a valid block type"))
})
}
func TestPublishBlindedBlockV2(t *testing.T) {
ctrl := gomock.NewController(t)
t.Run("Phase 0", func(t *testing.T) {
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
@@ -212,7 +131,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(phase0Block)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(phase0Block)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)
@@ -229,7 +148,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(altairBlock)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(altairBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)
@@ -246,7 +165,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)
@@ -263,7 +182,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedCapellaBlock)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(blindedCapellaBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)
@@ -274,7 +193,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)
@@ -290,7 +209,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
OptimisticModeFetcher: chainService,
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte("foo")))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)
@@ -299,340 +218,6 @@ func TestPublishBlindedBlockV2(t *testing.T) {
})
}
func TestPublishBlindedBlockV2SSZ(t *testing.T) {
ctrl := gomock.NewController(t)
t.Run("Bellatrix", func(t *testing.T) {
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedBellatrix)
return ok
}))
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
var bellablock SignedBlindedBeaconBlockBellatrix
err := json.Unmarshal([]byte(blindedBellatrixBlock), &bellablock)
require.NoError(t, err)
genericBlock, err := bellablock.ToGeneric()
require.NoError(t, err)
sszvalue, err := genericBlock.GetBlindedBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
request.Header.Set("Accept", "application/octet-stream")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
})
t.Run("Capella", func(t *testing.T) {
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedCapella)
return ok
}))
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
var cblock SignedBlindedBeaconBlockCapella
err := json.Unmarshal([]byte(blindedCapellaBlock), &cblock)
require.NoError(t, err)
genericBlock, err := cblock.ToGeneric()
require.NoError(t, err)
sszvalue, err := genericBlock.GetBlindedCapella().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
request.Header.Set("Accept", "application/octet-stream")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
})
t.Run("invalid block", func(t *testing.T) {
server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Body does not represent a valid block type"))
})
}
func TestValidateConsensus(t *testing.T) {
ctx := context.Background()
parentState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
parentBlock, err := util.GenerateFullBlock(parentState, privs, util.DefaultBlockGenConfig(), parentState.Slot())
require.NoError(t, err)
parentSbb, err := blocks.NewSignedBeaconBlock(parentBlock)
require.NoError(t, err)
st, err := transition.ExecuteStateTransition(ctx, parentState, parentSbb)
require.NoError(t, err)
block, err := util.GenerateFullBlock(st, privs, util.DefaultBlockGenConfig(), st.Slot())
require.NoError(t, err)
sbb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
parentRoot, err := parentSbb.Block().HashTreeRoot()
require.NoError(t, err)
server := &Server{
Blocker: &testutil.MockBlocker{RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{parentRoot: parentSbb}},
Stater: &testutil.MockStater{StatesByRoot: map[[32]byte]state.BeaconState{bytesutil.ToBytes32(parentBlock.Block.StateRoot): parentState}},
}
require.NoError(t, server.validateConsensus(ctx, sbb))
}
func TestValidateEquivocation(t *testing.T) {
t.Run("ok", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(10))
fc := doublylinkedtree.New()
require.NoError(t, fc.InsertNode(context.Background(), st, bytesutil.ToBytes32([]byte("root"))))
server := &Server{
ForkchoiceFetcher: &testing2.ChainService{ForkChoiceStore: fc},
}
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
blk.SetSlot(st.Slot() + 1)
require.NoError(t, server.validateEquivocation(blk.Block()))
})
t.Run("block already exists", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(10))
fc := doublylinkedtree.New()
require.NoError(t, fc.InsertNode(context.Background(), st, bytesutil.ToBytes32([]byte("root"))))
server := &Server{
ForkchoiceFetcher: &testing2.ChainService{ForkChoiceStore: fc},
}
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
blk.SetSlot(st.Slot())
assert.ErrorContains(t, "already exists", server.validateEquivocation(blk.Block()))
})
}
func TestServer_GetBlockRoot(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
url := "http://example.com/eth/v1/beacon/blocks/{block_id}}/root"
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
t.Run("get root", func(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*eth.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainFetcher := &testing2.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &eth.Checkpoint{Root: blkContainers[64].BlockRoot},
FinalizedRoots: map[[32]byte]bool{},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID map[string]string
want string
wantErr string
wantCode int
}{
{
name: "bad formatting",
blockID: map[string]string{"block_id": "3bad0"},
wantErr: "Could not parse block ID",
wantCode: http.StatusBadRequest,
},
{
name: "canonical slot",
blockID: map[string]string{"block_id": "30"},
want: hexutil.Encode(blkContainers[30].BlockRoot),
wantErr: "",
wantCode: http.StatusOK,
},
{
name: "head",
blockID: map[string]string{"block_id": "head"},
want: hexutil.Encode(headBlock.BlockRoot),
wantErr: "",
wantCode: http.StatusOK,
},
{
name: "finalized",
blockID: map[string]string{"block_id": "finalized"},
want: hexutil.Encode(blkContainers[64].BlockRoot),
wantErr: "",
wantCode: http.StatusOK,
},
{
name: "genesis",
blockID: map[string]string{"block_id": "genesis"},
want: hexutil.Encode(root[:]),
wantErr: "",
wantCode: http.StatusOK,
},
{
name: "genesis root",
blockID: map[string]string{"block_id": hexutil.Encode(root[:])},
want: hexutil.Encode(root[:]),
wantErr: "",
wantCode: http.StatusOK,
},
{
name: "root",
blockID: map[string]string{"block_id": hexutil.Encode(blkContainers[20].BlockRoot)},
want: hexutil.Encode(blkContainers[20].BlockRoot),
wantErr: "",
wantCode: http.StatusOK,
},
{
name: "non-existent root",
blockID: map[string]string{"block_id": hexutil.Encode(bytesutil.PadTo([]byte("hi there"), 32))},
wantErr: "Could not find block",
wantCode: http.StatusNotFound,
},
{
name: "slot",
blockID: map[string]string{"block_id": "40"},
want: hexutil.Encode(blkContainers[40].BlockRoot),
wantErr: "",
wantCode: http.StatusOK,
},
{
name: "no block",
blockID: map[string]string{"block_id": "105"},
wantErr: "Could not find any blocks with given slot",
wantCode: http.StatusNotFound,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, url, nil)
request = mux.SetURLVars(request, tt.blockID)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
bs.GetBlockRoot(writer, request)
assert.Equal(t, tt.wantCode, writer.Code)
resp := &BlockRootResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
if tt.wantErr != "" {
require.ErrorContains(t, tt.wantErr, errors.New(writer.Body.String()))
return
}
require.NotNil(t, resp)
require.DeepEqual(t, resp.Data.Root, tt.want)
})
}
})
t.Run("execution optimistic", func(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*eth.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainFetcher := &testing2.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &eth.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
}
request := httptest.NewRequest(http.MethodGet, url, nil)
request = mux.SetURLVars(request, map[string]string{"block_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
bs.GetBlockRoot(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &BlockRootResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.DeepEqual(t, resp.ExecutionOptimistic, true)
})
t.Run("finalized", func(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*eth.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainFetcher := &testing2.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &eth.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{
bytesutil.ToBytes32(blkContainers[32].BlockRoot): true,
bytesutil.ToBytes32(blkContainers[64].BlockRoot): false,
},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
}
t.Run("true", func(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, url, nil)
request = mux.SetURLVars(request, map[string]string{"block_id": "32"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
bs.GetBlockRoot(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &BlockRootResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.DeepEqual(t, resp.Finalized, true)
})
t.Run("false", func(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, url, nil)
request = mux.SetURLVars(request, map[string]string{"block_id": "64"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
bs.GetBlockRoot(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &BlockRootResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.DeepEqual(t, resp.Finalized, false)
})
})
}
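// Illustrative sketch, not part of this change: the block_id grammar that the
// table-driven cases above walk through. An identifier is one of the named
// checkpoints, a 0x-prefixed 32-byte root, or a decimal slot; anything else
// is the "Could not parse block ID" case. (The real handler may accept more
// keywords than these tests exercise.)
func classifyBlockID(id string) string {
	switch id {
	case "head", "finalized", "genesis":
		return "keyword"
	}
	if strings.HasPrefix(id, "0x") {
		if b, err := hexutil.Decode(id); err == nil && len(b) == 32 {
			return "root"
		}
		return "invalid"
	}
	if len(id) == 0 {
		return "invalid"
	}
	for _, c := range id {
		if c < '0' || c > '9' {
			return "invalid"
		}
	}
	return "slot"
}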
const (
phase0Block = `{
"message": {
@@ -1083,8 +668,7 @@ const (
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
],
"data": {
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
@@ -1104,7 +688,7 @@ const (
}
],
"sync_aggregate": {
"sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"sync_committee_bits": "0x01",
"sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
},
"execution_payload": {
@@ -1262,8 +846,7 @@ const (
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
],
"data": {
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
@@ -1283,7 +866,7 @@ const (
}
],
"sync_aggregate": {
"sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"sync_committee_bits": "0x01",
"sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
},
"execution_payload_header": {
@@ -1439,8 +1022,7 @@ const (
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
],
"data": {
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
@@ -1460,7 +1042,7 @@ const (
}
],
"sync_aggregate": {
"sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"sync_committee_bits": "0x01",
"sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
},
"execution_payload": {
@@ -1636,8 +1218,7 @@ const (
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
],
"data": {
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
@@ -1657,7 +1238,7 @@ const (
}
],
"sync_aggregate": {
"sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"sync_committee_bits": "0x01",
"sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
},
"execution_payload_header": {

View File

@@ -8,14 +8,17 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
corehelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -24,6 +27,121 @@ import (
const broadcastBLSChangesRateLimit = 128
// ListPoolAttestations retrieves attestations known by the node but
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
func (bs *Server) ListPoolAttestations(ctx context.Context, req *ethpbv1.AttestationsPoolRequest) (*ethpbv1.AttestationsPoolResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolAttestations")
defer span.End()
attestations := bs.AttestationsPool.AggregatedAttestations()
unaggAtts, err := bs.AttestationsPool.UnaggregatedAttestations()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get unaggregated attestations: %v", err)
}
attestations = append(attestations, unaggAtts...)
isEmptyReq := req.Slot == nil && req.CommitteeIndex == nil
if isEmptyReq {
allAtts := make([]*ethpbv1.Attestation, len(attestations))
for i, att := range attestations {
allAtts[i] = migration.V1Alpha1AttestationToV1(att)
}
return &ethpbv1.AttestationsPoolResponse{Data: allAtts}, nil
}
filteredAtts := make([]*ethpbv1.Attestation, 0, len(attestations))
for _, att := range attestations {
bothDefined := req.Slot != nil && req.CommitteeIndex != nil
committeeIndexMatch := req.CommitteeIndex != nil && att.Data.CommitteeIndex == *req.CommitteeIndex
slotMatch := req.Slot != nil && att.Data.Slot == *req.Slot
if bothDefined && committeeIndexMatch && slotMatch {
filteredAtts = append(filteredAtts, migration.V1Alpha1AttestationToV1(att))
} else if !bothDefined && (committeeIndexMatch || slotMatch) {
filteredAtts = append(filteredAtts, migration.V1Alpha1AttestationToV1(att))
}
}
return &ethpbv1.AttestationsPoolResponse{Data: filteredAtts}, nil
}
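// Illustrative sketch, not part of this change: the branching above reduces
// to a single predicate. With both filters set an attestation must match
// both; with exactly one set it must match that one; the empty request is
// answered before any filtering happens.
func matchesPoolFilter(att *ethpbalpha.Attestation, req *ethpbv1.AttestationsPoolRequest) bool {
	if req.Slot != nil && att.Data.Slot != *req.Slot {
		return false
	}
	if req.CommitteeIndex != nil && att.Data.CommitteeIndex != *req.CommitteeIndex {
		return false
	}
	return true
}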
// SubmitAttestations submits an Attestation object to the node. If the attestation passes all validation
// constraints, the node MUST publish it on the appropriate subnet.
func (bs *Server) SubmitAttestations(ctx context.Context, req *ethpbv1.SubmitAttestationsRequest) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitAttestation")
defer span.End()
var validAttestations []*ethpbalpha.Attestation
var attFailures []*helpers.SingleIndexedVerificationFailure
for i, sourceAtt := range req.Data {
att := migration.V1AttToV1Alpha1(sourceAtt)
if _, err := bls.SignatureFromBytes(att.Signature); err != nil {
attFailures = append(attFailures, &helpers.SingleIndexedVerificationFailure{
Index: i,
Message: "Incorrect attestation signature: " + err.Error(),
})
continue
}
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// that one was received.
// Note we can't do this for aggregated attestations because we don't have the selection proof.
if !corehelpers.IsAggregated(att) {
bs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
},
})
}
validAttestations = append(validAttestations, att)
}
broadcastFailed := false
for _, att := range validAttestations {
// Determine subnet to broadcast attestation to
wantedEpoch := slots.ToEpoch(att.Data.Slot)
vals, err := bs.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return nil, err
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.Data.CommitteeIndex, att.Data.Slot)
if err := bs.Broadcaster.BroadcastAttestation(ctx, subnet, att); err != nil {
broadcastFailed = true
}
if corehelpers.IsAggregated(att) {
if err := bs.AttestationsPool.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save aggregated att")
}
} else {
if err := bs.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save unaggregated att")
}
}
}
if broadcastFailed {
return nil, status.Errorf(
codes.Internal,
"Could not publish one or more attestations. Some attestations could be published successfully.")
}
if len(attFailures) > 0 {
failuresContainer := &helpers.IndexedVerificationFailure{Failures: attFailures}
err := grpc.AppendCustomErrorHeader(ctx, failuresContainer)
if err != nil {
return nil, status.Errorf(
codes.InvalidArgument,
"One or more attestations failed validation. Could not prepare attestation failure information: %v",
err,
)
}
return nil, status.Errorf(codes.InvalidArgument, "One or more attestations failed validation")
}
return &emptypb.Empty{}, nil
}
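// Illustrative sketch, not part of this change: what the
// ComputeSubnetFromCommitteeAndSlot call above evaluates, following
// compute_subnet_for_attestation from the consensus spec. The constants are
// mainnet placeholders; the real helper derives committeesPerSlot from the
// active validator count.
func subnetForAttestation(committeesPerSlot, committeeIndex, slot uint64) uint64 {
	const slotsPerEpoch = 32          // mainnet SLOTS_PER_EPOCH
	const attestationSubnetCount = 64 // mainnet ATTESTATION_SUBNET_COUNT
	slotsSinceEpochStart := slot % slotsPerEpoch
	committeesSinceEpochStart := committeesPerSlot * slotsSinceEpochStart
	return (committeesSinceEpochStart + committeeIndex) % attestationSubnetCount
}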
// ListPoolAttesterSlashings retrieves attester slashings known by the node but
// not necessarily incorporated into any block.
func (bs *Server) ListPoolAttesterSlashings(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.AttesterSlashingsPoolResponse, error) {
@@ -136,6 +254,63 @@ func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpbv1.Propo
return &emptypb.Empty{}, nil
}
// ListPoolVoluntaryExits retrieves voluntary exits known by the node but
// not necessarily incorporated into any block.
func (bs *Server) ListPoolVoluntaryExits(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.VoluntaryExitsPoolResponse, error) {
_, span := trace.StartSpan(ctx, "beacon.ListPoolVoluntaryExits")
defer span.End()
sourceExits, err := bs.VoluntaryExitsPool.PendingExits()
if err != nil {
return nil, status.Error(codes.Internal, "Could not get exits from the pool")
}
exits := make([]*ethpbv1.SignedVoluntaryExit, len(sourceExits))
for i, s := range sourceExits {
exits[i] = migration.V1Alpha1ExitToV1(s)
}
return &ethpbv1.VoluntaryExitsPoolResponse{
Data: exits,
}, nil
}
// SubmitVoluntaryExit submits a SignedVoluntaryExit object to the node's pool
// and, if it passes validation, the node MUST broadcast it to the network.
func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpbv1.SignedVoluntaryExit) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitVoluntaryExit")
defer span.End()
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
s, err := slots.EpochStart(req.Message.Epoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get epoch from message: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, s)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}
validator, err := headState.ValidatorAtIndexReadOnly(req.Message.ValidatorIndex)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get exiting validator: %v", err)
}
alphaExit := migration.V1ExitToV1Alpha1(req)
err = blocks.VerifyExitAndSignature(validator, headState.Slot(), headState.Fork(), alphaExit, headState.GenesisValidatorsRoot())
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Invalid voluntary exit: %v", err)
}
bs.VoluntaryExitsPool.InsertVoluntaryExit(alphaExit)
if err := bs.Broadcaster.Broadcast(ctx, alphaExit); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast voluntary exit object: %v", err)
}
return &emptypb.Empty{}, nil
}
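// Illustrative sketch, not part of this change: the EpochStart call above is
// plain arithmetic, epoch * SLOTS_PER_EPOCH (plus an overflow check in the
// real helper), so an exit scoped to epoch 5 is verified against a head
// state advanced to at least slot 160 on mainnet.
func epochStartSlot(epoch uint64) uint64 {
	const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH, a placeholder constant
	return epoch * slotsPerEpoch
}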
// SubmitSignedBLSToExecutionChanges submits the given BLS-to-execution changes to the node's pool;
// if they pass validation, the node must broadcast them to the network.
func (bs *Server) SubmitSignedBLSToExecutionChanges(ctx context.Context, req *ethpbv2.SubmitBLSToExecutionChangesRequest) (*emptypb.Empty, error) {

View File

@@ -2,9 +2,14 @@ package beacon
import (
"context"
"reflect"
"strings"
"testing"
"time"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/go-bitfield"
grpcutil "github.com/prysmaticlabs/prysm/v4/api/grpc"
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
prysmtime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
@@ -13,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
blstoexecmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec/mock"
slashingsmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -30,9 +36,172 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
)
func TestListPoolAttestations(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
att1 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
}
att2 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{4, 40},
Data: &ethpbv1alpha1.AttestationData{
Slot: 4,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 4,
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 40,
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature4"), 96),
}
att3 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{2, 20},
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 2,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
att4 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 4,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
att5 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
}
att6 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
AttestationsPool: attestations.NewPool(),
}
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]*ethpbv1alpha1.Attestation{att1, att2, att3}))
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]*ethpbv1alpha1.Attestation{att4, att5, att6}))
t.Run("empty request", func(t *testing.T) {
req := &ethpbv1.AttestationsPoolRequest{}
resp, err := s.ListPoolAttestations(context.Background(), req)
require.NoError(t, err)
require.Equal(t, 6, len(resp.Data))
})
t.Run("slot request", func(t *testing.T) {
slot := primitives.Slot(2)
req := &ethpbv1.AttestationsPoolRequest{
Slot: &slot,
}
resp, err := s.ListPoolAttestations(context.Background(), req)
require.NoError(t, err)
require.Equal(t, 3, len(resp.Data))
for _, datum := range resp.Data {
assert.DeepEqual(t, datum.Data.Slot, slot)
}
})
t.Run("index request", func(t *testing.T) {
index := primitives.CommitteeIndex(4)
req := &ethpbv1.AttestationsPoolRequest{
CommitteeIndex: &index,
}
resp, err := s.ListPoolAttestations(context.Background(), req)
require.NoError(t, err)
require.Equal(t, 4, len(resp.Data))
for _, datum := range resp.Data {
assert.DeepEqual(t, datum.Data.Index, index)
}
})
t.Run("both slot + index request", func(t *testing.T) {
slot := primitives.Slot(2)
index := primitives.CommitteeIndex(4)
req := &ethpbv1.AttestationsPoolRequest{
Slot: &slot,
CommitteeIndex: &index,
}
resp, err := s.ListPoolAttestations(context.Background(), req)
require.NoError(t, err)
require.Equal(t, 2, len(resp.Data))
for _, datum := range resp.Data {
assert.DeepEqual(t, datum.Data.Index, index)
assert.DeepEqual(t, datum.Data.Slot, slot)
}
})
}
func TestListPoolAttesterSlashings(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
@@ -181,12 +350,39 @@ func TestListPoolProposerSlashings(t *testing.T) {
assert.DeepEqual(t, migration.V1Alpha1ProposerSlashingToV1(slashing2), resp.Data[1])
}
func TestListPoolVoluntaryExits(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
exit1 := &ethpbv1alpha1.SignedVoluntaryExit{
Exit: &ethpbv1alpha1.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
}
exit2 := &ethpbv1alpha1.SignedVoluntaryExit{
Exit: &ethpbv1alpha1.VoluntaryExit{
Epoch: 2,
ValidatorIndex: 2,
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{Exits: []*ethpbv1alpha1.SignedVoluntaryExit{exit1, exit2}},
}
resp, err := s.ListPoolVoluntaryExits(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
require.Equal(t, 2, len(resp.Data))
assert.DeepEqual(t, migration.V1Alpha1ExitToV1(exit1), resp.Data[0])
assert.DeepEqual(t, migration.V1Alpha1ExitToV1(exit2), resp.Data[1])
}
func TestSubmitAttesterSlashing_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
@@ -264,9 +460,6 @@ func TestSubmitAttesterSlashing_Ok(t *testing.T) {
func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
@@ -343,10 +536,6 @@ func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
bs, err := util.NewBeaconState()
require.NoError(t, err)
@@ -388,9 +577,6 @@ func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
func TestSubmitProposerSlashing_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
@@ -461,9 +647,6 @@ func TestSubmitProposerSlashing_Ok(t *testing.T) {
func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
@@ -532,10 +715,6 @@ func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
bs, err := util.NewBeaconState()
require.NoError(t, err)
@@ -567,6 +746,448 @@ func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
assert.Equal(t, false, broadcaster.BroadcastCalled)
}
func TestSubmitVoluntaryExit_Ok(t *testing.T) {
ctx := context.Background()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[0].PublicKey().Marshal(),
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = []*ethpbv1alpha1.Validator{validator}
// Satisfy activity time required before exiting.
state.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod))
return nil
})
require.NoError(t, err)
exit := &ethpbv1.SignedVoluntaryExit{
Message: &ethpbv1.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 0,
},
Signature: make([]byte, 96),
}
sb, err := signing.ComputeDomainAndSign(bs, exit.Message.Epoch, exit.Message, params.BeaconConfig().DomainVoluntaryExit, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
exit.Signature = sig.Marshal()
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitVoluntaryExit(ctx, exit)
require.NoError(t, err)
pendingExits, err := s.VoluntaryExitsPool.PendingExits()
require.NoError(t, err)
require.Equal(t, 1, len(pendingExits))
assert.DeepEqual(t, migration.V1ExitToV1Alpha1(exit), pendingExits[0])
assert.Equal(t, true, broadcaster.BroadcastCalled)
}
func TestSubmitVoluntaryExit_AcrossFork(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = params.BeaconConfig().ShardCommitteePeriod + 1
params.OverrideBeaconConfig(config)
bs, keys := util.DeterministicGenesisState(t, 1)
// Satisfy activity time required before exiting.
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod))))
exit := &ethpbv1.SignedVoluntaryExit{
Message: &ethpbv1.VoluntaryExit{
Epoch: params.BeaconConfig().ShardCommitteePeriod + 1,
ValidatorIndex: 0,
},
Signature: make([]byte, 96),
}
newBs := bs.Copy()
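// Process slots on a copy so the exit is signed with the post-Altair fork domain matching its epoch.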
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)+1))
require.NoError(t, err)
sb, err := signing.ComputeDomainAndSign(newBs, exit.Message.Epoch, exit.Message, params.BeaconConfig().DomainVoluntaryExit, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
exit.Signature = sig.Marshal()
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitVoluntaryExit(ctx, exit)
require.NoError(t, err)
}
func TestSubmitVoluntaryExit_InvalidValidatorIndex(t *testing.T) {
ctx := context.Background()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[0].PublicKey().Marshal(),
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = []*ethpbv1alpha1.Validator{validator}
return nil
})
require.NoError(t, err)
exit := &ethpbv1.SignedVoluntaryExit{
Message: &ethpbv1.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 99,
},
Signature: make([]byte, 96),
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitVoluntaryExit(ctx, exit)
require.ErrorContains(t, "Could not get exiting validator", err)
assert.Equal(t, false, broadcaster.BroadcastCalled)
}
func TestSubmitVoluntaryExit_InvalidExit(t *testing.T) {
ctx := context.Background()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[0].PublicKey().Marshal(),
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = []*ethpbv1alpha1.Validator{validator}
return nil
})
require.NoError(t, err)
exit := &ethpbv1.SignedVoluntaryExit{
Message: &ethpbv1.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 0,
},
Signature: make([]byte, 96),
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitVoluntaryExit(ctx, exit)
require.ErrorContains(t, "Invalid voluntary exit", err)
assert.Equal(t, false, broadcaster.BroadcastCalled)
}
func TestServer_SubmitAttestations_Ok(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validators := []*ethpbv1alpha1.Validator{
{
PublicKey: keys[0].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = validators
state.Slot = 1
state.PreviousJustifiedCheckpoint = &ethpbv1alpha1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
}
return nil
})
require.NoError(t, err)
b := bitfield.NewBitlist(1)
b.SetBitAt(0, true)
sourceCheckpoint := &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
}
att1 := &ethpbv1.Attestation{
AggregationBits: b,
Data: &ethpbv1.AttestationData{
Slot: 0,
Index: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot1"), 32),
Source: sourceCheckpoint,
Target: &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
}
att2 := &ethpbv1.Attestation{
AggregationBits: b,
Data: &ethpbv1.AttestationData{
Slot: 0,
Index: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot2"), 32),
Source: sourceCheckpoint,
Target: &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
}
for _, att := range []*ethpbv1.Attestation{att1, att2} {
sb, err := signing.ComputeDomainAndSign(
bs,
slots.ToEpoch(att.Data.Slot),
att.Data,
params.BeaconConfig().DomainBeaconAttester,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
att.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
AttestationsPool: attestations.NewPool(),
Broadcaster: broadcaster,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
}
_, err = s.SubmitAttestations(ctx, &ethpbv1.SubmitAttestationsRequest{
Data: []*ethpbv1.Attestation{att1, att2},
})
require.NoError(t, err)
assert.Equal(t, true, broadcaster.BroadcastCalled)
assert.Equal(t, 2, len(broadcaster.BroadcastAttestations))
expectedAtt1, err := att1.HashTreeRoot()
require.NoError(t, err)
expectedAtt2, err := att2.HashTreeRoot()
require.NoError(t, err)
actualAtt1, err := broadcaster.BroadcastAttestations[0].HashTreeRoot()
require.NoError(t, err)
actualAtt2, err := broadcaster.BroadcastAttestations[1].HashTreeRoot()
require.NoError(t, err)
for _, r := range [][32]byte{actualAtt1, actualAtt2} {
assert.Equal(t, true, reflect.DeepEqual(expectedAtt1, r) || reflect.DeepEqual(expectedAtt2, r))
}
require.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
}
func TestServer_SubmitAttestations_ValidAttestationSubmitted(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validators := []*ethpbv1alpha1.Validator{
{
PublicKey: keys[0].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = validators
state.Slot = 1
state.PreviousJustifiedCheckpoint = &ethpbv1alpha1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
}
return nil
})
require.NoError(t, err)
sourceCheckpoint := &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
}
b := bitfield.NewBitlist(1)
b.SetBitAt(0, true)
attValid := &ethpbv1.Attestation{
AggregationBits: b,
Data: &ethpbv1.AttestationData{
Slot: 0,
Index: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot1"), 32),
Source: sourceCheckpoint,
Target: &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
}
attInvalidSignature := &ethpbv1.Attestation{
AggregationBits: b,
Data: &ethpbv1.AttestationData{
Slot: 0,
Index: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot2"), 32),
Source: sourceCheckpoint,
Target: &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
}
// Don't sign attInvalidSignature.
sb, err := signing.ComputeDomainAndSign(
bs,
slots.ToEpoch(attValid.Data.Slot),
attValid.Data,
params.BeaconConfig().DomainBeaconAttester,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
attValid.Signature = sig.Marshal()
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
AttestationsPool: attestations.NewPool(),
Broadcaster: broadcaster,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
}
_, err = s.SubmitAttestations(ctx, &ethpbv1.SubmitAttestationsRequest{
Data: []*ethpbv1.Attestation{attValid, attInvalidSignature},
})
require.ErrorContains(t, "One or more attestations failed validation", err)
expectedAtt, err := attValid.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, broadcaster.BroadcastCalled)
require.Equal(t, 1, len(broadcaster.BroadcastAttestations))
broadcastRoot, err := broadcaster.BroadcastAttestations[0].HashTreeRoot()
require.NoError(t, err)
require.DeepEqual(t, expectedAtt, broadcastRoot)
require.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
}
func TestServer_SubmitAttestations_InvalidAttestationGRPCHeader(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validators := []*ethpbv1alpha1.Validator{
{
PublicKey: keys[0].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = validators
state.Slot = 1
state.PreviousJustifiedCheckpoint = &ethpbv1alpha1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
}
return nil
})
require.NoError(t, err)
b := bitfield.NewBitlist(1)
b.SetBitAt(0, true)
att := &ethpbv1.Attestation{
AggregationBits: b,
Data: &ethpbv1.AttestationData{
Slot: 0,
Index: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot2"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: nil,
}
chain := &blockchainmock.ChainService{State: bs}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: chain,
AttestationsPool: attestations.NewPool(),
Broadcaster: broadcaster,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
HeadFetcher: chain,
}
_, err = s.SubmitAttestations(ctx, &ethpbv1.SubmitAttestationsRequest{
Data: []*ethpbv1.Attestation{att},
})
require.ErrorContains(t, "One or more attestations failed validation", err)
sts, ok := grpc.ServerTransportStreamFromContext(ctx).(*runtime.ServerTransportStream)
require.Equal(t, true, ok, "type assertion failed")
md := sts.Header()
v, ok := md[strings.ToLower(grpcutil.CustomErrorMetadataKey)]
require.Equal(t, true, ok, "could not retrieve custom error metadata value")
assert.DeepEqual(
t,
[]string{"{\"failures\":[{\"index\":0,\"message\":\"Incorrect attestation signature: could not create signature from byte slice: signature must be 96 bytes\"}]}"},
v,
)
}
func TestListBLSToExecutionChanges(t *testing.T) {
change1 := &ethpbv1alpha1.SignedBLSToExecutionChange{
Message: &ethpbv1alpha1.BLSToExecutionChange{
@@ -598,10 +1219,6 @@ func TestListBLSToExecutionChanges(t *testing.T) {
func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
@@ -695,10 +1312,6 @@ func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
func TestSubmitSignedBLSToExecutionChanges_Bellatrix(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
@@ -807,10 +1420,6 @@ func TestSubmitSignedBLSToExecutionChanges_Bellatrix(t *testing.T) {
func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.

View File

@@ -6,7 +6,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
bytesutil2 "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
@@ -14,26 +13,6 @@ import (
"github.com/wealdtech/go-bytesutil"
)
type BlockRootResponse struct {
Data *struct {
Root string `json:"root"`
} `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type ListAttestationsResponse struct {
Data []*shared.Attestation `json:"data"`
}
type SubmitAttestationsRequest struct {
Data []*shared.Attestation `json:"data" validate:"required,dive"`
}
type ListVoluntaryExitsResponse struct {
Data []*shared.SignedVoluntaryExit
}
type SignedBeaconBlock struct {
Message BeaconBlock `json:"message" validate:"required"`
Signature string `json:"signature" validate:"required"`

View File

@@ -11,7 +11,6 @@ import (
statenative "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
@@ -103,13 +102,13 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
return &ethpb.StateValidatorsResponse{Data: valContainers, ExecutionOptimistic: isOptimistic, Finalized: isFinalized}, nil
}
filterStatus := make(map[validator.ValidatorStatus]bool, len(req.Status))
const lastValidStatusValue = 12
filterStatus := make(map[ethpb.ValidatorStatus]bool, len(req.Status))
const lastValidStatusValue = ethpb.ValidatorStatus(12)
for _, ss := range req.Status {
if ss > lastValidStatusValue {
return nil, status.Errorf(codes.InvalidArgument, "Invalid status %s", ss.String())
}
filterStatus[validator.ValidatorStatus(ss)] = true
filterStatus[ss] = true
}
epoch := slots.ToEpoch(st.Slot())
filteredVals := make([]*ethpb.ValidatorContainer, 0, len(valContainers))
@@ -244,8 +243,8 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
if len(validatorIds) == 0 {
allValidators := state.Validators()
valContainers = make([]*ethpb.ValidatorContainer, len(allValidators))
for i, val := range allValidators {
readOnlyVal, err := statenative.NewValidator(val)
for i, validator := range allValidators {
readOnlyVal, err := statenative.NewValidator(validator)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert validator: %v", err)
}
@@ -256,8 +255,8 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
valContainers[i] = &ethpb.ValidatorContainer{
Index: primitives.ValidatorIndex(i),
Balance: allBalances[i],
Status: ethpb.ValidatorStatus(subStatus),
Validator: migration.V1Alpha1ValidatorToV1(val),
Status: subStatus,
Validator: migration.V1Alpha1ValidatorToV1(validator),
}
}
} else {
@@ -279,7 +278,7 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
}
valIndex = primitives.ValidatorIndex(index)
}
val, err := state.ValidatorAtIndex(valIndex)
validator, err := state.ValidatorAtIndex(valIndex)
if _, ok := err.(*statenative.ValidatorIndexOutOfRangeError); ok {
// Ignore well-formed yet unknown indexes.
continue
@@ -287,8 +286,8 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
if err != nil {
return nil, errors.Wrap(err, "could not get validator")
}
v1Validator := migration.V1Alpha1ValidatorToV1(val)
readOnlyVal, err := statenative.NewValidator(val)
v1Validator := migration.V1Alpha1ValidatorToV1(validator)
readOnlyVal, err := statenative.NewValidator(validator)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert validator: %v", err)
}
@@ -299,7 +298,7 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
valContainers = append(valContainers, &ethpb.ValidatorContainer{
Index: valIndex,
Balance: allBalances[valIndex],
Status: ethpb.ValidatorStatus(subStatus),
Status: subStatus,
Validator: v1Validator,
})
}

View File

@@ -15,7 +15,6 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -464,7 +463,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == validator.Active,
status == ethpb.ValidatorStatus_ACTIVE,
)
require.Equal(
t,
@@ -502,7 +501,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == validator.ActiveOngoing,
status == ethpb.ValidatorStatus_ACTIVE_ONGOING,
)
require.Equal(
t,
@@ -539,7 +538,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == validator.Exited,
status == ethpb.ValidatorStatus_EXITED,
)
require.Equal(
t,
@@ -575,7 +574,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == validator.PendingInitialized || status == validator.ExitedUnslashed,
status == ethpb.ValidatorStatus_PENDING_INITIALIZED || status == ethpb.ValidatorStatus_EXITED_UNSLASHED,
)
require.Equal(
t,
@@ -613,7 +612,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == validator.Pending || subStatus == validator.ExitedSlashed,
status == ethpb.ValidatorStatus_PENDING || subStatus == ethpb.ValidatorStatus_EXITED_SLASHED,
)
require.Equal(
t,

View File

@@ -1,48 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"server.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/builder",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network/http:go_default_library",
"//proto/engine/v1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//network/http:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
],
)

View File

@@ -1,131 +0,0 @@
package builder
import (
"fmt"
"net/http"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
// ExpectedWithdrawals gets the withdrawals computed from the specified state that will be included in a block built on top of that state.
func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
// Retrieve beacon state
stateId := mux.Vars(r)["state_id"]
if stateId == "" {
http2.WriteError(w, &http2.DefaultErrorJson{
Message: "state_id is required in URL params",
Code: http.StatusBadRequest,
})
return
}
st, err := s.Stater.State(r.Context(), []byte(stateId))
if err != nil {
http2.WriteError(w, handleWrapError(err, "could not retrieve state", http.StatusNotFound))
return
}
queryParam := r.URL.Query().Get("proposal_slot")
var proposalSlot primitives.Slot
if queryParam != "" {
pSlot, err := strconv.ParseUint(queryParam, 10, 64)
if err != nil {
http2.WriteError(w, handleWrapError(err, "invalid proposal slot value", http.StatusBadRequest))
return
}
proposalSlot = primitives.Slot(pSlot)
} else {
proposalSlot = st.Slot() + 1
}
// Perform sanity checks on proposal slot before computing state
capellaStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
if err != nil {
http2.WriteError(w, handleWrapError(err, "could not calculate Capella start slot", http.StatusInternalServerError))
return
}
if proposalSlot < capellaStart {
http2.WriteError(w, &http2.DefaultErrorJson{
Message: "expected withdrawals are not supported before Capella fork",
Code: http.StatusBadRequest,
})
return
}
if proposalSlot <= st.Slot() {
http2.WriteError(w, &http2.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot must be bigger than state slot. proposal slot: %d, state slot: %d", proposalSlot, st.Slot()),
Code: http.StatusBadRequest,
})
return
}
lookAheadLimit := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().MaxSeedLookahead)))
if st.Slot().Add(lookAheadLimit) <= proposalSlot {
http2.WriteError(w, &http2.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot cannot be >= %d slots ahead of state slot", lookAheadLimit),
Code: http.StatusBadRequest,
})
return
}
// Get metadata for response
isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
http2.WriteError(w, handleWrapError(err, "could not get optimistic mode info", http.StatusInternalServerError))
return
}
root, err := helpers.BlockRootAtSlot(st, st.Slot()-1)
if err != nil {
http2.WriteError(w, handleWrapError(err, "could not get block root", http.StatusInternalServerError))
return
}
var blockRoot = [32]byte(root)
isFinalized := s.FinalizationFetcher.IsFinalized(r.Context(), blockRoot)
// Advance state forward to proposal slot
st, err = transition.ProcessSlots(r.Context(), st, proposalSlot)
if err != nil {
http2.WriteError(w, &http2.DefaultErrorJson{
Message: "could not process slots",
Code: http.StatusInternalServerError,
})
return
}
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
http2.WriteError(w, &http2.DefaultErrorJson{
Message: "could not get expected withdrawals",
Code: http.StatusInternalServerError,
})
return
}
http2.WriteJson(w, &ExpectedWithdrawalsResponse{
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
Data: buildExpectedWithdrawalsData(withdrawals),
})
}
func buildExpectedWithdrawalsData(withdrawals []*enginev1.Withdrawal) []*ExpectedWithdrawal {
data := make([]*ExpectedWithdrawal, len(withdrawals))
for i, withdrawal := range withdrawals {
data[i] = &ExpectedWithdrawal{
Address: hexutil.Encode(withdrawal.Address),
Amount: strconv.FormatUint(withdrawal.Amount, 10),
Index: strconv.FormatUint(withdrawal.Index, 10),
ValidatorIndex: strconv.FormatUint(uint64(withdrawal.ValidatorIndex), 10),
}
}
return data
}
func handleWrapError(err error, message string, code int) *http2.DefaultErrorJson {
return &http2.DefaultErrorJson{
Message: errors.Wrap(err, message).Error(),
Code: code,
}
}

View File

@@ -1,210 +0,0 @@
package builder
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
func TestExpectedWithdrawals_BadRequest(t *testing.T) {
st, err := util.NewBeaconStateCapella()
slotsAhead := 5000
require.NoError(t, err)
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
currentSlot := capellaSlot + primitives.Slot(slotsAhead)
require.NoError(t, st.SetSlot(currentSlot))
mockChainService := &mock.ChainService{Optimistic: true}
testCases := []struct {
name string
path string
urlParams map[string]string
state state.BeaconState
errorMessage string
}{
{
name: "no state_id url params",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot" +
strconv.FormatUint(uint64(currentSlot), 10),
urlParams: map[string]string{},
state: nil,
errorMessage: "state_id is required in URL params",
},
{
name: "invalid proposal slot value",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=aaa",
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "invalid proposal slot value",
},
{
name: "proposal slot < Capella start slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(capellaSlot)-1, 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "expected withdrawals are not supported before Capella fork",
},
{
name: "proposal slot == Capella start slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(capellaSlot), 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "proposal slot must be bigger than state slot",
},
{
name: "Proposal slot >= 128 slots ahead of state slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(currentSlot+128), 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "proposal slot cannot be >= 128 slots ahead of state slot",
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
s := &Server{
FinalizationFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
Stater: &testutil.MockStater{BeaconState: testCase.state},
}
request := httptest.NewRequest("GET", testCase.path, nil)
request = mux.SetURLVars(request, testCase.urlParams)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, testCase.errorMessage, e.Message)
})
}
}
func TestExpectedWithdrawals(t *testing.T) {
st, err := util.NewBeaconStateCapella()
slotsAhead := 5000
require.NoError(t, err)
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
currentSlot := capellaSlot + primitives.Slot(slotsAhead)
require.NoError(t, st.SetSlot(currentSlot))
mockChainService := &mock.ChainService{Optimistic: true}
t.Run("get correct expected withdrawals", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.MaxValidatorsPerWithdrawalsSweep = 16
params.OverrideBeaconConfig(cfg)
// Populate the state with validators covering the different withdrawal scenarios below
valCount := 17
validators := make([]*eth.Validator, 0, valCount)
balances := make([]uint64, 0, valCount)
for i := 0; i < valCount; i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
val := &eth.Validator{
PublicKey: blsKey.PublicKey().Marshal(),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
validators = append(validators, val)
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}
epoch := slots.ToEpoch(st.Slot())
// Fully withdrawable now with more than 0 balance
validators[5].WithdrawableEpoch = epoch
// Fully withdrawable now but 0 balance
validators[10].WithdrawableEpoch = epoch
balances[10] = 0
// Partially withdrawable now but fully withdrawable after 1 epoch
validators[14].WithdrawableEpoch = epoch + 1
balances[14] += params.BeaconConfig().MinDepositAmount
// Partially withdrawable
validators[15].WithdrawableEpoch = epoch + 2
balances[15] += params.BeaconConfig().MinDepositAmount
// Above sweep bound
validators[16].WithdrawableEpoch = epoch + 1
balances[16] += params.BeaconConfig().MinDepositAmount
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetBalances(balances))
inactivityScores := make([]uint64, valCount)
for i := range inactivityScores {
inactivityScores[i] = 10
}
require.NoError(t, st.SetInactivityScores(inactivityScores))
s := &Server{
FinalizationFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
Stater: &testutil.MockStater{BeaconState: st},
}
request := httptest.NewRequest(
"GET", "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot="+
strconv.FormatUint(uint64(currentSlot+params.BeaconConfig().SlotsPerEpoch), 10), nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ExpectedWithdrawalsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
assert.Equal(t, false, resp.Finalized)
assert.Equal(t, 3, len(resp.Data))
expectedWithdrawal1 := &ExpectedWithdrawal{
Index: strconv.FormatUint(0, 10),
ValidatorIndex: strconv.FormatUint(5, 10),
Address: hexutil.Encode(validators[5].WithdrawalCredentials[12:]),
// Decreased due to epoch processing when state advanced forward
Amount: strconv.FormatUint(31998257885, 10),
}
expectedWithdrawal2 := &ExpectedWithdrawal{
Index: strconv.FormatUint(1, 10),
ValidatorIndex: strconv.FormatUint(14, 10),
Address: hexutil.Encode(validators[14].WithdrawalCredentials[12:]),
// MaxEffectiveBalance + MinDepositAmount + decrease after epoch processing
Amount: strconv.FormatUint(32998257885, 10),
}
expectedWithdrawal3 := &ExpectedWithdrawal{
Index: strconv.FormatUint(2, 10),
ValidatorIndex: strconv.FormatUint(15, 10),
Address: hexutil.Encode(validators[15].WithdrawalCredentials[12:]),
// MinDepositAmount + decrease after epoch processing
Amount: strconv.FormatUint(998257885, 10),
}
require.DeepEqual(t, expectedWithdrawal1, resp.Data[0])
require.DeepEqual(t, expectedWithdrawal2, resp.Data[1])
require.DeepEqual(t, expectedWithdrawal3, resp.Data[2])
})
}
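A quick check of the expected amounts above: advancing the state one epoch applies an epoch transition that, in this setup, reduces each balance by 1,742,115 Gwei. Validator 5's full withdrawal is therefore 32,000,000,000 - 1,742,115 = 31,998,257,885 Gwei, validator 14's full withdrawal is 33,000,000,000 - 1,742,115 = 32,998,257,885 Gwei, and validator 15's partial withdrawal is that same post-transition balance minus MaxEffectiveBalance, i.e. 998,257,885 Gwei.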

View File

@@ -1,12 +0,0 @@
package builder
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
)
type Server struct {
FinalizationFetcher blockchain.FinalizationFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
Stater lookup.Stater
}

View File

@@ -1,14 +0,0 @@
package builder
type ExpectedWithdrawalsResponse struct {
Data []*ExpectedWithdrawal `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type ExpectedWithdrawal struct {
Address string `json:"address" hex:"true"`
Amount string `json:"amount"`
Index string `json:"index"`
ValidatorIndex string `json:"validator_index"`
}
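Since the builder package is removed in this diff, the following is only an illustrative sketch of querying the endpoint it served; the localhost:3500 address and the Response type are assumptions made for the example.
package main
import (
	"encoding/json"
	"fmt"
	"net/http"
)
// Response mirrors ExpectedWithdrawalsResponse above.
type Response struct {
	Data []struct {
		Address        string `json:"address"`
		Amount         string `json:"amount"`
		Index          string `json:"index"`
		ValidatorIndex string `json:"validator_index"`
	} `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized           bool `json:"finalized"`
}
func main() {
	// proposal_slot is optional; the handler defaults to state slot + 1.
	resp, err := http.Get("http://localhost:3500/eth/v1/builder/states/head/expected_withdrawals?proposal_slot=256")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var out Response
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	for _, w := range out.Data {
		fmt.Printf("validator %s withdraws %s gwei to %s\n", w.ValidatorIndex, w.Amount, w.Address)
	}
}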

View File

@@ -11,6 +11,7 @@ go_library(
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -41,6 +42,7 @@ go_test(
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",

View File

@@ -6,6 +6,7 @@ import (
gwpb "github.com/grpc-ecosystem/grpc-gateway/v2/proto/gateway"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
@@ -79,18 +80,26 @@ func (s *Server) StreamEvents(
}
// Subscribe to event feeds from information received in the beacon node runtime.
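// Block events now arrive on the dedicated block feed rather than the state feed; see handleBlockEvents below.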
blockChan := make(chan *feed.Event, 1)
blockSub := s.BlockNotifier.BlockFeed().Subscribe(blockChan)
opsChan := make(chan *feed.Event, 1)
opsSub := s.OperationNotifier.OperationFeed().Subscribe(opsChan)
stateChan := make(chan *feed.Event, 1)
stateSub := s.StateNotifier.StateFeed().Subscribe(stateChan)
defer blockSub.Unsubscribe()
defer opsSub.Unsubscribe()
defer stateSub.Unsubscribe()
// Handle each event received and context cancelation.
for {
select {
case event := <-blockChan:
if err := handleBlockEvents(stream, requestedTopics, event); err != nil {
return status.Errorf(codes.Internal, "Could not handle block event: %v", err)
}
case event := <-opsChan:
if err := handleBlockOperationEvents(stream, requestedTopics, event); err != nil {
return status.Errorf(codes.Internal, "Could not handle block operations event: %v", err)
@@ -107,6 +116,37 @@ func (s *Server) StreamEvents(
}
}
func handleBlockEvents(
stream ethpbservice.Events_StreamEventsServer, requestedTopics map[string]bool, event *feed.Event,
) error {
switch event.Type {
case blockfeed.ReceivedBlock:
if _, ok := requestedTopics[BlockTopic]; !ok {
return nil
}
blkData, ok := event.Data.(*blockfeed.ReceivedBlockData)
if !ok {
return nil
}
v1Data, err := migration.BlockIfaceToV1BlockHeader(blkData.SignedBlock)
if err != nil {
return err
}
item, err := v1Data.Message.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash tree root block")
}
eventBlock := &ethpb.EventBlock{
Slot: v1Data.Message.Slot,
Block: item[:],
ExecutionOptimistic: blkData.IsOptimistic,
}
return streamData(stream, BlockTopic, eventBlock)
default:
return nil
}
}
func handleBlockOperationEvents(
stream ethpbservice.Events_StreamEventsServer, requestedTopics map[string]bool, event *feed.Event,
) error {
@@ -212,28 +252,6 @@ func (s *Server) handleStateEvents(
return nil
}
return streamData(stream, ChainReorgTopic, reorg)
case statefeed.BlockProcessed:
if _, ok := requestedTopics[BlockTopic]; !ok {
return nil
}
blkData, ok := event.Data.(*statefeed.BlockProcessedData)
if !ok {
return nil
}
v1Data, err := migration.BlockIfaceToV1BlockHeader(blkData.SignedBlock)
if err != nil {
return err
}
item, err := v1Data.Message.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash tree root block")
}
eventBlock := &ethpb.EventBlock{
Slot: blkData.Slot,
Block: item[:],
ExecutionOptimistic: blkData.Optimistic,
}
return streamData(stream, BlockTopic, eventBlock)
default:
return nil
}
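For context, these feeds follow a plain publish/subscribe pattern over Go channels. Below is a self-contained sketch of the select-and-dispatch shape used by StreamEvents, with hypothetical stand-ins (event, receivedBlock) for feed.Event and blockfeed.ReceivedBlock:
package main
import "fmt"
// event mirrors the shape of feed.Event: a type tag plus an opaque payload.
type event struct {
	Type int
	Data interface{}
}
const receivedBlock = 1
func main() {
	// A feed fans events out to subscriber channels, one channel per topic group.
	blockChan := make(chan event, 1)
	// Publisher side: the blockchain service emits a ReceivedBlock event.
	go func() {
		blockChan <- event{Type: receivedBlock, Data: "signed block"}
	}()
	// Subscriber side: dispatch on the event type, as handleBlockEvents does above.
	ev := <-blockChan
	switch ev.Type {
	case receivedBlock:
		fmt.Println("stream block event:", ev.Data)
	default:
		// Events for unrequested or unknown topics are ignored.
	}
}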

View File

@@ -13,6 +13,7 @@ import (
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
b "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
@@ -50,6 +51,55 @@ func TestStreamEvents_Preconditions(t *testing.T) {
})
}
func TestStreamEvents_BlockEvents(t *testing.T) {
t.Run(BlockTopic, func(t *testing.T) {
ctx := context.Background()
srv, ctrl, mockStream := setupServer(ctx, t)
defer ctrl.Finish()
blk := util.HydrateSignedBeaconBlock(&eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Slot: 8,
},
})
bodyRoot, err := blk.Block.Body.HashTreeRoot()
require.NoError(t, err)
wantedHeader := util.HydrateBeaconHeader(&eth.BeaconBlockHeader{
Slot: 8,
BodyRoot: bodyRoot[:],
})
wantedBlockRoot, err := wantedHeader.HashTreeRoot()
require.NoError(t, err)
genericResponse, err := anypb.New(&ethpb.EventBlock{
Slot: 8,
Block: wantedBlockRoot[:],
ExecutionOptimistic: true,
})
require.NoError(t, err)
wantedMessage := &gateway.EventSource{
Event: BlockTopic,
Data: genericResponse,
}
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
assertFeedSendAndReceive(ctx, &assertFeedArgs{
t: t,
srv: srv,
topics: []string{BlockTopic},
stream: mockStream,
shouldReceive: wantedMessage,
itemToSend: &feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{
SignedBlock: wsb,
IsOptimistic: true,
},
},
feed: srv.BlockNotifier.BlockFeed(),
})
})
}
func TestStreamEvents_OperationsEvents(t *testing.T) {
t.Run("attestation_unaggregated", func(t *testing.T) {
ctx := context.Background()
@@ -538,53 +588,6 @@ func TestStreamEvents_StateEvents(t *testing.T) {
feed: srv.StateNotifier.StateFeed(),
})
})
t.Run(BlockTopic, func(t *testing.T) {
ctx := context.Background()
srv, ctrl, mockStream := setupServer(ctx, t)
defer ctrl.Finish()
blk := util.HydrateSignedBeaconBlock(&eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Slot: 8,
},
})
bodyRoot, err := blk.Block.Body.HashTreeRoot()
require.NoError(t, err)
wantedHeader := util.HydrateBeaconHeader(&eth.BeaconBlockHeader{
Slot: 8,
BodyRoot: bodyRoot[:],
})
wantedBlockRoot, err := wantedHeader.HashTreeRoot()
require.NoError(t, err)
genericResponse, err := anypb.New(&ethpb.EventBlock{
Slot: 8,
Block: wantedBlockRoot[:],
ExecutionOptimistic: true,
})
require.NoError(t, err)
wantedMessage := &gateway.EventSource{
Event: BlockTopic,
Data: genericResponse,
}
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
assertFeedSendAndReceive(ctx, &assertFeedArgs{
t: t,
srv: srv,
topics: []string{BlockTopic},
stream: mockStream,
shouldReceive: wantedMessage,
itemToSend: &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: 8,
SignedBlock: wsb,
Optimistic: true,
},
},
feed: srv.StateNotifier.StateFeed(),
})
})
}
func TestStreamEvents_CommaSeparatedTopics(t *testing.T) {
@@ -648,6 +651,7 @@ func TestStreamEvents_CommaSeparatedTopics(t *testing.T) {
func setupServer(ctx context.Context, t testing.TB) (*Server, *gomock.Controller, *mock.MockEvents_StreamEventsServer) {
srv := &Server{
BlockNotifier: &mockChain.MockBlockNotifier{},
StateNotifier: &mockChain.MockStateNotifier{},
OperationNotifier: &mockChain.MockOperationNotifier{},
Ctx: ctx,

View File

@@ -7,6 +7,7 @@ import (
"context"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
)
@@ -16,6 +17,7 @@ import (
type Server struct {
Ctx context.Context
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
OperationNotifier opfeed.Notifier
HeadFetcher blockchain.HeadFetcher
ChainInfoFetcher blockchain.ChainInfoFetcher

Some files were not shown because too many files have changed in this diff