Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: deneb-test...prestonvan (51 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 12d703d7ac | |
| | ff3d2bc69f | |
| | dd403f830c | |
| | e9c8e84618 | |
| | 9c250dd4c2 | |
| | f97db3b738 | |
| | 43378ae8d5 | |
| | 2217b45e16 | |
| | 405cd6ed86 | |
| | ba9bbdd6b9 | |
| | 945c76132c | |
| | 056d3ff0cc | |
| | d4fd3c34de | |
| | 4ac4d00377 | |
| | ec2fda7ad9 | |
| | 292f4de099 | |
| | 145a485b75 | |
| | 7e474b7a30 | |
| | af0ee9bd16 | |
| | 456ba7c498 | |
| | cd8847c53b | |
| | 1894a124ea | |
| | 490bd22b97 | |
| | f23e720a16 | |
| | 402799a584 | |
| | 0266609bf6 | |
| | 58df1f1ba5 | |
| | cec32cb996 | |
| | d56a530c86 | |
| | 0a68d2d302 | |
| | 25ebd335cb | |
| | 6a0db800b3 | |
| | 085f90a4f1 | |
| | ecb26e9885 | |
| | 7eb0091936 | |
| | f8408b9ec1 | |
| | d6d5139d68 | |
| | 2e0e29ecbe | |
| | e9b5e52ee2 | |
| | 2a4441762e | |
| | 401fccc723 | |
| | c80f88fc07 | |
| | faa0a2c4cf | |
| | c45cb7e188 | |
| | 0b10263dd5 | |
| | 3bc808352f | |
| | d0c740f477 | |
| | cbe67f1970 | |
| | 5bb482e5d6 | |
| | 83494c5b23 | |
| | a10ffa9c0e | |
.bazelrc: 4 changes

```diff
@@ -43,3 +43,7 @@ build:debug -s
 
 # Set bazel gotag
 build --define gotags=bazel
+
+# Abseil requires c++14 or greater.
+build --cxxopt=-std=c++20
+build --host_cxxopt=-std=c++20
```
```diff
@@ -3,7 +3,6 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
 load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
 load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
 load("@io_bazel_rules_go//go:def.bzl", "nogo")
 load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
 load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
 
 prefix = "github.com/prysmaticlabs/prysm"
```
```diff
@@ -35,3 +35,4 @@ Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.ne
 ## Legal Disclaimer
 
 [Terms of Use](/TERMS_OF_SERVICE.md)
 
```
WORKSPACE: 20 changes

```diff
@@ -16,14 +16,12 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
 
 rules_pkg_dependencies()
 
-HERMETIC_CC_TOOLCHAIN_VERSION = "v2.0.0"
-
 http_archive(
     name = "hermetic_cc_toolchain",
-    sha256 = "57f03a6c29793e8add7bd64186fc8066d23b5ffd06fe9cc6b0b8c499914d3a65",
+    sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
+    strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
     urls = [
-        "https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
-        "https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
+        "https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz",  # 2023-06-28
     ],
 )

@@ -174,7 +172,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()
 
 go_register_toolchains(
-    go_version = "1.20.3",
+    go_version = "1.20.6",
     nogo = "@//:nogo",
 )

@@ -321,11 +319,13 @@ http_archive(
     url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
 )
 
-git_repository(
+http_archive(
     name = "com_google_protobuf",
-    commit = "436bd7880e458532901c58f4d9d1ea23fa7edd52",
-    remote = "https://github.com/protocolbuffers/protobuf",
-    shallow_since = "1617835118 -0700",
+    sha256 = "4e176116949be52b0408dfd24f8925d1eb674a781ae242a75296b17a1c721395",
+    strip_prefix = "protobuf-23.3",
+    urls = [
+        "https://github.com/protocolbuffers/protobuf/archive/v23.3.tar.gz",
+    ],
 )
 
 # Group the sources of the library so that CMake rule have access to it
```
api/BUILD.bazel: 8 changes (new file)

```diff
@@ -0,0 +1,8 @@
+load("@prysm//tools/go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["headers.go"],
+    importpath = "github.com/prysmaticlabs/prysm/v4/api",
+    visibility = ["//visibility:public"],
+)
```
```diff
@@ -128,6 +128,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
 	wst, err := util.NewBeaconState()
 	require.NoError(t, err)
 	fork, err := forkForEpoch(cfg, epoch)
 	require.NoError(t, err)
+	require.NoError(t, wst.SetFork(fork))
 
 	// set up checkpoint block

@@ -226,6 +227,7 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
 	wst, err := util.NewBeaconState()
 	require.NoError(t, err)
 	fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
 	require.NoError(t, err)
+	require.NoError(t, wst.SetFork(fork))
 
 	// set up checkpoint block

@@ -399,6 +401,7 @@ func TestDownloadFinalizedData(t *testing.T) {
 	st, err := util.NewBeaconState()
 	require.NoError(t, err)
 	fork, err := forkForEpoch(cfg, epoch)
 	require.NoError(t, err)
+	require.NoError(t, st.SetFork(fork))
 	require.NoError(t, st.SetSlot(slot))
 
```
```diff
@@ -13,6 +13,7 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware",
     visibility = ["//visibility:public"],
     deps = [
+        "//api:go_default_library",
         "//api/grpc:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",

@@ -32,6 +33,7 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
+        "//api:go_default_library",
         "//api/grpc:go_default_library",
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
```
```diff
@@ -10,6 +10,7 @@ import (
 	"strings"
 
 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v4/api"
 	"github.com/prysmaticlabs/prysm/v4/api/grpc"
 )

@@ -116,7 +117,11 @@ func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []
 	// Something went wrong, but the request completed, meaning we can write headers and the error message.
 	for h, vs := range resp.Header {
 		for _, v := range vs {
-			w.Header().Set(h, v)
+			if strings.HasSuffix(h, api.VersionHeader) {
+				w.Header().Set(api.VersionHeader, v)
+			} else {
+				w.Header().Set(h, v)
+			}
 		}
 	}
 	// Handle gRPC timeout.
```
```diff
@@ -187,9 +192,11 @@ func WriteMiddlewareResponseHeadersAndBody(grpcResp *http.Response, responseJson
 	var statusCodeHeader string
 	for h, vs := range grpcResp.Header {
 		// We don't want to expose any gRPC metadata in the HTTP response, so we skip forwarding metadata headers.
-		if strings.HasPrefix(h, "Grpc-Metadata") {
-			if h == "Grpc-Metadata-"+grpc.HttpCodeMetadataKey {
+		if strings.HasPrefix(h, grpc.MetadataPrefix) {
+			if h == grpc.WithPrefix(grpc.HttpCodeMetadataKey) {
 				statusCodeHeader = vs[0]
+			} else if strings.HasSuffix(h, api.VersionHeader) {
+				w.Header().Set(api.VersionHeader, vs[0])
 			}
 		} else {
 			for _, v := range vs {

@@ -223,7 +230,7 @@ func WriteError(w http.ResponseWriter, errJson ErrorJson, responseHeader http.He
 	// Include custom error in the error JSON.
 	hasCustomError := false
 	if responseHeader != nil {
-		customError, ok := responseHeader["Grpc-Metadata-"+grpc.CustomErrorMetadataKey]
+		customError, ok := responseHeader[grpc.WithPrefix(grpc.CustomErrorMetadataKey)]
 		if ok {
 			hasCustomError = true
 			// Assume header has only one value and read the 0 index.
```
```diff
@@ -8,6 +8,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/prysmaticlabs/prysm/v4/api"
 	"github.com/prysmaticlabs/prysm/v4/api/grpc"
 	"github.com/prysmaticlabs/prysm/v4/testing/assert"
 	"github.com/prysmaticlabs/prysm/v4/testing/require"

@@ -280,7 +281,8 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
 		response := &http.Response{
 			Header: http.Header{
 				"Foo": []string{"foo"},
-				"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"204"},
+				grpc.WithPrefix(grpc.HttpCodeMetadataKey): []string{"204"},
+				grpc.WithPrefix(api.VersionHeader):        []string{"capella"},
 			},
 		}
 		container := defaultResponseContainer()

@@ -299,6 +301,9 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
 		require.Equal(t, true, ok, "header not found")
 		require.Equal(t, 1, len(v), "wrong number of header values")
 		assert.Equal(t, "224", v[0])
+		v, ok = writer.Header()["Eth-Consensus-Version"]
+		require.Equal(t, true, ok, "header not found")
+		assert.Equal(t, "capella", v[0])
 		assert.Equal(t, 204, writer.Code)
 		assert.DeepEqual(t, responseJson, writer.Body.Bytes())
 	})
```
```diff
@@ -320,11 +325,12 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
 
 	t.Run("GET_invalid_status_code", func(t *testing.T) {
 		response := &http.Response{
-			Header: http.Header{},
+			Header: http.Header{"Grpc-Metadata-Eth-Consensus-Version": []string{"capella"}},
 		}
 
 		// Set invalid status code.
-		response.Header["Grpc-Metadata-"+grpc.HttpCodeMetadataKey] = []string{"invalid"}
+		response.Header[grpc.WithPrefix(grpc.HttpCodeMetadataKey)] = []string{"invalid"}
+		response.Header[grpc.WithPrefix(api.VersionHeader)] = []string{"capella"}
 
 		container := defaultResponseContainer()
 		responseJson, err := json.Marshal(container)

@@ -390,7 +396,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
 func TestWriteError(t *testing.T) {
 	t.Run("ok", func(t *testing.T) {
 		responseHeader := http.Header{
-			"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"{\"CustomField\":\"bar\"}"},
+			grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"{\"CustomField\":\"bar\"}"},
 		}
 		errJson := &testErrorJson{
 			Message: "foo",

@@ -420,7 +426,7 @@ func TestWriteError(t *testing.T) {
 	logHook := test.NewGlobal()
 
 	responseHeader := http.Header{
-		"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"invalid"},
+		grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"invalid"},
 	}
 
 	WriteError(httptest.NewRecorder(), &testErrorJson{}, responseHeader)
```
```diff
@@ -6,3 +6,11 @@ const CustomErrorMetadataKey = "Custom-Error"
 
 // HttpCodeMetadataKey is the key to use when setting custom HTTP status codes in gRPC metadata.
 const HttpCodeMetadataKey = "X-Http-Code"
+
+// MetadataPrefix is the prefix for grpc headers on metadata
+const MetadataPrefix = "Grpc-Metadata"
+
+// WithPrefix creates a new string with grpc metadata prefix
+func WithPrefix(value string) string {
+	return MetadataPrefix + "-" + value
+}
```
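The helper replaces the hand-built `"Grpc-Metadata-"` strings that appeared throughout the middleware above. A minimal, self-contained sketch of what it produces (the constants are copied from the diff; `main` is only illustrative):

```go
package main

import "fmt"

const MetadataPrefix = "Grpc-Metadata"

// WithPrefix mirrors the helper added in the diff above.
func WithPrefix(value string) string {
	return MetadataPrefix + "-" + value
}

func main() {
	// The middleware matches forwarded gRPC metadata headers by these exact keys.
	fmt.Println(WithPrefix("X-Http-Code"))           // Grpc-Metadata-X-Http-Code
	fmt.Println(WithPrefix("Eth-Consensus-Version")) // Grpc-Metadata-Eth-Consensus-Version
}
```

Centralizing the prefix means the matching logic in WriteMiddlewareResponseHeadersAndBody and the literals in the tests can no longer drift apart.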
api/headers.go: 7 changes (new file)

```diff
@@ -0,0 +1,7 @@
+package api
+
+const (
+	VersionHeader        = "Eth-Consensus-Version"
+	JsonMediaType        = "application/json"
+	OctetStreamMediaType = "application/octet-stream"
+)
```
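The constants above are plain strings; how handlers consume them is not part of this diff, but a typical content-negotiation shape would look roughly like the sketch below. Only the three constants come from the commit; the handler, route, and fork value are hypothetical.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

const (
	VersionHeader        = "Eth-Consensus-Version"
	JsonMediaType        = "application/json"
	OctetStreamMediaType = "application/octet-stream"
)

// handler picks the wire encoding from the Accept header and tags SSZ
// responses with the fork version, as the new constants suggest.
func handler(w http.ResponseWriter, r *http.Request) {
	if r.Header.Get("Accept") == OctetStreamMediaType {
		w.Header().Set("Content-Type", OctetStreamMediaType)
		w.Header().Set(VersionHeader, "capella")
		_, _ = w.Write([]byte{0xde, 0xad}) // stand-in for SSZ bytes
		return
	}
	w.Header().Set("Content-Type", JsonMediaType)
	fmt.Fprint(w, `{"version":"capella"}`)
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/state", nil)
	req.Header.Set("Accept", OctetStreamMediaType)
	rec := httptest.NewRecorder()
	handler(rec, req)
	fmt.Println(rec.Header().Get(VersionHeader)) // capella
}
```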
```diff
@@ -387,7 +387,7 @@ func (s *Service) InForkchoice(root [32]byte) bool {
 	return s.cfg.ForkChoiceStore.HasNode(root)
 }
 
-// IsViableForkCheckpoint returns whether the given checkpoint is a checkpoint in any
+// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
 // chain known to forkchoice
 func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
 	s.cfg.ForkChoiceStore.RLock()
```
```diff
@@ -499,6 +499,13 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
 	return ar[:], nil
 }
 
+// SetOptimisticToInvalid wraps the corresponding method in forkchoice
+func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
+	s.cfg.ForkChoiceStore.Lock()
+	defer s.cfg.ForkChoiceStore.Unlock()
+	return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
+}
+
 // SetGenesisTime sets the genesis time of beacon chain.
 func (s *Service) SetGenesisTime(t time.Time) {
 	s.genesisTime = t
```
```diff
@@ -41,13 +41,15 @@ var (
 type invalidBlock struct {
 	invalidAncestorRoots [][32]byte
 	error
-	root [32]byte
+	root          [32]byte
+	lastValidHash [32]byte
 }
 
 type invalidBlockError interface {
 	Error() string
 	InvalidAncestorRoots() [][32]byte
 	BlockRoot() [32]byte
+	LastValidHash() [32]byte
 }
 
 // BlockRoot returns the invalid block root.

@@ -55,6 +57,11 @@ func (e invalidBlock) BlockRoot() [32]byte {
 	return e.root
 }
 
+// LastValidHash returns the last valid hash root.
+func (e invalidBlock) LastValidHash() [32]byte {
+	return e.lastValidHash
+}
+
 // InvalidAncestorRoots returns an optional list of invalid roots of the invalid block which leads up last valid root.
 func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
 	return e.invalidAncestorRoots

@@ -72,6 +79,19 @@ func IsInvalidBlock(e error) bool {
 	return true
 }
 
+// InvalidBlockLVH returns the invalid block last valid hash root. If the error
+// doesn't have a last valid hash, [32]byte{} is returned.
+func InvalidBlockLVH(e error) [32]byte {
+	if e == nil {
+		return [32]byte{}
+	}
+	d, ok := e.(invalidBlockError)
+	if !ok {
+		return [32]byte{}
+	}
+	return d.LastValidHash()
+}
+
 // InvalidBlockRoot returns the invalid block root. If the error
 // doesn't have an invalid blockroot. [32]byte{} is returned.
 func InvalidBlockRoot(e error) [32]byte {
```
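invalidBlock leans on Go's embedded-error idiom: the value travels up the stack as an ordinary error, and helpers like IsInvalidBlock and the new InvalidBlockLVH recover the structured payload with a type assertion. A self-contained toy version of the same pattern (simplified types, not the package's real definitions):

```go
package main

import (
	"errors"
	"fmt"
)

// invalidBlock embeds error, so it satisfies the error interface
// while carrying extra data about the failure.
type invalidBlock struct {
	error
	lastValidHash [32]byte
}

// lastValidHashFrom recovers the payload with a type assertion,
// falling back to the zero value for unrelated errors.
func lastValidHashFrom(e error) [32]byte {
	d, ok := e.(invalidBlock)
	if !ok {
		return [32]byte{}
	}
	return d.lastValidHash
}

func main() {
	err := error(invalidBlock{error: errors.New("invalid payload"), lastValidHash: [32]byte{'a'}})
	fmt.Println(err)                          // prints: invalid payload
	fmt.Println(lastValidHashFrom(err)[0])    // prints: 97 ('a')
	fmt.Println(lastValidHashFrom(errors.New("other"))[0]) // prints: 0
}
```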
```diff
@@ -182,21 +182,24 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
 
 // notifyNewPayload signals execution engine on a new payload.
 // It returns true if the EL has returned VALID for the block
-func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
-	postStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
+func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
+	preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
 	ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
 	defer span.End()
 
 	// Execution payload is only supported in Bellatrix and beyond. Pre
 	// merge blocks are never optimistic
-	if blocks.IsPreBellatrixVersion(postStateVersion) {
+	if blk == nil {
+		return false, errors.New("signed beacon block can't be nil")
+	}
+	if preStateVersion < version.Bellatrix {
 		return true, nil
 	}
-	if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
-		return false, err
-	}
 	body := blk.Block().Body()
-	enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
+	enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
 	if err != nil {
 		return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
 	}

@@ -220,35 +223,37 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
 		}).Info("Called new payload with optimistic block")
 		return false, nil
 	case execution.ErrInvalidPayloadStatus:
-		newPayloadInvalidNodeCount.Inc()
-		root, err := blk.Block().HashTreeRoot()
-		if err != nil {
-			return false, err
-		}
-		invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
-		if err != nil {
-			return false, err
-		}
-		if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
-			return false, err
-		}
-		log.WithFields(logrus.Fields{
-			"slot":                 blk.Block().Slot(),
-			"blockRoot":            fmt.Sprintf("%#x", root),
-			"invalidChildrenCount": len(invalidRoots),
-		}).Warn("Pruned invalid blocks")
+		lvh := bytesutil.ToBytes32(lastValidHash)
 		return false, invalidBlock{
-			invalidAncestorRoots: invalidRoots,
-			error:                ErrInvalidPayload,
+			error:         ErrInvalidPayload,
+			lastValidHash: lvh,
 		}
 	case execution.ErrInvalidBlockHashPayloadStatus:
 		newPayloadInvalidNodeCount.Inc()
 		return false, ErrInvalidBlockHashPayloadStatus
 	default:
 		return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
 	}
 }
 
+// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
+func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
+	newPayloadInvalidNodeCount.Inc()
+	invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
+	if err != nil {
+		return err
+	}
+	if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
+		return err
+	}
+	log.WithFields(logrus.Fields{
+		"blockRoot":            fmt.Sprintf("%#x", root),
+		"invalidChildrenCount": len(invalidRoots),
+	}).Warn("Pruned invalid blocks")
+	return invalidBlock{
+		invalidAncestorRoots: invalidRoots,
+		error:                ErrInvalidPayload,
+		lastValidHash:        lvh,
+	}
+}
+
 // getPayloadAttributes returns the payload attributes for the given state and slot.
 // The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
 func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
```
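Read together, these hunks split responsibilities: notifyNewPayload now only classifies the engine's verdict, returning an invalidBlock error that carries the last valid hash, while the pruning side effects live in pruneInvalidBlock and are reached through handleInvalidExecutionError (added further below). A compact toy model of that control flow, with stub types standing in for the real service:

```go
package main

import (
	"errors"
	"fmt"
)

var errInvalidPayload = errors.New("received an INVALID payload from execution engine")

type invalidBlock struct {
	error
	lastValidHash [32]byte
}

// notifyNewPayload now only classifies the engine's answer.
func notifyNewPayload(valid bool, lvh [32]byte) (bool, error) {
	if valid {
		return true, nil
	}
	return false, invalidBlock{error: errInvalidPayload, lastValidHash: lvh}
}

// handleInvalidExecutionError decides whether pruning is needed, as in the diff.
func handleInvalidExecutionError(err error, root, parentRoot [32]byte) error {
	var ib invalidBlock
	if errors.As(err, &ib) && ib.lastValidHash != ([32]byte{}) {
		// pruneInvalidBlock would walk forkchoice here and drop the invalid chain.
		fmt.Printf("pruning %x back to lvh %x\n", root[:2], ib.lastValidHash[:2])
		return ib
	}
	return err
}

func main() {
	_, err := notifyNewPayload(false, [32]byte{'a'})
	fmt.Println(handleInvalidExecutionError(err, [32]byte{'d'}, [32]byte{'c'}))
}
```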
```diff
@@ -525,11 +525,13 @@ func Test_NotifyNewPayload(t *testing.T) {
 		{
 			name:      "phase 0 post state",
 			postState: phase0State,
+			blk:       altairBlk, // same as phase 0 for this test
+			isValidPayload: true,
 		},
 		{
 			name:      "altair post state",
 			postState: altairState,
 			blk:       altairBlk,
 			isValidPayload: true,
 		},
 		{
```
```diff
@@ -743,6 +745,37 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
 	require.Equal(t, true, validated)
 }
 
+func Test_reportInvalidBlock(t *testing.T) {
+	params.SetupTestConfigCleanup(t)
+	params.OverrideBeaconConfig(params.MainnetConfig())
+	service, tr := minimalTestService(t)
+	ctx, _, fcs := tr.ctx, tr.db, tr.fcs
+	jcp := &ethpb.Checkpoint{}
+	st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'A'}, [32]byte{}, [32]byte{'a'}, jcp, jcp)
+	require.NoError(t, err)
+	require.NoError(t, fcs.InsertNode(ctx, st, root))
+	st, root, err = prepareForkchoiceState(ctx, 1, [32]byte{'B'}, [32]byte{'A'}, [32]byte{'b'}, jcp, jcp)
+	require.NoError(t, err)
+	require.NoError(t, fcs.InsertNode(ctx, st, root))
+	st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'C'}, [32]byte{'B'}, [32]byte{'c'}, jcp, jcp)
+	require.NoError(t, err)
+	require.NoError(t, fcs.InsertNode(ctx, st, root))
+
+	st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'d'}, jcp, jcp)
+	require.NoError(t, err)
+	require.NoError(t, fcs.InsertNode(ctx, st, root))
+
+	require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
+	err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
+	require.Equal(t, IsInvalidBlock(err), true)
+	require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
+	invalidRoots := InvalidAncestorRoots(err)
+	require.Equal(t, 3, len(invalidRoots))
+	require.Equal(t, [32]byte{'D'}, invalidRoots[0])
+	require.Equal(t, [32]byte{'C'}, invalidRoots[1])
+	require.Equal(t, [32]byte{'B'}, invalidRoots[2])
+}
+
 func Test_GetPayloadAttribute(t *testing.T) {
 	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
 	ctx := tr.ctx
```
```diff
@@ -172,11 +172,15 @@ var (
 	})
 	onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
 		Name: "on_block_processing_milliseconds",
-		Help: "Total time in milliseconds to complete a call to onBlock()",
+		Help: "Total time in milliseconds to complete a call to postBlockProcess()",
 	})
 	stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
 		Name: "state_transition_processing_milliseconds",
-		Help: "Total time to call a state transition in onBlock()",
+		Help: "Total time to call a state transition in validateStateTransition()",
 	})
+	chainServiceProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
+		Name: "chain_service_processing_milliseconds",
+		Help: "Total time to call a chain service in ReceiveBlock()",
+	})
 	processAttsElapsedTime = promauto.NewHistogram(
 		prometheus.HistogramOpts{
```
```diff
@@ -246,40 +250,45 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
 	slashingBalance := uint64(0)
 	slashingEffectiveBalance := uint64(0)
 
-	for i, validator := range postState.Validators() {
+	for i := 0; i < postState.NumValidators(); i++ {
+		validator, err := postState.ValidatorAtIndexReadOnly(primitives.ValidatorIndex(i))
+		if err != nil {
+			log.WithError(err).Error("Could not load validator")
+			continue
+		}
 		bal, err := postState.BalanceAtIndex(primitives.ValidatorIndex(i))
 		if err != nil {
 			log.WithError(err).Error("Could not load validator balance")
 			continue
 		}
-		if validator.Slashed {
-			if currentEpoch < validator.ExitEpoch {
+		if validator.Slashed() {
+			if currentEpoch < validator.ExitEpoch() {
 				slashingInstances++
 				slashingBalance += bal
-				slashingEffectiveBalance += validator.EffectiveBalance
+				slashingEffectiveBalance += validator.EffectiveBalance()
 			} else {
 				slashedInstances++
 			}
 			continue
 		}
-		if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
-			if currentEpoch < validator.ExitEpoch {
+		if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
+			if currentEpoch < validator.ExitEpoch() {
 				exitingInstances++
 				exitingBalance += bal
-				exitingEffectiveBalance += validator.EffectiveBalance
+				exitingEffectiveBalance += validator.EffectiveBalance()
 			} else {
 				exitedInstances++
 			}
 			continue
 		}
-		if currentEpoch < validator.ActivationEpoch {
+		if currentEpoch < validator.ActivationEpoch() {
 			pendingInstances++
 			pendingBalance += bal
 			continue
 		}
 		activeInstances++
 		activeBalance += bal
-		activeEffectiveBalance += validator.EffectiveBalance
+		activeEffectiveBalance += validator.EffectiveBalance()
 	}
 	activeInstances += exitingInstances + slashingInstances
 	activeBalance += exitingBalance + slashingBalance
```
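The rewritten loop swaps value iteration for ValidatorAtIndexReadOnly, so each pass reads fields through accessor methods instead of copying a full validator record out of the state. A toy model of the accessor pattern (types invented here, not Prysm's state API):

```go
package main

import "fmt"

// Validator is the full record; copying it on every iteration is the cost being avoided.
type Validator struct {
	Slashed          bool
	ExitEpoch        uint64
	EffectiveBalance uint64
}

// ReadOnlyValidator exposes getters over a pointer instead of a value copy.
type ReadOnlyValidator struct{ v *Validator }

func (r ReadOnlyValidator) Slashed() bool            { return r.v.Slashed }
func (r ReadOnlyValidator) ExitEpoch() uint64        { return r.v.ExitEpoch }
func (r ReadOnlyValidator) EffectiveBalance() uint64 { return r.v.EffectiveBalance }

type state struct{ vals []Validator }

func (s *state) NumValidators() int { return len(s.vals) }
func (s *state) ValidatorAtIndexReadOnly(i int) ReadOnlyValidator {
	return ReadOnlyValidator{v: &s.vals[i]}
}

func main() {
	st := &state{vals: []Validator{{EffectiveBalance: 32_000_000_000}}}
	total := uint64(0)
	for i := 0; i < st.NumValidators(); i++ {
		total += st.ValidatorAtIndexReadOnly(i).EffectiveBalance()
	}
	fmt.Println(total)
}
```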
```diff
@@ -22,7 +22,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/crypto/bls"
 	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
-	ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
 	"github.com/prysmaticlabs/prysm/v4/runtime/version"
```
```diff
@@ -40,59 +39,11 @@ const depositDeadline = 20 * time.Second
 // This defines size of the upper bound for initial sync block cache.
 var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
 
-// onBlock is called when a gossip block is received. It runs regular state transition on the block.
-// The block's signing root should be computed before calling this method to avoid redundant
-// computation in this method and methods it calls into.
-//
-// Spec pseudocode definition:
-//
-// def on_block(store: Store, signed_block: ReadOnlySignedBeaconBlock) -> None:
-//     block = signed_block.message
-//     # Parent block must be known
-//     assert block.parent_root in store.block_states
-//     # Make a copy of the state to avoid mutability issues
-//     pre_state = copy(store.block_states[block.parent_root])
-//     # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
-//     assert get_current_slot(store) >= block.slot
-//
-//     # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
-//     finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
-//     assert block.slot > finalized_slot
-//     # Check block is a descendant of the finalized block at the checkpoint finalized slot
-//     assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
-//
-//     # Check the block is valid and compute the post-state
-//     state = pre_state.copy()
-//     state_transition(state, signed_block, True)
-//     # Add new block to the store
-//     store.blocks[hash_tree_root(block)] = block
-//     # Add new state for this block to the store
-//     store.block_states[hash_tree_root(block)] = state
-//
-//     # Update justified checkpoint
-//     if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
-//         if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
-//             store.best_justified_checkpoint = state.current_justified_checkpoint
-//         if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
-//             store.justified_checkpoint = state.current_justified_checkpoint
-//
-//     # Update finalized checkpoint
-//     if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
-//         store.finalized_checkpoint = state.finalized_checkpoint
-//
-//     # Potentially update justified if different from store
-//     if store.justified_checkpoint != state.current_justified_checkpoint:
-//         # Update justified if new justified is later than store justified
-//         if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
-//             store.justified_checkpoint = state.current_justified_checkpoint
-//             return
-//
-//         # Update justified if store justified is not in chain with finalized checkpoint
-//         finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
-//         ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
-//         if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
-//             store.justified_checkpoint = state.current_justified_checkpoint
-func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
+// postBlockProcess is called when a gossip block is received. This function performs
+// several duties most importantly informing the engine if head was updated,
+// saving the new head information to the blockchain package and
+// handling attestations, slashings and similar included in the block.
+func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, postState state.BeaconState, isValidPayload bool) error {
 	ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
 	defer span.End()
 	if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
```
```diff
@@ -101,50 +52,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
 	startTime := time.Now()
 	b := signed.Block()
 
-	preState, err := s.getBlockPreState(ctx, b)
-	if err != nil {
-		return err
-	}
-
-	// Verify that the parent block is in forkchoice
-	if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
-		return ErrNotDescendantOfFinalized
-	}
-
-	// Save current justified and finalized epochs for future use.
-	currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
-	currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
-	preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
-	preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
-
-	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
-	if err != nil {
-		return err
-	}
-	stateTransitionStartTime := time.Now()
-	postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
-	if err != nil {
-		return invalidBlock{error: err}
-	}
-	stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
-
-	postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
-	if err != nil {
-		return err
-	}
-	isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
-	if err != nil {
-		return errors.Wrap(err, "could not validate new payload")
-	}
-	if signed.Version() < version.Capella && isValidPayload {
-		if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
-			return err
-		}
-	}
-
 	if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
 		return err
 	}
 	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, postState, blockRoot); err != nil {
 		return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
 	}
```
```diff
@@ -159,33 +66,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
 		}
 	}
 
-	// If slasher is configured, forward the attestations in the block via
-	// an event feed for processing.
-	if features.Get().EnableSlasher {
-		// Feed the indexed attestation to slasher if enabled. This action
-		// is done in the background to avoid adding more load to this critical code path.
-		go func() {
-			// Using a different context to prevent timeouts as this operation can be expensive
-			// and we want to avoid affecting the critical code path.
-			ctx := context.TODO()
-			for _, att := range signed.Block().Body().Attestations() {
-				committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
-				if err != nil {
-					log.WithError(err).Error("Could not get attestation committee")
-					tracing.AnnotateError(span, err)
-					return
-				}
-				indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
-				if err != nil {
-					log.WithError(err).Error("Could not convert to indexed attestation")
-					tracing.AnnotateError(span, err)
-					return
-				}
-				s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
-			}
-		}()
-	}
 	justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
 	start := time.Now()
 	headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
 	if err != nil {
```
```diff
@@ -227,6 +107,12 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
 		return err
 	}
 
+	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blockRoot)
+	if err != nil {
+		log.WithError(err).Error("Could not check if block is optimistic")
+		optimistic = true
+	}
+
 	// Send notification of the processed block to the state feed.
 	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
 		Type: statefeed.BlockProcessed,
```
```diff
@@ -235,49 +121,10 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
 			BlockRoot:   blockRoot,
 			SignedBlock: signed,
 			Verified:    true,
+			Optimistic:  optimistic,
 		},
 	})
 
-	// Save justified check point to db.
-	postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
-	if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
-		if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
-			Epoch: justified.Epoch, Root: justified.Root[:],
-		}); err != nil {
-			return err
-		}
-	}
-
-	// Save finalized check point to db and more.
-	postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
-	finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
-	if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
-		if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
-			return err
-		}
-		go func() {
-			// Send an event regarding the new finalized checkpoint over a common event feed.
-			stateRoot := signed.Block().StateRoot()
-			s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
-				Type: statefeed.FinalizedCheckpoint,
-				Data: &ethpbv1.EventFinalizedCheckpoint{
-					Epoch:               postState.FinalizedCheckpoint().Epoch,
-					Block:               postState.FinalizedCheckpoint().Root,
-					State:               stateRoot[:],
-					ExecutionOptimistic: isValidPayload,
-				},
-			})
-
-			// Use a custom deadline here, since this method runs asynchronously.
-			// We ignore the parent method's context and instead create a new one
-			// with a custom deadline, therefore using the background context instead.
-			depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
-			defer cancel()
-			if err := s.insertFinalizedDeposits(depCtx, finalized.Root); err != nil {
-				log.WithError(err).Error("Could not insert finalized deposits.")
-			}
-		}()
-	}
 	defer reportAttestationInclusion(b)
 	if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
 		return err
```
```diff
@@ -403,7 +250,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
 			postVersionAndHeaders[i].version,
 			postVersionAndHeaders[i].header, b)
 		if err != nil {
-			return err
+			return s.handleInvalidExecutionError(ctx, err, blockRoots[i], b.Block().ParentRoot())
 		}
 		if isValidPayload {
 			if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
```
```diff
@@ -492,9 +339,20 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
 		if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
 			return err
 		}
-		if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
+		e := coreTime.CurrentEpoch(copied)
+		if err := helpers.UpdateProposerIndicesInCache(ctx, copied, e); err != nil {
 			return err
 		}
+		go func() {
+			// Use a custom deadline here, since this method runs asynchronously.
+			// We ignore the parent method's context and instead create a new one
+			// with a custom deadline, therefore using the background context instead.
+			slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
+			defer cancel()
+			if err := helpers.UpdateProposerIndicesInCache(slotCtx, copied, e+1); err != nil {
+				log.WithError(err).Warn("Failed to cache next epoch proposers")
+			}
+		}()
 	} else if postState.Slot() >= s.nextEpochBoundarySlot {
 		s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
 		if err != nil {

@@ -506,7 +364,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
 		if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
 			return err
 		}
-		if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
+		if err := helpers.UpdateProposerIndicesInCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
 			return err
 		}

@@ -518,7 +376,6 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
 			return err
 		}
 	}
-
 	return nil
 }
```
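The new goroutine warms the proposer-index cache for epoch e+1 off the hot path, bounded by its own deadline so a slow computation cannot stall block processing. The detached-context pattern in isolation (warmCache and the deadline value are stand-ins, not Prysm code):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func warmCache(ctx context.Context, epoch uint64) error {
	select {
	case <-time.After(50 * time.Millisecond): // stand-in for the expensive computation
		fmt.Println("cached proposers for epoch", epoch)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	const slotDeadline = 12 * time.Second // illustrative: roughly one slot
	e := uint64(10)
	done := make(chan struct{})
	go func() {
		defer close(done)
		// Detach from the caller's context; only the deadline bounds the work.
		slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
		defer cancel()
		if err := warmCache(slotCtx, e+1); err != nil {
			fmt.Println("failed to cache next epoch proposers:", err)
		}
	}()
	<-done
}
```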
```diff
@@ -706,3 +563,10 @@ func (s *Service) waitForSync() error {
 		return errors.New("context closed, exiting goroutine")
 	}
 }
+
+func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
+	if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
+		return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
+	}
+	return err
+}
```
```diff
@@ -14,6 +14,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
 	mathutil "github.com/prysmaticlabs/prysm/v4/math"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/time"
 	"github.com/prysmaticlabs/prysm/v4/time/slots"
 	"go.opencensus.io/trace"
 )
```
```diff
@@ -209,35 +210,44 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
 	return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
 }
 
-// inserts finalized deposits into our finalized deposit trie.
-func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) error {
+// inserts finalized deposits into our finalized deposit trie, needs to be
+// called in the background
+func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
 	ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
 	defer span.End()
+	startTime := time.Now()
 
 	// Update deposit cache.
 	finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
 	if err != nil {
-		return errors.Wrap(err, "could not fetch finalized state")
+		log.WithError(err).Error("could not fetch finalized state")
+		return
 	}
 	// We update the cache up to the last deposit index in the finalized block's state.
 	// We can be confident that these deposits will be included in some block
 	// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
 	eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
 	if err != nil {
-		return errors.Wrap(err, "could not cast eth1 deposit index")
+		log.WithError(err).Error("could not cast eth1 deposit index")
+		return
 	}
 	// The deposit index in the state is always the index of the next deposit
 	// to be included(rather than the last one to be processed). This was most likely
 	// done as the state cannot represent signed integers.
-	eth1DepositIndex -= 1
-	if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex)); err != nil {
-		return err
+	finalizedEth1DepIdx := eth1DepositIndex - 1
+	if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx)); err != nil {
+		log.WithError(err).Error("could not insert finalized deposits")
+		return
 	}
 	// Deposit proofs are only used during state transition and can be safely removed to save space.
-	if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
-		return errors.Wrap(err, "could not prune deposit proofs")
+	if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
+		log.WithError(err).Error("could not prune deposit proofs")
 	}
-	return nil
+	// Prune deposits which have already been finalized, the below method prunes all pending deposits (non-inclusive) up
+	// to the provided eth1 deposit index.
+	s.cfg.DepositCache.PrunePendingDeposits(ctx, int64(eth1DepositIndex)) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
+
+	log.WithField("duration", time.Since(startTime).String()).Debug("Finalized deposit insertion completed")
 }
 
 // This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling
```
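The off-by-one comment is the load-bearing detail here: the state's Eth1DepositIndex names the next deposit to be included, so trie finalization and proof pruning use the index minus one, while pending-deposit pruning keeps the raw index because its upper bound is exclusive. A quick worked example matching the values exercised by the tests below:

```go
package main

import "fmt"

func main() {
	// The state says 8 deposits have been processed; index 8 is the NEXT one.
	eth1DepositIndex := int64(8)

	finalizedEth1DepIdx := eth1DepositIndex - 1 // last included deposit: index 7

	fmt.Println("finalize trie through index:", finalizedEth1DepIdx) // 7
	fmt.Println("prune proofs through index:", finalizedEth1DepIdx)  // 7
	// PrunePendingDeposits' bound is exclusive, so passing 8 keeps index 8 and up.
	fmt.Println("prune pending below index:", eth1DepositIndex) // 8
}
```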
```diff
@@ -41,103 +41,6 @@ import (
 	logTest "github.com/sirupsen/logrus/hooks/test"
 )
 
-func TestStore_OnBlock(t *testing.T) {
-	service, tr := minimalTestService(t)
-	ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
-
-	var genesisStateRoot [32]byte
-	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
-	util.SaveBlock(t, ctx, beaconDB, genesis)
-	validGenesisRoot, err := genesis.Block.HashTreeRoot()
-	require.NoError(t, err)
-	st, err := util.NewBeaconState()
-	require.NoError(t, err)
-	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
-	ojc := &ethpb.Checkpoint{}
-	stfcs, root, err := prepareForkchoiceState(ctx, 0, validGenesisRoot, [32]byte{}, [32]byte{}, ojc, ojc)
-	require.NoError(t, err)
-	require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
-	roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
-	require.NoError(t, err)
-	random := util.NewBeaconBlock()
-	random.Block.Slot = 1
-	random.Block.ParentRoot = validGenesisRoot[:]
-	util.SaveBlock(t, ctx, beaconDB, random)
-	randomParentRoot, err := random.Block.HashTreeRoot()
-	assert.NoError(t, err)
-
-	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
-	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
-	randomParentRoot2 := roots[1]
-	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
-	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
-	stfcs, root, err = prepareForkchoiceState(ctx, 2, bytesutil.ToBytes32(randomParentRoot2),
-		validGenesisRoot, [32]byte{'r'}, ojc, ojc)
-	require.NoError(t, err)
-	require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
-
-	tests := []struct {
-		name          string
-		blk           *ethpb.SignedBeaconBlock
-		s             state.BeaconState
-		time          uint64
-		wantErrString string
-	}{
-		{
-			name:          "parent block root does not have a state",
-			blk:           util.NewBeaconBlock(),
-			s:             st.Copy(),
-			wantErrString: "could not reconstruct parent state",
-		},
-		{
-			name: "block is from the future",
-			blk: func() *ethpb.SignedBeaconBlock {
-				b := util.NewBeaconBlock()
-				b.Block.ParentRoot = randomParentRoot2
-				b.Block.Slot = params.BeaconConfig().FarFutureSlot
-				return b
-			}(),
-			s:             st.Copy(),
-			wantErrString: "is in the far distant future",
-		},
-		{
-			name: "could not get finalized block",
-			blk: func() *ethpb.SignedBeaconBlock {
-				b := util.NewBeaconBlock()
-				b.Block.ParentRoot = randomParentRoot[:]
-				b.Block.Slot = 2
-				return b
-			}(),
-			s:             st.Copy(),
-			wantErrString: "not descendant of finalized checkpoint",
-		},
-		{
-			name: "same slot as finalized block",
-			blk: func() *ethpb.SignedBeaconBlock {
-				b := util.NewBeaconBlock()
-				b.Block.Slot = 0
-				b.Block.ParentRoot = randomParentRoot2
-				return b
-			}(),
-			s:             st.Copy(),
-			wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			fRoot := bytesutil.ToBytes32(roots[0])
-			require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
-			root, err := tt.blk.Block.HashTreeRoot()
-			assert.NoError(t, err)
-			wsb, err := consensusblocks.NewSignedBeaconBlock(tt.blk)
-			require.NoError(t, err)
-			err = service.onBlock(ctx, wsb, root)
-			assert.ErrorContains(t, tt.wantErrString, err)
-		})
-	}
-}
-
 func TestStore_OnBlockBatch(t *testing.T) {
 	service, tr := minimalTestService(t)
 	ctx := tr.ctx
```
```diff
@@ -657,7 +560,20 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
 		wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
 		require.NoError(t, err)
 		require.NoError(t, fcs.NewSlot(ctx, i))
-		require.NoError(t, service.onBlock(ctx, wsb, r))
+		// Save current justified and finalized epochs for future use.
+		currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
+		currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
+
+		preState, err := service.getBlockPreState(ctx, wsb.Block())
+		require.NoError(t, err)
+		postState, err := service.validateStateTransition(ctx, preState, wsb)
+		require.NoError(t, err)
+		require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
+		require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
+		require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
+		_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
+		require.NoError(t, err)
+
 		testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
 		require.NoError(t, err)
 	}
```
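The test now spells out the pipeline that replaced the monolithic onBlock: load the parent state, run and verify the transition, persist, then post-process, with justification and finalization updates as separate explicit steps. A stub sketch of the call order (toy types here; only the method names come from the diff):

```go
package main

import "fmt"

// Stubs standing in for Prysm's real types; only the call order matters.
type state struct{ slot int }
type block struct{ slot int }

func getBlockPreState(b block) state                    { return state{slot: b.slot - 1} }
func validateStateTransition(pre state, b block) state  { return state{slot: b.slot} }
func savePostStateInfo(post state)                      { fmt.Println("saved state at slot", post.slot) }
func postBlockProcess(b block, post state)              { fmt.Println("processed block at slot", b.slot) }

func main() {
	b := block{slot: 7}
	pre := getBlockPreState(b)              // 1. load parent state
	post := validateStateTransition(pre, b) // 2. run and verify the transition
	savePostStateInfo(post)                 // 3. persist block and post-state
	postBlockProcess(b, post)               // 4. head update, attestations, events
}
```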
```diff
@@ -692,7 +608,20 @@ func TestOnBlock_CanFinalize(t *testing.T) {
 		require.NoError(t, err)
 		wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
 		require.NoError(t, err)
-		require.NoError(t, service.onBlock(ctx, wsb, r))
+		// Save current justified and finalized epochs for future use.
+		currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
+		currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
+
+		preState, err := service.getBlockPreState(ctx, wsb.Block())
+		require.NoError(t, err)
+		postState, err := service.validateStateTransition(ctx, preState, wsb)
+		require.NoError(t, err)
+		require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
+		require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
+		require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
+		_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
+		require.NoError(t, err)
+
 		testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
 		require.NoError(t, err)
 	}
```
```diff
@@ -714,8 +643,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
 
 func TestOnBlock_NilBlock(t *testing.T) {
 	service, tr := minimalTestService(t)
-
-	err := service.onBlock(tr.ctx, nil, [32]byte{})
+	err := service.postBlockProcess(tr.ctx, nil, [32]byte{}, nil, true)
 	require.Equal(t, true, IsInvalidBlock(err))
 }

@@ -729,11 +657,11 @@ func TestOnBlock_InvalidSignature(t *testing.T) {
 	blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
 	require.NoError(t, err)
 	blk.Signature = []byte{'a'} // Mutate the signature.
-	r, err := blk.Block.HashTreeRoot()
-	require.NoError(t, err)
 	wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
 	require.NoError(t, err)
-	err = service.onBlock(ctx, wsb, r)
+	preState, err := service.getBlockPreState(ctx, wsb.Block())
+	require.NoError(t, err)
+	_, err = service.validateStateTransition(ctx, preState, wsb)
 	require.Equal(t, true, IsInvalidBlock(err))
 }
```
```diff
@@ -757,7 +685,13 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
 		require.NoError(t, err)
 		wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
 		require.NoError(t, err)
-		require.NoError(t, service.onBlock(ctx, wsb, r))
+
+		preState, err := service.getBlockPreState(ctx, wsb.Block())
+		require.NoError(t, err)
+		postState, err := service.validateStateTransition(ctx, preState, wsb)
+		require.NoError(t, err)
+		require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
+		require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, false))
 		testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
 		require.NoError(t, err)
 	}
```
```diff
@@ -783,7 +717,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
 			Signature:             zeroSig[:],
 		}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
 	}
-	assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
+	service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
 	fDeposits := depositCache.FinalizedDeposits(ctx)
 	assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
 	deps := depositCache.AllDeposits(ctx, big.NewInt(107))
```
```diff
@@ -792,6 +726,45 @@
 	}
 }
 
+func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
+	service, tr := minimalTestService(t)
+	ctx, depositCache := tr.ctx, tr.dc
+
+	gs, _ := util.DeterministicGenesisState(t, 32)
+	require.NoError(t, service.saveGenesisData(ctx, gs))
+	gs = gs.Copy()
+	assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
+	assert.NoError(t, gs.SetEth1DepositIndex(8))
+	assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
+	var zeroSig [96]byte
+	for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
+		root := []byte(strconv.Itoa(int(i)))
+		assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
+			PublicKey:             bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
+			WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
+			Amount:                0,
+			Signature:             zeroSig[:],
+		}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
+		depositCache.InsertPendingDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
+			PublicKey:             bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
+			WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
+			Amount:                0,
+			Signature:             zeroSig[:],
+		}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
+	}
+	service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
+	fDeposits := depositCache.FinalizedDeposits(ctx)
+	assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
+	deps := depositCache.AllDeposits(ctx, big.NewInt(107))
+	for _, d := range deps {
+		assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
+	}
+	pendingDeps := depositCache.PendingContainers(ctx, nil)
+	for _, d := range pendingDeps {
+		assert.DeepEqual(t, true, d.Index >= 8, "Pending deposits were not pruned")
+	}
+}
+
 func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
 	service, tr := minimalTestService(t)
 	ctx, depositCache := tr.ctx, tr.dc
```
```diff
@@ -819,7 +792,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
 	// Insert 3 deposits before hand.
 	require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2))
 
-	assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
+	service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
 	fDeposits := depositCache.FinalizedDeposits(ctx)
 	assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")

@@ -829,7 +802,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
 	}
 
 	// Insert New Finalized State with higher deposit count.
-	assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
+	service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
 	fDeposits = depositCache.FinalizedDeposits(ctx)
 	assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
 	deps = depositCache.AllDeposits(ctx, big.NewInt(112))
```
```diff
@@ -1131,19 +1104,35 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 		var wg sync.WaitGroup
 		wg.Add(4)
 		go func() {
-			require.NoError(t, service.onBlock(ctx, wsb1, r1))
+			preState, err := service.getBlockPreState(ctx, wsb1.Block())
+			require.NoError(t, err)
+			postState, err := service.validateStateTransition(ctx, preState, wsb1)
+			require.NoError(t, err)
+			require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
 			wg.Done()
 		}()
 		go func() {
-			require.NoError(t, service.onBlock(ctx, wsb2, r2))
+			preState, err := service.getBlockPreState(ctx, wsb2.Block())
+			require.NoError(t, err)
+			postState, err := service.validateStateTransition(ctx, preState, wsb2)
+			require.NoError(t, err)
+			require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
 			wg.Done()
 		}()
 		go func() {
-			require.NoError(t, service.onBlock(ctx, wsb3, r3))
+			preState, err := service.getBlockPreState(ctx, wsb3.Block())
+			require.NoError(t, err)
+			postState, err := service.validateStateTransition(ctx, preState, wsb3)
+			require.NoError(t, err)
+			require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
 			wg.Done()
 		}()
 		go func() {
-			require.NoError(t, service.onBlock(ctx, wsb4, r4))
+			preState, err := service.getBlockPreState(ctx, wsb4.Block())
+			require.NoError(t, err)
+			postState, err := service.validateStateTransition(ctx, preState, wsb4)
+			require.NoError(t, err)
+			require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
 			wg.Done()
 		}()
 		wg.Wait()
```
@@ -1211,7 +1200,13 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.onBlock(ctx, wsb, root))
|
||||
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1224,7 +1219,12 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
require.NoError(t, err)
}

@@ -1238,7 +1238,12 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1255,7 +1260,12 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

@@ -1278,7 +1288,12 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did

@@ -1301,7 +1316,13 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)

preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
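Aside: the same five-step sequence (pre-state, state transition, save, post-process) repeats in every loop of these tests. A hedged sketch of a test helper that would collapse the repetition; `importBlock` is hypothetical and would live in the same test package, it is not part of the actual change set:

```go
// importBlock runs the full import pipeline the tests above repeat inline.
func importBlock(ctx context.Context, t *testing.T, service *Service, wsb interfaces.ReadOnlySignedBeaconBlock, root [32]byte, isValidPayload bool) {
	require.NoError(t, service.onBlock(ctx, wsb, root))
	preState, err := service.getBlockPreState(ctx, wsb.Block())
	require.NoError(t, err)
	postState, err := service.validateStateTransition(ctx, preState, wsb)
	require.NoError(t, err)
	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
	require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, isValidPayload))
}
```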
@@ -1358,7 +1379,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
}

for i := 6; i < 12; i++ {

@@ -1371,7 +1397,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
require.NoError(t, err)
}

@@ -1385,7 +1416,13 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)

preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet

@@ -1402,7 +1439,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

@@ -1425,7 +1467,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)

preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
// not finish importing and it was never imported to forkchoice). Check

@@ -1448,7 +1495,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1506,7 +1558,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))

preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
}

for i := 6; i < 12; i++ {

@@ -1519,7 +1577,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)

preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
require.NoError(t, err)
}

@@ -1533,7 +1597,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, lastValidRoot)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH

@@ -1555,7 +1624,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
invalidRoots[i-13], err = b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, invalidRoots[i-13])
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
err = service.postBlockProcess(ctx, wsb, invalidRoots[i-13], postState, false)
require.NoError(t, err)
}
// Check that we have justified the second epoch
@@ -1576,7 +1650,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)

preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)

// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did

@@ -1610,7 +1689,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, true))
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)

@@ -1628,7 +1712,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)

@@ -1648,7 +1737,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)

preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1699,7 +1794,12 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
}

for i := 6; i < 12; i++ {

@@ -1712,7 +1812,12 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
require.NoError(t, err)
}

@@ -1726,7 +1831,12 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, lastValidRoot)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH

@@ -1747,7 +1857,18 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1766,7 +1887,11 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)

// Check that the headroot/state are not in DB and restart the node

@@ -1848,7 +1973,12 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))

st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -86,6 +86,11 @@ func (s *Service) spawnProcessAttestationsRoutine() {
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}

st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
pat := slots.NewSlotTickerWithOffset(s.genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
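Aside: the second ticker above fires at a fixed offset before each slot boundary so late-block reorg attestation processing can run early. A generic `time.Ticker`-style sketch of that offset-ticker idea, not the actual `slots` package implementation:

```go
package main

import (
	"fmt"
	"time"
)

// tickWithOffset sends the slot number on the returned channel at
// (slotStart + offset) for every slot. A negative offset fires early.
func tickWithOffset(genesis time.Time, slotDur, offset time.Duration) <-chan uint64 {
	c := make(chan uint64)
	go func() {
		for slot := uint64(0); ; slot++ {
			target := genesis.Add(time.Duration(slot)*slotDur + offset)
			time.Sleep(time.Until(target)) // returns immediately if target passed
			c <- slot
		}
	}()
	return c
}

func main() {
	ticks := tickWithOffset(time.Now(), 12*time.Second, -4*time.Second)
	fmt.Println("first tick at slot", <-ticks)
}
```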
@@ -128,7 +128,13 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, tRoot))

preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())

@@ -178,7 +184,13 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, tRoot))

preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)
@@ -7,11 +7,17 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -47,15 +53,64 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}

preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch

preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}

postState, err := s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
isValidPayload, err := s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
// The rest of block processing takes a lock on forkchoice.
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the new block.
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
return errors.Wrap(err, "could not save post state info")
}

if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
return err
}

if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}

newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
if err != nil {
return errors.Wrap(err, "could not update finalized checkpoint")
}
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(blockCopy, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
cancel()
}()
}

// If slasher is configured, forward the attestations in the block via an event feed for processing.
if features.Get().EnableSlasher {
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
}

// Handle post block operations such as pruning exits and bls messages if incoming block is the head
if err := s.prunePostBlockOperationPools(ctx, blockCopy, blockRoot); err != nil {
log.WithError(err).Error("Could not prune canonical objects from pool ")

@@ -86,6 +141,8 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
log.WithError(err).Error("Unable to log state transition data")
}

chainServiceProcessingTime.Observe(float64(time.Since(receivedTime).Milliseconds()))

return nil
}
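Aside: the refactor above moves the expensive, side-effect-free work (state transition, engine payload validation) ahead of the forkchoice lock, so concurrent block imports only serialize for the mutation phase. A distilled sketch of that locking strategy with generic stand-in types:

```go
package main

import "sync"

type store struct {
	mu    sync.Mutex
	heads map[string]bool
}

// validate is CPU-heavy but touches no shared state.
func validate(block string) error { return nil }

func (s *store) receive(block string) error {
	// Phase 1: validate outside the lock so concurrent blocks don't serialize.
	if err := validate(block); err != nil {
		return err
	}
	// Phase 2: mutate shared state under the lock, kept as short as possible.
	s.mu.Lock()
	defer s.mu.Unlock()
	s.heads[block] = true
	return nil
}

func main() {
	s := &store{heads: make(map[string]bool)}
	_ = s.receive("block-a")
}
```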
@@ -111,6 +168,11 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
if err != nil {
return err
}
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blkRoots[i])
if err != nil {
log.WithError(err).Error("Could not check if block is optimistic")
optimistic = true
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,

@@ -119,6 +181,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
BlockRoot: blkRoots[i],
SignedBlock: blockCopy,
Verified: true,
Optimistic: optimistic,
},
})
@@ -226,3 +289,109 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {

return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}

// This performs the state transition function and returns the post state, or an
// error if the block fails consensus validation.
func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
b := signed.Block()
// Verify that the parent block is in forkchoice
parentRoot := b.ParentRoot()
if !s.InForkchoice(parentRoot) {
return nil, ErrNotDescendantOfFinalized
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return nil, invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
return postState, nil
}
// updateJustificationOnBlock updates the justified checkpoint on DB if the
// incoming block has updated it on forkchoice.
func (s *Service) updateJustificationOnBlock(ctx context.Context, preState, postState state.BeaconState, preJustifiedEpoch primitives.Epoch) error {
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > preJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
}
}
return nil
}

// updateFinalizationOnBlock performs some duties when the incoming block
// changes the finalized checkpoint. It returns true when this has happened.
func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postState state.BeaconState, preFinalizedEpoch primitives.Epoch) (bool, error) {
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized.Epoch > preFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return true, err
}
return true, nil
}
return false, nil
}
|
||||
// event feed. It needs to be called on the background
|
||||
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
|
||||
isValidPayload := false
|
||||
s.headLock.RLock()
|
||||
if s.head != nil {
|
||||
isValidPayload = s.head.optimistic
|
||||
}
|
||||
s.headLock.RUnlock()
|
||||
|
||||
// Send an event regarding the new finalized checkpoint over a common event feed.
|
||||
stateRoot := signed.Block().StateRoot()
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.FinalizedCheckpoint,
|
||||
Data: ðpbv1.EventFinalizedCheckpoint{
|
||||
Epoch: postState.FinalizedCheckpoint().Epoch,
|
||||
Block: postState.FinalizedCheckpoint().Root,
|
||||
State: stateRoot[:],
|
||||
ExecutionOptimistic: isValidPayload,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// sendBlockAttestationsToSlasher sends the incoming block's attestations to the slasher.
func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySignedBeaconBlock, preState state.BeaconState) {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
}
}
// validateExecutionOnBlock notifies the engine of the incoming block's execution payload and returns true if the payload is valid.
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
return isValidPayload, err
}
}
return isValidPayload, nil
}
@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"

@@ -408,7 +409,7 @@ func (s *Service) initializeBeaconChain(
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState, coreTime.CurrentEpoch(genesisState)); err != nil {
return nil, err
}
@@ -533,7 +533,7 @@ func (s *ChainService) GetProposerHead() [32]byte {
return [32]byte{}
}

// SetForkchoiceGenesisTime mocks the same method in the chain service
// SetForkChoiceGenesisTime mocks the same method in the chain service
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
if s.ForkChoiceStore != nil {
s.ForkChoiceStore.SetGenesisTime(timestamp)
@@ -195,6 +195,7 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}

// ValidateSyncMessageTime validates a sync message to ensure that the provided slot is valid.
// Spec: [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot
func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(slot, uint64(genesisTime.Unix())); err != nil {
return err

@@ -223,13 +224,12 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
// Verify sync message slot is within the time range.
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
syncErr := fmt.Errorf(
"sync message time %v (slot %d) not within allowable range of %v (slot %d) to %v (slot %d)",
"sync message time %v (message slot %d) not within allowable range of %v to %v (current slot %d)",
messageTime,
slot,
lowerBound,
uint64(lowerBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
upperBound,
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
currentSlot,
)
// Wrap error message if sync message is too late.
if messageTime.Before(lowerBound) {
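Aside: a worked example of the clock-disparity window the validator above enforces. A message for slot s is acceptable while the local clock sits within MAXIMUM_GOSSIP_CLOCK_DISPARITY of slot s's interval; the constants below are mainnet-style values used purely for illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const secondsPerSlot = 12
	disparity := 500 * time.Millisecond
	genesis := time.Now().Add(-100 * secondsPerSlot * time.Second)

	slot := uint64(100)
	slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	lowerBound := slotStart.Add(-disparity)
	upperBound := slotStart.Add(secondsPerSlot*time.Second + disparity)

	msgTime := time.Now()
	ok := !msgTime.Before(lowerBound) && !msgTime.After(upperBound)
	fmt.Printf("message for slot %d acceptable: %v\n", slot, ok)
}
```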
@@ -311,7 +311,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 16,
genesisTime: prysmTime.Now().Add(-(15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)),
},
wantedErr: "(slot 16) not within allowable range of",
wantedErr: "(message slot 16) not within allowable range of",
},
{
name: "sync_message.slot == current_slot+CLOCK_DISPARITY",

@@ -327,7 +327,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 100,
genesisTime: prysmTime.Now().Add(-(100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + params.BeaconNetworkConfig().MaximumGossipClockDisparity + 1000*time.Millisecond),
},
wantedErr: "(slot 100) not within allowable range of",
wantedErr: "(message slot 100) not within allowable range of",
},
{
name: "sync_message.slot == current_slot-CLOCK_DISPARITY",

@@ -343,7 +343,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 101,
genesisTime: prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
},
wantedErr: "(slot 101) not within allowable range of",
wantedErr: "(message slot 101) not within allowable range of",
},
{
name: "sync_message.slot is well beyond current slot",
@@ -39,6 +39,8 @@ type BlockProcessedData struct {
SignedBlock interfaces.ReadOnlySignedBeaconBlock
// Verified is true if the block's BLS contents have been verified.
Verified bool
// Optimistic is true if the block is optimistic.
Optimistic bool
}

// ChainStartedData is the data sent with ChainStarted events.
@@ -336,20 +336,21 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
}

// UpdateProposerIndicesInCache updates the proposer indices entry of the committee cache.
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState) error {
// Input state is used to retrieve active validator indices.
// Input epoch is the epoch to retrieve proposer indices for.
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
// The cache uses the state root at the (current epoch - 1)'s slot as key (e.g. for epoch 2, the key is the root at slot 63),
// which is why we skip the genesis epoch.
if time.CurrentEpoch(state) <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
return nil
}

// Use the state root from (current_epoch - 1)
wantedEpoch := time.PrevEpoch(state)
s, err := slots.EpochEnd(wantedEpoch)
s, err := slots.EpochEnd(epoch - 1)
if err != nil {
return err
}
r, err := StateRootAtSlot(state, s)
r, err := state.StateRootAtIndex(uint64(s % params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
return err
}
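Aside: a small worked example of the cache-key computation introduced above. The key is the state root at the last slot of (epoch - 1), fetched from the circular historical-roots buffer via index = slot % SlotsPerHistoricalRoot; mainnet-style constants are assumed for illustration:

```go
package main

import "fmt"

const (
	slotsPerEpoch          = 32
	slotsPerHistoricalRoot = 8192
)

// epochEnd returns the last slot of an epoch.
func epochEnd(epoch uint64) uint64 { return (epoch+1)*slotsPerEpoch - 1 }

func main() {
	epoch := uint64(2)
	keySlot := epochEnd(epoch - 1) // slot 63 for epoch 2
	idx := keySlot % slotsPerHistoricalRoot
	fmt.Printf("epoch %d -> key slot %d -> historical-roots index %d\n", epoch, keySlot, idx)
}
```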
@@ -366,11 +367,11 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
return nil
}

indices, err := ActiveValidatorIndices(ctx, state, time.CurrentEpoch(state))
indices, err := ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices)
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
if err != nil {
return err
}

@@ -432,11 +433,10 @@ func computeCommittee(

// This computes the proposer indices of the given epoch and returns a list of proposer indices;
// the index of the list represents the slot number.
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex) ([]primitives.ValidatorIndex, error) {
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
hashFunc := hash.CustomSHA256Hasher()
proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)

e := time.CurrentEpoch(state)
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return nil, errors.Wrap(err, "could not generate seed")

@@ -639,7 +639,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)

proposerIndices, err := precomputeProposerIndices(state, indices)
proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
require.NoError(t, err)

var wantedProposerIndices []primitives.ValidatorIndex
@@ -262,7 +262,7 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
if err := UpdateProposerIndicesInCache(ctx, state); err != nil {
if err := UpdateProposerIndicesInCache(ctx, state, time.CurrentEpoch(state)); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
}
@@ -297,9 +297,6 @@ func (s *Service) ExchangeTransitionConfiguration(
}

func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
if !features.Get().EnableOptionalEngineMethods {
return nil, errors.New("optional engine methods not enabled")
}
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
defer span.End()

@@ -491,9 +488,6 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H

// GetPayloadBodiesByHash returns the relevant payload bodies for the provided block hash.
func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHashes []common.Hash) ([]*pb.ExecutionPayloadBodyV1, error) {
if !features.Get().EnableOptionalEngineMethods {
return nil, errors.New("optional engine methods not enabled")
}
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByHashV1")
defer span.End()

@@ -513,9 +507,6 @@ func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHash

// GetPayloadBodiesByRange returns the relevant payload bodies for the provided range.
func (s *Service) GetPayloadBodiesByRange(ctx context.Context, start, count uint64) ([]*pb.ExecutionPayloadBodyV1, error) {
if !features.Get().EnableOptionalEngineMethods {
return nil, errors.New("optional engine methods not enabled")
}
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByRangeV1")
defer span.End()
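Aside: the two methods losing their feature gate correspond to the Engine API calls engine_getPayloadBodiesByHashV1 and engine_getPayloadBodiesByRangeV1. A hedged sketch of invoking the range variant with go-ethereum's generic RPC client; the result type below is a trimmed, illustrative stand-in for the real payload body, and the endpoint would be JWT-authenticated in practice:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

type payloadBody struct {
	Transactions []string `json:"transactions"`
}

func main() {
	client, err := rpc.Dial("http://localhost:8551") // authenticated engine endpoint in practice
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var bodies []*payloadBody
	// Params are hex-encoded quantities per the Engine API spec.
	err = client.CallContext(context.Background(), &bodies, "engine_getPayloadBodiesByRangeV1", "0x1", "0x10")
	fmt.Println(len(bodies), err)
}
```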
@@ -560,19 +551,7 @@ func (s *Service) ReconstructFullBlock(
}

executionBlockHash := common.BytesToHash(header.BlockHash())
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */)
if err != nil {
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
}
if executionBlock == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
}
if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
return nil, EmptyBlockHash
}

executionBlock.Version = blindedBlock.Version()
payload, err := fullPayloadFromExecutionBlock(header, executionBlock)
payload, err := s.retrievePayloadFromExecutionHash(ctx, executionBlockHash, header, blindedBlock.Version())
if err != nil {
return nil, err
}
@@ -619,32 +598,9 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
executionHashes = append(executionHashes, executionBlockHash)
}
}
execBlocks, err := s.ExecutionBlocksByHashes(ctx, executionHashes, true /* with txs*/)
fullBlocks, err := s.retrievePayloadsFromExecutionHashes(ctx, executionHashes, validExecPayloads, blindedBlocks)
if err != nil {
return nil, fmt.Errorf("could not fetch execution blocks with txs by hash %#x: %v", executionHashes, err)
}

// For each valid payload, we reconstruct the full block from it with the
// blinded block.
fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks))
for sliceIdx, realIdx := range validExecPayloads {
b := execBlocks[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionHashes[sliceIdx])
}
header, err := blindedBlocks[realIdx].Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err := fullPayloadFromExecutionBlock(header, b)
if err != nil {
return nil, err
}
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload.Proto())
if err != nil {
return nil, err
}
fullBlocks[realIdx] = fullBlock
return nil, err
}
// For blocks that are pre-merge we simply reconstruct them via an empty
// execution payload.
@@ -660,6 +616,95 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
return fullBlocks, nil
}

func (s *Service) retrievePayloadFromExecutionHash(ctx context.Context, executionBlockHash common.Hash, header interfaces.ExecutionData, version int) (interfaces.ExecutionData, error) {
if features.Get().EnableOptionalEngineMethods {
pBodies, err := s.GetPayloadBodiesByHash(ctx, []common.Hash{executionBlockHash})
if err != nil {
return nil, fmt.Errorf("could not get payload body by hash %#x: %v", executionBlockHash, err)
}
if len(pBodies) != 1 {
return nil, errors.Errorf("could not retrieve the correct number of payload bodies: wanted 1 but got %d", len(pBodies))
}
bdy := pBodies[0]
return fullPayloadFromPayloadBody(header, bdy, version)
}

executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */)
if err != nil {
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
}
if executionBlock == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
}
if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
return nil, EmptyBlockHash
}

executionBlock.Version = version
return fullPayloadFromExecutionBlock(header, executionBlock)
}

func (s *Service) retrievePayloadsFromExecutionHashes(
ctx context.Context,
executionHashes []common.Hash,
validExecPayloads []int,
blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks))
var execBlocks []*pb.ExecutionBlock
var payloadBodies []*pb.ExecutionPayloadBodyV1
var err error
if features.Get().EnableOptionalEngineMethods {
payloadBodies, err = s.GetPayloadBodiesByHash(ctx, executionHashes)
if err != nil {
return nil, fmt.Errorf("could not fetch payload bodies by hash %#x: %v", executionHashes, err)
}
} else {
execBlocks, err = s.ExecutionBlocksByHashes(ctx, executionHashes, true /* with txs*/)
if err != nil {
return nil, fmt.Errorf("could not fetch execution blocks with txs by hash %#x: %v", executionHashes, err)
}
}

// For each valid payload, we reconstruct the full block from it with the
// blinded block.
for sliceIdx, realIdx := range validExecPayloads {
var payload interfaces.ExecutionData
if features.Get().EnableOptionalEngineMethods {
b := payloadBodies[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil payload body for request by hash %#x", executionHashes[sliceIdx])
}
header, err := blindedBlocks[realIdx].Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err = fullPayloadFromPayloadBody(header, b, blindedBlocks[realIdx].Version())
if err != nil {
return nil, err
}
} else {
b := execBlocks[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionHashes[sliceIdx])
}
header, err := blindedBlocks[realIdx].Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err = fullPayloadFromExecutionBlock(header, b)
if err != nil {
return nil, err
}
}
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload.Proto())
if err != nil {
return nil, err
}
fullBlocks[realIdx] = fullBlock
}
return fullBlocks, nil
}
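Aside: the loop above walks two parallel coordinate systems. sliceIdx indexes the compact request (only post-merge blocks were queried) while realIdx maps each answer back into the full, sparse result slice. A self-contained sketch of that dense-to-sparse mapping:

```go
package main

import "fmt"

func main() {
	blinded := []string{"pre-merge", "blockA", "pre-merge", "blockB"}
	validIdx := []int{1, 3}                     // positions that needed a payload fetch
	fetched := []string{"payloadA", "payloadB"} // dense response, same order as validIdx

	full := make([]string, len(blinded))
	for sliceIdx, realIdx := range validIdx {
		full[realIdx] = blinded[realIdx] + "+" + fetched[sliceIdx]
	}
	// Pre-merge positions stay empty and are filled elsewhere:
	fmt.Println(full) // [ blockA+payloadA  blockB+payloadB]
}
```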
func fullPayloadFromExecutionBlock(
header interfaces.ExecutionData, block *pb.ExecutionBlock,
) (interfaces.ExecutionData, error) {

@@ -721,6 +766,50 @@ func fullPayloadFromExecutionBlock(
}, 0) // We can't get the block value and don't care about the block value for this instance
}
func fullPayloadFromPayloadBody(
header interfaces.ExecutionData, body *pb.ExecutionPayloadBodyV1, bVersion int,
) (interfaces.ExecutionData, error) {
if header.IsNil() || body == nil {
return nil, errors.New("execution payload body and header cannot be nil")
}

if bVersion == version.Bellatrix {
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
})
}
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}

// Handles errors received from the RPC server according to the specification.
func handleRPCError(err error) error {
if err == nil {
@@ -7,7 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
)

func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [32]byte) ([][32]byte, error) {
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok {

@@ -16,7 +16,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
// return early if the parent is the LVH
if node.payloadHash == payloadHash {
if node.payloadHash == lastValidHash {
return invalidRoots, nil
}
} else {

@@ -28,7 +28,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
}
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
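Aside: the rename above clarifies that the third argument is the last valid hash (LVH) reported by the execution engine. A minimal sketch of the ancestor walk it performs: starting from an invalid node, climb parent pointers until the LVH is found, collecting every node passed on the way. Generic types, not the forkchoice store itself:

```go
package main

import "fmt"

type node struct {
	payloadHash string
	parent      *node
}

// collectInvalid returns the payload hashes of every node between n and the
// ancestor whose hash equals lastValidHash (exclusive).
func collectInvalid(n *node, lastValidHash string) []string {
	invalid := []string{}
	for ; n != nil && n.payloadHash != lastValidHash; n = n.parent {
		invalid = append(invalid, n.payloadHash)
	}
	return invalid
}

func main() {
	a := &node{payloadHash: "A"}            // last valid block
	b := &node{payloadHash: "B", parent: a} // invalidated
	c := &node{payloadHash: "C", parent: b} // invalidated
	fmt.Println(collectInvalid(c, "A"))     // [C B]
}
```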
@@ -987,7 +987,8 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
if cliCtx.Bool(flags.EnableRegistrationCache.Name) {
// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())
}
svc, err := builder.NewService(b.ctx, opts...)
@@ -108,12 +108,31 @@ func (s *Store) DeletePeerData(pid peer.ID) {
}

// SetTrustedPeers sets our desired trusted peer set.
// Important: it is assumed that the store mutex is locked when calling this method.
func (s *Store) SetTrustedPeers(peers []peer.ID) {
for _, p := range peers {
s.trustedPeers[p] = true
}
}

// GetTrustedPeers gets our desired trusted peer ids.
// Important: it is assumed that the store mutex is locked when calling this method.
func (s *Store) GetTrustedPeers() []peer.ID {
peers := []peer.ID{}
for p := range s.trustedPeers {
peers = append(peers, p)
}
return peers
}

// DeleteTrustedPeers removes peers from the trusted peer set.
// Important: it is assumed that the store mutex is locked when calling this method.
func (s *Store) DeleteTrustedPeers(peers []peer.ID) {
for _, p := range peers {
delete(s.trustedPeers, p)
}
}

// Peers returns a map of peer data objects.
// Important: it is assumed that the store mutex is locked when calling this method.
func (s *Store) Peers() map[peer.ID]*PeerData {
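Aside: the store keeps the trusted set as a map[peer.ID]bool, which gives O(1) add, delete, and membership checks. A self-contained sketch of the same set pattern with plain strings standing in for libp2p peer IDs:

```go
package main

import "fmt"

type peerStore struct {
	trusted map[string]bool
}

func (s *peerStore) setTrusted(peers []string) {
	for _, p := range peers {
		s.trusted[p] = true
	}
}

func (s *peerStore) deleteTrusted(peers []string) {
	for _, p := range peers {
		delete(s.trusted, p)
	}
}

func (s *peerStore) isTrusted(p string) bool { return s.trusted[p] }

func main() {
	s := &peerStore{trusted: make(map[string]bool)}
	s.setTrusted([]string{"peer1", "peer2"})
	fmt.Println(s.isTrusted("peer1"), s.isTrusted("peer3")) // true false
	s.deleteTrusted([]string{"peer1"})
	fmt.Println(s.isTrusted("peer1")) // false
}
```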
@@ -96,4 +96,16 @@ func TestStore_TrustedPeers(t *testing.T) {
assert.Equal(t, true, store.IsTrustedPeer(pid1))
assert.Equal(t, true, store.IsTrustedPeer(pid2))
assert.Equal(t, true, store.IsTrustedPeer(pid3))

tPeers = store.GetTrustedPeers()
assert.Equal(t, 3, len(tPeers))

store.DeleteTrustedPeers(tPeers)
tPeers = store.GetTrustedPeers()
assert.Equal(t, 0, len(tPeers))

assert.Equal(t, false, store.IsTrustedPeer(pid1))
assert.Equal(t, false, store.IsTrustedPeer(pid2))
assert.Equal(t, false, store.IsTrustedPeer(pid3))
}
@@ -560,6 +560,9 @@ func (p *Status) Prune() {
notBadPeer := func(pid peer.ID) bool {
return !p.isBad(pid)
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
}
type peerResp struct {
pid peer.ID
score float64

@@ -567,7 +570,8 @@ func (p *Status) Prune() {
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) {
// Do not prune a trusted peer; it must be removed from the trusted set before it can be pruned.
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
score: p.Scorers().ScoreNoLock(pid),

@@ -608,6 +612,9 @@ func (p *Status) deprecatedPrune() {
notBadPeer := func(peerData *peerdata.PeerData) bool {
return peerData.BadResponses < p.scorers.BadResponsesScorer().Params().Threshold
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
}
type peerResp struct {
pid peer.ID
badResp int

@@ -615,7 +622,8 @@ func (p *Status) deprecatedPrune() {
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) {
// Do not prune a trusted peer; it must be removed from the trusted set before it can be pruned.
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
@@ -912,6 +920,32 @@ func (p *Status) SetTrustedPeers(peers []peer.ID) {
 	p.store.SetTrustedPeers(peers)
 }

+// GetTrustedPeers returns the ids of all trusted peers.
+func (p *Status) GetTrustedPeers() []peer.ID {
+	p.store.RLock()
+	defer p.store.RUnlock()
+	return p.store.GetTrustedPeers()
+}
+
+// DeleteTrustedPeers removes peers from the trusted peer set.
+func (p *Status) DeleteTrustedPeers(peers []peer.ID) {
+	p.store.Lock()
+	defer p.store.Unlock()
+	p.store.DeleteTrustedPeers(peers)
+}
+
+// IsTrustedPeers returns whether the given peer is a trusted peer.
+func (p *Status) IsTrustedPeers(pid peer.ID) bool {
+	p.store.RLock()
+	defer p.store.RUnlock()
+	return p.isTrustedPeers(pid)
+}
+
+// isTrustedPeers is the lock-free version of IsTrustedPeers.
+func (p *Status) isTrustedPeers(pid peer.ID) bool {
+	return p.store.IsTrustedPeer(pid)
+}
+
 // this method assumes the store lock is acquired before
 // executing the method.
 func (p *Status) isfromBadIP(pid peer.ID) bool {
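Taken together, these methods give Status a small trusted-peer lifecycle: mark, enumerate, query, and unmark. A hedged usage sketch follows; constructing a real *peers.Status requires the package's config, which is omitted here, and the function name is invented for illustration.

```go
package example

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
)

// trustedPeerLifecycle walks the trusted-peer API on an existing *peers.Status.
// Assumes p was built with peers.NewStatus(...) elsewhere.
func trustedPeerLifecycle(p *peers.Status, pids []peer.ID) {
	p.SetTrustedPeers(pids) // mark as trusted; exempt from pruning
	for _, pid := range p.GetTrustedPeers() {
		_ = p.IsTrustedPeers(pid) // true until deleted
	}
	p.DeleteTrustedPeers(pids) // the peers become prunable again
}
```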
@@ -802,6 +802,11 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
 		}
 	}
 	p.SetTrustedPeers(trustedPeers)
+
+	// Assert we have the correct trusted peers.
+	trustedPeers = p.GetTrustedPeers()
+	assert.Equal(t, 6, len(trustedPeers))
+
 	// Assert all peers more than max are prunable.
 	peersToPrune = p.PeersToPrune()
 	assert.Equal(t, 16, len(peersToPrune))
@@ -812,6 +817,34 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
 			assert.NotEqual(t, pid.String(), tPid.String())
 		}
 	}
+
+	// Add more peers to check that trusted peers can be pruned after they are deleted from the trusted peer set.
+	for i := 0; i < 9; i++ {
+		// Peer added to peer handler.
+		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+	}
+
+	// Delete trusted peers.
+	p.DeleteTrustedPeers(trustedPeers)
+
+	peersToPrune = p.PeersToPrune()
+	assert.Equal(t, 25, len(peersToPrune))
+
+	// Check that trusted peers are pruned.
+	for _, tPid := range trustedPeers {
+		pruned := false
+		for _, pid := range peersToPrune {
+			if pid.String() == tPid.String() {
+				pruned = true
+			}
+		}
+		assert.Equal(t, true, pruned)
+	}
+
+	// Assert we have zero trusted peers.
+	trustedPeers = p.GetTrustedPeers()
+	assert.Equal(t, 0, len(trustedPeers))
+
 	for _, pid := range peersToPrune {
 		dir, err := p.Direction(pid)
 		require.NoError(t, err)
@@ -821,8 +854,8 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
 	// Ensure it is in descending order.
 	currScore := p.Scorers().Score(peersToPrune[0])
 	for _, pid := range peersToPrune {
-		score := p.Scorers().BadResponsesScorer().Score(pid)
-		assert.Equal(t, true, currScore >= score)
+		score := p.Scorers().Score(pid)
+		assert.Equal(t, true, currScore <= score)
 		currScore = score
 	}
 }
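The closing loop asserts that the overall scores come back in non-decreasing numeric order (scores are negative for bad peers, so the worst candidates come first). The same assertion can be written with the standard library; a sketch, not part of the diff:

```go
package example

import "sort"

// isNonDecreasing reports whether prune candidates' scores come back
// lowest-first, which is what the loop above checks pairwise.
func isNonDecreasing(scores []float64) bool {
	return sort.SliceIsSorted(scores, func(i, j int) bool { return scores[i] < scores[j] })
}
```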
@@ -174,9 +174,9 @@ func (s *Service) Start() {
 	s.awaitStateInitialized()
 	s.isPreGenesis = false

-	var peersToWatch []string
+	var relayNodes []string
 	if s.cfg.RelayNodeAddr != "" {
-		peersToWatch = append(peersToWatch, s.cfg.RelayNodeAddr)
+		relayNodes = append(relayNodes, s.cfg.RelayNodeAddr)
 		if err := dialRelayNode(s.ctx, s.host, s.cfg.RelayNodeAddr); err != nil {
 			log.WithError(err).Errorf("Could not dial relay node")
 		}
@@ -213,8 +213,7 @@ func (s *Service) Start() {
 		// Set trusted peers for those that are provided as static addresses.
 		pids := peerIdsFromMultiAddrs(addrs)
 		s.peers.SetTrustedPeers(pids)
-		peersToWatch = append(peersToWatch, s.cfg.StaticPeers...)
-		s.connectWithAllPeers(addrs)
+		s.connectWithAllTrustedPeers(addrs)
 	}
 	// Initialize metadata according to the
 	// current epoch.
@@ -226,7 +225,7 @@ func (s *Service) Start() {

 	// Periodic functions.
 	async.RunEvery(s.ctx, params.BeaconNetworkConfig().TtfbTimeout, func() {
-		ensurePeerConnections(s.ctx, s.host, peersToWatch...)
+		ensurePeerConnections(s.ctx, s.host, s.peers, relayNodes...)
 	})
 	async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
 	async.RunEvery(s.ctx, params.BeaconNetworkConfig().RespTimeout, s.updateMetrics)
@@ -399,6 +398,24 @@ func (s *Service) awaitStateInitialized() {
 	}
 }

+func (s *Service) connectWithAllTrustedPeers(multiAddrs []multiaddr.Multiaddr) {
+	addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
+	if err != nil {
+		log.WithError(err).Error("Could not convert multiaddresses to peer address infos")
+		return
+	}
+	for _, info := range addrInfos {
+		// Add the peer to peer status.
+		s.peers.Add(nil, info.ID, info.Addrs[0], network.DirUnknown)
+		// Make each dial non-blocking.
+		go func(info peer.AddrInfo) {
+			if err := s.connectWithPeer(s.ctx, info); err != nil {
+				log.WithError(err).Tracef("Could not connect with peer %s", info.String())
+			}
+		}(info)
+	}
+}
+
 func (s *Service) connectWithAllPeers(multiAddrs []multiaddr.Multiaddr) {
 	addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
 	if err != nil {
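Start() now hands the peers status and the relay-node list to the periodic reconnect hook instead of a flat string slice. For orientation, Prysm's async.RunEvery invokes a func() on a fixed interval until the context is cancelled; a stand-in with the same shape, using only the standard library:

```go
package example

import (
	"context"
	"time"
)

// runEvery is a sketch of async.RunEvery's behavior: call fn every interval
// until ctx is cancelled. Shown only to make the Start() wiring concrete.
func runEvery(ctx context.Context, interval time.Duration, fn func()) {
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				fn()
			case <-ctx.Done():
				return
			}
		}
	}()
}
```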
@@ -5,28 +5,52 @@ import (

 	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	ma "github.com/multiformats/go-multiaddr"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
 )

 // ensurePeerConnections will attempt to reestablish connection to the peers
 // if there are currently no connections to that peer.
-func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
-	if len(peers) == 0 {
-		return
-	}
-	for _, p := range peers {
-		if p == "" {
+func ensurePeerConnections(ctx context.Context, h host.Host, peers *peers.Status, relayNodes ...string) {
+	// Reset peersToWatch on every run; re-add relay nodes and trusted peers.
+	var peersToWatch []*peer.AddrInfo
+
+	// Add relay nodes.
+	for _, node := range relayNodes {
+		if node == "" {
 			continue
 		}
-		peerInfo, err := MakePeer(p)
+		peerInfo, err := MakePeer(node)
 		if err != nil {
 			log.WithError(err).Error("Could not make peer")
 			continue
 		}
+		peersToWatch = append(peersToWatch, peerInfo)
+	}

-		c := h.Network().ConnsToPeer(peerInfo.ID)
+	// Add trusted peers.
+	trustedPeers := peers.GetTrustedPeers()
+	for _, trustedPeer := range trustedPeers {
+		maddr, err := peers.Address(trustedPeer)

+		// Skip invalid trusted peers.
+		if err != nil || maddr == nil {
+			log.WithField("peer", trustedPeer).WithError(err).Error("Could not get peer address")
+			continue
+		}
+		peerInfo := &peer.AddrInfo{ID: trustedPeer}
+		peerInfo.Addrs = []ma.Multiaddr{maddr}
+		peersToWatch = append(peersToWatch, peerInfo)
+	}

+	if len(peersToWatch) == 0 {
+		return
+	}
+	for _, p := range peersToWatch {
+		c := h.Network().ConnsToPeer(p.ID)
 		if len(c) == 0 {
-			if err := connectWithTimeout(ctx, h, peerInfo); err != nil {
-				log.WithField("peer", peerInfo.ID).WithField("addrs", peerInfo.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
+			if err := connectWithTimeout(ctx, h, p); err != nil {
+				log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
 				continue
 			}
 		}
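ensurePeerConnections now builds *peer.AddrInfo values from two sources: relay-node strings via MakePeer, and trusted peers via their stored multiaddress. The libp2p primitives underneath look roughly like this; a sketch using the public go-multiaddr and go-libp2p APIs, not Prysm's MakePeer wrapper itself:

```go
package example

import (
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

// addrInfoFromString converts a full multiaddr string (ending in /p2p/<id>)
// into a dialable *peer.AddrInfo, much as MakePeer is used above.
func addrInfoFromString(addr string) (*peer.AddrInfo, error) {
	maddr, err := ma.NewMultiaddr(addr)
	if err != nil {
		return nil, err
	}
	return peer.AddrInfoFromP2pAddr(maddr)
}
```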
@@ -32,10 +32,12 @@ go_library(
         "//beacon-chain/rpc/eth/rewards:go_default_library",
         "//beacon-chain/rpc/eth/validator:go_default_library",
         "//beacon-chain/rpc/lookup:go_default_library",
+        "//beacon-chain/rpc/prysm/node:go_default_library",
         "//beacon-chain/rpc/prysm/v1alpha1/beacon:go_default_library",
         "//beacon-chain/rpc/prysm/v1alpha1/debug:go_default_library",
         "//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library",
         "//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
+        "//beacon-chain/rpc/prysm/validator:go_default_library",
         "//beacon-chain/slasher:go_default_library",
         "//beacon-chain/startup:go_default_library",
         "//beacon-chain/state/stategen:go_default_library",

@@ -12,6 +12,7 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware",
     visibility = ["//visibility:public"],
     deps = [
+        "//api:go_default_library",
         "//api/gateway/apimiddleware:go_default_library",
         "//api/grpc:go_default_library",
         "//beacon-chain/rpc/eth/events:go_default_library",
@@ -35,6 +36,7 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
+        "//api:go_default_library",
        "//api/gateway/apimiddleware:go_default_library",
        "//api/grpc:go_default_library",
        "//beacon-chain/rpc/eth/events:go_default_library",
@@ -12,6 +12,7 @@ import (
 	"strconv"
 	"strings"

+	"github.com/prysmaticlabs/prysm/v4/api"
 	"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
 	"github.com/prysmaticlabs/prysm/v4/api/grpc"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
@@ -19,13 +20,6 @@ import (
 	"github.com/r3labs/sse"
 )

-const (
-	versionHeader        = "Eth-Consensus-Version"
-	grpcVersionHeader    = "Grpc-metadata-Eth-Consensus-Version"
-	jsonMediaType        = "application/json"
-	octetStreamMediaType = "application/octet-stream"
-)
-
 // match a number with optional decimals
 var priorityRegex = regexp.MustCompile(`q=(\d+(?:\.\d+)?)`)
@@ -223,7 +217,7 @@ func sszRequested(req *http.Request) (bool, error) {
 	for _, t := range types {
 		values := strings.Split(t, ";")
 		name := values[0]
-		if name != jsonMediaType && name != octetStreamMediaType {
+		if name != api.JsonMediaType && name != api.OctetStreamMediaType {
 			continue
 		}
 		// no params specified
@@ -248,7 +242,7 @@ func sszRequested(req *http.Request) (bool, error) {
 		}
 	}

-	return currentType == octetStreamMediaType, nil
+	return currentType == api.OctetStreamMediaType, nil
 }

 func sszPosted(req *http.Request) bool {
@@ -259,7 +253,7 @@ func sszPosted(req *http.Request) bool {
 	if len(ct) != 1 {
 		return false
 	}
-	return ct[0] == octetStreamMediaType
+	return ct[0] == api.OctetStreamMediaType
 }
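sszRequested implements a small slice of HTTP content negotiation: it scans the Accept list, considers only the two supported media types, and lets the highest q-value win (with the earlier entry winning ties, per the tests below). The same decision, sketched standalone with the standard library's mime.ParseMediaType; this is illustrative, not the middleware's actual code:

```go
package example

import (
	"mime"
	"strconv"
	"strings"
)

// sszPreferred reports whether application/octet-stream is the best choice
// among the Accept media types: higher q wins, ties go to the earlier entry.
func sszPreferred(accept string) bool {
	best, bestQ := "", -1.0
	for _, part := range strings.Split(accept, ",") {
		mt, params, err := mime.ParseMediaType(strings.TrimSpace(part))
		if err != nil || (mt != "application/json" && mt != "application/octet-stream") {
			continue
		}
		q := 1.0 // q defaults to 1 when absent
		if s, ok := params["q"]; ok {
			if v, err := strconv.ParseFloat(s, 64); err == nil {
				q = v
			}
		}
		if q > bestQ {
			best, bestQ = mt, q
		}
	}
	return best == "application/octet-stream"
}
```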
 func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
@@ -278,10 +272,10 @@ func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint
 }

 func prepareCustomHeaders(req *http.Request) {
-	ver := req.Header.Get(versionHeader)
+	ver := req.Header.Get(api.VersionHeader)
 	if ver != "" {
-		req.Header.Del(versionHeader)
-		req.Header.Add(grpcVersionHeader, ver)
+		req.Header.Del(api.VersionHeader)
+		req.Header.Add(grpc.WithPrefix(api.VersionHeader), ver)
 	}
 }
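The hard-coded grpcVersionHeader constant is gone; grpc.WithPrefix now derives the gateway header from the canonical one. Judging only from the constants deleted above, the helper appears to namespace a plain HTTP header for grpc-gateway metadata forwarding; a sketch of that apparent behavior, an assumption rather than the helper's actual source:

```go
package example

// withPrefix mirrors what the api/grpc helper appears to do, based on the
// removed "Grpc-metadata-Eth-Consensus-Version" constant: prepend the
// grpc-gateway metadata namespace to a plain HTTP header name.
func withPrefix(header string) string {
	return "Grpc-Metadata-" + header // HTTP header names are case-insensitive
}
```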
@@ -297,7 +291,7 @@ func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
 	}
 	req.Body = io.NopCloser(bytes.NewBuffer(data))
 	req.ContentLength = int64(len(data))
-	req.Header.Set("Content-Type", jsonMediaType)
+	req.Header.Set("Content-Type", api.JsonMediaType)
 	return nil
 }

@@ -325,9 +319,9 @@ func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWrite
 		}
 	}
 	w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
-	w.Header().Set("Content-Type", octetStreamMediaType)
+	w.Header().Set("Content-Type", api.OctetStreamMediaType)
 	w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
-	w.Header().Set(versionHeader, respVersion)
+	w.Header().Set(api.VersionHeader, respVersion)
 	if statusCodeHeader != "" {
 		code, err := strconv.Atoi(statusCodeHeader)
 		if err != nil {
@@ -11,6 +11,7 @@ import (
 	"testing"
 	"time"

+	"github.com/prysmaticlabs/prysm/v4/api"
 	"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
 	"github.com/prysmaticlabs/prysm/v4/api/grpc"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
@@ -45,7 +46,7 @@ func (t testSSZResponseJson) SSZFinalized() bool {
 func TestSSZRequested(t *testing.T) {
 	t.Run("ssz_requested", func(t *testing.T) {
 		request := httptest.NewRequest("GET", "http://foo.example", nil)
-		request.Header["Accept"] = []string{octetStreamMediaType}
+		request.Header["Accept"] = []string{api.OctetStreamMediaType}
 		result, err := sszRequested(request)
 		require.NoError(t, err)
 		assert.Equal(t, true, result)
@@ -53,7 +54,7 @@ func TestSSZRequested(t *testing.T) {

 	t.Run("ssz_content_type_first", func(t *testing.T) {
 		request := httptest.NewRequest("GET", "http://foo.example", nil)
-		request.Header["Accept"] = []string{fmt.Sprintf("%s,%s", octetStreamMediaType, jsonMediaType)}
+		request.Header["Accept"] = []string{fmt.Sprintf("%s,%s", api.OctetStreamMediaType, api.JsonMediaType)}
 		result, err := sszRequested(request)
 		require.NoError(t, err)
 		assert.Equal(t, true, result)
@@ -61,7 +62,7 @@ func TestSSZRequested(t *testing.T) {

 	t.Run("ssz_content_type_preferred_1", func(t *testing.T) {
 		request := httptest.NewRequest("GET", "http://foo.example", nil)
-		request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.9,%s", jsonMediaType, octetStreamMediaType)}
+		request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.9,%s", api.JsonMediaType, api.OctetStreamMediaType)}
 		result, err := sszRequested(request)
 		require.NoError(t, err)
 		assert.Equal(t, true, result)
@@ -69,7 +70,7 @@ func TestSSZRequested(t *testing.T) {

 	t.Run("ssz_content_type_preferred_2", func(t *testing.T) {
 		request := httptest.NewRequest("GET", "http://foo.example", nil)
-		request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.95,%s;q=0.9", octetStreamMediaType, jsonMediaType)}
+		request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.95,%s;q=0.9", api.OctetStreamMediaType, api.JsonMediaType)}
 		result, err := sszRequested(request)
 		require.NoError(t, err)
 		assert.Equal(t, true, result)
@@ -77,7 +78,7 @@ func TestSSZRequested(t *testing.T) {

 	t.Run("other_content_type_preferred", func(t *testing.T) {
 		request := httptest.NewRequest("GET", "http://foo.example", nil)
-		request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9", jsonMediaType, octetStreamMediaType)}
+		request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9", api.JsonMediaType, api.OctetStreamMediaType)}
 		result, err := sszRequested(request)
 		require.NoError(t, err)
 		assert.Equal(t, false, result)
@@ -85,7 +86,7 @@ func TestSSZRequested(t *testing.T) {

 	t.Run("other_params", func(t *testing.T) {
 		request := httptest.NewRequest("GET", "http://foo.example", nil)
-		request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9,otherparam=xyz", jsonMediaType, octetStreamMediaType)}
+		request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9,otherparam=xyz", api.JsonMediaType, api.OctetStreamMediaType)}
 		result, err := sszRequested(request)
 		require.NoError(t, err)
 		assert.Equal(t, false, result)
@@ -153,7 +154,7 @@ func TestPreparePostedSszData(t *testing.T) {

 	preparePostedSSZData(request)
 	assert.Equal(t, int64(19), request.ContentLength)
-	assert.Equal(t, jsonMediaType, request.Header.Get("Content-Type"))
+	assert.Equal(t, api.JsonMediaType, request.Header.Get("Content-Type"))
 }

 func TestSerializeMiddlewareResponseIntoSSZ(t *testing.T) {
@@ -209,12 +210,12 @@ func TestWriteSSZResponseHeaderAndBody(t *testing.T) {
 	v, ok = writer.Header()["Content-Type"]
 	require.Equal(t, true, ok, "header not found")
 	require.Equal(t, 1, len(v), "wrong number of header values")
-	assert.Equal(t, octetStreamMediaType, v[0])
+	assert.Equal(t, api.OctetStreamMediaType, v[0])
 	v, ok = writer.Header()["Content-Disposition"]
 	require.Equal(t, true, ok, "header not found")
 	require.Equal(t, 1, len(v), "wrong number of header values")
 	assert.Equal(t, "attachment; filename=test.ssz", v[0])
-	v, ok = writer.Header()[versionHeader]
+	v, ok = writer.Header()[api.VersionHeader]
 	require.Equal(t, true, ok, "header not found")
 	require.Equal(t, 1, len(v), "wrong number of header values")
 	assert.Equal(t, "version", v[0])
beacon-chain/rpc/core/BUILD.bazel (new file, 25 lines)
@@ -0,0 +1,25 @@
+load("@prysm//tools/go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "errors.go",
+        "validator.go",
+    ],
+    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//beacon-chain/blockchain:go_default_library",
+        "//beacon-chain/core/altair:go_default_library",
+        "//beacon-chain/core/epoch/precompute:go_default_library",
+        "//beacon-chain/core/helpers:go_default_library",
+        "//beacon-chain/core/time:go_default_library",
+        "//beacon-chain/core/transition:go_default_library",
+        "//consensus-types/primitives:go_default_library",
+        "//encoding/bytesutil:go_default_library",
+        "//proto/prysm/v1alpha1:go_default_library",
+        "//runtime/version:go_default_library",
+        "@com_github_pkg_errors//:go_default_library",
+        "@org_golang_google_grpc//codes:go_default_library",
+    ],
+)
beacon-chain/rpc/core/errors.go (new file, 49 lines)
@@ -0,0 +1,49 @@
+package core
+
+import (
+	"net/http"
+
+	"google.golang.org/grpc/codes"
+)
+
+type ErrorReason uint8
+
+const (
+	Internal = iota
+	Unavailable
+	BadRequest
+	// Add more errors as needed
+)
+
+type RpcError struct {
+	Err    error
+	Reason ErrorReason
+}
+
+func ErrorReasonToGRPC(reason ErrorReason) codes.Code {
+	switch reason {
+	case Internal:
+		return codes.Internal
+	case Unavailable:
+		return codes.Unavailable
+	case BadRequest:
+		return codes.InvalidArgument
+	// Add more cases for other error reasons as needed
+	default:
+		return codes.Internal
+	}
+}
+
+func ErrorReasonToHTTP(reason ErrorReason) int {
+	switch reason {
+	case Internal:
+		return http.StatusInternalServerError
+	case Unavailable:
+		return http.StatusServiceUnavailable
+	case BadRequest:
+		return http.StatusBadRequest
+	// Add more cases for other error reasons as needed
+	default:
+		return http.StatusInternalServerError
+	}
+}
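The point of RpcError is to keep the shared core logic transport-agnostic: one error value maps to either a gRPC code or an HTTP status at the edge. A small usage sketch (the helper function names are invented for illustration):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
	"google.golang.org/grpc/status"
)

// writeHTTPError shows one way an HTTP handler could surface a core.RpcError.
func writeHTTPError(w http.ResponseWriter, rpcErr *core.RpcError) {
	http.Error(w, rpcErr.Err.Error(), core.ErrorReasonToHTTP(rpcErr.Reason))
}

// toGRPCStatus converts the same error for a gRPC response.
func toGRPCStatus(rpcErr *core.RpcError) error {
	return status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
}

func main() {
	e := &core.RpcError{Err: fmt.Errorf("head state unavailable"), Reason: core.Unavailable}
	fmt.Println(toGRPCStatus(e)) // rpc error: code = Unavailable desc = head state unavailable
	_ = writeHTTPError
}
```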
beacon-chain/rpc/core/validator.go (new file, 168 lines)
@@ -0,0 +1,168 @@
+package core
+
+import (
+	"context"
+	"sort"
+
+	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
+	coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/runtime/version"
+)
+
+// ComputeValidatorPerformance computes per-validator performance summaries
+// for the epoch transition at the requested slot.
+func ComputeValidatorPerformance(
+	ctx context.Context,
+	req *ethpb.ValidatorPerformanceRequest,
+	headFetcher blockchain.HeadFetcher,
+	currSlot primitives.Slot,
+) (*ethpb.ValidatorPerformanceResponse, *RpcError) {
+	headState, err := headFetcher.HeadState(ctx)
+	if err != nil {
+		return nil, &RpcError{Err: errors.Wrap(err, "could not get head state"), Reason: Internal}
+	}
+	if currSlot > headState.Slot() {
+		headRoot, err := headFetcher.HeadRoot(ctx)
+		if err != nil {
+			return nil, &RpcError{Err: errors.Wrap(err, "could not get head root"), Reason: Internal}
+		}
+		headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, currSlot)
+		if err != nil {
+			return nil, &RpcError{Err: errors.Wrapf(err, "could not process slots up to %d", currSlot), Reason: Internal}
+		}
+	}
+	var validatorSummary []*precompute.Validator
+	if headState.Version() == version.Phase0 {
+		vp, bp, err := precompute.New(ctx, headState)
+		if err != nil {
+			return nil, &RpcError{Err: err, Reason: Internal}
+		}
+		vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
+		if err != nil {
+			return nil, &RpcError{Err: err, Reason: Internal}
+		}
+		headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
+		if err != nil {
+			return nil, &RpcError{Err: err, Reason: Internal}
+		}
+		validatorSummary = vp
+	} else if headState.Version() >= version.Altair {
+		vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
+		if err != nil {
+			return nil, &RpcError{Err: err, Reason: Internal}
+		}
+		vp, bp, err = altair.ProcessEpochParticipation(ctx, headState, bp, vp)
+		if err != nil {
+			return nil, &RpcError{Err: err, Reason: Internal}
+		}
+		headState, vp, err = altair.ProcessInactivityScores(ctx, headState, vp)
+		if err != nil {
+			return nil, &RpcError{Err: err, Reason: Internal}
+		}
+		headState, err = altair.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp)
+		if err != nil {
+			return nil, &RpcError{Err: err, Reason: Internal}
+		}
+		validatorSummary = vp
+	} else {
+		return nil, &RpcError{Err: errors.Errorf("head state version %d not supported", headState.Version()), Reason: Internal}
+	}
+
+	responseCap := len(req.Indices) + len(req.PublicKeys)
+	validatorIndices := make([]primitives.ValidatorIndex, 0, responseCap)
+	missingValidators := make([][]byte, 0, responseCap)
+
+	filtered := map[primitives.ValidatorIndex]bool{} // Track filtered validators to prevent duplication in the response.
+	// Convert the list of validator public keys to validator indices and add to the indices set.
+	for _, pubKey := range req.PublicKeys {
+		// Skip empty public key.
+		if len(pubKey) == 0 {
+			continue
+		}
+		pubkeyBytes := bytesutil.ToBytes48(pubKey)
+		idx, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
+		if !ok {
+			// Validator index not found, track as missing.
+			missingValidators = append(missingValidators, pubKey)
+			continue
+		}
+		if !filtered[idx] {
+			validatorIndices = append(validatorIndices, idx)
+			filtered[idx] = true
+		}
+	}
+	// Add provided indices to the indices set.
+	for _, idx := range req.Indices {
+		if !filtered[idx] {
+			validatorIndices = append(validatorIndices, idx)
+			filtered[idx] = true
+		}
+	}
+	// Depending on the indices and public keys given, results might not be sorted.
+	sort.Slice(validatorIndices, func(i, j int) bool {
+		return validatorIndices[i] < validatorIndices[j]
+	})
+
+	currentEpoch := coreTime.CurrentEpoch(headState)
+	responseCap = len(validatorIndices)
+	pubKeys := make([][]byte, 0, responseCap)
+	beforeTransitionBalances := make([]uint64, 0, responseCap)
+	afterTransitionBalances := make([]uint64, 0, responseCap)
+	effectiveBalances := make([]uint64, 0, responseCap)
+	correctlyVotedSource := make([]bool, 0, responseCap)
+	correctlyVotedTarget := make([]bool, 0, responseCap)
+	correctlyVotedHead := make([]bool, 0, responseCap)
+	inactivityScores := make([]uint64, 0, responseCap)
+	// Append performance summaries.
+	// Also track missing validators using public keys.
+	for _, idx := range validatorIndices {
+		val, err := headState.ValidatorAtIndexReadOnly(idx)
+		if err != nil {
+			return nil, &RpcError{Err: errors.Wrap(err, "could not get validator"), Reason: Internal}
+		}
+		pubKey := val.PublicKey()
+		if uint64(idx) >= uint64(len(validatorSummary)) {
+			// Not listed in validator summary yet; treat it as missing.
+			missingValidators = append(missingValidators, pubKey[:])
+			continue
+		}
+		if !helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
+			// Inactive validator; treat it as missing.
+			missingValidators = append(missingValidators, pubKey[:])
+			continue
+		}
+
+		summary := validatorSummary[idx]
+		pubKeys = append(pubKeys, pubKey[:])
+		effectiveBalances = append(effectiveBalances, summary.CurrentEpochEffectiveBalance)
+		beforeTransitionBalances = append(beforeTransitionBalances, summary.BeforeEpochTransitionBalance)
+		afterTransitionBalances = append(afterTransitionBalances, summary.AfterEpochTransitionBalance)
+		correctlyVotedTarget = append(correctlyVotedTarget, summary.IsPrevEpochTargetAttester)
+		correctlyVotedHead = append(correctlyVotedHead, summary.IsPrevEpochHeadAttester)
+
+		if headState.Version() == version.Phase0 {
+			correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
+		} else {
+			correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
+			inactivityScores = append(inactivityScores, summary.InactivityScore)
+		}
+	}
+
+	return &ethpb.ValidatorPerformanceResponse{
+		PublicKeys:                    pubKeys,
+		CorrectlyVotedSource:          correctlyVotedSource,
+		CorrectlyVotedTarget:          correctlyVotedTarget, // In Altair, when this is true the attestation was definitely included.
+		CorrectlyVotedHead:            correctlyVotedHead,
+		CurrentEffectiveBalances:      effectiveBalances,
+		BalancesBeforeEpochTransition: beforeTransitionBalances,
+		BalancesAfterEpochTransition:  afterTransitionBalances,
+		MissingValidators:             missingValidators,
+		InactivityScores:              inactivityScores, // Only populated in Altair and later.
+	}, nil
+}
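For orientation, a minimal sketch of how an RPC server method might delegate to this shared core function and translate the RpcError back into a gRPC status. The Server struct and its fields here are stand-ins invented for the sketch, not part of this changeset:

```go
package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"google.golang.org/grpc/status"
)

// Server is a stand-in for the real RPC server type; only the fields this
// sketch needs are declared.
type Server struct {
	HeadFetcher blockchain.HeadFetcher
	CurrentSlot func() primitives.Slot
}

// GetValidatorPerformance delegates to the shared core implementation and
// converts the transport-agnostic RpcError into a gRPC status error.
func (s *Server) GetValidatorPerformance(ctx context.Context, req *ethpb.ValidatorPerformanceRequest) (*ethpb.ValidatorPerformanceResponse, error) {
	resp, rpcErr := core.ComputeValidatorPerformance(ctx, req, s.HeadFetcher, s.CurrentSlot())
	if rpcErr != nil {
		return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
	}
	return resp, nil
}
```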
@@ -18,6 +18,7 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon",
     visibility = ["//beacon-chain:__subpackages__"],
     deps = [
+        "//api:go_default_library",
         "//api/grpc:go_default_library",
         "//beacon-chain/blockchain:go_default_library",
         "//beacon-chain/core/altair:go_default_library",
@@ -67,6 +68,7 @@ go_library(
         "@com_github_wealdtech_go_bytesutil//:go_default_library",
         "@io_bazel_rules_go//proto/wkt:empty_go_proto",
         "@io_opencensus_go//trace:go_default_library",
+        "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//codes:go_default_library",
         "@org_golang_google_grpc//metadata:go_default_library",
         "@org_golang_google_grpc//status:go_default_library",
@@ -91,6 +93,7 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
+        "//api:go_default_library",
         "//api/grpc:go_default_library",
         "//beacon-chain/blockchain/testing:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
@@ -5,6 +5,7 @@ import (
 	"strings"

 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v4/api"
 	rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
 	"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -17,7 +18,9 @@ import (
 	ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
 	"github.com/prysmaticlabs/prysm/v4/proto/migration"
 	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/runtime/version"
 	"go.opencensus.io/trace"
+	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
@@ -38,7 +41,9 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
 	if err != nil {
 		return nil, errors.Wrapf(err, "could not get block root")
 	}

+	if err := grpc.SetHeader(ctx, metadata.Pairs(api.VersionHeader, version.String(blk.Version()))); err != nil {
+		return nil, status.Errorf(codes.Internal, "Could not set "+api.VersionHeader+" header: %v", err)
+	}
 	result, err := getBlindedBlockPhase0(blk)
 	if result != nil {
 		result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
@@ -196,11 +201,11 @@ func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZCon

 	md, ok := metadata.FromIncomingContext(ctx)
 	if !ok {
-		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
+		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
 	}
-	ver := md.Get(versionHeader)
+	ver := md.Get(api.VersionHeader)
 	if len(ver) == 0 {
-		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
+		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
 	}
 	schedule := forks.NewOrderedSchedule(params.BeaconConfig())
 	forkVer, err := schedule.VersionForName(ver[0])
@@ -5,6 +5,8 @@ import (
 	"testing"

 	"github.com/golang/mock/gomock"
+	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+	"github.com/prysmaticlabs/prysm/v4/api"
 	mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
 	mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
@@ -17,11 +19,13 @@ import (
 	mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
 	"github.com/prysmaticlabs/prysm/v4/testing/require"
 	"github.com/prysmaticlabs/prysm/v4/testing/util"
+	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"
 )

 func TestServer_GetBlindedBlock(t *testing.T) {
-	ctx := context.Background()
+	stream := &runtime.ServerTransportStream{}
+	ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)

 	t.Run("Phase 0", func(t *testing.T) {
 		b := util.NewBeaconBlock()
@@ -321,7 +325,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "phase0")
+		md.Set(api.VersionHeader, "phase0")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
 		assert.NoError(t, err)
@@ -342,7 +346,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "altair")
+		md.Set(api.VersionHeader, "altair")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
 		assert.NoError(t, err)
@@ -363,7 +367,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "bellatrix")
+		md.Set(api.VersionHeader, "bellatrix")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
 		assert.NoError(t, err)
@@ -381,7 +385,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "bellatrix")
+		md.Set(api.VersionHeader, "bellatrix")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
 		assert.NotNil(t, err)
@@ -402,7 +406,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "capella")
+		md.Set(api.VersionHeader, "capella")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
 		assert.NoError(t, err)
@@ -420,7 +424,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "capella")
+		md.Set(api.VersionHeader, "capella")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
 		assert.NotNil(t, err)
@@ -8,6 +8,7 @@ import (

 	"github.com/golang/protobuf/ptypes/empty"
 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v4/api"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
 	rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
@@ -25,16 +26,16 @@ import (
 	ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
 	"github.com/prysmaticlabs/prysm/v4/proto/migration"
 	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/runtime/version"
 	"github.com/prysmaticlabs/prysm/v4/time/slots"
 	"go.opencensus.io/trace"
+	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
 	"google.golang.org/protobuf/types/known/emptypb"
 )

-const versionHeader = "eth-consensus-version"
-
 var (
 	errNilBlock = errors.New("nil block")
 )
@@ -253,11 +254,11 @@ func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer)

 	md, ok := metadata.FromIncomingContext(ctx)
 	if !ok {
-		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
+		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
 	}
-	ver := md.Get(versionHeader)
+	ver := md.Get(api.VersionHeader)
 	if len(ver) == 0 {
-		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
+		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
 	}
 	schedule := forks.NewOrderedSchedule(params.BeaconConfig())
 	forkVer, err := schedule.VersionForName(ver[0])
@@ -424,6 +425,9 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
 		if !errors.Is(err, consensus_types.ErrUnsupportedField) {
 			return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
 		}
+		if err := grpc.SetHeader(ctx, metadata.Pairs(api.VersionHeader, version.String(blk.Version()))); err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not set "+api.VersionHeader+" header: %v", err)
+		}
 		result, err = getBlockAltair(blk)
 		if result != nil {
 			result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
@@ -5,7 +5,9 @@ import (
 	"testing"

 	"github.com/golang/mock/gomock"
+	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
 	"github.com/prysmaticlabs/go-bitfield"
+	"github.com/prysmaticlabs/prysm/v4/api"
 	mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
 	dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
@@ -24,6 +26,7 @@ import (
 	mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
 	"github.com/prysmaticlabs/prysm/v4/testing/require"
 	"github.com/prysmaticlabs/prysm/v4/testing/util"
+	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"
 )

@@ -444,7 +447,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "phase0")
+		md.Set(api.VersionHeader, "phase0")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
 		assert.NoError(t, err)
@@ -465,7 +468,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "altair")
+		md.Set(api.VersionHeader, "altair")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
 		assert.NoError(t, err)
@@ -486,7 +489,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "bellatrix")
+		md.Set(api.VersionHeader, "bellatrix")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
 		assert.NoError(t, err)
@@ -504,7 +507,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "bellatrix")
+		md.Set(api.VersionHeader, "bellatrix")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
 		assert.NotNil(t, err)
@@ -525,7 +528,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "capella")
+		md.Set(api.VersionHeader, "capella")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
 		assert.NoError(t, err)
@@ -543,7 +546,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
 			Data: ssz,
 		}
 		md := metadata.MD{}
-		md.Set(versionHeader, "capella")
+		md.Set(api.VersionHeader, "capella")
 		sszCtx := metadata.NewIncomingContext(ctx, md)
 		_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
 		assert.NotNil(t, err)
@@ -579,8 +582,8 @@ func TestServer_GetBlock(t *testing.T) {
 }

 func TestServer_GetBlockV2(t *testing.T) {
-	ctx := context.Background()
-
+	stream := &runtime.ServerTransportStream{}
+	ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
 	t.Run("Phase 0", func(t *testing.T) {
 		b := util.NewBeaconBlock()
 		b.Block.Slot = 123
@@ -15,6 +15,9 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v4/network"
+	ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
+	ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
+	"github.com/prysmaticlabs/prysm/v4/proto/migration"
 	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 )

@@ -38,7 +41,142 @@ func (bs *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request)
 	if ok := bs.checkSync(r.Context(), w); !ok {
 		return
 	}
+	isSSZ, err := network.SszRequested(r)
+	if isSSZ && err == nil {
+		publishBlindedBlockV2SSZ(bs, w, r)
+	} else {
+		publishBlindedBlockV2(bs, w, r)
+	}
 }

+func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
+	body, err := io.ReadAll(r.Body)
+	if err != nil {
+		errJson := &network.DefaultErrorJson{
+			Message: "Could not read request body: " + err.Error(),
+			Code:    http.StatusInternalServerError,
+		}
+		network.WriteError(w, errJson)
+		return
+	}
+	capellaBlock := &ethpbv2.SignedBlindedBeaconBlockCapella{}
+	if err := capellaBlock.UnmarshalSSZ(body); err == nil {
+		v1block, err := migration.BlindedCapellaToV1Alpha1SignedBlock(capellaBlock)
+		if err != nil {
+			errJson := &network.DefaultErrorJson{
+				Message: "Could not decode request body into consensus block: " + err.Error(),
+				Code:    http.StatusBadRequest,
+			}
+			network.WriteError(w, errJson)
+			return
+		}
+		genericBlock := &eth.GenericSignedBeaconBlock{
+			Block: &eth.GenericSignedBeaconBlock_BlindedCapella{
+				BlindedCapella: v1block,
+			},
+		}
+		if err = bs.validateBroadcast(r, genericBlock); err != nil {
+			errJson := &network.DefaultErrorJson{
+				Message: err.Error(),
+				Code:    http.StatusBadRequest,
+			}
+			network.WriteError(w, errJson)
+			return
+		}
+		bs.proposeBlock(r.Context(), w, genericBlock)
+		return
+	}
+	bellatrixBlock := &ethpbv2.SignedBlindedBeaconBlockBellatrix{}
+	if err := bellatrixBlock.UnmarshalSSZ(body); err == nil {
+		v1block, err := migration.BlindedBellatrixToV1Alpha1SignedBlock(bellatrixBlock)
+		if err != nil {
+			errJson := &network.DefaultErrorJson{
+				Message: "Could not decode request body into consensus block: " + err.Error(),
+				Code:    http.StatusBadRequest,
+			}
+			network.WriteError(w, errJson)
+			return
+		}
+		genericBlock := &eth.GenericSignedBeaconBlock{
+			Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{
+				BlindedBellatrix: v1block,
+			},
+		}
+		if err = bs.validateBroadcast(r, genericBlock); err != nil {
+			errJson := &network.DefaultErrorJson{
+				Message: err.Error(),
+				Code:    http.StatusBadRequest,
+			}
+			network.WriteError(w, errJson)
+			return
+		}
+		bs.proposeBlock(r.Context(), w, genericBlock)
+		return
+	}
+
+	// Blinded blocks are not supported before the Bellatrix hard fork.
+	altairBlock := &ethpbv2.SignedBeaconBlockAltair{}
+	if err := altairBlock.UnmarshalSSZ(body); err == nil {
+		v1block, err := migration.AltairToV1Alpha1SignedBlock(altairBlock)
+		if err != nil {
+			errJson := &network.DefaultErrorJson{
+				Message: "Could not decode request body into consensus block: " + err.Error(),
+				Code:    http.StatusBadRequest,
+			}
+			network.WriteError(w, errJson)
+			return
+		}
+		genericBlock := &eth.GenericSignedBeaconBlock{
+			Block: &eth.GenericSignedBeaconBlock_Altair{
+				Altair: v1block,
+			},
+		}
+		if err = bs.validateBroadcast(r, genericBlock); err != nil {
+			errJson := &network.DefaultErrorJson{
+				Message: err.Error(),
+				Code:    http.StatusBadRequest,
+			}
+			network.WriteError(w, errJson)
+			return
+		}
+		bs.proposeBlock(r.Context(), w, genericBlock)
+		return
+	}
+	phase0Block := &ethpbv1.SignedBeaconBlock{}
+	if err := phase0Block.UnmarshalSSZ(body); err == nil {
+		v1block, err := migration.V1ToV1Alpha1SignedBlock(phase0Block)
+		if err != nil {
+			errJson := &network.DefaultErrorJson{
+				Message: "Could not decode request body into consensus block: " + err.Error(),
+				Code:    http.StatusBadRequest,
+			}
+			network.WriteError(w, errJson)
+			return
+		}
+		genericBlock := &eth.GenericSignedBeaconBlock{
+			Block: &eth.GenericSignedBeaconBlock_Phase0{
+				Phase0: v1block,
+			},
+		}
+		if err = bs.validateBroadcast(r, genericBlock); err != nil {
+			errJson := &network.DefaultErrorJson{
+				Message: err.Error(),
+				Code:    http.StatusBadRequest,
+			}
+			network.WriteError(w, errJson)
+			return
+		}
+		bs.proposeBlock(r.Context(), w, genericBlock)
+		return
+	}
+	errJson := &network.DefaultErrorJson{
+		Message: "Body does not represent a valid block type",
+		Code:    http.StatusBadRequest,
+	}
+	network.WriteError(w, errJson)
+}
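Every fork branch above ends with the same validate-then-propose tail. As an editorial aside, that shared tail could be factored into a small helper; the interface and function below are invented for this sketch and are not part of the changeset, though the network helpers they call are the ones used above:

```go
package example

import (
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/network"
	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// broadcaster abstracts the two Server methods every branch ends with;
// the interface is invented for this sketch.
type broadcaster interface {
	ValidateBroadcast(r *http.Request, blk *eth.GenericSignedBeaconBlock) error
	ProposeBlock(w http.ResponseWriter, r *http.Request, blk *eth.GenericSignedBeaconBlock)
}

// publishDecoded is the shared tail of every fork branch: validate the
// broadcast, then propose, writing a JSON error on failure.
func publishDecoded(bs broadcaster, w http.ResponseWriter, r *http.Request, blk *eth.GenericSignedBeaconBlock) {
	if err := bs.ValidateBroadcast(r, blk); err != nil {
		network.WriteError(w, &network.DefaultErrorJson{
			Message: err.Error(),
			Code:    http.StatusBadRequest,
		})
		return
	}
	bs.ProposeBlock(w, r, blk)
}
```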
+func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
 	validate := validator.New()
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
@@ -167,7 +305,15 @@ func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
 	if ok := bs.checkSync(r.Context(), w); !ok {
 		return
 	}
+	isSSZ, err := network.SszRequested(r)
+	if isSSZ && err == nil {
+		publishBlockV2SSZ(bs, w, r)
+	} else {
+		publishBlockV2(bs, w, r)
+	}
 }

+func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
+	validate := validator.New()
+	body, err := io.ReadAll(r.Body)
+	if err != nil {
@@ -178,7 +324,140 @@ func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
 		network.WriteError(w, errJson)
 		return
 	}
+	capellaBlock := &ethpbv2.SignedBeaconBlockCapella{}
+	if err := capellaBlock.UnmarshalSSZ(body); err == nil {
+		if err = validate.Struct(capellaBlock); err == nil {
+			v1block, err := migration.CapellaToV1Alpha1SignedBlock(capellaBlock)
+			if err != nil {
+				errJson := &network.DefaultErrorJson{
+					Message: "Could not decode request body into consensus block: " + err.Error(),
+					Code:    http.StatusBadRequest,
+				}
+				network.WriteError(w, errJson)
+				return
+			}
+			genericBlock := &eth.GenericSignedBeaconBlock{
+				Block: &eth.GenericSignedBeaconBlock_Capella{
+					Capella: v1block,
+				},
+			}
+			if err = bs.validateBroadcast(r, genericBlock); err != nil {
+				errJson := &network.DefaultErrorJson{
+					Message: err.Error(),
+					Code:    http.StatusBadRequest,
+				}
+				network.WriteError(w, errJson)
+				return
+			}
+			bs.proposeBlock(r.Context(), w, genericBlock)
+			return
+		}
+	}
+	bellatrixBlock := &ethpbv2.SignedBeaconBlockBellatrix{}
+	if err := bellatrixBlock.UnmarshalSSZ(body); err == nil {
+		if err = validate.Struct(bellatrixBlock); err == nil {
+			v1block, err := migration.BellatrixToV1Alpha1SignedBlock(bellatrixBlock)
+			if err != nil {
+				errJson := &network.DefaultErrorJson{
+					Message: "Could not decode request body into consensus block: " + err.Error(),
+					Code:    http.StatusBadRequest,
+				}
+				network.WriteError(w, errJson)
+				return
+			}
+			genericBlock := &eth.GenericSignedBeaconBlock{
+				Block: &eth.GenericSignedBeaconBlock_Bellatrix{
+					Bellatrix: v1block,
+				},
+			}
+			if err = bs.validateBroadcast(r, genericBlock); err != nil {
+				errJson := &network.DefaultErrorJson{
+					Message: err.Error(),
+					Code:    http.StatusBadRequest,
+				}
+				network.WriteError(w, errJson)
+				return
+			}
+			bs.proposeBlock(r.Context(), w, genericBlock)
+			return
+		}
+	}
+	altairBlock := &ethpbv2.SignedBeaconBlockAltair{}
+	if err := altairBlock.UnmarshalSSZ(body); err == nil {
+		if err = validate.Struct(altairBlock); err == nil {
+			v1block, err := migration.AltairToV1Alpha1SignedBlock(altairBlock)
+			if err != nil {
+				errJson := &network.DefaultErrorJson{
+					Message: "Could not decode request body into consensus block: " + err.Error(),
+					Code:    http.StatusBadRequest,
+				}
+				network.WriteError(w, errJson)
+				return
+			}
+			genericBlock := &eth.GenericSignedBeaconBlock{
+				Block: &eth.GenericSignedBeaconBlock_Altair{
+					Altair: v1block,
+				},
+			}
+			if err = bs.validateBroadcast(r, genericBlock); err != nil {
+				errJson := &network.DefaultErrorJson{
+					Message: err.Error(),
+					Code:    http.StatusBadRequest,
+				}
+				network.WriteError(w, errJson)
+				return
+			}
+			bs.proposeBlock(r.Context(), w, genericBlock)
+			return
+		}
+	}
+	phase0Block := &ethpbv1.SignedBeaconBlock{}
+	if err := phase0Block.UnmarshalSSZ(body); err == nil {
+		if err = validate.Struct(phase0Block); err == nil {
+			v1block, err := migration.V1ToV1Alpha1SignedBlock(phase0Block)
+			if err != nil {
+				errJson := &network.DefaultErrorJson{
+					Message: "Could not decode request body into consensus block: " + err.Error(),
+					Code:    http.StatusBadRequest,
+				}
+				network.WriteError(w, errJson)
+				return
+			}
+			genericBlock := &eth.GenericSignedBeaconBlock{
+				Block: &eth.GenericSignedBeaconBlock_Phase0{
+					Phase0: v1block,
+				},
+			}
+			if err = bs.validateBroadcast(r, genericBlock); err != nil {
+				errJson := &network.DefaultErrorJson{
+					Message: err.Error(),
+					Code:    http.StatusBadRequest,
+				}
+				network.WriteError(w, errJson)
+				return
+			}
+			bs.proposeBlock(r.Context(), w, genericBlock)
+			return
+		}
+	}
+	errJson := &network.DefaultErrorJson{
+		Message: "Body does not represent a valid block type",
+		Code:    http.StatusBadRequest,
+	}
+	network.WriteError(w, errJson)
+}

+func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
 	validate := validator.New()
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
 		errJson := &network.DefaultErrorJson{
 			Message: "Could not read request body",
 			Code:    http.StatusInternalServerError,
 		}
 		network.WriteError(w, errJson)
 		return
 	}
 	var capellaBlock *SignedBeaconBlockCapella
 	if err = unmarshalStrict(body, &capellaBlock); err == nil {
 		if err = validate.Struct(capellaBlock); err == nil {
@@ -2,6 +2,7 @@ package beacon

 import (
 	"bytes"
+	"encoding/json"
 	"net/http"
 	"net/http/httptest"
 	"strings"
@@ -13,6 +14,7 @@ import (
 	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v4/testing/assert"
 	mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
 	"github.com/stretchr/testify/mock"
 )

@@ -30,7 +32,7 @@ func TestPublishBlockV2(t *testing.T) {
 			SyncChecker: &mockSync.Sync{IsSyncing: false},
 		}

-		request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(phase0Block)))
+		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(phase0Block)))
 		writer := httptest.NewRecorder()
 		writer.Body = &bytes.Buffer{}
 		server.PublishBlockV2(writer, request)
@@ -47,7 +49,7 @@ func TestPublishBlockV2(t *testing.T) {
 			SyncChecker: &mockSync.Sync{IsSyncing: false},
 		}

-		request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(altairBlock)))
+		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(altairBlock)))
 		writer := httptest.NewRecorder()
 		writer.Body = &bytes.Buffer{}
 		server.PublishBlockV2(writer, request)
@@ -64,7 +66,7 @@ func TestPublishBlockV2(t *testing.T) {
 			SyncChecker: &mockSync.Sync{IsSyncing: false},
 		}

-		request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
+		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
 		writer := httptest.NewRecorder()
 		writer.Body = &bytes.Buffer{}
 		server.PublishBlockV2(writer, request)
@@ -81,7 +83,7 @@ func TestPublishBlockV2(t *testing.T) {
 			SyncChecker: &mockSync.Sync{IsSyncing: false},
 		}

-		request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(capellaBlock)))
+		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(capellaBlock)))
 		writer := httptest.NewRecorder()
 		writer.Body = &bytes.Buffer{}
 		server.PublishBlockV2(writer, request)
@@ -92,7 +94,7 @@ func TestPublishBlockV2(t *testing.T) {
 			SyncChecker: &mockSync.Sync{IsSyncing: false},
 		}

-		request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
+		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
 		writer := httptest.NewRecorder()
 		writer.Body = &bytes.Buffer{}
 		server.PublishBlockV2(writer, request)
@@ -108,7 +110,7 @@ func TestPublishBlockV2(t *testing.T) {
 			OptimisticModeFetcher: chainService,
 		}

-		request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte("foo")))
+		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
 		writer := httptest.NewRecorder()
 		writer.Body = &bytes.Buffer{}
 		server.PublishBlockV2(writer, request)
@@ -117,9 +119,74 @@ func TestPublishBlockV2(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
func TestPublishBlockV2SSZ(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
|
||||
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_Bellatrix)
|
||||
return ok
|
||||
}))
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
var bellablock SignedBeaconBlockBellatrix
|
||||
err := json.Unmarshal([]byte(bellatrixBlock), &bellablock)
|
||||
require.NoError(t, err)
|
||||
genericBlock, err := bellablock.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
sszvalue, err := genericBlock.GetBellatrix().MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
|
||||
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_Capella)
|
||||
return ok
|
||||
}))
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
var cblock SignedBeaconBlockCapella
|
||||
err := json.Unmarshal([]byte(capellaBlock), &cblock)
|
||||
require.NoError(t, err)
|
||||
genericBlock, err := cblock.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
sszvalue, err := genericBlock.GetCapella().MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
})
|
||||
t.Run("invalid block", func(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Body does not represent a valid block type"))
|
||||
})
|
||||
}
|
||||

func TestPublishBlindedBlockV2(t *testing.T) {
    ctrl := gomock.NewController(t)
    t.Run("Phase 0", func(t *testing.T) {
        v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
        v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
@@ -131,7 +198,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
            SyncChecker: &mockSync.Sync{IsSyncing: false},
        }

        request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(phase0Block)))
        request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(phase0Block)))
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.PublishBlindedBlockV2(writer, request)
@@ -148,7 +215,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
            SyncChecker: &mockSync.Sync{IsSyncing: false},
        }

        request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(altairBlock)))
        request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(altairBlock)))
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.PublishBlindedBlockV2(writer, request)
@@ -165,7 +232,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
            SyncChecker: &mockSync.Sync{IsSyncing: false},
        }

        request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
        request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.PublishBlindedBlockV2(writer, request)
@@ -182,7 +249,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
            SyncChecker: &mockSync.Sync{IsSyncing: false},
        }

        request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(blindedCapellaBlock)))
        request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedCapellaBlock)))
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.PublishBlindedBlockV2(writer, request)
@@ -193,7 +260,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
            SyncChecker: &mockSync.Sync{IsSyncing: false},
        }

        request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
        request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.PublishBlindedBlockV2(writer, request)
@@ -209,7 +276,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
            OptimisticModeFetcher: chainService,
        }

        request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte("foo")))
        request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.PublishBlindedBlockV2(writer, request)
@@ -218,6 +285,72 @@ func TestPublishBlindedBlockV2(t *testing.T) {
    })
}

func TestPublishBlindedBlockV2SSZ(t *testing.T) {
    ctrl := gomock.NewController(t)
    t.Run("Bellatrix", func(t *testing.T) {
        v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
        v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
            _, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedBellatrix)
            return ok
        }))
        server := &Server{
            V1Alpha1ValidatorServer: v1alpha1Server,
            SyncChecker:             &mockSync.Sync{IsSyncing: false},
        }

        var bellablock SignedBlindedBeaconBlockBellatrix
        err := json.Unmarshal([]byte(blindedBellatrixBlock), &bellablock)
        require.NoError(t, err)
        genericBlock, err := bellablock.ToGeneric()
        require.NoError(t, err)
        sszvalue, err := genericBlock.GetBlindedBellatrix().MarshalSSZ()
        require.NoError(t, err)
        request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
        request.Header.Set("Accept", "application/octet-stream")
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.PublishBlindedBlockV2(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
    })
    t.Run("Capella", func(t *testing.T) {
        v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
        v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
            _, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedCapella)
            return ok
        }))
        server := &Server{
            V1Alpha1ValidatorServer: v1alpha1Server,
            SyncChecker:             &mockSync.Sync{IsSyncing: false},
        }

        var cblock SignedBlindedBeaconBlockCapella
        err := json.Unmarshal([]byte(blindedCapellaBlock), &cblock)
        require.NoError(t, err)
        genericBlock, err := cblock.ToGeneric()
        require.NoError(t, err)
        sszvalue, err := genericBlock.GetBlindedCapella().MarshalSSZ()
        require.NoError(t, err)
        request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
        request.Header.Set("Accept", "application/octet-stream")
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.PublishBlindedBlockV2(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
    })
    t.Run("invalid block", func(t *testing.T) {
        server := &Server{
            SyncChecker: &mockSync.Sync{IsSyncing: false},
        }

        request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        server.PublishBlindedBlockV2(writer, request)
        assert.Equal(t, http.StatusBadRequest, writer.Code)
        assert.Equal(t, true, strings.Contains(writer.Body.String(), "Body does not represent a valid block type"))
    })
}

const (
    phase0Block = `{
    "message": {
@@ -668,7 +801,8 @@ const (
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
            ],
            "data": {
                "pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
@@ -688,7 +822,7 @@ const (
            }
        ],
        "sync_aggregate": {
            "sync_committee_bits": "0x01",
            "sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
            "sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
        },
        "execution_payload": {
@@ -846,7 +980,8 @@ const (
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
            ],
            "data": {
                "pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
@@ -866,7 +1001,7 @@ const (
            }
        ],
        "sync_aggregate": {
            "sync_committee_bits": "0x01",
            "sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
            "sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
        },
        "execution_payload_header": {
@@ -1022,7 +1157,8 @@ const (
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
            ],
            "data": {
                "pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
@@ -1042,7 +1178,7 @@ const (
            }
        ],
        "sync_aggregate": {
            "sync_committee_bits": "0x01",
            "sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
            "sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
        },
        "execution_payload": {
@@ -1218,7 +1354,8 @@ const (
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
                "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
            ],
            "data": {
                "pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
@@ -1238,7 +1375,7 @@ const (
            }
        ],
        "sync_aggregate": {
            "sync_committee_bits": "0x01",
            "sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
            "sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
        },
        "execution_payload_header": {

@@ -11,7 +11,6 @@ go_library(
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/block:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
@@ -42,7 +41,6 @@ go_test(
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/block:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
@@ -6,7 +6,6 @@ import (
    gwpb "github.com/grpc-ecosystem/grpc-gateway/v2/proto/gateway"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
    blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
@@ -80,26 +79,18 @@ func (s *Server) StreamEvents(
    }

    // Subscribe to event feeds from information received in the beacon node runtime.
    blockChan := make(chan *feed.Event, 1)
    blockSub := s.BlockNotifier.BlockFeed().Subscribe(blockChan)

    opsChan := make(chan *feed.Event, 1)
    opsSub := s.OperationNotifier.OperationFeed().Subscribe(opsChan)

    stateChan := make(chan *feed.Event, 1)
    stateSub := s.StateNotifier.StateFeed().Subscribe(stateChan)

    defer blockSub.Unsubscribe()
    defer opsSub.Unsubscribe()
    defer stateSub.Unsubscribe()

    // Handle each event received and context cancelation.
    for {
        select {
        case event := <-blockChan:
            if err := handleBlockEvents(stream, requestedTopics, event); err != nil {
                return status.Errorf(codes.Internal, "Could not handle block event: %v", err)
            }
        case event := <-opsChan:
            if err := handleBlockOperationEvents(stream, requestedTopics, event); err != nil {
                return status.Errorf(codes.Internal, "Could not handle block operations event: %v", err)
@@ -116,37 +107,6 @@ func (s *Server) StreamEvents(
    }
}

func handleBlockEvents(
    stream ethpbservice.Events_StreamEventsServer, requestedTopics map[string]bool, event *feed.Event,
) error {
    switch event.Type {
    case blockfeed.ReceivedBlock:
        if _, ok := requestedTopics[BlockTopic]; !ok {
            return nil
        }
        blkData, ok := event.Data.(*blockfeed.ReceivedBlockData)
        if !ok {
            return nil
        }
        v1Data, err := migration.BlockIfaceToV1BlockHeader(blkData.SignedBlock)
        if err != nil {
            return err
        }
        item, err := v1Data.Message.HashTreeRoot()
        if err != nil {
            return errors.Wrap(err, "could not hash tree root block")
        }
        eventBlock := &ethpb.EventBlock{
            Slot:                v1Data.Message.Slot,
            Block:               item[:],
            ExecutionOptimistic: blkData.IsOptimistic,
        }
        return streamData(stream, BlockTopic, eventBlock)
    default:
        return nil
    }
}

func handleBlockOperationEvents(
    stream ethpbservice.Events_StreamEventsServer, requestedTopics map[string]bool, event *feed.Event,
) error {
@@ -252,6 +212,28 @@ func (s *Server) handleStateEvents(
            return nil
        }
        return streamData(stream, ChainReorgTopic, reorg)
    case statefeed.BlockProcessed:
        if _, ok := requestedTopics[BlockTopic]; !ok {
            return nil
        }
        blkData, ok := event.Data.(*statefeed.BlockProcessedData)
        if !ok {
            return nil
        }
        v1Data, err := migration.BlockIfaceToV1BlockHeader(blkData.SignedBlock)
        if err != nil {
            return err
        }
        item, err := v1Data.Message.HashTreeRoot()
        if err != nil {
            return errors.Wrap(err, "could not hash tree root block")
        }
        eventBlock := &ethpb.EventBlock{
            Slot:                blkData.Slot,
            Block:               item[:],
            ExecutionOptimistic: blkData.Optimistic,
        }
        return streamData(stream, BlockTopic, eventBlock)
    default:
        return nil
    }

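The StreamEvents changes above reduce to one pattern: subscribe a buffered channel to each notifier feed and multiplex the channels in a single select loop until the context ends. A self-contained sketch of that fan-in, using illustrative local types rather than Prysm's feed package:

package main

import (
    "context"
    "fmt"
    "time"
)

type event struct{ topic string }

// streamEvents drains three feeds on dedicated channels and multiplexes
// them in one select loop, exiting when the context is canceled, the same
// shape as the handler above.
func streamEvents(ctx context.Context, blockCh, opsCh, stateCh <-chan event) error {
    for {
        select {
        case ev := <-blockCh:
            fmt.Println("block event:", ev.topic)
        case ev := <-opsCh:
            fmt.Println("operation event:", ev.topic)
        case ev := <-stateCh:
            fmt.Println("state event:", ev.topic)
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    blockCh := make(chan event, 1)
    blockCh <- event{topic: "head"}
    _ = streamEvents(ctx, blockCh, make(chan event), make(chan event))
}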
@@ -13,7 +13,6 @@ import (
    mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
    b "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
    blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
@@ -51,55 +50,6 @@ func TestStreamEvents_Preconditions(t *testing.T) {
    })
}

func TestStreamEvents_BlockEvents(t *testing.T) {
    t.Run(BlockTopic, func(t *testing.T) {
        ctx := context.Background()
        srv, ctrl, mockStream := setupServer(ctx, t)
        defer ctrl.Finish()

        blk := util.HydrateSignedBeaconBlock(&eth.SignedBeaconBlock{
            Block: &eth.BeaconBlock{
                Slot: 8,
            },
        })
        bodyRoot, err := blk.Block.Body.HashTreeRoot()
        require.NoError(t, err)
        wantedHeader := util.HydrateBeaconHeader(&eth.BeaconBlockHeader{
            Slot:     8,
            BodyRoot: bodyRoot[:],
        })
        wantedBlockRoot, err := wantedHeader.HashTreeRoot()
        require.NoError(t, err)
        genericResponse, err := anypb.New(&ethpb.EventBlock{
            Slot:                8,
            Block:               wantedBlockRoot[:],
            ExecutionOptimistic: true,
        })
        require.NoError(t, err)
        wantedMessage := &gateway.EventSource{
            Event: BlockTopic,
            Data:  genericResponse,
        }
        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assertFeedSendAndReceive(ctx, &assertFeedArgs{
            t:             t,
            srv:           srv,
            topics:        []string{BlockTopic},
            stream:        mockStream,
            shouldReceive: wantedMessage,
            itemToSend: &feed.Event{
                Type: blockfeed.ReceivedBlock,
                Data: &blockfeed.ReceivedBlockData{
                    SignedBlock:  wsb,
                    IsOptimistic: true,
                },
            },
            feed: srv.BlockNotifier.BlockFeed(),
        })
    })
}

func TestStreamEvents_OperationsEvents(t *testing.T) {
    t.Run("attestation_unaggregated", func(t *testing.T) {
        ctx := context.Background()
@@ -588,6 +538,53 @@ func TestStreamEvents_StateEvents(t *testing.T) {
            feed: srv.StateNotifier.StateFeed(),
        })
    })
    t.Run(BlockTopic, func(t *testing.T) {
        ctx := context.Background()
        srv, ctrl, mockStream := setupServer(ctx, t)
        defer ctrl.Finish()

        blk := util.HydrateSignedBeaconBlock(&eth.SignedBeaconBlock{
            Block: &eth.BeaconBlock{
                Slot: 8,
            },
        })
        bodyRoot, err := blk.Block.Body.HashTreeRoot()
        require.NoError(t, err)
        wantedHeader := util.HydrateBeaconHeader(&eth.BeaconBlockHeader{
            Slot:     8,
            BodyRoot: bodyRoot[:],
        })
        wantedBlockRoot, err := wantedHeader.HashTreeRoot()
        require.NoError(t, err)
        genericResponse, err := anypb.New(&ethpb.EventBlock{
            Slot:                8,
            Block:               wantedBlockRoot[:],
            ExecutionOptimistic: true,
        })
        require.NoError(t, err)
        wantedMessage := &gateway.EventSource{
            Event: BlockTopic,
            Data:  genericResponse,
        }
        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assertFeedSendAndReceive(ctx, &assertFeedArgs{
            t:             t,
            srv:           srv,
            topics:        []string{BlockTopic},
            stream:        mockStream,
            shouldReceive: wantedMessage,
            itemToSend: &feed.Event{
                Type: statefeed.BlockProcessed,
                Data: &statefeed.BlockProcessedData{
                    Slot:        8,
                    SignedBlock: wsb,
                    Optimistic:  true,
                },
            },
            feed: srv.StateNotifier.StateFeed(),
        })
    })
}

func TestStreamEvents_CommaSeparatedTopics(t *testing.T) {
@@ -651,7 +648,6 @@ func TestStreamEvents_CommaSeparatedTopics(t *testing.T) {

func setupServer(ctx context.Context, t testing.TB) (*Server, *gomock.Controller, *mock.MockEvents_StreamEventsServer) {
    srv := &Server{
        BlockNotifier:     &mockChain.MockBlockNotifier{},
        StateNotifier:     &mockChain.MockStateNotifier{},
        OperationNotifier: &mockChain.MockOperationNotifier{},
        Ctx:               ctx,

@@ -7,7 +7,6 @@ import (
    "context"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
    opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
)
@@ -17,7 +16,6 @@ import (
type Server struct {
    Ctx               context.Context
    StateNotifier     statefeed.Notifier
    BlockNotifier     blockfeed.Notifier
    OperationNotifier opfeed.Notifier
    HeadFetcher       blockchain.HeadFetcher
    ChainInfoFetcher  blockchain.ChainInfoFetcher

@@ -38,6 +38,7 @@ go_test(
    deps = [
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/rpc/testutil:go_default_library",
        "//beacon-chain/state:go_default_library",

@@ -226,6 +226,123 @@ func (s *Server) AttestationRewards(w http.ResponseWriter, r *http.Request) {
    network.WriteJson(w, resp)
}

// SyncCommitteeRewards retrieves rewards info for sync committee members specified by array of public keys or validator index.
// If no array is provided, return reward info for every committee member.
func (s *Server) SyncCommitteeRewards(w http.ResponseWriter, r *http.Request) {
    segments := strings.Split(r.URL.Path, "/")
    blockId := segments[len(segments)-1]

    blk, err := s.Blocker.Block(r.Context(), []byte(blockId))
    if errJson := handleGetBlockError(blk, err); errJson != nil {
        network.WriteError(w, errJson)
        return
    }
    if blk.Version() == version.Phase0 {
        errJson := &network.DefaultErrorJson{
            Message: "Sync committee rewards are not supported for Phase 0",
            Code:    http.StatusBadRequest,
        }
        network.WriteError(w, errJson)
        return
    }
    st, err := s.ReplayerBuilder.ReplayerForSlot(blk.Block().Slot()-1).ReplayToSlot(r.Context(), blk.Block().Slot())
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: "Could not get state: " + err.Error(),
            Code:    http.StatusInternalServerError,
        }
        network.WriteError(w, errJson)
        return
    }
    sa, err := blk.Block().Body().SyncAggregate()
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: "Could not get sync aggregate: " + err.Error(),
            Code:    http.StatusInternalServerError,
        }
        network.WriteError(w, errJson)
        return
    }

    vals, valIndices, ok := syncRewardsVals(w, r, st)
    if !ok {
        return
    }
    preProcessBals := make([]uint64, len(vals))
    for i, valIdx := range valIndices {
        preProcessBals[i], err = st.BalanceAtIndex(valIdx)
        if err != nil {
            errJson := &network.DefaultErrorJson{
                Message: "Could not get validator's balance: " + err.Error(),
                Code:    http.StatusInternalServerError,
            }
            network.WriteError(w, errJson)
            return
        }
    }

    _, proposerReward, err := altair.ProcessSyncAggregate(r.Context(), st, sa)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: "Could not get sync aggregate rewards: " + err.Error(),
            Code:    http.StatusInternalServerError,
        }
        network.WriteError(w, errJson)
        return
    }

    rewards := make([]int, len(preProcessBals))
    proposerIndex := blk.Block().ProposerIndex()
    for i, valIdx := range valIndices {
        bal, err := st.BalanceAtIndex(valIdx)
        if err != nil {
            errJson := &network.DefaultErrorJson{
                Message: "Could not get validator's balance: " + err.Error(),
                Code:    http.StatusInternalServerError,
            }
            network.WriteError(w, errJson)
            return
        }
        rewards[i] = int(bal - preProcessBals[i]) // lint:ignore uintcast
        if valIdx == proposerIndex {
            rewards[i] = rewards[i] - int(proposerReward) // lint:ignore uintcast
        }
    }

    optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: "Could not get optimistic mode info: " + err.Error(),
            Code:    http.StatusInternalServerError,
        }
        network.WriteError(w, errJson)
        return
    }
    blkRoot, err := blk.Block().HashTreeRoot()
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: "Could not get block root: " + err.Error(),
            Code:    http.StatusInternalServerError,
        }
        network.WriteError(w, errJson)
        return
    }

    scRewards := make([]SyncCommitteeReward, len(valIndices))
    for i, valIdx := range valIndices {
        scRewards[i] = SyncCommitteeReward{
            ValidatorIndex: strconv.FormatUint(uint64(valIdx), 10),
            Reward:         strconv.Itoa(rewards[i]),
        }
    }
    response := &SyncCommitteeRewardsResponse{
        Data:                scRewards,
        ExecutionOptimistic: optimistic,
        Finalized:           s.FinalizationFetcher.IsFinalized(r.Context(), blkRoot),
    }
    network.WriteJson(w, response)
}
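A hedged usage sketch for the handler above. Only the trailing path segment (the block ID) matters to this handler; the full route shown is the conventional Beacon API path and is registered outside this diff. Validators may be given as decimal indices or 0x-prefixed BLS pubkeys, and an empty body selects the whole committee.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

// syncCommitteeRewards posts a (possibly empty) list of validator indices or
// 0x-prefixed pubkeys for the given block ID. URL and route are assumptions.
func syncCommitteeRewards(nodeURL, blockID string, validators []string) (*http.Response, error) {
    body, err := json.Marshal(validators)
    if err != nil {
        return nil, err
    }
    url := fmt.Sprintf("%s/eth/v1/beacon/rewards/sync_committee/%s", nodeURL, blockID)
    return http.Post(url, "application/json", bytes.NewReader(body))
}

func main() {
    resp, err := syncCommitteeRewards("http://localhost:3500", "head", []string{"20"})
    if err == nil {
        fmt.Println(resp.Status)
    }
}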

func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.BeaconState, bool) {
    segments := strings.Split(r.URL.Path, "/")
    requestedEpoch, err := strconv.ParseUint(segments[len(segments)-1], 10, 64)
@@ -298,57 +415,9 @@ func attRewardsBalancesAndVals(
        network.WriteError(w, errJson)
        return nil, nil, nil, false
    }
    var rawValIds []string
    if r.Body != http.NoBody {
        if err = json.NewDecoder(r.Body).Decode(&rawValIds); err != nil {
            errJson := &network.DefaultErrorJson{
                Message: "Could not decode validators: " + err.Error(),
                Code:    http.StatusBadRequest,
            }
            network.WriteError(w, errJson)
            return nil, nil, nil, false
        }
    }
    valIndices := make([]primitives.ValidatorIndex, len(rawValIds))
    for i, v := range rawValIds {
        index, err := strconv.ParseUint(v, 10, 64)
        if err != nil {
            pubkey, err := bytesutil.FromHexString(v)
            if err != nil || len(pubkey) != fieldparams.BLSPubkeyLength {
                errJson := &network.DefaultErrorJson{
                    Message: fmt.Sprintf("%s is not a validator index or pubkey", v),
                    Code:    http.StatusBadRequest,
                }
                network.WriteError(w, errJson)
                return nil, nil, nil, false
            }
            var ok bool
            valIndices[i], ok = st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
            if !ok {
                errJson := &network.DefaultErrorJson{
                    Message: fmt.Sprintf("No validator index found for pubkey %#x", pubkey),
                    Code:    http.StatusBadRequest,
                }
                network.WriteError(w, errJson)
                return nil, nil, nil, false
            }
        } else {
            if index >= uint64(st.NumValidators()) {
                errJson := &network.DefaultErrorJson{
                    Message: fmt.Sprintf("Validator index %d is too large. Maximum allowed index is %d", index, st.NumValidators()-1),
                    Code:    http.StatusBadRequest,
                }
                network.WriteError(w, errJson)
                return nil, nil, nil, false
            }
            valIndices[i] = primitives.ValidatorIndex(index)
        }
    }
    if len(valIndices) == 0 {
        valIndices = make([]primitives.ValidatorIndex, len(allVals))
        for i := 0; i < len(allVals); i++ {
            valIndices[i] = primitives.ValidatorIndex(i)
        }
    valIndices, ok := requestedValIndices(w, r, st, allVals)
    if !ok {
        return nil, nil, nil, false
    }
    if len(valIndices) == len(allVals) {
        return bal, allVals, valIndices, true
@@ -453,6 +522,120 @@ func totalAttRewards(
    return totalRewards, true
}

func syncRewardsVals(
    w http.ResponseWriter,
    r *http.Request,
    st state.BeaconState,
) ([]*precompute.Validator, []primitives.ValidatorIndex, bool) {
    allVals, _, err := altair.InitializePrecomputeValidators(r.Context(), st)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: "Could not initialize precompute validators: " + err.Error(),
            Code:    http.StatusBadRequest,
        }
        network.WriteError(w, errJson)
        return nil, nil, false
    }
    valIndices, ok := requestedValIndices(w, r, st, allVals)
    if !ok {
        return nil, nil, false
    }

    sc, err := st.CurrentSyncCommittee()
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: "Could not get current sync committee: " + err.Error(),
            Code:    http.StatusBadRequest,
        }
        network.WriteError(w, errJson)
        return nil, nil, false
    }
    allScIndices := make([]primitives.ValidatorIndex, len(sc.Pubkeys))
    for i, pk := range sc.Pubkeys {
        valIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pk))
        if !ok {
            errJson := &network.DefaultErrorJson{
                Message: fmt.Sprintf("No validator index found for pubkey %#x", pk),
                Code:    http.StatusBadRequest,
            }
            network.WriteError(w, errJson)
            return nil, nil, false
        }
        allScIndices[i] = valIdx
    }

    scIndices := make([]primitives.ValidatorIndex, 0, len(allScIndices))
    scVals := make([]*precompute.Validator, 0, len(allScIndices))
    for _, valIdx := range valIndices {
        for _, scIdx := range allScIndices {
            if valIdx == scIdx {
                scVals = append(scVals, allVals[valIdx])
                scIndices = append(scIndices, valIdx)
                break
            }
        }
    }

    return scVals, scIndices, true
}
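One design note: the nested loops above intersect the requested indices with the committee in O(len(valIndices) × committee size). A set-based pass over the same data, sketched here with illustrative plain types, does the same filtering in one pass and keeps the handler's behavior of silently dropping validators outside the committee:

package main

import "fmt"

// filterCommittee keeps only the requested indices that are committee members.
// Plain uint64 stands in for primitives.ValidatorIndex in this sketch.
func filterCommittee(valIndices, allScIndices []uint64) []uint64 {
    inCommittee := make(map[uint64]bool, len(allScIndices))
    for _, idx := range allScIndices {
        inCommittee[idx] = true
    }
    filtered := make([]uint64, 0, len(valIndices))
    for _, idx := range valIndices {
        if inCommittee[idx] {
            // Requested validators outside the committee are silently
            // dropped, matching the handler's behavior.
            filtered = append(filtered, idx)
        }
    }
    return filtered
}

func main() { fmt.Println(filterCommittee([]uint64{20, 84, 999}, []uint64{20, 84})) } // [20 84]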

func requestedValIndices(w http.ResponseWriter, r *http.Request, st state.BeaconState, allVals []*precompute.Validator) ([]primitives.ValidatorIndex, bool) {
    var rawValIds []string
    if r.Body != http.NoBody {
        if err := json.NewDecoder(r.Body).Decode(&rawValIds); err != nil {
            errJson := &network.DefaultErrorJson{
                Message: "Could not decode validators: " + err.Error(),
                Code:    http.StatusBadRequest,
            }
            network.WriteError(w, errJson)
            return nil, false
        }
    }
    valIndices := make([]primitives.ValidatorIndex, len(rawValIds))
    for i, v := range rawValIds {
        index, err := strconv.ParseUint(v, 10, 64)
        if err != nil {
            pubkey, err := bytesutil.FromHexString(v)
            if err != nil || len(pubkey) != fieldparams.BLSPubkeyLength {
                errJson := &network.DefaultErrorJson{
                    Message: fmt.Sprintf("%s is not a validator index or pubkey", v),
                    Code:    http.StatusBadRequest,
                }
                network.WriteError(w, errJson)
                return nil, false
            }
            var ok bool
            valIndices[i], ok = st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
            if !ok {
                errJson := &network.DefaultErrorJson{
                    Message: fmt.Sprintf("No validator index found for pubkey %#x", pubkey),
                    Code:    http.StatusBadRequest,
                }
                network.WriteError(w, errJson)
                return nil, false
            }
        } else {
            if index >= uint64(st.NumValidators()) {
                errJson := &network.DefaultErrorJson{
                    Message: fmt.Sprintf("Validator index %d is too large. Maximum allowed index is %d", index, st.NumValidators()-1),
                    Code:    http.StatusBadRequest,
                }
                network.WriteError(w, errJson)
                return nil, false
            }
            valIndices[i] = primitives.ValidatorIndex(index)
        }
    }
    if len(valIndices) == 0 {
        valIndices = make([]primitives.ValidatorIndex, len(allVals))
        for i := 0; i < len(allVals); i++ {
            valIndices[i] = primitives.ValidatorIndex(i)
        }
    }

    return valIndices, true
}

func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) *network.DefaultErrorJson {
    if errors.Is(err, lookup.BlockIdParseError{}) {
        return &network.DefaultErrorJson{
@@ -468,7 +651,7 @@ func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) *n
    }
    if err := blocks.BeaconBlockIsNil(blk); err != nil {
        return &network.DefaultErrorJson{
            Message: "Could not find requested block" + err.Error(),
            Message: "Could not find requested block: " + err.Error(),
            Code:    http.StatusNotFound,
        }
    }

@@ -14,6 +14,7 @@ import (
    "github.com/prysmaticlabs/go-bitfield"
    mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -34,6 +35,8 @@ import (
)

func TestBlockRewards(t *testing.T) {
    helpers.ClearCache()

    valCount := 64

    st, err := util.NewBeaconStateCapella()
@@ -65,7 +68,7 @@ func TestBlockRewards(t *testing.T) {
    bRoots[0] = slot0bRoot
    require.NoError(t, st.SetBlockRoots(bRoots))

    b := util.HydrateSignedBeaconBlockAltair(util.NewBeaconBlockAltair())
    b := util.HydrateSignedBeaconBlockCapella(util.NewBeaconBlockCapella())
    b.Block.Slot = 2
    // we have to set the proposer index to the value that will be randomly chosen (fortunately it's deterministic)
    b.Block.ProposerIndex = 12
@@ -206,6 +209,7 @@ func TestAttestationRewards(t *testing.T) {
    cfg := params.BeaconConfig()
    cfg.AltairForkEpoch = 1
    params.OverrideBeaconConfig(cfg)
    helpers.ClearCache()

    valCount := 64

@@ -479,3 +483,298 @@ func TestAttestationRewards(t *testing.T) {
        assert.Equal(t, "Attestation rewards are available after two epoch transitions to ensure all attestations have a chance of inclusion", e.Message)
    })
}

func TestSyncCommiteeRewards(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig()
    cfg.AltairForkEpoch = 1
    params.OverrideBeaconConfig(cfg)
    helpers.ClearCache()

    const valCount = 1024
    // we have to set the proposer index to the value that will be randomly chosen (fortunately it's deterministic)
    const proposerIndex = 84

    st, err := util.NewBeaconStateCapella()
    require.NoError(t, err)
    require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
    validators := make([]*eth.Validator, 0, valCount)
    secretKeys := make([]bls.SecretKey, 0, valCount)
    for i := 0; i < valCount; i++ {
        blsKey, err := bls.RandKey()
        require.NoError(t, err)
        secretKeys = append(secretKeys, blsKey)
        validators = append(validators, &eth.Validator{
            PublicKey:         blsKey.PublicKey().Marshal(),
            ExitEpoch:         params.BeaconConfig().FarFutureEpoch,
            WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
            EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
        })
    }
    require.NoError(t, st.SetValidators(validators))
    require.NoError(t, st.SetInactivityScores(make([]uint64, len(validators))))
    syncCommitteePubkeys := make([][]byte, fieldparams.SyncCommitteeLength)
    for i := 0; i < fieldparams.SyncCommitteeLength; i++ {
        syncCommitteePubkeys[i] = secretKeys[i].PublicKey().Marshal()
    }
    aggPubkey, err := bls.AggregatePublicKeys(syncCommitteePubkeys)
    require.NoError(t, err)
    require.NoError(t, st.SetCurrentSyncCommittee(&eth.SyncCommittee{
        Pubkeys:         syncCommitteePubkeys,
        AggregatePubkey: aggPubkey.Marshal(),
    }))

    b := util.HydrateSignedBeaconBlockCapella(util.NewBeaconBlockCapella())
    b.Block.Slot = 32
    b.Block.ProposerIndex = proposerIndex
    scBits := bitfield.NewBitvector512()
    // last 10 sync committee members didn't perform their duty
    for i := uint64(0); i < fieldparams.SyncCommitteeLength-10; i++ {
        scBits.SetBitAt(i, true)
    }
    domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainSyncCommittee, st.GenesisValidatorsRoot())
    require.NoError(t, err)
    sszBytes := primitives.SSZBytes("")
    r, err := signing.ComputeSigningRoot(&sszBytes, domain)
    require.NoError(t, err)
    // Bits set in sync committee bits determine which validators will be treated as participating in sync committee.
    // These validators have to sign the message.
    sigs := make([]bls.Signature, fieldparams.SyncCommitteeLength-10)
    for i := range sigs {
        sigs[i], err = blst.SignatureFromBytes(secretKeys[i].Sign(r[:]).Marshal())
        require.NoError(t, err)
    }
    aggSig := bls.AggregateSignatures(sigs).Marshal()
    b.Block.Body.SyncAggregate = &eth.SyncAggregate{SyncCommitteeBits: scBits, SyncCommitteeSignature: aggSig}
    sbb, err := blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    phase0block, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)

    currentSlot := params.BeaconConfig().SlotsPerEpoch
    mockChainService := &mock.ChainService{Optimistic: true, Slot: &currentSlot}
    s := &Server{
        Blocker: &testutil.MockBlocker{SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
            0:  phase0block,
            32: sbb,
        }},
        OptimisticModeFetcher: mockChainService,
        FinalizationFetcher:   mockChainService,
        ReplayerBuilder:       mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(st)),
    }

t.Run("ok - filtered vals", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
pubkey := fmt.Sprintf("%#x", secretKeys[10].PublicKey().Marshal())
|
||||
valIds, err := json.Marshal([]string{"20", pubkey})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &SyncCommitteeRewardsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
sum := uint64(0)
|
||||
for _, scReward := range resp.Data {
|
||||
r, err := strconv.ParseUint(scReward.Reward, 10, 64)
|
||||
require.NoError(t, err)
|
||||
sum += r
|
||||
}
|
||||
assert.Equal(t, uint64(1396), sum)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("ok - all vals", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
request := httptest.NewRequest("POST", url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &SyncCommitteeRewardsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 512, len(resp.Data))
|
||||
sum := 0
|
||||
for _, scReward := range resp.Data {
|
||||
r, err := strconv.Atoi(scReward.Reward)
|
||||
require.NoError(t, err)
|
||||
sum += r
|
||||
}
|
||||
assert.Equal(t, 343416, sum)
|
||||
})
|
||||
t.Run("ok - validator outside sync committee is ignored", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
pubkey := fmt.Sprintf("%#x", secretKeys[10].PublicKey().Marshal())
|
||||
valIds, err := json.Marshal([]string{"20", "999", pubkey})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &SyncCommitteeRewardsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
sum := 0
|
||||
for _, scReward := range resp.Data {
|
||||
r, err := strconv.Atoi(scReward.Reward)
|
||||
require.NoError(t, err)
|
||||
sum += r
|
||||
}
|
||||
assert.Equal(t, 1396, sum)
|
||||
})
|
||||
t.Run("ok - proposer reward is deducted", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
pubkey := fmt.Sprintf("%#x", secretKeys[10].PublicKey().Marshal())
|
||||
valIds, err := json.Marshal([]string{"20", "84", pubkey})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &SyncCommitteeRewardsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 3, len(resp.Data))
|
||||
sum := 0
|
||||
for _, scReward := range resp.Data {
|
||||
r, err := strconv.Atoi(scReward.Reward)
|
||||
require.NoError(t, err)
|
||||
sum += r
|
||||
}
|
||||
assert.Equal(t, 2094, sum)
|
||||
})
|
||||
t.Run("invalid validator index/pubkey", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
valIds, err := json.Marshal([]string{"10", "foo"})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &network.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, "foo is not a validator index or pubkey", e.Message)
|
||||
})
|
||||
t.Run("unknown validator pubkey", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
privkey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pubkey := fmt.Sprintf("%#x", privkey.PublicKey().Marshal())
|
||||
valIds, err := json.Marshal([]string{"10", pubkey})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &network.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, "No validator index found for pubkey "+pubkey, e.Message)
|
||||
})
|
||||
t.Run("validator index too large", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
valIds, err := json.Marshal([]string{"10", "9999"})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &network.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, "Validator index 9999 is too large. Maximum allowed index is 1023", e.Message)
|
||||
})
|
||||
t.Run("phase 0", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/0"
|
||||
request := httptest.NewRequest("POST", url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &network.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, "Sync committee rewards are not supported for Phase 0", e.Message)
|
||||
})
|
||||
}
|
||||
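The asserted sums are not arbitrary: with 1024 validators at 32 ETH effective balance, the Altair sync committee arithmetic yields a participant reward of 698 Gwei per member per slot, and the 10 absent members (the unset bits above) are penalized the same amount. A sketch that redoes the spec arithmetic with mainnet constants hard-coded, reproducing every number the test asserts:

package main

import (
    "fmt"
    "math"
)

func main() {
    const (
        effectiveBalanceIncrement = 1_000_000_000 // Gwei
        baseRewardFactor          = 64
        syncRewardWeight          = 2
        weightDenominator         = 64
        slotsPerEpoch             = 32
        syncCommitteeSize         = 512
    )
    totalActiveBalance := uint64(1024) * 32 * effectiveBalanceIncrement
    baseRewardPerIncrement := effectiveBalanceIncrement * baseRewardFactor / uint64(math.Sqrt(float64(totalActiveBalance)))
    totalBaseRewards := baseRewardPerIncrement * (totalActiveBalance / effectiveBalanceIncrement)
    participantReward := totalBaseRewards * syncRewardWeight / weightDenominator / slotsPerEpoch / syncCommitteeSize
    fmt.Println(participantReward)              // 698 Gwei per participating member
    fmt.Println(2 * participantReward)          // 1396: the two filtered members
    fmt.Println(3 * participantReward)          // 2094: proposer keeps the sync reward, the proposer tip is deducted
    fmt.Println((502 - 10) * participantReward) // 343416: 502 participants earn 698, 10 absentees lose 698
}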

@@ -11,8 +11,7 @@ type Server struct {
    OptimisticModeFetcher blockchain.OptimisticModeFetcher
    FinalizationFetcher   blockchain.FinalizationFetcher
    ReplayerBuilder       stategen.ReplayerBuilder
    // TODO: Init
    TimeFetcher blockchain.TimeFetcher
    Stater      lookup.Stater
    HeadFetcher blockchain.HeadFetcher
    TimeFetcher blockchain.TimeFetcher
    Stater      lookup.Stater
    HeadFetcher blockchain.HeadFetcher
}

@@ -40,3 +40,14 @@ type TotalAttestationReward struct {
    Source         string `json:"source"`
    InclusionDelay string `json:"inclusion_delay"`
}

type SyncCommitteeRewardsResponse struct {
    Data                []SyncCommitteeReward `json:"data"`
    ExecutionOptimistic bool                  `json:"execution_optimistic"`
    Finalized           bool                  `json:"finalized"`
}

type SyncCommitteeReward struct {
    ValidatorIndex string `json:"validator_index"`
    Reward         string `json:"reward"`
}

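For reference, marshaling these response structs produces the wire shape the tests above decode; a minimal self-contained sketch with the struct definitions duplicated for runnability:

package main

import (
    "encoding/json"
    "fmt"
)

type SyncCommitteeReward struct {
    ValidatorIndex string `json:"validator_index"`
    Reward         string `json:"reward"`
}

type SyncCommitteeRewardsResponse struct {
    Data                []SyncCommitteeReward `json:"data"`
    ExecutionOptimistic bool                  `json:"execution_optimistic"`
    Finalized           bool                  `json:"finalized"`
}

func main() {
    resp := SyncCommitteeRewardsResponse{
        Data:                []SyncCommitteeReward{{ValidatorIndex: "20", Reward: "698"}},
        ExecutionOptimistic: true,
    }
    out, _ := json.Marshal(resp)
    fmt.Println(string(out))
    // {"data":[{"validator_index":"20","reward":"698"}],"execution_optimistic":true,"finalized":false}
}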
beacon-chain/rpc/prysm/node/BUILD.bazel (new file, 49 lines)
@@ -0,0 +1,49 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"server.go",
|
||||
"structs.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/node",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"handlers_test.go",
|
||||
"server_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/host/peerstore/test:go_default_library",
|
||||
"@com_github_multiformats_go_multiaddr//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/rpc/prysm/node/handlers.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package node

import (
    "encoding/json"
    "io"
    "net/http"
    "strings"

    corenet "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
    "github.com/prysmaticlabs/prysm/v4/network"
    eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// ListTrustedPeer retrieves data about the node's trusted peers.
func (s *Server) ListTrustedPeer(w http.ResponseWriter, r *http.Request) {
    peerStatus := s.PeersFetcher.Peers()
    allIds := s.PeersFetcher.Peers().GetTrustedPeers()
    allPeers := make([]*Peer, 0, len(allIds))
    for _, id := range allIds {
        p, err := httpPeerInfo(peerStatus, id)
        if err != nil {
            errJson := &network.DefaultErrorJson{
                Message: errors.Wrapf(err, "Could not get peer info").Error(),
                Code:    http.StatusInternalServerError,
            }
            network.WriteError(w, errJson)
            return
        }
        // peers added into trusted set but never connected should also be listed
        if p == nil {
            p = &Peer{
                PeerID:             id.String(),
                Enr:                "",
                LastSeenP2PAddress: "",
                State:              eth.ConnectionState(corenet.NotConnected).String(),
                Direction:          eth.PeerDirection(corenet.DirUnknown).String(),
            }
        }
        allPeers = append(allPeers, p)
    }
    response := &PeersResponse{Peers: allPeers}
    network.WriteJson(w, response)
}

// AddTrustedPeer adds a new peer into node's trusted peer set by Multiaddr
func (s *Server) AddTrustedPeer(w http.ResponseWriter, r *http.Request) {
    body, err := io.ReadAll(r.Body)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: errors.Wrapf(err, "Could not read request body").Error(),
            Code:    http.StatusInternalServerError,
        }
        network.WriteError(w, errJson)
        return
    }
    var addrRequest *AddrRequest
    err = json.Unmarshal(body, &addrRequest)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: errors.Wrapf(err, "Could not decode request body into peer address").Error(),
            Code:    http.StatusBadRequest,
        }
        network.WriteError(w, errJson)
        return
    }
    info, err := peer.AddrInfoFromString(addrRequest.Addr)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: errors.Wrapf(err, "Could not derive peer info from multiaddress").Error(),
            Code:    http.StatusBadRequest,
        }
        network.WriteError(w, errJson)
        return
    }

    // also add new peerdata to peers
    direction, err := s.PeersFetcher.Peers().Direction(info.ID)
    if err != nil {
        s.PeersFetcher.Peers().Add(nil, info.ID, info.Addrs[0], corenet.DirUnknown)
    } else {
        s.PeersFetcher.Peers().Add(nil, info.ID, info.Addrs[0], direction)
    }

    peers := []peer.ID{}
    peers = append(peers, info.ID)
    s.PeersFetcher.Peers().SetTrustedPeers(peers)
    w.WriteHeader(http.StatusOK)
}

// RemoveTrustedPeer removes peer from our trusted peer set but does not close connection.
func (s *Server) RemoveTrustedPeer(w http.ResponseWriter, r *http.Request) {
    segments := strings.Split(r.URL.Path, "/")
    id := segments[len(segments)-1]
    peerId, err := peer.Decode(id)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: errors.Wrapf(err, "Could not decode peer id").Error(),
            Code:    http.StatusBadRequest,
        }
        network.WriteError(w, errJson)
        return
    }

    // if the peer is not a trusted peer, do nothing but return 200
    if !s.PeersFetcher.Peers().IsTrustedPeers(peerId) {
        w.WriteHeader(http.StatusOK)
        return
    }

    peers := []peer.ID{}
    peers = append(peers, peerId)
    s.PeersFetcher.Peers().DeleteTrustedPeers(peers)
    w.WriteHeader(http.StatusOK)
}

// httpPeerInfo does the same thing as peerInfo function in node.go but returns the
// http peer response.
func httpPeerInfo(peerStatus *peers.Status, id peer.ID) (*Peer, error) {
    enr, err := peerStatus.ENR(id)
    if err != nil {
        if errors.Is(err, peerdata.ErrPeerUnknown) {
            return nil, nil
        }
        return nil, errors.Wrap(err, "could not obtain ENR")
    }
    var serializedEnr string
    if enr != nil {
        serializedEnr, err = p2p.SerializeENR(enr)
        if err != nil {
            return nil, errors.Wrap(err, "could not serialize ENR")
        }
    }
    address, err := peerStatus.Address(id)
    if err != nil {
        if errors.Is(err, peerdata.ErrPeerUnknown) {
            return nil, nil
        }
        return nil, errors.Wrap(err, "could not obtain address")
    }
    connectionState, err := peerStatus.ConnectionState(id)
    if err != nil {
        if errors.Is(err, peerdata.ErrPeerUnknown) {
            return nil, nil
        }
        return nil, errors.Wrap(err, "could not obtain connection state")
    }
    direction, err := peerStatus.Direction(id)
    if err != nil {
        if errors.Is(err, peerdata.ErrPeerUnknown) {
            return nil, nil
        }
        return nil, errors.Wrap(err, "could not obtain direction")
    }
    if eth.PeerDirection(direction) == eth.PeerDirection_UNKNOWN {
        return nil, nil
    }
    v1ConnState := eth.ConnectionState(connectionState).String()
    v1PeerDirection := eth.PeerDirection(direction).String()
    p := Peer{
        PeerID:    id.String(),
        State:     v1ConnState,
        Direction: v1PeerDirection,
    }
    if address != nil {
        p.LastSeenP2PAddress = address.String()
    }
    if serializedEnr != "" {
        p.Enr = "enr:" + serializedEnr
    }

    return &p, nil
}
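A client-side sketch of the three handlers in this new file. The HTTP routes and the "addr" JSON field name are assumptions (route registration and the AddrRequest struct live outside this diff); the peer ID in the example is the one used by the tests below.

package peersclient

import (
    "bytes"
    "encoding/json"
    "net/http"
)

// addTrustedPeer posts a multiaddr, e.g.
// "/ip4/127.0.0.1/tcp/13000/p2p/16Uiu2HAm7yD5fhhw1Kihg5pffaGbvKV3k7sqxRGHMZzkb7u9UUxQ".
func addTrustedPeer(nodeURL, multiaddr string) error {
    // Field name assumed from AddrRequest.Addr; the struct is defined elsewhere.
    body, err := json.Marshal(map[string]string{"addr": multiaddr})
    if err != nil {
        return err
    }
    _, err = http.Post(nodeURL+"/prysm/node/trusted_peers", "application/json", bytes.NewReader(body))
    return err
}

// removeTrustedPeer issues a DELETE whose last path segment is the peer ID,
// which is all RemoveTrustedPeer reads from the URL.
func removeTrustedPeer(nodeURL, peerID string) error {
    req, err := http.NewRequest(http.MethodDelete, nodeURL+"/prysm/node/trusted_peers/"+peerID, nil)
    if err != nil {
        return err
    }
    _, err = http.DefaultClient.Do(req)
    return err
}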
beacon-chain/rpc/prysm/node/handlers_test.go (new file, 250 lines)
@@ -0,0 +1,250 @@
package node

import (
    "bytes"
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "strconv"
    "testing"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    corenet "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    libp2ptest "github.com/libp2p/go-libp2p/p2p/host/peerstore/test"
    ma "github.com/multiformats/go-multiaddr"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
    mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
    "github.com/prysmaticlabs/prysm/v4/network"
    "github.com/prysmaticlabs/prysm/v4/testing/assert"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
)

type testIdentity enode.ID

func (_ testIdentity) Verify(_ *enr.Record, _ []byte) error { return nil }
func (id testIdentity) NodeAddr(_ *enr.Record) []byte       { return id[:] }

func TestListTrustedPeer(t *testing.T) {
    ids := libp2ptest.GeneratePeerIDs(9)
    peerFetcher := &mockp2p.MockPeersProvider{}
    peerFetcher.ClearPeers()
    peerStatus := peerFetcher.Peers()

    for i, id := range ids {
        if i == len(ids)-1 {
            var p2pAddr = "/ip4/127.0.0." + strconv.Itoa(i) + "/udp/12000/p2p/16Uiu2HAm7yD5fhhw1Kihg5pffaGbvKV3k7sqxRGHMZzkb7u9UUxQ"
            p2pMultiAddr, err := ma.NewMultiaddr(p2pAddr)
            require.NoError(t, err)
            peerStatus.Add(nil, id, p2pMultiAddr, corenet.DirUnknown)
            continue
        }
        enrRecord := &enr.Record{}
        err := enrRecord.SetSig(testIdentity{1}, []byte{42})
        require.NoError(t, err)
        enrRecord.Set(enr.IPv4{127, 0, 0, byte(i)})
        err = enrRecord.SetSig(testIdentity{}, []byte{})
        require.NoError(t, err)
        var p2pAddr = "/ip4/127.0.0." + strconv.Itoa(i) + "/udp/12000/p2p/16Uiu2HAm7yD5fhhw1Kihg5pffaGbvKV3k7sqxRGHMZzkb7u9UUxQ"
        p2pMultiAddr, err := ma.NewMultiaddr(p2pAddr)
        require.NoError(t, err)

        var direction corenet.Direction
        if i%2 == 0 {
            direction = corenet.DirInbound
        } else {
            direction = corenet.DirOutbound
        }
        peerStatus.Add(enrRecord, id, p2pMultiAddr, direction)

        switch i {
        case 0, 1:
            peerStatus.SetConnectionState(id, peers.PeerConnecting)
        case 2, 3:
            peerStatus.SetConnectionState(id, peers.PeerConnected)
        case 4, 5:
            peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
        case 6, 7:
            peerStatus.SetConnectionState(id, peers.PeerDisconnected)
        default:
            t.Fatalf("Failed to set connection state for peer")
        }
    }

    s := Server{PeersFetcher: peerFetcher}
    // set all peers as trusted peers
    s.PeersFetcher.Peers().SetTrustedPeers(ids)

    t.Run("Peer data OK", func(t *testing.T) {
        url := "http://anything.is.fine"
        request := httptest.NewRequest("GET", url, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.ListTrustedPeer(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &PeersResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        peers := resp.Peers
        // assert number of trusted peer is right
        assert.Equal(t, 9, len(peers))

        for i := 0; i < 9; i++ {
            pid, err := peer.Decode(peers[i].PeerID)
            require.NoError(t, err)
            if pid == ids[8] {
                assert.Equal(t, "", peers[i].Enr)
assert.Equal(t, "", peers[i].LastSeenP2PAddress)
|
||||
assert.Equal(t, "DISCONNECTED", peers[i].State)
|
||||
assert.Equal(t, "UNKNOWN", peers[i].Direction)
|
||||
continue
|
||||
}
|
||||
expectedEnr, err := peerStatus.ENR(pid)
|
||||
require.NoError(t, err)
|
||||
serializeENR, err := p2p.SerializeENR(expectedEnr)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "enr:"+serializeENR, peers[i].Enr)
|
||||
expectedP2PAddr, err := peerStatus.Address(pid)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedP2PAddr.String(), peers[i].LastSeenP2PAddress)
|
||||
switch pid {
|
||||
case ids[0]:
|
||||
assert.Equal(t, "CONNECTING", peers[i].State)
|
||||
assert.Equal(t, "INBOUND", peers[i].Direction)
|
||||
case ids[1]:
|
||||
assert.Equal(t, "CONNECTING", peers[i].State)
|
||||
assert.Equal(t, "OUTBOUND", peers[i].Direction)
|
||||
case ids[2]:
|
||||
assert.Equal(t, "CONNECTED", peers[i].State)
|
||||
assert.Equal(t, "INBOUND", peers[i].Direction)
|
||||
case ids[3]:
|
||||
assert.Equal(t, "CONNECTED", peers[i].State)
|
||||
assert.Equal(t, "OUTBOUND", peers[i].Direction)
|
||||
case ids[4]:
|
||||
assert.Equal(t, "DISCONNECTING", peers[i].State)
|
||||
assert.Equal(t, "INBOUND", peers[i].Direction)
|
||||
case ids[5]:
|
||||
assert.Equal(t, "DISCONNECTING", peers[i].State)
|
||||
assert.Equal(t, "OUTBOUND", peers[i].Direction)
|
||||
case ids[6]:
|
||||
assert.Equal(t, "DISCONNECTED", peers[i].State)
|
||||
assert.Equal(t, "INBOUND", peers[i].Direction)
|
||||
case ids[7]:
|
||||
assert.Equal(t, "DISCONNECTED", peers[i].State)
|
||||
assert.Equal(t, "OUTBOUND", peers[i].Direction)
|
||||
default:
|
||||
t.Fatalf("Failed to get connection state and direction for peer")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestListTrustedPeers_NoPeersReturnsEmptyArray(t *testing.T) {
|
||||
peerFetcher := &mockp2p.MockPeersProvider{}
|
||||
peerFetcher.ClearPeers()
|
||||
s := Server{PeersFetcher: peerFetcher}
|
||||
|
||||
url := "http://anything.is.fine"
|
||||
request := httptest.NewRequest("GET", url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.ListTrustedPeer(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &PeersResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
peers := resp.Peers
|
||||
assert.Equal(t, 0, len(peers))
|
||||
}
|
||||
|
||||
func TestAddTrustedPeer(t *testing.T) {
|
||||
peerFetcher := &mockp2p.MockPeersProvider{}
|
||||
peerFetcher.ClearPeers()
|
||||
s := Server{PeersFetcher: peerFetcher}
|
||||
|
||||
url := "http://anything.is.fine"
|
||||
addr := &AddrRequest{
|
||||
Addr: "/ip4/127.0.0.1/tcp/30303/p2p/16Uiu2HAm1n583t4huDMMqEUUBuQs6bLts21mxCfX3tiqu9JfHvRJ",
|
||||
}
|
||||
addrJson, err := json.Marshal(addr)
|
||||
require.NoError(t, err)
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(addrJson)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.AddTrustedPeer(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
}
|
||||
|
||||
func TestAddTrustedPeer_EmptyBody(t *testing.T) {
|
||||
peerFetcher := &mockp2p.MockPeersProvider{}
|
||||
peerFetcher.ClearPeers()
|
||||
s := Server{PeersFetcher: peerFetcher}
|
||||
|
||||
url := "http://anything.is.fine"
|
||||
request := httptest.NewRequest("POST", url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.AddTrustedPeer(writer, request)
|
||||
e := &network.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.Equal(t, "Could not decode request body into peer address: unexpected end of JSON input", e.Message)
|
||||
|
||||
}
|
||||
|
||||
func TestAddTrustedPeer_BadAddress(t *testing.T) {
|
||||
peerFetcher := &mockp2p.MockPeersProvider{}
|
||||
peerFetcher.ClearPeers()
|
||||
s := Server{PeersFetcher: peerFetcher}
|
||||
|
||||
url := "http://anything.is.fine"
|
||||
addr := &AddrRequest{
|
||||
Addr: "anything/but/not/an/address",
|
||||
}
|
||||
addrJson, err := json.Marshal(addr)
|
||||
require.NoError(t, err)
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(addrJson)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.AddTrustedPeer(writer, request)
|
||||
e := &network.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.StringContains(t, "Could not derive peer info from multiaddress", e.Message)
|
||||
}
|
||||
|
||||
func TestRemoveTrustedPeer(t *testing.T) {
|
||||
peerFetcher := &mockp2p.MockPeersProvider{}
|
||||
peerFetcher.ClearPeers()
|
||||
s := Server{PeersFetcher: peerFetcher}
|
||||
|
||||
url := "http://anything.is.fine.but.last.is.important/16Uiu2HAm1n583t4huDMMqEUUBuQs6bLts21mxCfX3tiqu9JfHvRJ"
|
||||
request := httptest.NewRequest("DELETE", url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.RemoveTrustedPeer(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
}
|
||||
|
||||
func TestRemoveTrustedPeer_EmptyParameter(t *testing.T) {
|
||||
peerFetcher := &mockp2p.MockPeersProvider{}
|
||||
peerFetcher.ClearPeers()
|
||||
s := Server{PeersFetcher: peerFetcher}
|
||||
|
||||
url := "http://anything.is.fine"
|
||||
request := httptest.NewRequest("DELETE", url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.RemoveTrustedPeer(writer, request)
|
||||
e := &network.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.Equal(t, "Could not decode peer id: failed to parse peer ID: invalid cid: cid too short", e.Message)
|
||||
}
21 beacon-chain/rpc/prysm/node/server.go Normal file
@@ -0,0 +1,21 @@
package node

import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)

type Server struct {
    SyncChecker               sync.Checker
    OptimisticModeFetcher     blockchain.OptimisticModeFetcher
    BeaconDB                  db.ReadOnlyDatabase
    PeersFetcher              p2p.PeersProvider
    PeerManager               p2p.PeerManager
    MetadataProvider          p2p.MetadataProvider
    GenesisTimeFetcher        blockchain.TimeFetcher
    HeadFetcher               blockchain.HeadFetcher
    ExecutionChainInfoFetcher execution.ChainInfoFetcher
}
1 beacon-chain/rpc/prysm/node/server_test.go Normal file
@@ -0,0 +1 @@
package node
17 beacon-chain/rpc/prysm/node/structs.go Normal file
@@ -0,0 +1,17 @@
package node

type AddrRequest struct {
    Addr string `json:"addr"`
}

type PeersResponse struct {
    Peers []*Peer `json:"Peers"`
}

type Peer struct {
    PeerID             string `json:"peer_id"`
    Enr                string `json:"enr"`
    LastSeenP2PAddress string `json:"last_seen_p2p_address"`
    State              string `json:"state"`
    Direction          string `json:"direction"`
}
@@ -38,6 +38,7 @@
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/sync:go_default_library",
@@ -13,6 +13,7 @@ import (
    coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/cmd"
    "github.com/prysmaticlabs/prysm/v4/config/params"
@@ -659,153 +660,14 @@ func (bs *Server) GetValidatorPerformance(
    ctx context.Context, req *ethpb.ValidatorPerformanceRequest,
) (*ethpb.ValidatorPerformanceResponse, error) {
    if bs.SyncChecker.Syncing() {
        return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
    }

    headState, err := bs.HeadFetcher.HeadState(ctx)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
        return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
    }
    currSlot := bs.GenesisTimeFetcher.CurrentSlot()

    if currSlot > headState.Slot() {
        headRoot, err := bs.HeadFetcher.HeadRoot(ctx)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not retrieve head root: %v", err)
        }
        headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, currSlot)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", currSlot, err)
        }
    response, err := core.ComputeValidatorPerformance(ctx, req, bs.HeadFetcher, currSlot)
    if err != nil {
        return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not compute validator performance: %v", err.Err)
    }
    var validatorSummary []*precompute.Validator
    if headState.Version() == version.Phase0 {
        vp, bp, err := precompute.New(ctx, headState)
        if err != nil {
            return nil, err
        }
        vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
        if err != nil {
            return nil, err
        }
        headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
        if err != nil {
            return nil, err
        }
        validatorSummary = vp
    } else if headState.Version() >= version.Altair {
        vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
        if err != nil {
            return nil, err
        }
        vp, bp, err = altair.ProcessEpochParticipation(ctx, headState, bp, vp)
        if err != nil {
            return nil, err
        }
        headState, vp, err = altair.ProcessInactivityScores(ctx, headState, vp)
        if err != nil {
            return nil, err
        }
        headState, err = altair.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp)
        if err != nil {
            return nil, err
        }
        validatorSummary = vp
    } else {
        return nil, status.Errorf(codes.Internal, "Head state version %d not supported", headState.Version())
    }

    responseCap := len(req.Indices) + len(req.PublicKeys)
    validatorIndices := make([]primitives.ValidatorIndex, 0, responseCap)
    missingValidators := make([][]byte, 0, responseCap)

    filtered := map[primitives.ValidatorIndex]bool{} // Track filtered validators to prevent duplication in the response.
    // Convert the list of validator public keys to validator indices and add to the indices set.
    for _, pubKey := range req.PublicKeys {
        // Skip empty public key.
        if len(pubKey) == 0 {
            continue
        }
        pubkeyBytes := bytesutil.ToBytes48(pubKey)
        idx, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
        if !ok {
            // Validator index not found, track as missing.
            missingValidators = append(missingValidators, pubKey)
            continue
        }
        if !filtered[idx] {
            validatorIndices = append(validatorIndices, idx)
            filtered[idx] = true
        }
    }
    // Add provided indices to the indices set.
    for _, idx := range req.Indices {
        if !filtered[idx] {
            validatorIndices = append(validatorIndices, idx)
            filtered[idx] = true
        }
    }
    // Depending on the indices and public keys given, results might not be sorted.
    sort.Slice(validatorIndices, func(i, j int) bool {
        return validatorIndices[i] < validatorIndices[j]
    })

    currentEpoch := coreTime.CurrentEpoch(headState)
    responseCap = len(validatorIndices)
    pubKeys := make([][]byte, 0, responseCap)
    beforeTransitionBalances := make([]uint64, 0, responseCap)
    afterTransitionBalances := make([]uint64, 0, responseCap)
    effectiveBalances := make([]uint64, 0, responseCap)
    correctlyVotedSource := make([]bool, 0, responseCap)
    correctlyVotedTarget := make([]bool, 0, responseCap)
    correctlyVotedHead := make([]bool, 0, responseCap)
    inactivityScores := make([]uint64, 0, responseCap)
    // Append performance summaries.
    // Also track missing validators using public keys.
    for _, idx := range validatorIndices {
        val, err := headState.ValidatorAtIndexReadOnly(idx)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "could not get validator: %v", err)
        }
        pubKey := val.PublicKey()
        if uint64(idx) >= uint64(len(validatorSummary)) {
            // Not listed in validator summary yet; treat it as missing.
            missingValidators = append(missingValidators, pubKey[:])
            continue
        }
        if !helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
            // Inactive validator; treat it as missing.
            missingValidators = append(missingValidators, pubKey[:])
            continue
        }

        summary := validatorSummary[idx]
        pubKeys = append(pubKeys, pubKey[:])
        effectiveBalances = append(effectiveBalances, summary.CurrentEpochEffectiveBalance)
        beforeTransitionBalances = append(beforeTransitionBalances, summary.BeforeEpochTransitionBalance)
        afterTransitionBalances = append(afterTransitionBalances, summary.AfterEpochTransitionBalance)
        correctlyVotedTarget = append(correctlyVotedTarget, summary.IsPrevEpochTargetAttester)
        correctlyVotedHead = append(correctlyVotedHead, summary.IsPrevEpochHeadAttester)

        if headState.Version() == version.Phase0 {
            correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
        } else {
            correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
            inactivityScores = append(inactivityScores, summary.InactivityScore)
        }
    }

    return &ethpb.ValidatorPerformanceResponse{
        PublicKeys:                    pubKeys,
        CorrectlyVotedSource:          correctlyVotedSource,
        CorrectlyVotedTarget:          correctlyVotedTarget, // In altair, when this is true then the attestation was definitely included.
        CorrectlyVotedHead:            correctlyVotedHead,
        CurrentEffectiveBalances:      effectiveBalances,
        BalancesBeforeEpochTransition: beforeTransitionBalances,
        BalancesAfterEpochTransition:  afterTransitionBalances,
        MissingValidators:             missingValidators,
        InactivityScores:              inactivityScores, // Only populated in Altair
    }, nil
    return response, nil
}

// GetIndividualVotes retrieves individual voting status of validators.
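The hunk above replaces the long inline computation with a call into the shared rpc/core package, so the gRPC handler and the new HTTP handler added later in this diff consume one error value and map it to their own transports. A hedged sketch of that pattern with hypothetical names (these are not the core package's actual types):

package main

import (
    "errors"
    "fmt"
    "net/http"
)

// Hypothetical reason codes, standing in for core's error reasons.
type Reason int

const (
    Internal Reason = iota
    Unavailable
)

// Hypothetical shared error type carrying a reason plus the wrapped error.
type RPCError struct {
    Reason Reason
    Err    error
}

// Each transport supplies its own mapping from the shared reason,
// mirroring core.ErrorReasonToGRPC / core.ErrorReasonToHTTP in this diff.
func reasonToHTTP(r Reason) int {
    if r == Unavailable {
        return http.StatusServiceUnavailable
    }
    return http.StatusInternalServerError
}

func main() {
    e := &RPCError{Reason: Unavailable, Err: errors.New("syncing")}
    fmt.Println(reasonToHTTP(e.Reason), e.Err) // 503 syncing
}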
@@ -25,7 +25,7 @@ func (vs *Server) setSyncAggregate(ctx context.Context, blk interfaces.SignedBea
        log.WithError(err).Error("Could not get sync aggregate")
        emptySig := [96]byte{0xC0}
        emptyAggregate := &ethpb.SyncAggregate{
            SyncCommitteeBits:      make([]byte, params.BeaconConfig().SyncCommitteeSize),
            SyncCommitteeBits:      make([]byte, params.BeaconConfig().SyncCommitteeSize/8),
            SyncCommitteeSignature: emptySig[:],
        }
        if err := blk.SetSyncAggregate(emptyAggregate); err != nil {
@@ -21,7 +21,7 @@ func TestServer_SetSyncAggregate_EmptyCase(t *testing.T) {

    emptySig := [96]byte{0xC0}
    want := &ethpb.SyncAggregate{
        SyncCommitteeBits:      make([]byte, params.BeaconConfig().SyncCommitteeSize),
        SyncCommitteeBits:      make([]byte, params.BeaconConfig().SyncCommitteeSize/8),
        SyncCommitteeSignature: emptySig[:],
    }
    require.DeepEqual(t, want, agg)
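The /8 in both hunks reflects that SyncCommitteeBits is a bitvector with one bit per sync committee member, so its byte length is the member count divided by eight. A minimal sketch of the arithmetic (512 is the mainnet SYNC_COMMITTEE_SIZE preset; other presets differ):

package main

import "fmt"

func main() {
    // One bit per sync committee member; mainnet committee size is 512.
    const syncCommitteeSize = 512
    bits := make([]byte, syncCommitteeSize/8) // 64 bytes, not 512
    fmt.Println(len(bits))                    // 64
}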
40 beacon-chain/rpc/prysm/validator/BUILD.bazel Normal file
@@ -0,0 +1,40 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "server.go",
        "validator_performance.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/validator",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/sync:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//network:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["validator_performance_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/sync/initial-sync/testing:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
    ],
)
14 beacon-chain/rpc/prysm/validator/server.go Normal file
@@ -0,0 +1,14 @@
package validator

import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)

// Server defines a server implementation for HTTP endpoints, providing
// access to data relevant to the Ethereum Beacon Chain.
type Server struct {
    GenesisTimeFetcher blockchain.TimeFetcher
    SyncChecker        sync.Checker
    HeadFetcher        blockchain.HeadFetcher
}
78 beacon-chain/rpc/prysm/validator/validator_performance.go Normal file
@@ -0,0 +1,78 @@
package validator

import (
    "encoding/json"
    "net/http"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/network"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

type ValidatorPerformanceRequest struct {
    PublicKeys [][]byte                    `json:"public_keys,omitempty"`
    Indices    []primitives.ValidatorIndex `json:"indices,omitempty"`
}

type ValidatorPerformanceResponse struct {
    PublicKeys                    [][]byte `json:"public_keys,omitempty"`
    CorrectlyVotedSource          []bool   `json:"correctly_voted_source,omitempty"`
    CorrectlyVotedTarget          []bool   `json:"correctly_voted_target,omitempty"`
    CorrectlyVotedHead            []bool   `json:"correctly_voted_head,omitempty"`
    CurrentEffectiveBalances      []uint64 `json:"current_effective_balances,omitempty"`
    BalancesBeforeEpochTransition []uint64 `json:"balances_before_epoch_transition,omitempty"`
    BalancesAfterEpochTransition  []uint64 `json:"balances_after_epoch_transition,omitempty"`
    MissingValidators             [][]byte `json:"missing_validators,omitempty"`
    InactivityScores              []uint64 `json:"inactivity_scores,omitempty"`
}

// GetValidatorPerformance is an HTTP handler for GetValidatorPerformance.
func (vs *Server) GetValidatorPerformance(w http.ResponseWriter, r *http.Request) {
    if vs.SyncChecker.Syncing() {
        handleHTTPError(w, "Syncing", http.StatusServiceUnavailable)
        return
    }
    ctx := r.Context()
    currSlot := vs.GenesisTimeFetcher.CurrentSlot()
    var req ValidatorPerformanceRequest
    if r.Body != http.NoBody {
        if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
            handleHTTPError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
            return
        }
    }
    computed, err := core.ComputeValidatorPerformance(
        ctx,
        &ethpb.ValidatorPerformanceRequest{
            PublicKeys: req.PublicKeys,
            Indices:    req.Indices,
        },
        vs.HeadFetcher,
        currSlot,
    )
    if err != nil {
        handleHTTPError(w, "Could not compute validator performance: "+err.Err.Error(), core.ErrorReasonToHTTP(err.Reason))
        return
    }
    response := &ValidatorPerformanceResponse{
        PublicKeys:                    computed.PublicKeys,
        CorrectlyVotedSource:          computed.CorrectlyVotedSource,
        CorrectlyVotedTarget:          computed.CorrectlyVotedTarget, // In altair, when this is true then the attestation was definitely included.
        CorrectlyVotedHead:            computed.CorrectlyVotedHead,
        CurrentEffectiveBalances:      computed.CurrentEffectiveBalances,
        BalancesBeforeEpochTransition: computed.BalancesBeforeEpochTransition,
        BalancesAfterEpochTransition:  computed.BalancesAfterEpochTransition,
        MissingValidators:             computed.MissingValidators,
        InactivityScores:              computed.InactivityScores, // Only populated in Altair
    }
    network.WriteJson(w, response)
}

func handleHTTPError(w http.ResponseWriter, message string, code int) {
    errJson := &network.DefaultErrorJson{
        Message: message,
        Code:    code,
    }
    network.WriteError(w, errJson)
}
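One way to call the new endpoint once the route is registered. This is a hedged sketch: 127.0.0.1:3500 is a placeholder for the node's HTTP port, and the route is registered with the GET method later in this diff, so the optional JSON body rides on a GET request here.

package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Placeholder address for a local beacon node HTTP server.
    url := "http://127.0.0.1:3500/prysm/validators/performance"
    // The handler decodes an optional JSON body; send validator indices here.
    req, err := http.NewRequest(http.MethodGet, url, bytes.NewBufferString(`{"indices":[0,1]}`))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/json")
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    out, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Status, string(out))
}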
453 beacon-chain/rpc/prysm/validator/validator_performance_test.go Normal file
@@ -0,0 +1,453 @@
package validator

import (
    "bytes"
    "context"
    "encoding/json"
    "io"
    "net/http"
    "net/http/httptest"
    "testing"
    "time"

    "github.com/prysmaticlabs/go-bitfield"
    mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/runtime/version"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
    "github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestServer_GetValidatorPerformance(t *testing.T) {
    t.Run("Syncing", func(t *testing.T) {
        vs := &Server{
            SyncChecker: &mockSync.Sync{IsSyncing: true},
        }

        var buf bytes.Buffer
        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)

        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        require.Equal(t, http.StatusServiceUnavailable, rawResp.StatusCode)
    })
    t.Run("OK", func(t *testing.T) {
        helpers.ClearCache()
        params.SetupTestConfigCleanup(t)
        params.OverrideBeaconConfig(params.MinimalSpecConfig())

        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        headState, err := util.NewBeaconState()
        require.NoError(t, err)
        headState = setHeadState(t, headState, publicKeys)
        require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))

        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                State: headState,
            },
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
        }
        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{101, 102},
            BalancesAfterEpochTransition:  []uint64{0, 0},
            MissingValidators:             [][]byte{publicKeys[0][:]},
        }

        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
        }
        var buf bytes.Buffer
        err = json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Indices", func(t *testing.T) {
        ctx := context.Background()
        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        headState, err := util.NewBeaconState()
        require.NoError(t, err)
        headState = setHeadState(t, headState, publicKeys)

        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                // 10 epochs into the future.
                State: headState,
            },
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
        }
        c := headState.Copy()
        vp, bp, err := precompute.New(ctx, c)
        require.NoError(t, err)
        vp, bp, err = precompute.ProcessAttestations(ctx, c, vp, bp)
        require.NoError(t, err)
        _, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
        require.NoError(t, err)
        extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth

        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{extraBal, extraBal + params.BeaconConfig().GweiPerEth},
            BalancesAfterEpochTransition:  []uint64{vp[1].AfterEpochTransitionBalance, vp[2].AfterEpochTransitionBalance},
            MissingValidators:             [][]byte{publicKeys[0][:]},
        }
        request := &ValidatorPerformanceRequest{
            Indices: []primitives.ValidatorIndex{2, 1, 0},
        }
        var buf bytes.Buffer
        err = json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Indices Pubkeys", func(t *testing.T) {
        ctx := context.Background()
        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        headState, err := util.NewBeaconState()
        require.NoError(t, err)
        headState = setHeadState(t, headState, publicKeys)

        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                // 10 epochs into the future.
                State: headState,
            },
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
        }
        c := headState.Copy()
        vp, bp, err := precompute.New(ctx, c)
        require.NoError(t, err)
        vp, bp, err = precompute.ProcessAttestations(ctx, c, vp, bp)
        require.NoError(t, err)
        _, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
        require.NoError(t, err)
        extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth

        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{extraBal, extraBal + params.BeaconConfig().GweiPerEth},
            BalancesAfterEpochTransition:  []uint64{vp[1].AfterEpochTransitionBalance, vp[2].AfterEpochTransitionBalance},
            MissingValidators:             [][]byte{publicKeys[0][:]},
        }
        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:]},
            Indices:    []primitives.ValidatorIndex{1, 2},
        }
        var buf bytes.Buffer
        err = json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Altair OK", func(t *testing.T) {
        helpers.ClearCache()
        params.SetupTestConfigCleanup(t)
        params.OverrideBeaconConfig(params.MinimalSpecConfig())

        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        epoch := primitives.Epoch(1)
        headState, _ := util.DeterministicGenesisStateAltair(t, 32)
        require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
        headState = setHeadState(t, headState, publicKeys)

        require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
        require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                State: headState,
            },
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
        }
        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{101, 102},
            BalancesAfterEpochTransition:  []uint64{0, 0},
            MissingValidators:             [][]byte{publicKeys[0][:]},
            InactivityScores:              []uint64{0, 0},
        }
        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
        }
        var buf bytes.Buffer
        err := json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Bellatrix OK", func(t *testing.T) {
        helpers.ClearCache()
        params.SetupTestConfigCleanup(t)
        params.OverrideBeaconConfig(params.MinimalSpecConfig())

        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        epoch := primitives.Epoch(1)
        headState, _ := util.DeterministicGenesisStateBellatrix(t, 32)
        require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
        headState = setHeadState(t, headState, publicKeys)

        require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
        require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                State: headState,
            },
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
        }
        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{101, 102},
            BalancesAfterEpochTransition:  []uint64{0, 0},
            MissingValidators:             [][]byte{publicKeys[0][:]},
            InactivityScores:              []uint64{0, 0},
        }
        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
        }
        var buf bytes.Buffer
        err := json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Capella OK", func(t *testing.T) {
        helpers.ClearCache()
        params.SetupTestConfigCleanup(t)
        params.OverrideBeaconConfig(params.MinimalSpecConfig())

        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        epoch := primitives.Epoch(1)
        headState, _ := util.DeterministicGenesisStateCapella(t, 32)
        require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
        headState = setHeadState(t, headState, publicKeys)

        require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
        require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                State: headState,
            },
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
        }
        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{101, 102},
            BalancesAfterEpochTransition:  []uint64{0, 0},
            MissingValidators:             [][]byte{publicKeys[0][:]},
            InactivityScores:              []uint64{0, 0},
        }
        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
        }
        var buf bytes.Buffer
        err := json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
}

func setHeadState(t *testing.T, headState state.BeaconState, publicKeys [][48]byte) state.BeaconState {
    epoch := primitives.Epoch(1)
    require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
    if headState.Version() < version.Altair {
        atts := make([]*ethpb.PendingAttestation, 3)
        for i := 0; i < len(atts); i++ {
            atts[i] = &ethpb.PendingAttestation{
                Data: &ethpb.AttestationData{
                    Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
                    Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
                },
                AggregationBits: bitfield.Bitlist{},
                InclusionDelay:  1,
            }
            require.NoError(t, headState.AppendPreviousEpochAttestations(atts[i]))
        }
    }

    defaultBal := params.BeaconConfig().MaxEffectiveBalance
    extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth
    balances := []uint64{defaultBal, extraBal, extraBal + params.BeaconConfig().GweiPerEth}
    require.NoError(t, headState.SetBalances(balances))

    validators := []*ethpb.Validator{
        {
            PublicKey:       publicKeys[0][:],
            ActivationEpoch: 5,
            ExitEpoch:       params.BeaconConfig().FarFutureEpoch,
        },
        {
            PublicKey:        publicKeys[1][:],
            EffectiveBalance: defaultBal,
            ActivationEpoch:  0,
            ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
        },
        {
            PublicKey:        publicKeys[2][:],
            EffectiveBalance: defaultBal,
            ActivationEpoch:  0,
            ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
        },
    }
    require.NoError(t, headState.SetValidators(validators))
    return headState
}
@@ -6,6 +6,7 @@ import (
    "context"
    "fmt"
    "net"
    "net/http"
    "sync"

    "github.com/gorilla/mux"
@@ -37,10 +38,12 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/rewards"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/validator"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
    nodeprysm "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/node"
    beaconv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/beacon"
    debugv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/debug"
    nodev1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/node"
    validatorv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
    httpserver "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/validator"
    slasherservice "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
@@ -216,15 +219,17 @@ func (s *Service) Start() {
        Stater:      stater,
        HeadFetcher: s.cfg.HeadFetcher,
    }
    s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", rewardsServer.BlockRewards)
    s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/attestations/{epoch}", rewardsServer.AttestationRewards)

    s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", rewardsServer.BlockRewards).Methods(http.MethodGet)
    s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/attestations/{epoch}", rewardsServer.AttestationRewards).Methods(http.MethodPost)
    s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/sync_committee/{block_id}", rewardsServer.SyncCommitteeRewards).Methods(http.MethodPost)

    builderServer := &rpcBuilder.Server{
        FinalizationFetcher:   s.cfg.FinalizationFetcher,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        Stater:                stater,
    }
    s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals)
    s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals).Methods(http.MethodGet)

    validatorServer := &validatorv1alpha1.Server{
        Ctx: s.ctx,
@@ -306,6 +311,22 @@ func (s *Service) Start() {
        ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
    }

    nodeServerPrysm := &nodeprysm.Server{
        BeaconDB:                  s.cfg.BeaconDB,
        SyncChecker:               s.cfg.SyncService,
        OptimisticModeFetcher:     s.cfg.OptimisticModeFetcher,
        GenesisTimeFetcher:        s.cfg.GenesisTimeFetcher,
        PeersFetcher:              s.cfg.PeersFetcher,
        PeerManager:               s.cfg.PeerManager,
        MetadataProvider:          s.cfg.MetadataProvider,
        HeadFetcher:               s.cfg.HeadFetcher,
        ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
    }

    s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.ListTrustedPeer).Methods(http.MethodGet)
    s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.AddTrustedPeer).Methods(http.MethodPost)
    s.cfg.Router.HandleFunc("/prysm/node/trusted_peers/{peer_id}", nodeServerPrysm.RemoveTrustedPeer).Methods(http.MethodDelete)

    beaconChainServer := &beaconv1alpha1.Server{
        Ctx:      s.ctx,
        BeaconDB: s.cfg.BeaconDB,
@@ -354,8 +375,14 @@ func (s *Service) Start() {
        FinalizationFetcher: s.cfg.FinalizationFetcher,
        ForkchoiceFetcher:   s.cfg.ForkchoiceFetcher,
    }
    s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconChainServerV1.PublishBlockV2)
    s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconChainServerV1.PublishBlindedBlockV2)
    httpServer := &httpserver.Server{
        GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
        HeadFetcher:        s.cfg.HeadFetcher,
        SyncChecker:        s.cfg.SyncService,
    }
    s.cfg.Router.HandleFunc("/prysm/validators/performance", httpServer.GetValidatorPerformance).Methods(http.MethodGet)
    s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconChainServerV1.PublishBlockV2).Methods(http.MethodPost)
    s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconChainServerV1.PublishBlindedBlockV2).Methods(http.MethodPost)
    ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
    ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerV1)
    ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
@@ -364,7 +391,6 @@ func (s *Service) Start() {
    ethpbservice.RegisterEventsServer(s.grpcServer, &events.Server{
        Ctx:               s.ctx,
        StateNotifier:     s.cfg.StateNotifier,
        BlockNotifier:     s.cfg.BlockNotifier,
        OperationNotifier: s.cfg.OperationNotifier,
        HeadFetcher:       s.cfg.HeadFetcher,
        ChainInfoFetcher:  s.cfg.ChainInfoFetcher,
@@ -207,7 +207,7 @@ func handle32ByteArrays(val [][32]byte, indices []uint64, convertAll bool) ([][3
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
    length := len(indices)
    if convertAll {
        length = len(val)
        return stateutil.OptimizedValidatorRoots(val)
    }
    roots := make([][32]byte, 0, length)
    rootCreator := func(input *ethpb.Validator) error {
@@ -218,15 +218,6 @@ func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll b
        roots = append(roots, newRoot)
        return nil
    }
    if convertAll {
        for i := range val {
            err := rootCreator(val[i])
            if err != nil {
                return nil, err
            }
        }
        return roots, nil
    }
    if len(val) > 0 {
        for _, idx := range indices {
            if idx > uint64(len(val))-1 {
@@ -155,6 +155,20 @@ func (e *epochBoundaryState) put(blockRoot [32]byte, s state.BeaconState) error
func (e *epochBoundaryState) delete(blockRoot [32]byte) error {
    e.lock.Lock()
    defer e.lock.Unlock()
    rInfo, ok, err := e.getByBlockRootLockFree(blockRoot)
    if err != nil {
        return err
    }
    if !ok {
        return nil
    }
    slotInfo := &slotRootInfo{
        slot:      rInfo.state.Slot(),
        blockRoot: blockRoot,
    }
    if err = e.slotRootCache.Delete(slotInfo); err != nil {
        return err
    }
    return e.rootStateCache.Delete(&rootStateInfo{
        root: blockRoot,
    })
@@ -36,12 +36,12 @@ func (_ *State) replayBlocks(
    var err error

    start := time.Now()
    log = log.WithFields(logrus.Fields{
    rLog := log.WithFields(logrus.Fields{
        "startSlot": state.Slot(),
        "endSlot":   targetSlot,
        "diff":      targetSlot - state.Slot(),
    })
    log.Debug("Replaying state")
    rLog.Debug("Replaying state")
    // The input block list is sorted in decreasing slots order.
    if len(signed) > 0 {
        for i := len(signed) - 1; i >= 0; i-- {
@@ -71,7 +71,7 @@ func (_ *State) replayBlocks(
    }

    duration := time.Since(start)
    log.WithFields(logrus.Fields{
    rLog.WithFields(logrus.Fields{
        "duration": duration,
    }).Debug("Replayed state")
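The rename from log to rLog above avoids reassigning the package-level logrus entry inside the method, which would otherwise accumulate fields across calls and race under concurrency. A small self-contained sketch of the pitfall (illustrative only, not Prysm code):

package main

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "stategen")

func replayBad(slot uint64) {
    // Assigns to the package-level entry: every call adds fields that
    // leak into all later log lines, and concurrent calls race.
    log = log.WithField("startSlot", slot)
    log.Debug("Replaying state")
}

func replayGood(slot uint64) {
    // A local entry keeps the fields scoped to this one call.
    rLog := log.WithField("startSlot", slot)
    rLog.Debug("Replaying state")
}

func main() {
    replayGood(42)
    replayBad(42)
}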
@@ -2,8 +2,10 @@ package stategen

import (
    "context"
    "fmt"
    "math"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -79,6 +81,41 @@ func (s *State) saveStateByRoot(ctx context.Context, blockRoot [32]byte, st stat
        if err := s.epochBoundaryStateCache.put(blockRoot, st); err != nil {
            return err
        }
    } else {
        // Always check that the correct epoch boundary states have been saved
        // for the current epoch.
        epochStart, err := slots.EpochStart(slots.ToEpoch(st.Slot()))
        if err != nil {
            return err
        }
        bRoot, err := helpers.BlockRootAtSlot(st, epochStart)
        if err != nil {
            return err
        }
        _, ok, err := s.epochBoundaryStateCache.getByBlockRoot([32]byte(bRoot))
        if err != nil {
            return err
        }

        // We only recover the boundary state under one condition:
        //
        // 1) The epoch boundary was skipped due to a missed slot; we then
        // recover by saving the state at that particular slot here.
        if !ok {
            // Only recover the state if it is in our hot state cache, otherwise we
            // simply skip this step.
            if s.hotStateCache.has([32]byte(bRoot)) {
                log.WithFields(logrus.Fields{
                    "slot": epochStart,
                    "root": fmt.Sprintf("%#x", bRoot),
                }).Debug("Recovering state for epoch boundary cache")

                hState := s.hotStateCache.get([32]byte(bRoot))
                if err := s.epochBoundaryStateCache.put([32]byte(bRoot), hState); err != nil {
                    return err
                }
            }
        }
    }

    // On an intermediate slot, save state summary.
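The recovery branch above rests on simple slot arithmetic: the boundary slot of an epoch is epoch * SLOTS_PER_EPOCH, and if no block landed on that slot, the boundary root resolves to an earlier block whose state must be recovered into the cache. A hedged sketch of the arithmetic with plain integers (32 is the mainnet SLOTS_PER_EPOCH preset):

package main

import "fmt"

func main() {
    const slotsPerEpoch = 32 // mainnet preset

    slot := uint64(73)                  // some slot in epoch 2
    epoch := slot / slotsPerEpoch       // 2
    epochStart := epoch * slotsPerEpoch // 64, the boundary slot
    fmt.Println(epoch, epochStart)      // 2 64
    // If slot 64 was skipped, BlockRootAtSlot(64) resolves to the root of
    // the last block before it, and that state is what the hunk above
    // restores into the epoch boundary cache.
}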
@@ -7,6 +7,7 @@ import (
    testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
    doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/testing/assert"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
    "github.com/prysmaticlabs/prysm/v4/testing/util"
@@ -137,6 +138,34 @@ func TestSaveState_NoSaveNotEpochBoundary(t *testing.T) {
    require.Equal(t, false, beaconDB.HasState(ctx, r))
}

func TestSaveState_RecoverForEpochBoundary(t *testing.T) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    service := New(beaconDB, doublylinkedtree.New())

    beaconState, _ := util.DeterministicGenesisState(t, 32)
    require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
    r := [32]byte{'A'}
    boundaryRoot := [32]byte{'B'}
    require.NoError(t, beaconState.UpdateBlockRootAtIndex(0, boundaryRoot))

    b := util.NewBeaconBlock()
    util.SaveBlock(t, ctx, beaconDB, b)
    gRoot, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
    // Save boundary state to the hot state cache.
    boundaryState, _ := util.DeterministicGenesisState(t, 32)
    service.hotStateCache.put(boundaryRoot, boundaryState)
    require.NoError(t, service.SaveState(ctx, r, beaconState))

    rInfo, ok, err := service.epochBoundaryStateCache.getByBlockRoot(boundaryRoot)
    assert.NoError(t, err)
    assert.Equal(t, true, ok, "state does not exist in cache")
    assert.Equal(t, rInfo.root, boundaryRoot, "incorrect root of root state info")
    assert.Equal(t, rInfo.state.Slot(), primitives.Slot(0), "incorrect slot of state")
}

func TestSaveState_CanSaveHotStateToDB(t *testing.T) {
    hook := logTest.NewGlobal()
    ctx := context.Background()
@@ -34,6 +34,7 @@ go_library(
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -3,12 +3,15 @@ package stateutil
import (
"bytes"
"encoding/binary"
"runtime"
"sync"

"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
)

const (
@@ -30,7 +33,7 @@ func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
}

func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
roots, err := optimizedValidatorRoots(validators)
roots, err := OptimizedValidatorRoots(validators)
if err != nil {
return [32]byte{}, err
}
@@ -51,19 +54,46 @@ func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
return res, nil
}

func optimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
func hashValidatorHelper(validators []*ethpb.Validator, roots [][32]byte, j int, groupSize int, wg *sync.WaitGroup) {
defer wg.Done()
for i := 0; i < groupSize; i++ {
fRoots, err := ValidatorFieldRoots(validators[j*groupSize+i])
if err != nil {
logrus.WithError(err).Error("could not get validator field roots")
return
}
for k, root := range fRoots {
roots[(j*groupSize+i)*validatorFieldRoots+k] = root
}
}
}

// OptimizedValidatorRoots uses an optimized routine with gohashtree in order to
// derive a list of validator roots from a list of validator objects.
func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
// Exit early if no validators are provided.
if len(validators) == 0 {
return [][32]byte{}, nil
}
roots := make([][32]byte, 0, len(validators)*validatorFieldRoots)
for i := 0; i < len(validators); i++ {
wg := sync.WaitGroup{}
n := runtime.GOMAXPROCS(0)
rootsSize := len(validators) * validatorFieldRoots
groupSize := len(validators) / n
roots := make([][32]byte, rootsSize)
wg.Add(n - 1)
for j := 0; j < n-1; j++ {
go hashValidatorHelper(validators, roots, j, groupSize, &wg)
}
for i := (n - 1) * groupSize; i < len(validators); i++ {
fRoots, err := ValidatorFieldRoots(validators[i])
if err != nil {
return [][32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
}
roots = append(roots, fRoots...)
for k, root := range fRoots {
roots[i*validatorFieldRoots+k] = root
}
}
wg.Wait()

// A validator's tree can be represented with a depth of 3. As log2(8) = 3
// Using this property we can lay out all the individual fields of a
@@ -71,9 +101,7 @@ func optimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error)
for i := 0; i < validatorTreeDepth; i++ {
// Overwrite input lists as we are hashing by level
// and only need the highest level to proceed.
outputLen := len(roots) / 2
htr.VectorizedSha256(roots, roots)
roots = roots[:outputLen]
roots = htr.VectorizedSha256(roots)
}
return roots, nil
}
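To make the hashing loop above concrete: each validator contributes validatorFieldRoots = 8 leaves, and validatorTreeDepth = 3 halving rounds (log2(8) = 3) reduce them to one root per validator. A minimal sketch of those rounds, using crypto/sha256 over concatenated 64-byte pairs as a stand-in for gohashtree's vectorized pass:

package main

import (
    "crypto/sha256"
    "fmt"
)

// hashLevel hashes adjacent pairs of 32-byte chunks, halving the slice.
// This is one level of the work htr.VectorizedSha256 does in a single
// vectorized call.
func hashLevel(in [][32]byte) [][32]byte {
    out := make([][32]byte, len(in)/2)
    for i := range out {
        pair := append(in[2*i][:], in[2*i+1][:]...)
        out[i] = sha256.Sum256(pair)
    }
    return out
}

func main() {
    // 2 validators x 8 field roots each = 16 leaves.
    leaves := make([][32]byte, 16)
    for depth := 0; depth < 3; depth++ { // validatorTreeDepth rounds
        leaves = hashLevel(leaves)
    }
    fmt.Println(len(leaves)) // prints: 2, one root per validator
}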
@@ -3,11 +3,13 @@ package stateutil
import (
"reflect"
"strings"
"sync"
"testing"

mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestValidatorConstants(t *testing.T) {
@@ -30,3 +32,28 @@ func TestValidatorConstants(t *testing.T) {
_, err := ValidatorRegistryRoot([]*ethpb.Validator{v})
assert.NoError(t, err)
}

func TestHashValidatorHelper(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
v := &ethpb.Validator{}
valList := make([]*ethpb.Validator, 10*validatorFieldRoots)
for i := range valList {
valList[i] = v
}
roots := make([][32]byte, len(valList))
hashValidatorHelper(valList, roots, 2, 2, &wg)
for i := 0; i < 4*validatorFieldRoots; i++ {
require.Equal(t, [32]byte{}, roots[i])
}
emptyValRoots, err := ValidatorFieldRoots(v)
require.NoError(t, err)
for i := 4; i < 6; i++ {
for j := 0; j < validatorFieldRoots; j++ {
require.Equal(t, emptyValRoots[j], roots[i*validatorFieldRoots+j])
}
}
for i := 6 * validatorFieldRoots; i < 10*validatorFieldRoots; i++ {
require.Equal(t, [32]byte{}, roots[i])
}
}
@@ -48,8 +48,7 @@ func merkleizePubkey(pubkey []byte) ([32]byte, error) {
if err != nil {
return [32]byte{}, err
}
outputChunk := make([][32]byte, 1)
htr.VectorizedSha256(chunks, outputChunk)
outputChunk := htr.VectorizedSha256(chunks)

return outputChunk[0], nil
}

@@ -71,9 +71,7 @@ func ReturnTrieLayerVariable(elements [][32]byte, length uint64) [][]*[32]byte {
}

layers[i+1] = make([]*[32]byte, layerLen/2)
newElems := make([][32]byte, layerLen/2)
htr.VectorizedSha256(elements, newElems)
elements = newElems
elements = htr.VectorizedSha256(elements)
for j := range elements {
layers[i+1][j] = &elements[j]
}
@@ -295,9 +293,7 @@ func MerkleizeTrieLeaves(layers [][][32]byte, hashLayer [][32]byte) ([][][32]byt
if !math.IsPowerOf2(uint64(len(hashLayer))) {
return nil, nil, errors.Errorf("hash layer is a non power of 2: %d", len(hashLayer))
}
newLayer := make([][32]byte, len(hashLayer)/2)
htr.VectorizedSha256(hashLayer, newLayer)
hashLayer = newLayer
hashLayer = htr.VectorizedSha256(hashLayer)
layers[i] = hashLayer
i++
}
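The three hunks above all track one signature change: htr.VectorizedSha256 now allocates and returns the output slice (half the input length) instead of filling a caller-provided one. A minimal sketch of the new call shape, assuming the htr package imported in the hunks above and an even-length input:

package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v4/crypto/hash/htr"
)

func main() {
    chunks := make([][32]byte, 4)
    // Old shape, for contrast:
    //   out := make([][32]byte, len(chunks)/2)
    //   htr.VectorizedSha256(chunks, out)
    // New shape: the correctly sized output is returned directly.
    out := htr.VectorizedSha256(chunks)
    fmt.Println(len(out)) // prints: 2
}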
@@ -75,7 +75,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
rpcBlocksByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
}
if err := batch.error(); err != nil {
log.WithError(err).Info("error in BlocksByRange batch")
log.WithError(err).Debug("error in BlocksByRange batch")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
@@ -139,7 +139,7 @@ func (s *Service) writeBlockBatchToStream(ctx context.Context, batch blockBatch,
continue
}
if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
log.WithError(chunkErr).Error("Could not send a chunked response")
log.WithError(chunkErr).Debug("Could not send a chunked response")
return chunkErr
}
}
17
cache/nonblocking/BUILD.bazel
vendored
Normal file
@@ -0,0 +1,17 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"list.go",
"lru.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/cache/nonblocking",
visibility = ["//visibility:public"],
)

go_test(
name = "go_default_test",
srcs = ["lru_test.go"],
embed = [":go_default_library"],
)
123
cache/nonblocking/list.go
vendored
Normal file
@@ -0,0 +1,123 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE_list file.
package nonblocking

// entry is an LRU entry
type entry[K comparable, V any] struct {
// Next and previous pointers in the doubly-linked list of elements.
// To simplify the implementation, internally a list l is implemented
// as a ring, such that &l.root is both the next element of the last
// list element (l.Back()) and the previous element of the first list
// element (l.Front()).
next, prev *entry[K, V]

// The list to which this element belongs.
list *lruList[K, V]

// The LRU key of this element.
key K

// The value stored with this element.
value V
}

// lruList represents a doubly linked list.
// The zero value for lruList is an empty list ready to use.
type lruList[K comparable, V any] struct {
root entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
len int // current list length excluding (this) sentinel element
}

// init initializes or clears list l.
func (l *lruList[K, V]) init() *lruList[K, V] {
l.root.next = &l.root
l.root.prev = &l.root
l.len = 0
return l
}

// newList returns an initialized list.
func newList[K comparable, V any]() *lruList[K, V] { return new(lruList[K, V]).init() }

// length returns the number of elements of list l.
// The complexity is O(1).
func (l *lruList[K, V]) length() int { return l.len }

// back returns the last element of list l or nil if the list is empty.
func (l *lruList[K, V]) back() *entry[K, V] {
if l.len == 0 {
return nil
}
return l.root.prev
}

// lazyInit lazily initializes a zero List value.
func (l *lruList[K, V]) lazyInit() {
if l.root.next == nil {
l.init()
}
}

// insert inserts e after at, increments l.len, and returns e.
func (l *lruList[K, V]) insert(e, at *entry[K, V]) *entry[K, V] {
e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
e.list = l
l.len++
return e
}

// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
func (l *lruList[K, V]) insertValue(k K, v V, at *entry[K, V]) *entry[K, V] {
return l.insert(&entry[K, V]{value: v, key: k}, at)
}

// remove removes e from its list, decrements l.len
func (l *lruList[K, V]) remove(e *entry[K, V]) V {
// If already removed, do nothing.
if e.prev == nil && e.next == nil {
return e.value
}
e.prev.next = e.next
e.next.prev = e.prev
e.next = nil // avoid memory leaks
e.prev = nil // avoid memory leaks
e.list = nil
l.len--

return e.value
}

// move moves e to next to at.
func (*lruList[K, V]) move(e, at *entry[K, V]) {
if e == at {
return
}
e.prev.next = e.next
e.next.prev = e.prev

e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
}

// pushFront inserts a new element e with value v at the front of list l and returns e.
func (l *lruList[K, V]) pushFront(k K, v V) *entry[K, V] {
l.lazyInit()
return l.insertValue(k, v, &l.root)
}

// moveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *lruList[K, V]) moveToFront(e *entry[K, V]) {
if e.list != l || l.root.next == e {
return
}
// see comment in List.Remove about initialization of l
l.move(e, &l.root)
}
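The ring layout described in the entry comment can be checked directly: after a single pushFront, the sentinel root and the sole element point at each other in both directions. A hypothetical in-package test sketch (not part of the diff) exercising that invariant:

package nonblocking

import "testing"

// TestRingInvariantSketch checks the ring property: with one element,
// root.next and root.prev both point at it, and the element wraps back
// to the sentinel root on both sides.
func TestRingInvariantSketch(t *testing.T) {
    l := newList[string, int]()
    e := l.pushFront("a", 1)
    if l.root.next != e || l.root.prev != e {
        t.Fatal("root should point at the only element in both directions")
    }
    if e.next != &l.root || e.prev != &l.root {
        t.Fatal("element should wrap back to the sentinel root")
    }
}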
135
cache/nonblocking/lru.go
vendored
Normal file
@@ -0,0 +1,135 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package nonblocking

import (
"errors"
"sync"
)

// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback[K comparable, V any] func(key K, value V)

// LRU implements a fixed size LRU cache with non-blocking reads
type LRU[K comparable, V any] struct {
itemsLock sync.RWMutex
evictListLock sync.RWMutex
size int
evictList *lruList[K, V]
items map[K]*entry[K, V]
onEvict EvictCallback[K, V]
}

// NewLRU constructs an LRU of the given size
func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) {
if size <= 0 {
return nil, errors.New("must provide a positive size")
}

c := &LRU[K, V]{
size: size,
evictList: newList[K, V](),
items: make(map[K]*entry[K, V]),
onEvict: onEvict,
}
return c, nil
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
// Check for existing item
c.itemsLock.RLock()
if ent, ok := c.items[key]; ok {
c.itemsLock.RUnlock()

c.evictListLock.Lock()
c.evictList.moveToFront(ent)
c.evictListLock.Unlock()
ent.value = value
return false
}
c.itemsLock.RUnlock()

// Add new item
c.evictListLock.Lock()
ent := c.evictList.pushFront(key, value)
c.evictListLock.Unlock()

c.itemsLock.Lock()
c.items[key] = ent
c.itemsLock.Unlock()

c.evictListLock.RLock()
evict := c.evictList.length() > c.size
c.evictListLock.RUnlock()

// Verify size not exceeded
if evict {
c.removeOldest()
}
return evict
}

// Get looks up a key's value from the cache.
func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
c.itemsLock.RLock()
if ent, ok := c.items[key]; ok {
c.itemsLock.RUnlock()

// Make this get function non-blocking for multiple readers.
go func() {
c.evictListLock.Lock()
c.evictList.moveToFront(ent)
c.evictListLock.Unlock()
}()

return ent.value, true
}
c.itemsLock.RUnlock()
return
}

// Len returns the number of items in the cache.
func (c *LRU[K, V]) Len() int {
c.evictListLock.RLock()
defer c.evictListLock.RUnlock()
return c.evictList.length()
}

// Resize changes the cache size.
func (c *LRU[K, V]) Resize(size int) (evicted int) {
diff := c.Len() - size
if diff < 0 {
diff = 0
}
for i := 0; i < diff; i++ {
c.removeOldest()
}
c.size = size
return diff
}

// removeOldest removes the oldest item from the cache.
func (c *LRU[K, V]) removeOldest() {
c.evictListLock.RLock()
if ent := c.evictList.back(); ent != nil {
c.evictListLock.RUnlock()
c.removeElement(ent)
return
}
c.evictListLock.RUnlock()
}

// removeElement is used to remove a given list element from the cache
func (c *LRU[K, V]) removeElement(e *entry[K, V]) {
c.evictListLock.Lock()
c.evictList.remove(e)
c.evictListLock.Unlock()

c.itemsLock.Lock()
delete(c.items, e.key)
c.itemsLock.Unlock()
if c.onEvict != nil {
c.onEvict(e.key, e.value)
}
}
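Putting the two files together, a minimal usage sketch of the cache (the key and value types here are arbitrary):

package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v4/cache/nonblocking"
)

func main() {
    // onEvict may also be nil; removeElement only calls it when set.
    onEvict := func(key string, value int) {
        fmt.Println("evicted", key, value)
    }
    cache, err := nonblocking.NewLRU[string, int](2, onEvict)
    if err != nil {
        panic(err)
    }
    cache.Add("a", 1)
    cache.Add("b", 2)
    cache.Add("c", 3) // exceeds size 2, evicts "a", the oldest entry
    if _, ok := cache.Get("a"); !ok {
        fmt.Println("a is gone")
    }
}

Note the tradeoff carried by Get: the recency update runs in a spawned goroutine, so concurrent readers never contend on the eviction-list lock, at the cost of LRU ordering being applied asynchronously rather than at the moment of the read.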
115
cache/nonblocking/lru_test.go
vendored
Normal file
@@ -0,0 +1,115 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package nonblocking

import (
"context"
"testing"
"time"
)

func TestLRU_Concurrency(t *testing.T) {
onEvicted := func(_ int, _ int) {}
size := 20
cache, err := NewLRU(size, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
defer cancel()
for i := 0; i < 100; i++ {
go func(j int) {
for {
if ctx.Err() != nil {
return
}
cache.Add(j, j)
cache.Get(j)
time.Sleep(time.Millisecond * 50)
}
}(i)
}
<-ctx.Done()
}

func TestLRU_Eviction(t *testing.T) {
evictCounter := 0
onEvicted := func(_ int, _ int) {
evictCounter++
}
size := 20
cache, err := NewLRU(size, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
for i := 0; i < 20; i++ {
cache.Add(i, i)
cache.Get(i)
}
cache.Add(20, 20)
if evictCounter != 1 {
t.Fatalf("should have evicted 1 element: %d", evictCounter)
}
}

// Test that Add returns true/false if an eviction occurred
func TestLRU_Add(t *testing.T) {
evictCounter := 0
onEvicted := func(_ int, _ int) {
evictCounter++
}
l, err := NewLRU(1, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}

if l.Add(1, 1) == true || evictCounter != 0 {
t.Errorf("should not have an eviction")
}
if l.Add(2, 2) == false || evictCounter != 1 {
t.Errorf("should have an eviction")
}
}

// Test that Resize can upsize and downsize
func TestLRU_Resize(t *testing.T) {
onEvictCounter := 0
onEvicted := func(k int, v int) {
onEvictCounter++
}
l, err := NewLRU(2, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}

// Downsize
l.Add(1, 1)
l.Add(2, 2)
evicted := l.Resize(1)
if evicted != 1 {
t.Errorf("1 element should have been evicted: %v", evicted)
}
if onEvictCounter != 1 {
t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter)
}

l.Add(3, 3)
if _, ok := l.Get(1); ok {
t.Errorf("Element 1 should have been evicted")
}

// Upsize
evicted = l.Resize(2)
if evicted != 0 {
t.Errorf("0 elements should have been evicted: %v", evicted)
}

l.Add(4, 4)
if _, ok := l.Get(3); !ok {
t.Errorf("Cache should have contained 2 elements")
}
if _, ok := l.Get(4); !ok {
t.Errorf("Cache should have contained 2 elements")
}
}
@@ -202,11 +202,7 @@ var (
Usage: "Sets the maximum number of headers that a deposit log query can fetch.",
Value: uint64(1000),
}
// EnableRegistrationCache is a temporary flag for enabling the validator registration cache instead of the db.
EnableRegistrationCache = &cli.BoolFlag{
Name: "enable-registration-cache",
Usage: "A temporary flag for enabling the validator registration cache instead of persisting in db. The cache will clear on restart.",
}

// WeakSubjectivityCheckpoint defines the weak subjectivity checkpoint the node must sync through to defend against long range attacks.
WeakSubjectivityCheckpoint = &cli.StringFlag{
Name: "weak-subjectivity-checkpoint",
@@ -58,7 +58,6 @@ var appFlags = []cli.Flag{
flags.InteropGenesisTimeFlag,
flags.SlotsPerArchivedPoint,
flags.EnableDebugRPCEndpoints,
flags.EnableRegistrationCache,
flags.SubscribeToAllSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,
@@ -74,6 +73,7 @@ var appFlags = []cli.Flag{
flags.MaxBuilderEpochMissedSlots,
flags.MaxBuilderConsecutiveMissedSlots,
flags.EngineEndpointTimeoutSeconds,
flags.LocalBlockValueBoost,
cmd.BackupWebhookOutputDir,
cmd.MinimalConfigFlag,
cmd.E2EConfigFlag,
@@ -8,7 +8,9 @@ go_library(
deps = [
"//beacon-chain/node:go_default_library",
"//beacon-chain/sync/genesis:go_default_library",
"//cmd/beacon-chain/sync/checkpoint:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)

@@ -4,6 +4,8 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/node"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/genesis"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/checkpoint"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
@@ -28,6 +30,10 @@ var (
func BeaconNodeOptions(c *cli.Context) (node.Option, error) {
statePath := c.Path(StatePath.Name)
remoteURL := c.String(BeaconAPIURL.Name)
if remoteURL == "" && c.String(checkpoint.RemoteURL.Name) != "" {
log.Infof("using checkpoint sync url %s for value in --%s flag", c.String(checkpoint.RemoteURL.Name), BeaconAPIURL.Name)
remoteURL = c.String(checkpoint.RemoteURL.Name)
}
if remoteURL != "" {
return func(node *node.BeaconNode) error {
var err error
@@ -113,7 +113,6 @@ var appHelpFlagGroups = []flagGroup{
flags.BlockBatchLimit,
flags.BlockBatchLimitBurstFactor,
flags.EnableDebugRPCEndpoints,
flags.EnableRegistrationCache,
flags.SubscribeToAllSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,
@@ -126,6 +125,7 @@ var appHelpFlagGroups = []flagGroup{
flags.MaxBuilderConsecutiveMissedSlots,
flags.EngineEndpointTimeoutSeconds,
flags.SlasherDirFlag,
flags.LocalBlockValueBoost,
checkpoint.BlockPath,
checkpoint.StatePath,
checkpoint.RemoteURL,
@@ -32,6 +32,12 @@ var (
Usage: deprecatedUsage,
Hidden: true,
}

deprecatedEnableRegistrationCache = &cli.BoolFlag{
Name: "enable-registration-cache",
Usage: deprecatedUsage,
Hidden: true,
}
)

// Deprecated flags for both the beacon node and validator client.
@@ -41,6 +47,7 @@ var deprecatedFlags = []cli.Flag{
deprecatedEnableReorgLateBlocks,
deprecatedDisableGossipBatchAggregation,
deprecatedBuildBlockParallel,
deprecatedEnableRegistrationCache,
}

// deprecatedBeaconFlags contains flags that are still used by other components
@@ -148,6 +148,13 @@ var (
Name: "disable-resource-manager",
Usage: "Disables running the libp2p resource manager",
}

// DisableRegistrationCache is a flag for disabling the validator registration cache and using the db instead.
DisableRegistrationCache = &cli.BoolFlag{
Name: "disable-registration-cache",
Usage: "A temporary flag for disabling the validator registration cache and using the db instead. Note: registrations do not clear on restart while using the db.",
}

aggregateParallel = &cli.BoolFlag{
Name: "aggregate-parallel",
Usage: "Enables parallel aggregation of attestations",
@@ -204,6 +211,7 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
aggregateSecondInterval,
aggregateThirdInterval,
disableResourceManager,
DisableRegistrationCache,
aggregateParallel,
}...)...)
@@ -84,6 +84,15 @@ type ProposerSettings struct {
DefaultConfig *ProposerOption
}

// ShouldBeSaved checks whether the settings should be persisted to the database.
// Pseudocode: conditions for being saved into the database
// 1. settings are not nil
// 2. propose config is not nil (this defines specific settings for each validator key); default config can be nil in this case and fall back to beacon node settings
// 3. default config is not nil, meaning it has at least fee recipient settings (this defines general settings for all validator keys, but keys will use settings from propose config if available); propose config can be nil in this case
func (settings *ProposerSettings) ShouldBeSaved() bool {
return settings != nil && (settings.ProposeConfig != nil || settings.DefaultConfig != nil && settings.DefaultConfig.FeeRecipientConfig != nil)
}

// ToPayload converts struct to ProposerSettingsPayload
func (ps *ProposerSettings) ToPayload() *validatorpb.ProposerSettingsPayload {
if ps == nil {
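The boolean in ShouldBeSaved is easy to misread because && binds tighter than || in Go. A standalone sketch with hypothetical stand-in types (not the real validator client structs), showing which combinations persist:

package main

import "fmt"

// Minimal stand-ins for the real settings types, only to exercise the
// save conditions listed in the comment above.
type feeRecipientConfig struct{}

type proposerOption struct{ FeeRecipientConfig *feeRecipientConfig }

type proposerSettings struct {
    ProposeConfig map[string]*proposerOption
    DefaultConfig *proposerOption
}

func (s *proposerSettings) shouldBeSaved() bool {
    return s != nil && (s.ProposeConfig != nil || s.DefaultConfig != nil && s.DefaultConfig.FeeRecipientConfig != nil)
}

func main() {
    var nilSettings *proposerSettings
    fmt.Println(nilSettings.shouldBeSaved()) // false: settings are nil

    withProposeConfig := &proposerSettings{ProposeConfig: map[string]*proposerOption{}}
    fmt.Println(withProposeConfig.shouldBeSaved()) // true: propose config set, default may be nil

    defaultNoFee := &proposerSettings{DefaultConfig: &proposerOption{}}
    fmt.Println(defaultNoFee.shouldBeSaved()) // false: default config lacks a fee recipient

    defaultWithFee := &proposerSettings{DefaultConfig: &proposerOption{FeeRecipientConfig: &feeRecipientConfig{}}}
    fmt.Println(defaultWithFee.shouldBeSaved()) // true: default config has a fee recipient
}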
Some files were not shown because too many files have changed in this diff.