Compare commits


22 Commits

Author SHA1 Message Date
nisdas 0ffccc9c98 add in changes 2023-06-21 06:47:07 +08:00
nisdas 1b915b51b0 Merge branch 'fcTesting' of https://github.com/prysmaticlabs/geth-sharding into fcTesting2 2023-06-21 06:30:30 +08:00
nisdas d35461affd clean up logs 2023-06-21 06:30:08 +08:00
nisdas ee159f3380 remove logs 2023-06-21 06:22:33 +08:00
nisdas 6b3d18cb77 remove logs 2023-06-21 06:21:56 +08:00
nisdas 07955c891b Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into fakeProposerBranch 2023-06-21 06:19:16 +08:00
nisdas cf0505b8db potuz's suggestion 2023-06-19 22:19:56 +08:00
nisdas 3a9764d3af potuz's suggestion 2023-06-19 20:01:13 +08:00
nisdas d1d3edc7fe time locks 2023-06-19 18:30:26 +08:00
nisdas bd0d7478b3 fix panic 2023-05-26 13:23:52 +08:00
nisdas b6a1da21f4 add logs 2023-05-26 13:15:57 +08:00
nisdas 180058ed48 fix corruption 2023-05-25 16:27:14 +08:00
nisdas f7a567d1d3 only have it for late blocks 2023-05-25 08:21:34 +08:00
nisdas 6d02c9ae12 add new thing 2023-05-25 08:15:56 +08:00
nisdas 6c2e6ca855 add error 2023-05-24 22:39:08 +08:00
nisdas fbdccf8055 handle zero 2023-05-24 22:38:27 +08:00
nisdas 83cfe11ca0 error 2023-05-24 22:29:27 +08:00
nisdas 135e9f51ec force proposer payloads to be included 2023-05-24 22:26:27 +08:00
nisdas d33c1974da add logs 2023-05-23 19:06:59 +08:00
nisdas 88a2e3d953 fix panic 2023-05-23 18:07:05 +08:00
nisdas cea42a4b7d prepare all payloads 2023-05-23 17:53:54 +08:00
nisdas 9971d71bc5 add changes 2023-05-23 17:36:54 +08:00
180 changed files with 1791 additions and 8153 deletions

View File

@@ -3,7 +3,6 @@ import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# E2E run with debug gotag
@@ -15,7 +14,7 @@ coverage --define=coverage_enabled=1
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false --define blst_modern=true
build --define blst_disabled=false
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true
@@ -28,7 +27,30 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
build:release --compilation_mode=opt
build:release --stamp
# LLVM compiler for building C/C++ dependencies.
build:llvm --define compiler=llvm
build:llvm --copt -fno-sanitize=vptr,function
build:llvm --linkopt -fno-sanitize=vptr,function
# --incompatible_enable_cc_toolchain_resolution not needed after this issue is closed https://github.com/bazelbuild/bazel/issues/7260
build:llvm --incompatible_enable_cc_toolchain_resolution
build:asan --copt -fsanitize=address,undefined
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address,undefined
build:asan --copt -fno-sanitize=vptr,function
build:asan --linkopt -fno-sanitize=vptr,function
build:asan --copt -DADDRESS_SANITIZER=1
build:asan --copt -D__SANITIZE_ADDRESS__
build:asan --linkopt -ldl
build:llvm-asan --config=llvm
build:llvm-asan --config=asan
build:llvm-asan --linkopt -fuse-ld=ld.lld
build:fuzz --@io_bazel_rules_go//go/config:tags=fuzz
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --config=llvm
build:cgo_symbolizer --copt=-g
build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
@@ -37,13 +59,9 @@ build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
build:debug --toolchain_resolution_debug=".*"
build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s
# Set bazel gotag
build --define gotags=bazel
# Abseil requires c++14 or greater.
build --cxxopt=-std=c++20
build --host_cxxopt=-std=c++20

View File

@@ -1 +1 @@
6.2.1
6.1.0

View File

@@ -3,6 +3,7 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
prefix = "github.com/prysmaticlabs/prysm"

View File

@@ -16,37 +16,27 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
HERMETIC_CC_TOOLCHAIN_VERSION = "v2.0.0"
http_archive(
name = "hermetic_cc_toolchain",
sha256 = "57f03a6c29793e8add7bd64186fc8066d23b5ffd06fe9cc6b0b8c499914d3a65",
urls = [
"https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
],
name = "com_grail_bazel_toolchain",
sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
strip_prefix = "bazel-toolchain-83e69ba9e4b4fdad0d1d057fcb87addf77c281c9",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/83e69ba9e4b4fdad0d1d057fcb87addf77c281c9.tar.gz"],
)
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
zig_toolchains()
bazel_toolchain_dependencies()
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa), which has an EOL date of April 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
register_toolchains(
"@zig_sdk//toolchain:linux_amd64_gnu.2.31",
"@zig_sdk//toolchain:linux_arm64_gnu.2.31",
# Hermetic cc toolchain is not yet supported on darwin. Sysroot needs to be provided.
# See https://github.com/uber/hermetic_cc_toolchain#osx-sysroot
# "@zig_sdk//toolchain:darwin_amd64",
# "@zig_sdk//toolchain:darwin_arm64",
# Windows builds are not supported yet.
# "@zig_sdk//toolchain:windows_amd64",
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "13.0.1",
)
load("@prysm//tools/cross-toolchain:darwin_cc_hack.bzl", "configure_nonhermetic_darwin")
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
configure_nonhermetic_darwin()
llvm_register_toolchains()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
@@ -321,13 +311,11 @@ http_archive(
url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
)
http_archive(
git_repository(
name = "com_google_protobuf",
sha256 = "4e176116949be52b0408dfd24f8925d1eb674a781ae242a75296b17a1c721395",
strip_prefix = "protobuf-23.3",
urls = [
"https://github.com/protocolbuffers/protobuf/archive/v23.3.tar.gz",
],
commit = "436bd7880e458532901c58f4d9d1ea23fa7edd52",
remote = "https://github.com/protocolbuffers/protobuf",
shallow_since = "1617835118 -0700",
)
# Group the sources of the library so that CMake rules have access to it

View File

@@ -128,7 +128,6 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
@@ -227,7 +226,6 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
@@ -401,7 +399,6 @@ func TestDownloadFinalizedData(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))

View File

@@ -499,13 +499,6 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
return ar[:], nil
}
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
}
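The wrapper above is a lock-then-delegate shim: it exists so callers outside the package can invoke the store method while the store's own lock is held. A minimal self-contained sketch of the same pattern (the store type here is a hypothetical stand-in, not Prysm's forkchoice interface):

package main

import (
	"fmt"
	"sync"
)

// store is a hypothetical stand-in for the forkchoice store: a mutex
// guarding internal state, with an unexported operation that assumes
// the lock is already held.
type store struct {
	sync.Mutex
	invalid map[[32]byte]bool
}

func (st *store) setInvalid(root [32]byte) { st.invalid[root] = true }

// SetInvalid mirrors the wrapper above: lock, defer unlock, delegate.
func (st *store) SetInvalid(root [32]byte) {
	st.Lock()
	defer st.Unlock()
	st.setInvalid(root)
}

func main() {
	st := &store{invalid: make(map[[32]byte]bool)}
	st.SetInvalid([32]byte{'a'})
	fmt.Println(st.invalid[[32]byte{'a'}]) // true
}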
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t

View File

@@ -41,15 +41,13 @@ var (
type invalidBlock struct {
invalidAncestorRoots [][32]byte
error
root [32]byte
lastValidHash [32]byte
root [32]byte
}
type invalidBlockError interface {
Error() string
InvalidAncestorRoots() [][32]byte
BlockRoot() [32]byte
LastValidHash() [32]byte
}
// BlockRoot returns the invalid block root.
@@ -57,11 +55,6 @@ func (e invalidBlock) BlockRoot() [32]byte {
return e.root
}
// LastValidHash returns the last valid hash root.
func (e invalidBlock) LastValidHash() [32]byte {
return e.lastValidHash
}
// InvalidAncestorRoots returns an optional list of invalid ancestor roots of the invalid block, leading up to the last valid root.
func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
return e.invalidAncestorRoots
@@ -79,19 +72,6 @@ func IsInvalidBlock(e error) bool {
return true
}
// InvalidBlockLVH returns the invalid block last valid hash root. If the error
// doesn't have a last valid hash, [32]byte{} is returned.
func InvalidBlockLVH(e error) [32]byte {
if e == nil {
return [32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [32]byte{}
}
return d.LastValidHash()
}
// InvalidBlockRoot returns the invalid block root. If the error
// doesn't have an invalid block root, [32]byte{} is returned.
func InvalidBlockRoot(e error) [32]byte {
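For orientation, the error plumbing in this file follows a common Go pattern: a concrete error struct carries extra data, a private interface exposes it, and package-level accessors degrade to zero values when the data is absent. A runnable sketch under that reading (types abbreviated from the diff; the bodies are illustrative, not the exact repository code):

package main

import (
	"errors"
	"fmt"
)

// invalidBlock mirrors the struct above: an embedded error plus the
// offending block root.
type invalidBlock struct {
	error
	root [32]byte
}

func (e invalidBlock) BlockRoot() [32]byte { return e.root }

// invalidBlockError is the private accessor interface from the diff.
type invalidBlockError interface {
	Error() string
	BlockRoot() [32]byte
}

// InvalidBlockRoot follows the accessor style above: a type assertion
// that falls back to the zero value when the error carries no root.
func InvalidBlockRoot(e error) [32]byte {
	d, ok := e.(invalidBlockError)
	if !ok {
		return [32]byte{}
	}
	return d.BlockRoot()
}

func main() {
	err := invalidBlock{error: errors.New("INVALID payload"), root: [32]byte{'D'}}
	fmt.Printf("%#x\n", InvalidBlockRoot(err))                 // carries a root
	fmt.Printf("%#x\n", InvalidBlockRoot(errors.New("other"))) // zero root
}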

View File

@@ -71,7 +71,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err != nil {
switch err {
@@ -182,24 +181,21 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// notifyNewPayload signals the execution engine on a new payload.
// It returns true if the EL has returned VALID for the block.
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
// Execution payload is only supported in Bellatrix and beyond.
// Pre-merge blocks are never optimistic.
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
if blocks.IsPreBellatrixVersion(postStateVersion) {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
@@ -223,37 +219,35 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
}).Info("Called new payload with optimistic block")
return false, nil
case execution.ErrInvalidPayloadStatus:
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return false, invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
}
case execution.ErrInvalidBlockHashPayloadStatus:
newPayloadInvalidNodeCount.Inc()
return false, ErrInvalidBlockHashPayloadStatus
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
}
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer.
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return err
}
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
lastValidHash: lvh,
}
}
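The control flow above hinges on classifying the engine API error: nil means VALID, ACCEPTED/SYNCING yields an optimistic import, INVALID triggers pruning of the bad subtree, and anything else is wrapped as an undefined engine error. A condensed, self-contained sketch of that dispatch (the sentinel errors below are placeholders, not the real execution package's values):

package main

import (
	"errors"
	"fmt"
)

// Placeholder sentinels standing in for the execution package's status
// errors; the real names and values live in Prysm's execution package.
var (
	errAcceptedSyncing      = errors.New("payload ACCEPTED or SYNCING")
	errInvalidPayload       = errors.New("payload INVALID")
	errInvalidBlockHash     = errors.New("payload INVALID_BLOCK_HASH")
	errUndefinedEngineError = errors.New("undefined execution engine error")
)

// handleStatus mirrors the dispatch shape above: a nil error means VALID,
// ACCEPTED/SYNCING imports the block optimistically, INVALID is where the
// real code prunes the bad subtree via SetOptimisticToInvalid, and anything
// else is wrapped as an undefined engine error.
func handleStatus(err error) (bool, error) {
	switch {
	case err == nil:
		return true, nil
	case errors.Is(err, errAcceptedSyncing):
		return false, nil // optimistic import, payload not yet validated
	case errors.Is(err, errInvalidPayload):
		return false, errInvalidPayload
	case errors.Is(err, errInvalidBlockHash):
		return false, errInvalidBlockHash
	default:
		return false, fmt.Errorf("%w: %v", errUndefinedEngineError, err)
	}
}

func main() {
	valid, err := handleStatus(errInvalidPayload)
	fmt.Println(valid, err) // false payload INVALID
}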
// getPayloadAttribute returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {

View File

@@ -525,13 +525,11 @@ func Test_NotifyNewPayload(t *testing.T) {
{
name: "phase 0 post state",
postState: phase0State,
blk: altairBlk, // same as phase 0 for this test
isValidPayload: true,
},
{
name: "altair post state",
postState: altairState,
blk: altairBlk,
isValidPayload: true,
},
{
@@ -745,37 +743,6 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
require.Equal(t, true, validated)
}
func Test_reportInvalidBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
service, tr := minimalTestService(t)
ctx, _, fcs := tr.ctx, tr.db, tr.fcs
jcp := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'A'}, [32]byte{}, [32]byte{'a'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 1, [32]byte{'B'}, [32]byte{'A'}, [32]byte{'b'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'C'}, [32]byte{'B'}, [32]byte{'c'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'d'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
require.Equal(t, IsInvalidBlock(err), true)
require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
invalidRoots := InvalidAncestorRoots(err)
require.Equal(t, 3, len(invalidRoots))
require.Equal(t, [32]byte{'D'}, invalidRoots[0])
require.Equal(t, [32]byte{'C'}, invalidRoots[1])
require.Equal(t, [32]byte{'B'}, invalidRoots[2])
}
func Test_GetPayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

View File

@@ -172,15 +172,11 @@ var (
})
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "on_block_processing_milliseconds",
Help: "Total time in milliseconds to complete a call to postBlockProcess()",
Help: "Total time in milliseconds to complete a call to onBlock()",
})
stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "state_transition_processing_milliseconds",
Help: "Total time to call a state transition in validateStateTransition()",
})
chainServiceProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "chain_service_processing_milliseconds",
Help: "Total time to call a chain service in ReceiveBlock()",
Help: "Total time to call a state transition in onBlock()",
})
processAttsElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
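These summaries are fed by timing a code path and recording the elapsed milliseconds, as the hunks in the block-processing file do around the state transition. A minimal sketch of that observation pattern (the metric name is reused from above for illustration; the timed function is a placeholder):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Same declaration style as the metrics above.
var onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "on_block_processing_milliseconds",
	Help: "Total time in milliseconds to complete a call to onBlock()",
})

// doWork is a placeholder for the timed code path.
func doWork() { time.Sleep(10 * time.Millisecond) }

func main() {
	start := time.Now()
	doWork()
	// Record elapsed wall time in milliseconds, matching the pattern used
	// around the state transition in the diff above.
	onBlockProcessingTime.Observe(float64(time.Since(start).Milliseconds()))
	fmt.Println("observed one sample")
}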

View File

@@ -22,6 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -39,11 +40,59 @@ const depositDeadline = 20 * time.Second
// This defines the size of the upper bound for the initial sync block cache.
var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// postBlockProcess is called when a gossip block is received. This function performs
// several duties, most importantly informing the engine if the head was updated,
// saving the new head information to the blockchain package and database, and
// handling attestations, slashings and similar data included in the block.
func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, postState state.BeaconState, isValidPayload bool) error {
// onBlock is called when a gossip block is received. It runs the regular state transition on the block.
// The block's signing root should be computed before calling this method to avoid redundant
// computation in this method and the methods it calls into.
//
// Spec pseudocode definition:
//
// def on_block(store: Store, signed_block: ReadOnlySignedBeaconBlock) -> None:
// block = signed_block.message
// # Parent block must be known
// assert block.parent_root in store.block_states
// # Make a copy of the state to avoid mutability issues
// pre_state = copy(store.block_states[block.parent_root])
// # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
// assert get_current_slot(store) >= block.slot
//
// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// assert block.slot > finalized_slot
// # Check block is a descendant of the finalized block at the checkpoint finalized slot
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
//
// # Check the block is valid and compute the post-state
// state = pre_state.copy()
// state_transition(state, signed_block, True)
// # Add new block to the store
// store.blocks[hash_tree_root(block)] = block
// # Add new state for this block to the store
// store.block_states[hash_tree_root(block)] = state
//
// # Update justified checkpoint
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
// store.best_justified_checkpoint = state.current_justified_checkpoint
// if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
// store.justified_checkpoint = state.current_justified_checkpoint
//
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
//
// # Potentially update justified if different from store
// if store.justified_checkpoint != state.current_justified_checkpoint:
// # Update justified if new justified is later than store justified
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
// return
//
// # Update justified if store justified is not in chain with finalized checkpoint
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
@@ -52,7 +101,52 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
startTime := time.Now()
b := signed.Block()
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, postState, blockRoot); err != nil {
preState, err := s.getBlockPreState(ctx, b)
if err != nil {
return err
}
// Verify that the parent block is in forkchoice
if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
return ErrNotDescendantOfFinalized
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
return err
}
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
if err != nil {
return errors.Wrap(err, "could not validate new payload")
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
@@ -66,6 +160,34 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
}
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
go func() {
// Using a different context to prevent timeouts as this operation can be expensive
// and we want to avoid affecting the critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
tracing.AnnotateError(span, err)
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
tracing.AnnotateError(span, err)
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
}
}()
}
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
@@ -118,6 +240,50 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
},
})
// Save justified checkpoint to db.
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
}
}
// Save finalized checkpoint to db and more.
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return err
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(finalized.Root)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
go func() {
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
ExecutionOptimistic: isOptimistic,
},
})
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
defer cancel()
if err := s.insertFinalizedDeposits(depCtx, finalized.Root); err != nil {
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()
}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
return err
@@ -243,7 +409,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return s.handleInvalidExecutionError(ctx, err, blockRoots[i], b.Block().ParentRoot())
return err
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -332,21 +498,31 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
return err
}
e := coreTime.CurrentEpoch(copied)
if err := helpers.UpdateProposerIndicesInCache(ctx, copied, e); err != nil {
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
return err
}
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := helpers.UpdateProposerIndicesInCache(slotCtx, copied, e+1); err != nil {
log.WithError(err).Warn("Failed to cache next epoch proposers")
if s.nextEpochBoundarySlot != 0 {
ep := slots.ToEpoch(s.nextEpochBoundarySlot)
_, nextProposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, copied, ep)
if err != nil {
return err
}
}()
for k, v := range nextProposerIndexToSlots {
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(v[0], k, [8]byte{}, [32]byte{})
}
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
postState = postState.Copy()
if s.nextEpochBoundarySlot != 0 {
ep := slots.ToEpoch(s.nextEpochBoundarySlot)
_, nextProposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, postState, ep)
if err != nil {
return err
}
for k, v := range nextProposerIndexToSlots {
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(v[0], k, [8]byte{}, [32]byte{})
}
}
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
return err
@@ -357,7 +533,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
return err
}
@@ -369,9 +545,27 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
return err
}
}
return nil
}
// This feeds the block into the fork choice store, allowing the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, root [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockToForkchoiceStore")
defer span.End()
if !s.cfg.ForkChoiceStore.HasNode(blk.ParentRoot()) {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
}
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// This feeds the attestations included in the block into the fork choice store, allowing it
// to gain information on the most current chain.
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
@@ -556,10 +750,3 @@ func (s *Service) waitForSync() error {
return errors.New("context closed, exiting goroutine")
}
}
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
}
return err
}
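Taken together, the restored onBlock collapses to a fixed order of steps: load the parent (pre) state, run the state transition, notify the execution layer, persist the results, and insert the block into forkchoice. A skeletal sketch of that ordering (every type and helper below is a hypothetical stand-in, for orientation only):

package main

import (
	"context"
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the real block and state types.
type block struct{ slot uint64 }
type state struct{ slot uint64 }

// onBlock sketches the restored control flow: pre-state lookup, state
// transition, execution-layer notification, persistence, forkchoice insert.
func onBlock(ctx context.Context, b *block) error {
	pre, err := getBlockPreState(ctx, b) // 1. load the parent (pre) state
	if err != nil {
		return err
	}
	post, err := executeStateTransition(ctx, pre, b) // 2. run the transition
	if err != nil {
		return fmt.Errorf("invalid block: %w", err) // surfaces as an invalid block
	}
	if err := notifyNewPayload(ctx, post, b); err != nil { // 3. ask the EL
		return err
	}
	return saveAndInsert(ctx, b, post) // 4. persist state, insert to forkchoice
}

func getBlockPreState(_ context.Context, b *block) (*state, error) {
	if b == nil || b.slot == 0 {
		return nil, errors.New("nil or genesis block")
	}
	return &state{slot: b.slot - 1}, nil
}

func executeStateTransition(_ context.Context, _ *state, b *block) (*state, error) {
	return &state{slot: b.slot}, nil
}

func notifyNewPayload(_ context.Context, _ *state, _ *block) error { return nil }
func saveAndInsert(_ context.Context, _ *block, _ *state) error    { return nil }

func main() {
	fmt.Println(onBlock(context.Background(), &block{slot: 1})) // <nil>
}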

View File

@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
)
@@ -210,44 +209,35 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
}
// inserts finalized deposits into our finalized deposit trie; needs to be
// called in the background
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
// inserts finalized deposits into our finalized deposit trie.
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
defer span.End()
startTime := time.Now()
// Update deposit cache.
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
if err != nil {
log.WithError(err).Error("could not fetch finalized state")
return
return errors.Wrap(err, "could not fetch finalized state")
}
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
log.WithError(err).Error("could not cast eth1 deposit index")
return
return errors.Wrap(err, "could not cast eth1 deposit index")
}
// The deposit index in the state is always the index of the next deposit
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
finalizedEth1DepIdx := eth1DepositIndex - 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx)); err != nil {
log.WithError(err).Error("could not insert finalized deposits")
return
eth1DepositIndex -= 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex)); err != nil {
return err
}
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
log.WithError(err).Error("could not prune deposit proofs")
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
// Prune deposits which have already been finalized; the method below prunes all pending deposits (non-inclusive) up
// to the provided eth1 deposit index.
s.cfg.DepositCache.PrunePendingDeposits(ctx, int64(eth1DepositIndex)) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
log.WithField("duration", time.Since(startTime).String()).Debug("Finalized deposit insertion completed")
return nil
}
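The index arithmetic in this helper is easy to misread: the state's eth1 deposit index names the next deposit to be included, so the last finalized deposit sits at that value minus one. A tiny sketch of the conversion, with the zero-index guard that presumably motivated the "handle zero" commit in the list above (the guard's placement here is an assumption):

package main

import "fmt"

// lastFinalizedDepositIndex converts the state's eth1 deposit index (the
// next deposit to be included) into the index of the last included deposit.
// The zero case must be handled before the subtraction and the int64 cast;
// guarding it inside this function is an assumption, not the repo's layout.
func lastFinalizedDepositIndex(eth1DepositIndex uint64) (int64, bool) {
	if eth1DepositIndex == 0 {
		return 0, false // no deposit included yet, nothing to finalize
	}
	return int64(eth1DepositIndex - 1), true
}

func main() {
	fmt.Println(lastFinalizedDepositIndex(0)) // 0 false
	fmt.Println(lastFinalizedDepositIndex(9)) // 8 true
}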
// This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling

View File

@@ -41,6 +41,103 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestStore_OnBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
ojc := &ethpb.Checkpoint{}
stfcs, root, err := prepareForkchoiceState(ctx, 0, validGenesisRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := util.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
util.SaveBlock(t, ctx, beaconDB, random)
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
randomParentRoot2 := roots[1]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
stfcs, root, err = prepareForkchoiceState(ctx, 2, bytesutil.ToBytes32(randomParentRoot2),
validGenesisRoot, [32]byte{'r'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
tests := []struct {
name string
blk *ethpb.SignedBeaconBlock
s state.BeaconState
time uint64
wantErrString string
}{
{
name: "parent block root does not have a state",
blk: util.NewBeaconBlock(),
s: st.Copy(),
wantErrString: "could not reconstruct parent state",
},
{
name: "block is from the future",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot2
b.Block.Slot = params.BeaconConfig().FarFutureSlot
return b
}(),
s: st.Copy(),
wantErrString: "is in the far distant future",
},
{
name: "could not get finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot[:]
b.Block.Slot = 2
return b
}(),
s: st.Copy(),
wantErrString: "not descendant of finalized checkpoint",
},
{
name: "same slot as finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.Slot = 0
b.Block.ParentRoot = randomParentRoot2
return b
}(),
s: st.Copy(),
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fRoot := bytesutil.ToBytes32(roots[0])
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(tt.blk)
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
}
func TestStore_OnBlockBatch(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
@@ -560,20 +657,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, fcs.NewSlot(ctx, i))
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -608,20 +692,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -643,7 +714,8 @@ func TestOnBlock_CanFinalize(t *testing.T) {
func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
err := service.postBlockProcess(tr.ctx, nil, [32]byte{}, nil, true)
err := service.onBlock(tr.ctx, nil, [32]byte{})
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -657,11 +729,11 @@ func TestOnBlock_InvalidSignature(t *testing.T) {
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
err = service.onBlock(ctx, wsb, r)
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -685,13 +757,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -717,7 +783,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
@@ -726,45 +792,6 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}
}
func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
var zeroSig [96]byte
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))
assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
depositCache.InsertPendingDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
}
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
pendingDeps := depositCache.PendingContainers(ctx, nil)
for _, d := range pendingDeps {
assert.DeepEqual(t, true, d.Index >= 8, "Pending deposits were not pruned")
}
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
@@ -792,7 +819,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
// Insert 3 deposits before hand.
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2))
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
@@ -802,7 +829,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}
// Insert New Finalized State with higher deposit count.
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
fDeposits = depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
@@ -1104,35 +1131,19 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
var wg sync.WaitGroup
wg.Add(4)
go func() {
preState, err := service.getBlockPreState(ctx, wsb1.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb1)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb2.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb2)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb3.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb3)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb4.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb4)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
@@ -1200,13 +1211,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1219,12 +1224,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1238,12 +1238,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1260,12 +1255,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1288,12 +1278,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
@@ -1316,13 +1301,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1379,12 +1358,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1397,12 +1371,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1416,13 +1385,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1439,12 +1402,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1467,12 +1425,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
// not finish importing and it was never imported to forkchoice). Check
@@ -1495,12 +1448,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1558,13 +1506,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1577,13 +1519,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1597,12 +1533,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
err = service.onBlock(ctx, wsb, lastValidRoot)
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1624,12 +1555,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
invalidRoots[i-13], err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
err = service.postBlockProcess(ctx, wsb, invalidRoots[i-13], postState, false)
err = service.onBlock(ctx, wsb, invalidRoots[i-13])
require.NoError(t, err)
}
// Check that we have justified the second epoch
@@ -1650,12 +1576,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
@@ -1689,12 +1610,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, true))
require.NoError(t, service.onBlock(ctx, wsb, root))
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1712,12 +1628,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1737,13 +1648,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1794,12 +1699,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1812,12 +1712,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1831,12 +1726,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
err = service.onBlock(ctx, wsb, lastValidRoot)
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1857,18 +1747,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1887,11 +1766,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that the headroot/state are not in DB and restart the node
@@ -1973,12 +1848,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
st, err = service.HeadState(ctx)
require.NoError(t, err)


@@ -94,8 +94,10 @@ func (s *Service) spawnProcessAttestationsRoutine() {
case <-s.ctx.Done():
return
case <-pat.C():
log.Infof("proposer_mocker: calling updated head via offset ticker")
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
case <-st.C():
log.Infof("proposer_mocker: calling updated head via normal slot ticker in spawn atts")
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("could not process new slot")
@@ -124,6 +126,8 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
s.processAttestations(ctx, disparity)
log.Infof("proposer_mocker: process attestations in fc took %s", time.Since(start).String())
processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
start = time.Now()
@@ -136,11 +140,14 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
s.headLock.RUnlock()
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
log.Infof("proposer_mocker: head root in fc took %s", time.Since(start).String())
changed, err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot)
if err != nil {
log.WithError(err).Error("could not update forkchoice")
}
log.Infof("proposer_mocker: fcu call in fc took %s", time.Since(start).String())
if changed {
s.headLock.RLock()
log.WithFields(logrus.Fields{
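The temporary logs above bracket each stage of UpdateHead with wall-clock timings next to the existing histogram observations. A minimal sketch of that instrumentation pattern, assuming the standard Prometheus Go client (the metric and function names are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// stageElapsed mirrors histograms like processAttsElapsedTime above.
var stageElapsed = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "stage_elapsed_milliseconds", // hypothetical metric name
})

// timedStage runs one stage and records its duration, both as a log line
// and as a histogram observation in milliseconds.
func timedStage(name string, stage func()) {
	start := time.Now()
	stage()
	elapsed := time.Since(start)
	stageElapsed.Observe(float64(elapsed.Milliseconds()))
	fmt.Printf("%s took %s\n", name, elapsed)
}

func main() {
	timedStage("update head", func() { time.Sleep(10 * time.Millisecond) })
}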


@@ -128,13 +128,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, tRoot))
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -184,13 +178,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, tRoot))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)


@@ -7,18 +7,11 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -54,65 +47,15 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
postState, err := s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
isValidPayload, err := s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
// The rest of block processing takes a lock on forkchoice.
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
return errors.Wrap(err, "could not save post state info")
}
// Apply state transition on the new block.
if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil {
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
return err
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}
newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
if err != nil {
return errors.Wrap(err, "could not update finalized checkpoint")
}
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(ctx, blockCopy, postState, finalized)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
cancel()
}()
}
// If slasher is configured, forward the attestations in the block via an event feed for processing.
if features.Get().EnableSlasher {
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
}
// Handle post block operations such as pruning exits and bls messages if incoming block is the head
if err := s.prunePostBlockOperationPools(ctx, blockCopy, blockRoot); err != nil {
log.WithError(err).Error("Could not prune canonical objects from pool ")
@@ -143,8 +86,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
log.WithError(err).Error("Unable to log state transition data")
}
chainServiceProcessingTime.Observe(float64(time.Since(receivedTime).Milliseconds()))
return nil
}
@@ -285,109 +226,3 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}
// This performs the state transition function and returns the post-state, or an
// error if the block fails to verify the consensus rules.
func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
b := signed.Block()
// Verify that the parent block is in forkchoice
parentRoot := b.ParentRoot()
if !s.InForkchoice(parentRoot) {
return nil, ErrNotDescendantOfFinalized
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return nil, invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
return postState, nil
}
// updateJustificationOnBlock updates the justified checkpoint on DB if the
// incoming block has updated it on forkchoice.
func (s *Service) updateJustificationOnBlock(ctx context.Context, preState, postState state.BeaconState, preJustifiedEpoch primitives.Epoch) error {
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > preJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
}
}
return nil
}
// updateFinalizationOnBlock performs housekeeping duties when the incoming block
// changes the finalized checkpoint. It returns true when this has happened.
func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postState state.BeaconState, preFinalizedEpoch primitives.Epoch) (bool, error) {
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized.Epoch > preFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return true, err
}
return true, nil
}
return false, nil
}
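The two update predicates above share one shape: persist only when forkchoice has advanced past what the store knew before this block, or when forkchoice agrees with the post-state and has advanced past the pre-state. A self-contained sketch of that predicate with toy epochs (all names illustrative):

package main

import "fmt"

// shouldPersist mirrors the condition used by updateJustificationOnBlock
// and updateFinalizationOnBlock above.
func shouldPersist(fcEpoch, preStoreEpoch, preStateEpoch, postStateEpoch uint64) bool {
	return fcEpoch > preStoreEpoch || (fcEpoch == postStateEpoch && fcEpoch > preStateEpoch)
}

func main() {
	fmt.Println(shouldPersist(3, 2, 2, 3)) // true: forkchoice moved from epoch 2 to 3
	fmt.Println(shouldPersist(2, 2, 2, 2)) // false: nothing changed
}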
// sendNewFinalizedEvent sends a new finalized checkpoint event over the
// event feed. It needs to be called in the background.
func (s *Service) sendNewFinalizedEvent(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState, finalized *forkchoicetypes.Checkpoint) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
isValidPayload = s.head.optimistic
}
s.headLock.RUnlock()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
ExecutionOptimistic: isValidPayload,
},
})
}
// sendBlockAttestationsToSlasher sends the incoming block's attestation to the slasher
func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySignedBeaconBlock, preState state.BeaconState) {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
}
}
// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
return isValidPayload, err
}
}
return isValidPayload, nil
}


@@ -17,7 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
@@ -409,7 +408,7 @@ func (s *Service) initializeBeaconChain(
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState, coreTime.CurrentEpoch(genesisState)); err != nil {
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
return nil, err
}


@@ -377,7 +377,9 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -451,7 +453,9 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := state_native.InitializeFromProtoPhase0(bs)
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {


@@ -13,15 +13,6 @@ import (
"go.opencensus.io/trace"
)
// AttDelta contains rewards and penalties for a single attestation.
type AttDelta struct {
HeadReward uint64
SourceReward uint64
SourcePenalty uint64
TargetReward uint64
TargetPenalty uint64
}
// InitializePrecomputeValidators precomputes each validator's attested balances and the total sum of the validators' attested balances for the epoch.
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
@@ -235,7 +226,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
return beaconState, errors.New("validator registries not the same length as state's validator registries")
}
attDeltas, err := AttestationsDelta(beaconState, bal, vals)
attsRewards, attsPenalties, err := AttestationsDelta(beaconState, bal, vals)
if err != nil {
return nil, errors.Wrap(err, "could not get attestation delta")
}
@@ -246,12 +237,11 @@ func ProcessRewardsAndPenaltiesPrecompute(
// Compute the post balance of the validator after accounting for the
// attester and proposer rewards and penalties.
delta := attDeltas[i]
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], delta.HeadReward+delta.SourceReward+delta.TargetReward)
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], attsRewards[i])
if err != nil {
return nil, err
}
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], delta.SourcePenalty+delta.TargetPenalty)
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], attsPenalties[i])
vals[i].AfterEpochTransitionBalance = balances[i]
}
@@ -265,8 +255,10 @@ func ProcessRewardsAndPenaltiesPrecompute(
// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
// voting records.
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) ([]*AttDelta, error) {
attDeltas := make([]*AttDelta, len(vals))
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) (rewards, penalties []uint64, err error) {
numOfVals := beaconState.NumValidators()
rewards = make([]uint64, numOfVals)
penalties = make([]uint64, numOfVals)
cfg := params.BeaconConfig()
prevEpoch := time.PrevEpoch(beaconState)
@@ -280,29 +272,29 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
bias := cfg.InactivityScoreBias
inactivityPenaltyQuotient, err := beaconState.InactivityPenaltyQuotient()
if err != nil {
return nil, err
return nil, nil, err
}
inactivityDenominator := bias * inactivityPenaltyQuotient
for i, v := range vals {
attDeltas[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
rewards[i], penalties[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
if err != nil {
return nil, err
return nil, nil, err
}
}
return attDeltas, nil
return rewards, penalties, nil
}
func attestationDelta(
bal *precompute.Balance,
val *precompute.Validator,
baseRewardMultiplier, inactivityDenominator uint64,
inactivityLeak bool) (*AttDelta, error) {
inactivityLeak bool) (reward, penalty uint64, err error) {
eligible := val.IsActivePrevEpoch || (val.IsSlashed && !val.IsWithdrawableCurrentEpoch)
// Per spec `ActiveCurrentEpoch` can't be 0 to process attestation delta.
if !eligible || bal.ActiveCurrentEpoch == 0 {
return &AttDelta{}, nil
return 0, 0, nil
}
cfg := params.BeaconConfig()
@@ -315,32 +307,32 @@ func attestationDelta(
srcWeight := cfg.TimelySourceWeight
tgtWeight := cfg.TimelyTargetWeight
headWeight := cfg.TimelyHeadWeight
attDelta := &AttDelta{}
reward, penalty = uint64(0), uint64(0)
// Process source reward / penalty
if val.IsPrevEpochSourceAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * srcWeight * (bal.PrevEpochAttested / increment)
attDelta.SourceReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
} else {
attDelta.SourcePenalty += baseReward * srcWeight / weightDenominator
penalty += baseReward * srcWeight / weightDenominator
}
// Process target reward / penalty
if val.IsPrevEpochTargetAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * tgtWeight * (bal.PrevEpochTargetAttested / increment)
attDelta.TargetReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
} else {
attDelta.TargetPenalty += baseReward * tgtWeight / weightDenominator
penalty += baseReward * tgtWeight / weightDenominator
}
// Process head reward / penalty
if val.IsPrevEpochHeadAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * headWeight * (bal.PrevEpochHeadAttested / increment)
attDelta.HeadReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
}
@@ -349,10 +341,10 @@ func attestationDelta(
if !val.IsPrevEpochTargetAttester || val.IsSlashed {
n, err := math.Mul64(effectiveBalance, val.InactivityScore)
if err != nil {
return &AttDelta{}, err
return 0, 0, err
}
attDelta.TargetPenalty += n / inactivityDenominator
penalty += n / inactivityDenominator
}
return attDelta, nil
return reward, penalty, nil
}
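Each of the three branches above applies the same formula: the base reward scaled by a participation weight and by the attested fraction of active balance, counted in effective-balance increments. A worked instance with toy balances (the weights are the Altair constants; every other number is illustrative):

package main

import "fmt"

func main() {
	const (
		baseReward        = 28_000        // gwei, illustrative
		srcWeight         = 14            // TimelySourceWeight (Altair)
		weightDenominator = 64            // WeightDenominator (Altair)
		increment         = 1_000_000_000 // effective-balance increment: 1 ETH in gwei
	)
	var (
		prevEpochAttested  uint64 = 32 * increment // 32 ETH attested on time
		activeCurrentEpoch uint64 = 64 * increment // 64 ETH active
	)
	activeIncrement := activeCurrentEpoch / increment
	n := uint64(baseReward) * srcWeight * (prevEpochAttested / increment)
	reward := n / (activeIncrement * weightDenominator)
	fmt.Println(reward) // 28_000*14*32 / (64*64) = 12_544_000 / 4_096 = 3_062 gwei
}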


@@ -213,16 +213,9 @@ func TestAttestationsDelta(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -251,16 +244,9 @@ func TestAttestationsDeltaBellatrix(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -299,15 +285,8 @@ func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
}
wanted := make([]uint64, s.NumValidators())
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
for i := range rewards {
wanted[i] += rewards[i]
}


@@ -195,7 +195,6 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}
// ValidateSyncMessageTime validates sync message to ensure that the provided slot is valid.
// Spec: [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot
func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(slot, uint64(genesisTime.Unix())); err != nil {
return err
@@ -224,12 +223,13 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
// Verify sync message slot is within the time range.
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
syncErr := fmt.Errorf(
"sync message time %v (message slot %d) not within allowable range of %v to %v (current slot %d)",
"sync message time %v (slot %d) not within allowable range of %v (slot %d) to %v (slot %d)",
messageTime,
slot,
lowerBound,
uint64(lowerBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
upperBound,
currentSlot,
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
)
// Wrap error message if sync message is too late.
if messageTime.Before(lowerBound) {
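The reworked message reports both bounds as slots, computed by dividing each bound's offset from genesis by the slot duration. A minimal sketch of that conversion, assuming mainnet's 12-second slots (names illustrative):

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12

// slotAt converts a wall-clock time to a slot number relative to genesis.
func slotAt(t, genesis time.Time) int64 {
	return (t.Unix() - genesis.Unix()) / secondsPerSlot
}

func main() {
	genesis := time.Now().Add(-100 * secondsPerSlot * time.Second)
	fmt.Println(slotAt(time.Now(), genesis)) // 100
}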


@@ -311,7 +311,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 16,
genesisTime: prysmTime.Now().Add(-(15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)),
},
wantedErr: "(message slot 16) not within allowable range of",
wantedErr: "(slot 16) not within allowable range of",
},
{
name: "sync_message.slot == current_slot+CLOCK_DISPARITY",
@@ -327,7 +327,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 100,
genesisTime: prysmTime.Now().Add(-(100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + params.BeaconNetworkConfig().MaximumGossipClockDisparity + 1000*time.Millisecond),
},
wantedErr: "(message slot 100) not within allowable range of",
wantedErr: "(slot 100) not within allowable range of",
},
{
name: "sync_message.slot == current_slot-CLOCK_DISPARITY",
@@ -343,7 +343,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 101,
genesisTime: prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
},
wantedErr: "(message slot 101) not within allowable range of",
wantedErr: "(slot 101) not within allowable range of",
},
{
name: "sync_message.slot is well beyond current slot",


@@ -141,7 +141,7 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # FillFwd sweep by the max length of the sweep if there was not a full set of withdrawals
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index


@@ -336,21 +336,20 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
}
// UpdateProposerIndicesInCache updates proposer indices entry of the committee cache.
// Input state is used to retrieve active validator indices.
// Input epoch is the epoch to retrieve proposer indices for.
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState) error {
// The cache uses the state root at the last slot of (current epoch - 1) as its key
// (e.g. for epoch 2, the key is the root at slot 63), which is why we skip the genesis epoch.
if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
if time.CurrentEpoch(state) <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
return nil
}
// Use the state root from (current_epoch - 1)
s, err := slots.EpochEnd(epoch - 1)
wantedEpoch := time.PrevEpoch(state)
s, err := slots.EpochEnd(wantedEpoch)
if err != nil {
return err
}
r, err := state.StateRootAtIndex(uint64(s % params.BeaconConfig().SlotsPerHistoricalRoot))
r, err := StateRootAtSlot(state, s)
if err != nil {
return err
}
@@ -367,11 +366,11 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
return nil
}
indices, err := ActiveValidatorIndices(ctx, state, epoch)
indices, err := ActiveValidatorIndices(ctx, state, time.CurrentEpoch(state))
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
proposerIndices, err := precomputeProposerIndices(state, indices)
if err != nil {
return err
}
@@ -433,10 +432,11 @@ func computeCommittee(
// This computes the proposer indices of the current epoch and returns a list of
// proposer indices; the index into the list represents the slot number.
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex) ([]primitives.ValidatorIndex, error) {
hashFunc := hash.CustomSHA256Hasher()
proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)
e := time.CurrentEpoch(state)
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return nil, errors.Wrap(err, "could not generate seed")
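The cache key above is simply the state root at the last slot of the previous epoch. A small sketch of that arithmetic, assuming mainnet's 32 slots per epoch (epochEnd is an illustrative stand-in for slots.EpochEnd):

package main

import "fmt"

// epochEnd returns the last slot of the given epoch with 32 slots per epoch.
func epochEnd(epoch uint64) uint64 { return (epoch+1)*32 - 1 }

func main() {
	// For current epoch 2 the key is the state root at the end of epoch 1,
	// matching the slot-63 example in the comment above.
	fmt.Println(epochEnd(1)) // 63
}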


@@ -639,7 +639,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
proposerIndices, err := precomputeProposerIndices(state, indices)
require.NoError(t, err)
var wantedProposerIndices []primitives.ValidatorIndex


@@ -262,7 +262,7 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
if err := UpdateProposerIndicesInCache(ctx, state, time.CurrentEpoch(state)); err != nil {
if err := UpdateProposerIndicesInCache(ctx, state); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
}


@@ -205,17 +205,3 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
Root: bRoot,
}, nil
}
// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
// value defined:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// TODO: ask around to understand why the FAQ section of the p2p spec shows
// multiplying and dividing the churn limit quotient by the max safety decay value of 100,
// but in the definition of the constant, the simpler equation below is used.
func MinEpochsForBlockRequests() primitives.Epoch {
return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
}


@@ -281,19 +281,3 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
return beaconState
}
func TestMinEpochsForBlockRequests(t *testing.T) {
params.SetActiveTestCleanup(t, params.MainnetConfig())
var expected primitives.Epoch = 33024
// expected value of 33024 via spec commentary:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
//
// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
// )
//
// Where MAX_SAFETY_DECAY = 100 and thus MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 (~5 months).
require.Equal(t, expected, helpers.MinEpochsForBlockRequests())
}
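For reference, the expected value asserted above follows from the mainnet constants (stated here as an assumption, per the linked spec): MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 and CHURN_LIMIT_QUOTIENT = 65536, so 256 + 65536/2 = 256 + 32768 = 33024 epochs, roughly five months. The TODO in the removed helper also resolves arithmetically: MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100) equals CHURN_LIMIT_QUOTIENT // 2 exactly when MAX_SAFETY_DECAY = 100, which is why the simpler form yields the same 33024.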


@@ -16,7 +16,6 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/backup:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
],


@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/monitoring/backup"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -57,7 +56,7 @@ type ReadOnlyDatabase interface {
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
}
// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -108,7 +107,7 @@ type HeadAccessDatabase interface {
// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
}
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.


@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"archived_point.go",
"backfill.go",
"backup.go",
"blocks.go",
"checkpoint.go",
@@ -49,7 +48,6 @@ go_library(
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
@@ -75,7 +73,6 @@ go_test(
name = "go_default_test",
srcs = [
"archived_point_test.go",
"backfill_test.go",
"backup_test.go",
"blocks_test.go",
"checkpoint_test.go",
@@ -110,7 +107,6 @@ go_test(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//testing/assert:go_default_library",


@@ -1,39 +0,0 @@
package kv
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
func (s *Store) SaveBackfillStatus(ctx context.Context, bf *dbval.BackfillStatus) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
defer span.End()
bfb, err := proto.Marshal(bf)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillStatusKey, bfb)
})
}
func (s *Store) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
defer span.End()
bf := &dbval.BackfillStatus{}
err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
bs := bucket.Get(backfillStatusKey)
if len(bs) == 0 {
return errors.Wrap(ErrNotFound, "BackfillStatus not found")
}
return proto.Unmarshal(bs, bf)
})
return bf, err
}


@@ -1,35 +0,0 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)
func TestBackfillRoundtrip(t *testing.T) {
db := setupDB(t)
b := &dbval.BackfillStatus{}
b.LowSlot = 23
b.LowRoot = bytesutil.PadTo([]byte("low"), 32)
b.LowParentRoot = bytesutil.PadTo([]byte("parent"), 32)
m, err := proto.Marshal(b)
require.NoError(t, err)
ub := &dbval.BackfillStatus{}
require.NoError(t, proto.Unmarshal(m, ub))
require.Equal(t, b.LowSlot, ub.LowSlot)
require.DeepEqual(t, b.LowRoot, ub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, ub.LowParentRoot)
ctx := context.Background()
require.NoError(t, db.SaveBackfillStatus(ctx, b))
dbub, err := db.BackfillStatus(ctx)
require.NoError(t, err)
require.Equal(t, b.LowSlot, dbub.LowSlot)
require.DeepEqual(t, b.LowRoot, dbub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, dbub.LowParentRoot)
}


@@ -70,6 +70,25 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
return root, err
}
// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(backfillBlockRootKey)
if len(rootSlice) == 0 {
return ErrNotFoundBackfillBlockRoot
}
root = bytesutil.ToBytes32(rootSlice)
return nil
})
return root, err
}
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -398,6 +417,17 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
})
}
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillBlockRootKey, blockRoot[:])
})
}
// HighestRootsBelowSlot returns roots from the database slot index from the highest slot below the input slot.
// The slot value at the beginning of the return list is the slot where the roots were found. This is helpful so that
// calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance


@@ -92,6 +92,23 @@ var blockTests = []struct {
},
}
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
_, err := db.BackfillBlockRoot(ctx)
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
var expected [32]byte
copy(expected[:], []byte{0x23})
err = db.SaveBackfillBlockRoot(ctx, expected)
require.NoError(t, err)
actual, err := db.BackfillBlockRoot(ctx)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
BlockCacheSize = 1
slot := primitives.Slot(20)


@@ -57,8 +57,8 @@ var (
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// tracking data about an ongoing backfill
backfillStatusKey = []byte("backfill-status")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
backfillBlockRootKey = []byte("backfill-block-root")
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived")


@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -18,6 +17,18 @@ import (
// syncing, using the provided values as their point of origin. This is an alternative
// to syncing from genesis, and should only be run on an empty database.
func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error {
genesisRoot, err := s.GenesisBlockRoot(ctx)
if err != nil {
if errors.Is(err, ErrNotFoundGenesisBlockRoot) {
return errors.Wrap(err, "genesis block root not found: genesis must be provided for checkpoint sync")
}
return errors.Wrap(err, "genesis block root query error: checkpoint sync must verify genesis to proceed")
}
err = s.SaveBackfillBlockRoot(ctx, genesisRoot)
if err != nil {
return errors.Wrap(err, "unable to save genesis root as initial backfill starting point for checkpoint sync")
}
cf, err := detect.FromState(serState)
if err != nil {
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
@@ -39,24 +50,11 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}
blk := wblk.Block()
// save block
blockRoot, err := blk.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}
pr := blk.ParentRoot()
bf := &dbval.BackfillStatus{
LowSlot: uint64(wblk.Block().Slot()),
LowRoot: blockRoot[:],
LowParentRoot: pr[:],
OriginRoot: blockRoot[:],
OriginSlot: uint64(wblk.Block().Slot()),
}
if err = s.SaveBackfillStatus(ctx, bf); err != nil {
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync.")
}
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")


@@ -39,7 +39,7 @@ func New() *ForkChoice {
b := make([]uint64, 0)
v := make([]Vote, 0)
return &ForkChoice{store: s, balances: b, votes: v}
return &ForkChoice{store: s, balances: b, votes: v, fcLock: new(fcLock)}
}
// NodeCount returns the current number of nodes in the Store.


@@ -7,7 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
)
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [32]byte) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok {
@@ -16,7 +16,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, la
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
// return early if the parent is LVH
if node.payloadHash == lastValidHash {
if node.payloadHash == payloadHash {
return invalidRoots, nil
}
} else {
@@ -28,7 +28,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, la
}
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
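The loop above walks parent pointers from the invalid node back toward the node whose payload hash matches the engine-reported last valid hash, collecting everything in between for invalidation. A simplified, self-contained sketch of that walk:

package main

import "fmt"

type node struct {
	parent      *node
	payloadHash [32]byte
	root        [32]byte
}

// invalidChain collects roots from start back to, but excluding, the node
// whose payload hash equals payloadHash (the last valid hash).
func invalidChain(start *node, payloadHash [32]byte) [][32]byte {
	roots := make([][32]byte, 0)
	for n := start; n != nil && n.payloadHash != payloadHash; n = n.parent {
		roots = append(roots, n.root)
	}
	return roots
}

func main() {
	a := &node{payloadHash: [32]byte{1}, root: [32]byte{0xa}}
	b := &node{parent: a, payloadHash: [32]byte{2}, root: [32]byte{0xb}}
	c := &node{parent: b, payloadHash: [32]byte{3}, root: [32]byte{0xc}}
	// a holds the last valid payload, so b and c are invalidated.
	fmt.Println(len(invalidChain(c, a.payloadHash))) // 2
}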


@@ -1,7 +1,11 @@
package doublylinkedtree
import (
"bytes"
"runtime/debug"
"runtime/pprof"
"sync"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
@@ -11,7 +15,7 @@ import (
// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
sync.RWMutex
*fcLock
store *Store
votes []Vote // tracks individual validator's last vote.
balances []uint64 // tracks individual validator's balances last accounted in votes.
@@ -68,3 +72,52 @@ type Vote struct {
nextRoot [fieldparams.RootLength]byte // next voting root.
nextEpoch primitives.Epoch // epoch of next voting period.
}
type fcLock struct {
lk sync.RWMutex
t time.Time
currChan chan int
}
func (f *fcLock) Lock() {
f.lk.Lock()
f.t = time.Now()
f.currChan = make(chan int)
go func(t time.Time, c chan int) {
tim := time.NewTimer(3 * time.Second)
select {
case <-c:
tim.Stop()
case <-tim.C:
tim.Stop()
pfile := pprof.Lookup("goroutine")
bf := bytes.NewBuffer([]byte{})
err := pfile.WriteTo(bf, 1)
_ = err
log.Warnf("FC lock is taking longer than 3 seconds with the complete stack of %s", bf.String())
}
}(time.Now(), f.currChan)
}
func (f *fcLock) Unlock() {
t := time.Since(f.t)
f.t = time.Time{}
close(f.currChan)
f.lk.Unlock()
if t > time.Second {
log.Warnf("FC lock is taking longer than 1 second: %s with the complete stack of %s", t.String(), string(debug.Stack()))
}
}
func (f *fcLock) RLock() {
t := time.Now()
f.lk.RLock()
dt := time.Since(t)
if dt > time.Second {
log.Warnf("FC Rlock is taking longer than 1 second: %s with stack %s", dt.String(), string(debug.Stack()))
}
}
func (f *fcLock) RUnlock() {
f.lk.RUnlock()
}
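fcLock above is a watchdog mutex: Lock arms a timer goroutine that dumps goroutine stacks if the lock is held past three seconds, and Unlock disarms it and warns when the hold exceeded one second. A minimal, self-contained sketch of the same pattern using only the standard library (the threshold and message are illustrative):

package main

import (
	"log"
	"sync"
	"time"
)

type watchdogMutex struct {
	mu   sync.Mutex
	done chan struct{}
}

// Lock acquires the mutex and arms a watchdog that fires if the lock is
// still held after the threshold.
func (w *watchdogMutex) Lock() {
	w.mu.Lock()
	w.done = make(chan struct{})
	go func(done chan struct{}) {
		t := time.NewTimer(3 * time.Second)
		defer t.Stop()
		select {
		case <-done: // released in time
		case <-t.C:
			log.Println("lock held longer than 3s")
		}
	}(w.done)
}

// Unlock disarms the watchdog and releases the mutex.
func (w *watchdogMutex) Unlock() {
	close(w.done)
	w.mu.Unlock()
}

func main() {
	var w watchdogMutex
	w.Lock()
	time.Sleep(10 * time.Millisecond)
	w.Unlock()
}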


@@ -205,28 +205,21 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
bfs := backfill.NewStatus(beacon.db)
if err := bfs.Reload(ctx); err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}
bf, err := backfill.NewService(ctx, bfs, beacon.clockWaiter, beacon.fetchP2P())
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
return nil, err
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
@@ -503,8 +496,8 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.StatusUpdater, fc forkchoice.ForkChoicer) error {
opts := []stategen.StateGenOption{stategen.WithAvailableBlocker(bfs)}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
opts := []stategen.StateGenOption{stategen.WithBackfillStatus(bfs)}
sg := stategen.New(b.db, fc, opts...)
cp, err := b.db.FinalizedCheckpoint(ctx)
@@ -994,8 +987,7 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
if cliCtx.Bool(flags.EnableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())
}
svc, err := builder.NewService(b.ctx, opts...)


@@ -47,7 +47,6 @@ go_test(
deps = [
"//async:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",


@@ -14,7 +14,6 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
@@ -40,8 +39,8 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",


@@ -2,12 +2,9 @@ package kv
import (
"context"
"runtime"
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -26,11 +23,21 @@ func (c *AttCaches) AggregateUnaggregatedAttestations(ctx context.Context) error
if err != nil {
return err
}
return c.aggregateUnaggregatedAtts(ctx, unaggregatedAtts)
return c.aggregateUnaggregatedAttestations(ctx, unaggregatedAtts)
}
func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
// AggregateUnaggregatedAttestationsBySlotIndex aggregates the unaggregated attestations and saves
// newly aggregated attestations in the pool. Unaggregated attestations are filtered by slot and
// committee index.
func (c *AttCaches) AggregateUnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) error {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregateUnaggregatedAttestationsBySlotIndex")
defer span.End()
unaggregatedAtts := c.UnaggregatedAttestationsBySlotIndex(ctx, slot, committeeIndex)
return c.aggregateUnaggregatedAttestations(ctx, unaggregatedAtts)
}
func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAttestations")
defer span.End()
attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation, len(unaggregatedAtts))
@@ -45,32 +52,26 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
// Aggregate unaggregated attestations from the pool and save them in the pool.
// Track the unaggregated attestations that could not be aggregated.
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
if features.Get().AggregateParallel {
leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt)
} else {
for _, atts := range attsByDataRoot {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
for _, atts := range attsByDataRoot {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
if err != nil {
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
} else {
h, err := hashFn(aggregated)
if err != nil {
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
} else {
h, err := hashFn(aggregated)
if err != nil {
return err
}
leftOverUnaggregatedAtt[h] = true
return err
}
leftOverUnaggregatedAtt[h] = true
}
}
// Remove the unaggregated attestations from the pool that were successfully aggregated.
for _, att := range unaggregatedAtts {
h, err := hashFn(att)
@@ -87,58 +88,6 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
return nil
}
// aggregateParallel aggregates attestations in parallel for `atts`, saves them in the pool,
// and returns the unaggregated attestations that could not be aggregated.
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations.
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]*ethpb.Attestation, leftOver map[[32]byte]bool) map[[32]byte]bool {
var leftoverLock sync.Mutex
wg := sync.WaitGroup{}
n := runtime.GOMAXPROCS(0) // defaults to the value of runtime.NumCPU
ch := make(chan []*ethpb.Attestation, n)
wg.Add(n)
for i := 0; i < n; i++ {
go func() {
defer wg.Done()
for as := range ch {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(as)
if err != nil {
log.WithError(err).Error("could not aggregate unaggregated attestations")
continue
}
if aggregated == nil {
log.Error("nil aggregated attestation")
continue
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
continue
}
} else {
h, err := hashFn(aggregated)
if err != nil {
log.WithError(err).Error("could not hash attestation")
continue
}
leftoverLock.Lock()
leftOver[h] = true
leftoverLock.Unlock()
}
}
}()
}
for _, as := range atts {
ch <- as
}
close(ch)
wg.Wait()
return leftOver
}
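The removed aggregateParallel is a standard fan-out: GOMAXPROCS workers drain a buffered channel of attestation groups, and the sender closes the channel once every group has been queued. The generic shape of that pattern, as a sketch (work must be safe for concurrent use):

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// fanOut distributes jobs across GOMAXPROCS workers and blocks until all
// of them have been processed.
func fanOut[T any](jobs []T, work func(T)) {
	n := runtime.GOMAXPROCS(0) // defaults to runtime.NumCPU
	ch := make(chan T, n)
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for j := range ch {
				work(j)
			}
		}()
	}
	for _, j := range jobs {
		ch <- j
	}
	close(ch)
	wg.Wait()
}

func main() {
	fanOut([]int{1, 2, 3}, func(i int) { fmt.Println(i * i) })
}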
// SaveAggregatedAttestation saves an aggregated attestation in cache.
func (c *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
@@ -216,7 +165,7 @@ func (c *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
defer span.End()
atts := make([]*ethpb.Attestation, 0)


@@ -9,7 +9,7 @@ import (
"github.com/pkg/errors"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -18,11 +18,6 @@ import (
)
func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
cache := NewAttCaches()
priv, err := bls.RandKey()
require.NoError(t, err)
@@ -44,6 +39,61 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(context.Background(), 2, 0)), "Did not aggregate correctly")
}
func TestKV_Aggregated_AggregateUnaggregatedAttestationsBySlotIndex(t *testing.T) {
cache := NewAttCaches()
genData := func(slot primitives.Slot, committeeIndex primitives.CommitteeIndex) *ethpb.AttestationData {
return util.HydrateAttestationData(&ethpb.AttestationData{
Slot: slot,
CommitteeIndex: committeeIndex,
})
}
genSign := func() []byte {
priv, err := bls.RandKey()
require.NoError(t, err)
return priv.Sign([]byte{'a'}).Marshal()
}
atts := []*ethpb.Attestation{
// The first slot.
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1010}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(1, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(1, 3), Signature: genSign()},
// The second slot.
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(2, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1010}, Data: genData(2, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(2, 4), Signature: genSign()},
}
ctx := context.Background()
// Make sure that no error is produced if aggregation is requested on an empty unaggregated list.
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 1, 2))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 2, 3))
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)), "Did not aggregate correctly")
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 3)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 3)), "Did not aggregate correctly")
// Persist unaggregated attestations, and aggregate on per slot/committee index base.
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 1, 2))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 2, 3))
// Committee attestations at a slot should be aggregated.
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)))
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)), "Did not aggregate correctly")
// Committee attestations haven't been aggregated.
require.Equal(t, 2, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 3)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 3)), "Did not aggregate correctly")
// The committee at the second slot is aggregated.
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 3)))
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(ctx, 2, 3)), "Did not aggregate correctly")
// The second committee at the second slot is not aggregated.
require.Equal(t, 1, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 4)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 2, 4)), "Did not aggregate correctly")
}
func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
tests := []struct {
name string

View File

@@ -15,6 +15,7 @@ import (
type Pool interface {
// For Aggregated attestations
AggregateUnaggregatedAttestations(ctx context.Context) error
AggregateUnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) error
SaveAggregatedAttestation(att *ethpb.Attestation) error
SaveAggregatedAttestations(atts []*ethpb.Attestation) error
AggregatedAttestations() []*ethpb.Attestation

View File

@@ -7,7 +7,6 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -18,11 +17,6 @@ import (
)
func TestBatchAttestations_Multiple(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"addr_factory.go",
"assigner.go",
"broadcaster.go",
"config.go",
"connection_gater.go",

View File

@@ -1 +0,0 @@
package p2p

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"assigner.go",
"log.go",
"status.go",
],
@@ -15,7 +14,6 @@ go_library(
deps = [
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -30,7 +28,6 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],

View File

@@ -1,72 +0,0 @@
package peers
import (
"context"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/sirupsen/logrus"
)
// handshakePollingInterval is a polling interval for checking the number of received handshakes.
var handshakePollingInterval = 5 * time.Second
func NewAssigner(ctx context.Context, s *Status, max int, finalized primitives.Epoch) *Assigner {
return &Assigner{
ctx: ctx,
ps: s,
max: max,
finalized: finalized,
}
}
type Assigner struct {
sync.Mutex
ctx context.Context
ps *Status
max int
finalized primitives.Epoch
}
var ErrInsufficientSuitable = errors.New("no suitable peers")
func (a *Assigner) freshPeers() ([]peer.ID, error) {
required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.finalized)
if len(peers) < required {
log.WithFields(logrus.Fields{
"suitable": len(peers),
"required": required}).Info("Unable to assign peer while suitable peers < required ")
return nil, ErrInsufficientSuitable
}
return peers, nil
}
// Assign uses the BestFinalized method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
best, err := a.freshPeers()
ps := make([]peer.ID, 0, n)
if err != nil {
return nil, err
}
for _, p := range best {
if !busy[p] {
ps = append(ps, p)
if len(ps) == n {
return ps, nil
}
}
}
return ps, nil
}
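The selection logic of Assign boils down to a filter-and-cap pass over an ordered candidate list. Below is a toy, runnable sketch of that pass over plain strings (the real code uses peer.ID values from BestFinalized); the names are placeholders, not Prysm APIs.

package main

import "fmt"

// assign mirrors Assigner.Assign: walk the best candidates in order,
// skip peers marked busy, and stop once n peers have been picked.
func assign(best []string, busy map[string]bool, n int) []string {
    ps := make([]string, 0, n)
    for _, p := range best {
        if !busy[p] {
            ps = append(ps, p)
            if len(ps) == n {
                break
            }
        }
    }
    return ps
}

func main() {
    best := []string{"peerA", "peerB", "peerC", "peerD"}
    fmt.Println(assign(best, map[string]bool{"peerB": true}, 2)) // [peerA peerC]
}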

View File

@@ -108,31 +108,12 @@ func (s *Store) DeletePeerData(pid peer.ID) {
}
// SetTrustedPeers sets our desired trusted peer set.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) SetTrustedPeers(peers []peer.ID) {
for _, p := range peers {
s.trustedPeers[p] = true
}
}
// GetTrustedPeers gets our desired trusted peer ids.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) GetTrustedPeers() []peer.ID {
peers := []peer.ID{}
for p := range s.trustedPeers {
peers = append(peers, p)
}
return peers
}
// DeleteTrustedPeers removes peers from trusted peer set.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) DeleteTrustedPeers(peers []peer.ID) {
for _, p := range peers {
delete(s.trustedPeers, p)
}
}
// Peers returns map of peer data objects.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) Peers() map[peer.ID]*PeerData {

View File

@@ -96,16 +96,4 @@ func TestStore_TrustedPeers(t *testing.T) {
assert.Equal(t, true, store.IsTrustedPeer(pid1))
assert.Equal(t, true, store.IsTrustedPeer(pid2))
assert.Equal(t, true, store.IsTrustedPeer(pid3))
tPeers = store.GetTrustedPeers()
assert.Equal(t, 3, len(tPeers))
store.DeleteTrustedPeers(tPeers)
tPeers = store.GetTrustedPeers()
assert.Equal(t, 0, len(tPeers))
assert.Equal(t, false, store.IsTrustedPeer(pid1))
assert.Equal(t, false, store.IsTrustedPeer(pid2))
assert.Equal(t, false, store.IsTrustedPeer(pid3))
}

View File

@@ -560,9 +560,6 @@ func (p *Status) Prune() {
notBadPeer := func(pid peer.ID) bool {
return !p.isBad(pid)
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
}
type peerResp struct {
pid peer.ID
score float64
@@ -570,8 +567,7 @@ func (p *Status) Prune() {
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Should not prune a trusted peer, nor prune its peer data and unset its trusted status.
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) && notTrustedPeer(pid) {
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
score: p.Scorers().ScoreNoLock(pid),
@@ -612,9 +608,6 @@ func (p *Status) deprecatedPrune() {
notBadPeer := func(peerData *peerdata.PeerData) bool {
return peerData.BadResponses < p.scorers.BadResponsesScorer().Params().Threshold
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
}
type peerResp struct {
pid peer.ID
badResp int
@@ -622,8 +615,7 @@ func (p *Status) deprecatedPrune() {
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Should not prune a trusted peer, nor prune its peer data and unset its trusted status.
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
@@ -920,32 +912,6 @@ func (p *Status) SetTrustedPeers(peers []peer.ID) {
p.store.SetTrustedPeers(peers)
}
// GetTrustedPeers returns a list of all trusted peers' ids
func (p *Status) GetTrustedPeers() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
return p.store.GetTrustedPeers()
}
// DeleteTrustedPeers removes peers from trusted peer set
func (p *Status) DeleteTrustedPeers(peers []peer.ID) {
p.store.Lock()
defer p.store.Unlock()
p.store.DeleteTrustedPeers(peers)
}
// IsTrustedPeers returns whether the given peer is a trusted peer
func (p *Status) IsTrustedPeers(pid peer.ID) bool {
p.store.RLock()
defer p.store.RUnlock()
return p.isTrustedPeers(pid)
}
// isTrustedPeers is the lock-free version of IsTrustedPeers.
func (p *Status) isTrustedPeers(pid peer.ID) bool {
return p.store.IsTrustedPeer(pid)
}
// this method assumes the store lock is acquired before
// executing the method.
func (p *Status) isfromBadIP(pid peer.ID) bool {

View File

@@ -802,11 +802,6 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
}
}
p.SetTrustedPeers(trustedPeers)
// Assert we have correct trusted peers
trustedPeers = p.GetTrustedPeers()
assert.Equal(t, 6, len(trustedPeers))
// Assert all peers more than max are prunable.
peersToPrune = p.PeersToPrune()
assert.Equal(t, 16, len(peersToPrune))
@@ -817,34 +812,6 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
assert.NotEqual(t, pid.String(), tPid.String())
}
}
// Add more peers to check if trusted peers can be pruned after they are deleted from trusted peer set.
for i := 0; i < 9; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Delete trusted peers.
p.DeleteTrustedPeers(trustedPeers)
peersToPrune = p.PeersToPrune()
assert.Equal(t, 25, len(peersToPrune))
// Check that trusted peers are pruned.
for _, tPid := range trustedPeers {
pruned := false
for _, pid := range peersToPrune {
if pid.String() == tPid.String() {
pruned = true
}
}
assert.Equal(t, true, pruned)
}
// Assert we have zero trusted peers
trustedPeers = p.GetTrustedPeers()
assert.Equal(t, 0, len(trustedPeers))
for _, pid := range peersToPrune {
dir, err := p.Direction(pid)
require.NoError(t, err)
@@ -854,8 +821,8 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
// Ensure it is in the descending order.
currScore := p.Scorers().Score(peersToPrune[0])
for _, pid := range peersToPrune {
score := p.Scorers().Score(pid)
assert.Equal(t, true, currScore <= score)
score := p.Scorers().BadResponsesScorer().Score(pid)
assert.Equal(t, true, currScore >= score)
currScore = score
}
}

View File

@@ -174,9 +174,9 @@ func (s *Service) Start() {
s.awaitStateInitialized()
s.isPreGenesis = false
var relayNodes []string
var peersToWatch []string
if s.cfg.RelayNodeAddr != "" {
relayNodes = append(relayNodes, s.cfg.RelayNodeAddr)
peersToWatch = append(peersToWatch, s.cfg.RelayNodeAddr)
if err := dialRelayNode(s.ctx, s.host, s.cfg.RelayNodeAddr); err != nil {
log.WithError(err).Errorf("Could not dial relay node")
}
@@ -213,7 +213,8 @@ func (s *Service) Start() {
// Set trusted peers for those that are provided as static addresses.
pids := peerIdsFromMultiAddrs(addrs)
s.peers.SetTrustedPeers(pids)
s.connectWithAllTrustedPeers(addrs)
peersToWatch = append(peersToWatch, s.cfg.StaticPeers...)
s.connectWithAllPeers(addrs)
}
// Initialize metadata according to the
// current epoch.
@@ -225,7 +226,7 @@ func (s *Service) Start() {
// Periodic functions.
async.RunEvery(s.ctx, params.BeaconNetworkConfig().TtfbTimeout, func() {
ensurePeerConnections(s.ctx, s.host, s.peers, relayNodes...)
ensurePeerConnections(s.ctx, s.host, peersToWatch...)
})
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
async.RunEvery(s.ctx, params.BeaconNetworkConfig().RespTimeout, s.updateMetrics)
@@ -398,24 +399,6 @@ func (s *Service) awaitStateInitialized() {
}
}
func (s *Service) connectWithAllTrustedPeers(multiAddrs []multiaddr.Multiaddr) {
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
if err != nil {
log.WithError(err).Error("Could not convert to peer address info's from multiaddresses")
return
}
for _, info := range addrInfos {
// add peer into peer status
s.peers.Add(nil, info.ID, info.Addrs[0], network.DirUnknown)
// make each dial non-blocking
go func(info peer.AddrInfo) {
if err := s.connectWithPeer(s.ctx, info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
}(info)
}
}
func (s *Service) connectWithAllPeers(multiAddrs []multiaddr.Multiaddr) {
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
if err != nil {

View File

@@ -5,52 +5,28 @@ import (
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
)
// ensurePeerConnections will attempt to reestablish connection to the peers
// if there are currently no connections to that peer.
func ensurePeerConnections(ctx context.Context, h host.Host, peers *peers.Status, relayNodes ...string) {
// Reset peersToWatch on every run; add relay nodes and trusted peers.
var peersToWatch []*peer.AddrInfo
// add RelayNodes
for _, node := range relayNodes {
if node == "" {
func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
if len(peers) == 0 {
return
}
for _, p := range peers {
if p == "" {
continue
}
peerInfo, err := MakePeer(node)
peerInfo, err := MakePeer(p)
if err != nil {
log.WithError(err).Error("Could not make peer")
continue
}
peersToWatch = append(peersToWatch, peerInfo)
}
// add trusted peers
trustedPeers := peers.GetTrustedPeers()
for _, trustedPeer := range trustedPeers {
maddr, err := peers.Address(trustedPeer)
// avoid invalid trusted peers
if err != nil || maddr == nil {
log.WithField("peer", trustedPeers).WithError(err).Error("Could not get peer address")
continue
}
peerInfo := &peer.AddrInfo{ID: trustedPeer}
peerInfo.Addrs = []ma.Multiaddr{maddr}
peersToWatch = append(peersToWatch, peerInfo)
}
if len(peersToWatch) == 0 {
return
}
for _, p := range peersToWatch {
c := h.Network().ConnsToPeer(p.ID)
c := h.Network().ConnsToPeer(peerInfo.ID)
if len(c) == 0 {
if err := connectWithTimeout(ctx, h, p); err != nil {
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
if err := connectWithTimeout(ctx, h, peerInfo); err != nil {
log.WithField("peer", peerInfo.ID).WithField("addrs", peerInfo.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
continue
}
}

View File

@@ -25,19 +25,16 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/builder:go_default_library",
"//beacon-chain/rpc/eth/debug:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/node:go_default_library",
"//beacon-chain/rpc/eth/rewards:go_default_library",
"//beacon-chain/rpc/eth/validator:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/node:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/beacon:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/debug:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/rpc/prysm/validator:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen:go_default_library",

View File

@@ -1,25 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"errors.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
],
)

View File

@@ -1,49 +0,0 @@
package core
import (
"net/http"
"google.golang.org/grpc/codes"
)
type ErrorReason uint8
const (
Internal = iota
Unavailable
BadRequest
// Add more errors as needed
)
type RpcError struct {
Err error
Reason ErrorReason
}
func ErrorReasonToGRPC(reason ErrorReason) codes.Code {
switch reason {
case Internal:
return codes.Internal
case Unavailable:
return codes.Unavailable
case BadRequest:
return codes.InvalidArgument
// Add more cases for other error reasons as needed
default:
return codes.Internal
}
}
func ErrorReasonToHTTP(reason ErrorReason) int {
switch reason {
case Internal:
return http.StatusInternalServerError
case Unavailable:
return http.StatusServiceUnavailable
case BadRequest:
return http.StatusBadRequest
// Add more cases for other error reasons as needed
default:
return http.StatusInternalServerError
}
}
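A hedged sketch of how callers on each transport might consume this mapping. It is a fragment, not a standalone program: doSomething (returning *RpcError), the gRPC method shape, and the HTTP handler's w are hypothetical; status is google.golang.org/grpc/status and http is net/http.

// gRPC surface: map the reason to a gRPC status code.
if rpcErr := doSomething(); rpcErr != nil {
    return nil, status.Error(ErrorReasonToGRPC(rpcErr.Reason), rpcErr.Err.Error())
}

// HTTP surface: the same reason maps to an HTTP status instead.
if rpcErr := doSomething(); rpcErr != nil {
    http.Error(w, rpcErr.Err.Error(), ErrorReasonToHTTP(rpcErr.Reason))
    return
}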

View File

@@ -1,168 +0,0 @@
package core
import (
"context"
"sort"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
func ComputeValidatorPerformance(
ctx context.Context,
req *ethpb.ValidatorPerformanceRequest,
headFetcher blockchain.HeadFetcher,
currSlot primitives.Slot,
) (*ethpb.ValidatorPerformanceResponse, *RpcError) {
headState, err := headFetcher.HeadState(ctx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get head state"), Reason: Internal}
}
if currSlot > headState.Slot() {
headRoot, err := headFetcher.HeadRoot(ctx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get head root"), Reason: Internal}
}
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, currSlot)
if err != nil {
return nil, &RpcError{Err: errors.Wrapf(err, "could not process slots up to %d", currSlot), Reason: Internal}
}
}
var validatorSummary []*precompute.Validator
if headState.Version() == version.Phase0 {
vp, bp, err := precompute.New(ctx, headState)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
validatorSummary = vp
} else if headState.Version() >= version.Altair {
vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
vp, bp, err = altair.ProcessEpochParticipation(ctx, headState, bp, vp)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
headState, vp, err = altair.ProcessInactivityScores(ctx, headState, vp)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
headState, err = altair.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp)
if err != nil {
return nil, &RpcError{Err: err, Reason: Internal}
}
validatorSummary = vp
} else {
return nil, &RpcError{Err: errors.Wrapf(err, "head state version %d not supported", headState.Version()), Reason: Internal}
}
responseCap := len(req.Indices) + len(req.PublicKeys)
validatorIndices := make([]primitives.ValidatorIndex, 0, responseCap)
missingValidators := make([][]byte, 0, responseCap)
filtered := map[primitives.ValidatorIndex]bool{} // Track filtered validators to prevent duplication in the response.
// Convert the list of validator public keys to validator indices and add to the indices set.
for _, pubKey := range req.PublicKeys {
// Skip empty public key.
if len(pubKey) == 0 {
continue
}
pubkeyBytes := bytesutil.ToBytes48(pubKey)
idx, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
if !ok {
// Validator index not found, track as missing.
missingValidators = append(missingValidators, pubKey)
continue
}
if !filtered[idx] {
validatorIndices = append(validatorIndices, idx)
filtered[idx] = true
}
}
// Add provided indices to the indices set.
for _, idx := range req.Indices {
if !filtered[idx] {
validatorIndices = append(validatorIndices, idx)
filtered[idx] = true
}
}
// Depending on the indices and public keys given, results might not be sorted.
sort.Slice(validatorIndices, func(i, j int) bool {
return validatorIndices[i] < validatorIndices[j]
})
currentEpoch := coreTime.CurrentEpoch(headState)
responseCap = len(validatorIndices)
pubKeys := make([][]byte, 0, responseCap)
beforeTransitionBalances := make([]uint64, 0, responseCap)
afterTransitionBalances := make([]uint64, 0, responseCap)
effectiveBalances := make([]uint64, 0, responseCap)
correctlyVotedSource := make([]bool, 0, responseCap)
correctlyVotedTarget := make([]bool, 0, responseCap)
correctlyVotedHead := make([]bool, 0, responseCap)
inactivityScores := make([]uint64, 0, responseCap)
// Append performance summaries.
// Also track missing validators using public keys.
for _, idx := range validatorIndices {
val, err := headState.ValidatorAtIndexReadOnly(idx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get validator"), Reason: Internal}
}
pubKey := val.PublicKey()
if uint64(idx) >= uint64(len(validatorSummary)) {
// Not listed in validator summary yet; treat it as missing.
missingValidators = append(missingValidators, pubKey[:])
continue
}
if !helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
// Inactive validator; treat it as missing.
missingValidators = append(missingValidators, pubKey[:])
continue
}
summary := validatorSummary[idx]
pubKeys = append(pubKeys, pubKey[:])
effectiveBalances = append(effectiveBalances, summary.CurrentEpochEffectiveBalance)
beforeTransitionBalances = append(beforeTransitionBalances, summary.BeforeEpochTransitionBalance)
afterTransitionBalances = append(afterTransitionBalances, summary.AfterEpochTransitionBalance)
correctlyVotedTarget = append(correctlyVotedTarget, summary.IsPrevEpochTargetAttester)
correctlyVotedHead = append(correctlyVotedHead, summary.IsPrevEpochHeadAttester)
if headState.Version() == version.Phase0 {
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
} else {
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
inactivityScores = append(inactivityScores, summary.InactivityScore)
}
}
return &ethpb.ValidatorPerformanceResponse{
PublicKeys: pubKeys,
CorrectlyVotedSource: correctlyVotedSource,
CorrectlyVotedTarget: correctlyVotedTarget, // In altair, when this is true then the attestation was definitely included.
CorrectlyVotedHead: correctlyVotedHead,
CurrentEffectiveBalances: effectiveBalances,
BalancesBeforeEpochTransition: beforeTransitionBalances,
BalancesAfterEpochTransition: afterTransitionBalances,
MissingValidators: missingValidators,
InactivityScores: inactivityScores, // Only populated in Altair
}, nil
}
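As a usage sketch (hypothetical call site: ctx, headFetcher, currentSlot, and the 48-byte pubkey all come from the caller), indices and public keys can be mixed freely; duplicates are filtered inside the function, and unresolved or inactive validators come back in MissingValidators.

req := &ethpb.ValidatorPerformanceRequest{
    Indices:    []primitives.ValidatorIndex{0, 5},
    PublicKeys: [][]byte{pubkey48Bytes}, // hypothetical 48-byte BLS pubkey
}
resp, rpcErr := ComputeValidatorPerformance(ctx, req, headFetcher, currentSlot)
if rpcErr != nil {
    return nil, status.Error(ErrorReasonToGRPC(rpcErr.Reason), rpcErr.Err.Error())
}
_ = resp.MissingValidators // pubkeys that were unknown or inactive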

View File

@@ -1,48 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"server.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/builder",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//proto/engine/v1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//network:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
],
)

View File

@@ -1,131 +0,0 @@
package builder
import (
"fmt"
"net/http"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/network"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
// ExpectedWithdrawals gets the withdrawals computed from the specified state that will be included in the block built on top of that state.
func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
// Retrieve beacon state
stateId := mux.Vars(r)["state_id"]
if stateId == "" {
network.WriteError(w, &network.DefaultErrorJson{
Message: "state_id is required in URL params",
Code: http.StatusBadRequest,
})
return
}
st, err := s.Stater.State(r.Context(), []byte(stateId))
if err != nil {
network.WriteError(w, handleWrapError(err, "could not retrieve state", http.StatusNotFound))
return
}
queryParam := r.URL.Query().Get("proposal_slot")
var proposalSlot primitives.Slot
if queryParam != "" {
pSlot, err := strconv.ParseUint(queryParam, 10, 64)
if err != nil {
network.WriteError(w, handleWrapError(err, "invalid proposal slot value", http.StatusBadRequest))
return
}
proposalSlot = primitives.Slot(pSlot)
} else {
proposalSlot = st.Slot() + 1
}
// Perform sanity checks on proposal slot before computing state
capellaStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
if err != nil {
network.WriteError(w, handleWrapError(err, "could not calculate Capella start slot", http.StatusInternalServerError))
return
}
if proposalSlot < capellaStart {
network.WriteError(w, &network.DefaultErrorJson{
Message: "expected withdrawals are not supported before Capella fork",
Code: http.StatusBadRequest,
})
return
}
if proposalSlot <= st.Slot() {
network.WriteError(w, &network.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot must be bigger than state slot. proposal slot: %d, state slot: %d", proposalSlot, st.Slot()),
Code: http.StatusBadRequest,
})
return
}
lookAheadLimit := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().MaxSeedLookahead)))
if st.Slot().Add(lookAheadLimit) <= proposalSlot {
network.WriteError(w, &network.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot cannot be >= %d slots ahead of state slot", lookAheadLimit),
Code: http.StatusBadRequest,
})
return
}
// Get metadata for response
isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
network.WriteError(w, handleWrapError(err, "could not get optimistic mode info", http.StatusInternalServerError))
return
}
root, err := helpers.BlockRootAtSlot(st, st.Slot()-1)
if err != nil {
network.WriteError(w, handleWrapError(err, "could not get block root", http.StatusInternalServerError))
return
}
var blockRoot = [32]byte(root)
isFinalized := s.FinalizationFetcher.IsFinalized(r.Context(), blockRoot)
// Advance state forward to proposal slot
st, err = transition.ProcessSlots(r.Context(), st, proposalSlot)
if err != nil {
network.WriteError(w, &network.DefaultErrorJson{
Message: "could not process slots",
Code: http.StatusInternalServerError,
})
return
}
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
network.WriteError(w, &network.DefaultErrorJson{
Message: "could not get expected withdrawals",
Code: http.StatusInternalServerError,
})
return
}
network.WriteJson(w, &ExpectedWithdrawalsResponse{
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
Data: buildExpectedWithdrawalsData(withdrawals),
})
}
func buildExpectedWithdrawalsData(withdrawals []*enginev1.Withdrawal) []*ExpectedWithdrawal {
data := make([]*ExpectedWithdrawal, len(withdrawals))
for i, withdrawal := range withdrawals {
data[i] = &ExpectedWithdrawal{
Address: hexutil.Encode(withdrawal.Address),
Amount: strconv.FormatUint(withdrawal.Amount, 10),
Index: strconv.FormatUint(withdrawal.Index, 10),
ValidatorIndex: strconv.FormatUint(uint64(withdrawal.ValidatorIndex), 10),
}
}
return data
}
func handleWrapError(err error, message string, code int) *network.DefaultErrorJson {
return &network.DefaultErrorJson{
Message: errors.Wrapf(err, message).Error(),
Code: code,
}
}
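A minimal client-side sketch against this handler, assuming a locally running beacon node exposing the HTTP API on port 3500; the path and query parameter are taken from the handler above, and the slot value is arbitrary.

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // proposal_slot is optional; the handler defaults to state slot + 1.
    url := "http://localhost:3500/eth/v1/builder/states/head/expected_withdrawals?proposal_slot=6214000"
    resp, err := http.Get(url)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }
    fmt.Println(resp.StatusCode, string(body))
}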

View File

@@ -1,210 +0,0 @@
package builder
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/network"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
func TestExpectedWithdrawals_BadRequest(t *testing.T) {
st, err := util.NewBeaconStateCapella()
slotsAhead := 5000
require.NoError(t, err)
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
currentSlot := capellaSlot + primitives.Slot(slotsAhead)
require.NoError(t, st.SetSlot(currentSlot))
mockChainService := &mock.ChainService{Optimistic: true}
testCases := []struct {
name string
path string
urlParams map[string]string
state state.BeaconState
errorMessage string
}{
{
name: "no state_id url params",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot" +
strconv.FormatUint(uint64(currentSlot), 10),
urlParams: map[string]string{},
state: nil,
errorMessage: "state_id is required in URL params",
},
{
name: "invalid proposal slot value",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=aaa",
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "invalid proposal slot value",
},
{
name: "proposal slot < Capella start slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(capellaSlot)-1, 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "expected withdrawals are not supported before Capella fork",
},
{
name: "proposal slot == Capella start slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(capellaSlot), 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "proposal slot must be bigger than state slot",
},
{
name: "Proposal slot >= 128 slots ahead of state slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(currentSlot+128), 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "proposal slot cannot be >= 128 slots ahead of state slot",
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
s := &Server{
FinalizationFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
Stater: &testutil.MockStater{BeaconState: testCase.state},
}
request := httptest.NewRequest("GET", testCase.path, nil)
request = mux.SetURLVars(request, testCase.urlParams)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, testCase.errorMessage, e.Message)
})
}
}
func TestExpectedWithdrawals(t *testing.T) {
st, err := util.NewBeaconStateCapella()
slotsAhead := 5000
require.NoError(t, err)
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
currentSlot := capellaSlot + primitives.Slot(slotsAhead)
require.NoError(t, st.SetSlot(currentSlot))
mockChainService := &mock.ChainService{Optimistic: true}
t.Run("get correct expected withdrawals", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.MaxValidatorsPerWithdrawalsSweep = 16
params.OverrideBeaconConfig(cfg)
// Update state with updated validator fields
valCount := 17
validators := make([]*eth.Validator, 0, valCount)
balances := make([]uint64, 0, valCount)
for i := 0; i < valCount; i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
val := &eth.Validator{
PublicKey: blsKey.PublicKey().Marshal(),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
validators = append(validators, val)
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}
epoch := slots.ToEpoch(st.Slot())
// Fully withdrawable now with more than 0 balance
validators[5].WithdrawableEpoch = epoch
// Fully withdrawable now but 0 balance
validators[10].WithdrawableEpoch = epoch
balances[10] = 0
// Partially withdrawable now but fully withdrawable after 1 epoch
validators[14].WithdrawableEpoch = epoch + 1
balances[14] += params.BeaconConfig().MinDepositAmount
// Partially withdrawable
validators[15].WithdrawableEpoch = epoch + 2
balances[15] += params.BeaconConfig().MinDepositAmount
// Above sweep bound
validators[16].WithdrawableEpoch = epoch + 1
balances[16] += params.BeaconConfig().MinDepositAmount
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetBalances(balances))
inactivityScores := make([]uint64, valCount)
for i := range inactivityScores {
inactivityScores[i] = 10
}
require.NoError(t, st.SetInactivityScores(inactivityScores))
s := &Server{
FinalizationFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
Stater: &testutil.MockStater{BeaconState: st},
}
request := httptest.NewRequest(
"GET", "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot="+
strconv.FormatUint(uint64(currentSlot+params.BeaconConfig().SlotsPerEpoch), 10), nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ExpectedWithdrawalsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
assert.Equal(t, false, resp.Finalized)
assert.Equal(t, 3, len(resp.Data))
expectedWithdrawal1 := &ExpectedWithdrawal{
Index: strconv.FormatUint(0, 10),
ValidatorIndex: strconv.FormatUint(5, 10),
Address: hexutil.Encode(validators[5].WithdrawalCredentials[12:]),
// Decreased due to epoch processing when state advanced forward
Amount: strconv.FormatUint(31998257885, 10),
}
expectedWithdrawal2 := &ExpectedWithdrawal{
Index: strconv.FormatUint(1, 10),
ValidatorIndex: strconv.FormatUint(14, 10),
Address: hexutil.Encode(validators[14].WithdrawalCredentials[12:]),
// MaxEffectiveBalance + MinDepositAmount + decrease after epoch processing
Amount: strconv.FormatUint(32998257885, 10),
}
expectedWithdrawal3 := &ExpectedWithdrawal{
Index: strconv.FormatUint(2, 10),
ValidatorIndex: strconv.FormatUint(15, 10),
Address: hexutil.Encode(validators[15].WithdrawalCredentials[12:]),
// MinDepositAmount + decrease after epoch processing
Amount: strconv.FormatUint(998257885, 10),
}
require.DeepEqual(t, expectedWithdrawal1, resp.Data[0])
require.DeepEqual(t, expectedWithdrawal2, resp.Data[1])
require.DeepEqual(t, expectedWithdrawal3, resp.Data[2])
})
}

View File

@@ -1,12 +0,0 @@
package builder
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
)
type Server struct {
FinalizationFetcher blockchain.FinalizationFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
Stater lookup.Stater
}

View File

@@ -1,14 +0,0 @@
package builder
type ExpectedWithdrawalsResponse struct {
Data []*ExpectedWithdrawal `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type ExpectedWithdrawal struct {
Address string `json:"address" hex:"true"`
Amount string `json:"amount"`
Index string `json:"index"`
ValidatorIndex string `json:"validator_index"`
}

View File

@@ -13,21 +13,14 @@ go_library(
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
],
)
@@ -40,7 +33,6 @@ go_test(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

View File

@@ -1,8 +1,6 @@
package rewards
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
@@ -10,19 +8,12 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
coreblocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/wealdtech/go-bytesutil"
)
// BlockRewards is an HTTP handler for Beacon API getBlockRewards.
@@ -37,7 +28,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
}
if blk.Version() == version.Phase0 {
errJson := &network.DefaultErrorJson{
Message: "Block rewards are not supported for Phase 0 blocks",
Message: "block rewards are not supported for Phase 0 blocks",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
@@ -50,7 +41,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
st, err := s.ReplayerBuilder.ReplayerForSlot(blk.Block().Slot()-1).ReplayToSlot(r.Context(), blk.Block().Slot())
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get state: " + err.Error(),
Message: errors.Wrapf(err, "could not get state").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -61,7 +52,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
initBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get proposer's balance: " + err.Error(),
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -70,7 +61,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
st, err = altair.ProcessAttestationsNoVerifySignature(r.Context(), st, blk)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get attestation rewards" + err.Error(),
Message: errors.Wrapf(err, "could not get attestation rewards").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -79,7 +70,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
attBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get proposer's balance: " + err.Error(),
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -88,7 +79,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
st, err = coreblocks.ProcessAttesterSlashings(r.Context(), st, blk.Block().Body().AttesterSlashings(), validators.SlashValidator)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get attester slashing rewards: " + err.Error(),
Message: errors.Wrapf(err, "could not get attester slashing rewards").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -97,7 +88,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
attSlashingsBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get proposer's balance: " + err.Error(),
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -106,7 +97,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
st, err = coreblocks.ProcessProposerSlashings(r.Context(), st, blk.Block().Body().ProposerSlashings(), validators.SlashValidator)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get proposer slashing rewards" + err.Error(),
Message: errors.Wrapf(err, "could not get proposer slashing rewards").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -115,7 +106,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
proposerSlashingsBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get proposer's balance: " + err.Error(),
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -124,7 +115,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
sa, err := blk.Block().Body().SyncAggregate()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get sync aggregate: " + err.Error(),
Message: errors.Wrapf(err, "could not get sync aggregate").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -134,7 +125,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
_, syncCommitteeReward, err = altair.ProcessSyncAggregate(r.Context(), st, sa)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get sync aggregate rewards: " + err.Error(),
Message: errors.Wrapf(err, "could not get sync aggregate rewards").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -144,7 +135,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get optimistic mode info: " + err.Error(),
Message: errors.Wrapf(err, "could not get optimistic mode info").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -153,7 +144,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get block root: " + err.Error(),
Message: errors.Wrapf(err, "could not get block root").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -161,7 +152,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
}
response := &BlockRewardsResponse{
Data: BlockRewards{
Data: &BlockRewards{
ProposerIndex: strconv.FormatUint(uint64(proposerIndex), 10),
Total: strconv.FormatUint(proposerSlashingsBalance-initBalance+syncCommitteeReward, 10),
Attestations: strconv.FormatUint(attBalance-initBalance, 10),
@@ -175,300 +166,22 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
network.WriteJson(w, response)
}
// AttestationRewards retrieves attestation reward info for validators specified by an array of public keys or validator indices.
// If no array is provided, reward info is returned for every validator.
// TODO: Inclusion delay
func (s *Server) AttestationRewards(w http.ResponseWriter, r *http.Request) {
st, ok := s.attRewardsState(w, r)
if !ok {
return
}
bal, vals, valIndices, ok := attRewardsBalancesAndVals(w, r, st)
if !ok {
return
}
totalRewards, ok := totalAttRewards(w, st, bal, vals, valIndices)
if !ok {
return
}
idealRewards, ok := idealAttRewards(w, st, bal, vals)
if !ok {
return
}
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get optimistic mode info: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
blkRoot, err := st.LatestBlockHeader().HashTreeRoot()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get block root: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
resp := &AttestationRewardsResponse{
Data: AttestationRewards{
IdealRewards: idealRewards,
TotalRewards: totalRewards,
},
ExecutionOptimistic: optimistic,
Finalized: s.FinalizationFetcher.IsFinalized(r.Context(), blkRoot),
}
network.WriteJson(w, resp)
}
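A client-side sketch, assuming the standard Beacon API route for attestation rewards and a local node on port 3500 (both assumptions, not taken from this diff). The trailing path segment is parsed as the epoch and the body is a JSON array of validator indices (or pubkeys) as strings, matching attRewardsState and attRewardsBalancesAndVals below.

package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
)

func main() {
    // An empty array (or no body) requests rewards for every validator.
    body := bytes.NewBufferString(`["100", "200"]`)
    url := "http://localhost:3500/eth/v1/beacon/rewards/attestations/4000"
    resp, err := http.Post(url, "application/json", body)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    out, err := io.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }
    fmt.Println(resp.StatusCode, string(out))
}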
func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.BeaconState, bool) {
segments := strings.Split(r.URL.Path, "/")
requestedEpoch, err := strconv.ParseUint(segments[len(segments)-1], 10, 64)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode epoch: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, false
}
if primitives.Epoch(requestedEpoch) < params.BeaconConfig().AltairForkEpoch {
errJson := &network.DefaultErrorJson{
Message: "Attestation rewards are not supported for Phase 0",
Code: http.StatusNotFound,
}
network.WriteError(w, errJson)
return nil, false
}
currentEpoch := uint64(slots.ToEpoch(s.TimeFetcher.CurrentSlot()))
if requestedEpoch+1 >= currentEpoch {
errJson := &network.DefaultErrorJson{
Code: http.StatusNotFound,
Message: "Attestation rewards are available after two epoch transitions to ensure all attestations have a chance of inclusion",
}
network.WriteError(w, errJson)
return nil, false
}
nextEpochEnd, err := slots.EpochEnd(primitives.Epoch(requestedEpoch + 1))
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get next epoch's ending slot: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return nil, false
}
st, err := s.Stater.StateBySlot(r.Context(), nextEpochEnd)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get state for epoch's starting slot: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return nil, false
}
return st, true
}
func attRewardsBalancesAndVals(
w http.ResponseWriter,
r *http.Request,
st state.BeaconState,
) (*precompute.Balance, []*precompute.Validator, []primitives.ValidatorIndex, bool) {
allVals, bal, err := altair.InitializePrecomputeValidators(r.Context(), st)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not initialize precompute validators: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
allVals, bal, err = altair.ProcessEpochParticipation(r.Context(), st, bal, allVals)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not process epoch participation: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
var rawValIds []string
if r.Body != http.NoBody {
if err = json.NewDecoder(r.Body).Decode(&rawValIds); err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode validators: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
}
valIndices := make([]primitives.ValidatorIndex, len(rawValIds))
for i, v := range rawValIds {
index, err := strconv.ParseUint(v, 10, 64)
if err != nil {
pubkey, err := bytesutil.FromHexString(v)
if err != nil || len(pubkey) != fieldparams.BLSPubkeyLength {
errJson := &network.DefaultErrorJson{
Message: fmt.Sprintf("%s is not a validator index or pubkey", v),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
var ok bool
valIndices[i], ok = st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
if !ok {
errJson := &network.DefaultErrorJson{
Message: fmt.Sprintf("No validator index found for pubkey %#x", pubkey),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
} else {
if index >= uint64(st.NumValidators()) {
errJson := &network.DefaultErrorJson{
Message: fmt.Sprintf("Validator index %d is too large. Maximum allowed index is %d", index, st.NumValidators()-1),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
valIndices[i] = primitives.ValidatorIndex(index)
}
}
if len(valIndices) == 0 {
valIndices = make([]primitives.ValidatorIndex, len(allVals))
for i := 0; i < len(allVals); i++ {
valIndices[i] = primitives.ValidatorIndex(i)
}
}
if len(valIndices) == len(allVals) {
return bal, allVals, valIndices, true
} else {
filteredVals := make([]*precompute.Validator, len(valIndices))
for i, valIx := range valIndices {
filteredVals[i] = allVals[valIx]
}
return bal, filteredVals, valIndices, true
}
}
// idealAttRewards returns rewards for hypothetical, perfectly voting validators
// whose effective balances are over EJECTION_BALANCE and match the balances of the passed-in validators.
func idealAttRewards(
w http.ResponseWriter,
st state.BeaconState,
bal *precompute.Balance,
vals []*precompute.Validator,
) ([]IdealAttestationReward, bool) {
idealValsCount := uint64(16)
minIdealBalance := uint64(17)
maxIdealBalance := minIdealBalance + idealValsCount - 1
idealRewards := make([]IdealAttestationReward, 0, idealValsCount)
idealVals := make([]*precompute.Validator, 0, idealValsCount)
increment := params.BeaconConfig().EffectiveBalanceIncrement
for i := minIdealBalance; i <= maxIdealBalance; i++ {
for _, v := range vals {
if v.CurrentEpochEffectiveBalance/1e9 == i {
effectiveBalance := i * increment
idealVals = append(idealVals, &precompute.Validator{
IsActivePrevEpoch: true,
IsSlashed: false,
CurrentEpochEffectiveBalance: effectiveBalance,
IsPrevEpochSourceAttester: true,
IsPrevEpochTargetAttester: true,
IsPrevEpochHeadAttester: true,
})
idealRewards = append(idealRewards, IdealAttestationReward{EffectiveBalance: strconv.FormatUint(effectiveBalance, 10)})
break
}
}
}
deltas, err := altair.AttestationsDelta(st, bal, idealVals)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get attestations delta: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return nil, false
}
for i, d := range deltas {
idealRewards[i].Head = strconv.FormatUint(d.HeadReward, 10)
if d.SourcePenalty > 0 {
idealRewards[i].Source = fmt.Sprintf("-%s", strconv.FormatUint(d.SourcePenalty, 10))
} else {
idealRewards[i].Source = strconv.FormatUint(d.SourceReward, 10)
}
if d.TargetPenalty > 0 {
idealRewards[i].Target = fmt.Sprintf("-%s", strconv.FormatUint(d.TargetPenalty, 10))
} else {
idealRewards[i].Target = strconv.FormatUint(d.TargetReward, 10)
}
}
return idealRewards, true
}
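// Illustrative sketch (not from the diff): the loop above synthesizes one
// perfectly voting validator per whole-ETH effective balance from
// EJECTION_BALANCE+1 ETH (17) up to MAX_EFFECTIVE_BALANCE (32), i.e. 16
// entries. A standalone check of that arithmetic, assuming the mainnet
// EffectiveBalanceIncrement of 1e9 Gwei:
func exampleIdealBalances() []uint64 {
	const increment = uint64(1e9) // 1 ETH in Gwei
	balances := make([]uint64, 0, 16)
	for eth := uint64(17); eth <= 32; eth++ {
		balances = append(balances, eth*increment) // 17e9 .. 32e9 Gwei
	}
	return balances // len == 16, matching idealValsCount above
}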
func totalAttRewards(
w http.ResponseWriter,
st state.BeaconState,
bal *precompute.Balance,
vals []*precompute.Validator,
valIndices []primitives.ValidatorIndex,
) ([]TotalAttestationReward, bool) {
totalRewards := make([]TotalAttestationReward, len(valIndices))
for i, v := range valIndices {
totalRewards[i] = TotalAttestationReward{ValidatorIndex: strconv.FormatUint(uint64(v), 10)}
}
deltas, err := altair.AttestationsDelta(st, bal, vals)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get attestations delta: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return nil, false
}
for i, d := range deltas {
totalRewards[i].Head = strconv.FormatUint(d.HeadReward, 10)
if d.SourcePenalty > 0 {
totalRewards[i].Source = fmt.Sprintf("-%s", strconv.FormatUint(d.SourcePenalty, 10))
} else {
totalRewards[i].Source = strconv.FormatUint(d.SourceReward, 10)
}
if d.TargetPenalty > 0 {
totalRewards[i].Target = fmt.Sprintf("-%s", strconv.FormatUint(d.TargetPenalty, 10))
} else {
totalRewards[i].Target = strconv.FormatUint(d.TargetReward, 10)
}
}
return totalRewards, true
}
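// Sketch only: both builders above encode deltas as decimal strings, with
// penalties rendered as negative values, because the Beacon API rewards
// endpoints return string-typed (possibly negative) Gwei amounts. A
// hypothetical helper capturing that convention:
func signedGweiString(reward, penalty uint64) string {
	if penalty > 0 {
		return "-" + strconv.FormatUint(penalty, 10)
	}
	return strconv.FormatUint(reward, 10)
}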
func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) *network.DefaultErrorJson {
if errors.Is(err, lookup.BlockIdParseError{}) {
return &network.DefaultErrorJson{
Message: "Invalid block ID: " + err.Error(),
Message: errors.Wrapf(err, "invalid block ID").Error(),
Code: http.StatusBadRequest,
}
}
if err != nil {
return &network.DefaultErrorJson{
Message: "Could not get block from block ID: " + err.Error(),
Message: errors.Wrapf(err, "could not get block from block ID").Error(),
Code: http.StatusInternalServerError,
}
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
return &network.DefaultErrorJson{
Message: "Could not find requested block" + err.Error(),
Message: errors.Wrapf(err, "could not find requested block").Error(),
Code: http.StatusNotFound,
}
}

View File

@@ -4,11 +4,8 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/prysmaticlabs/go-bitfield"
@@ -16,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
mockstategen "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen/mock"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -36,7 +32,7 @@ import (
func TestBlockRewards(t *testing.T) {
valCount := 64
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
validators := make([]*eth.Validator, 0, valCount)
@@ -197,285 +193,6 @@ func TestBlockRewards(t *testing.T) {
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Block rewards are not supported for Phase 0 blocks", e.Message)
})
}
func TestAttestationRewards(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 1
params.OverrideBeaconConfig(cfg)
valCount := 64
st, err := util.NewBeaconStateCapella()
require.NoError(t, err)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*3-1))
validators := make([]*eth.Validator, 0, valCount)
balances := make([]uint64, 0, valCount)
secretKeys := make([]bls.SecretKey, 0, valCount)
for i := 0; i < valCount; i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
secretKeys = append(secretKeys, blsKey)
validators = append(validators, &eth.Validator{
PublicKey: blsKey.PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance / 64 * uint64(i+1),
})
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance/64*uint64(i+1))
}
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetBalances(balances))
require.NoError(t, st.SetInactivityScores(make([]uint64, len(validators))))
participation := make([]byte, len(validators))
for i := range participation {
participation[i] = 0b111
}
require.NoError(t, st.SetCurrentParticipationBits(participation))
require.NoError(t, st.SetPreviousParticipationBits(participation))
currentSlot := params.BeaconConfig().SlotsPerEpoch * 3
mockChainService := &mock.ChainService{Optimistic: true, Slot: &currentSlot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
params.BeaconConfig().SlotsPerEpoch*3 - 1: st,
}},
TimeFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
}
t.Run("ok - ideal rewards", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 16, len(resp.Data.IdealRewards))
sum := uint64(0)
for _, r := range resp.Data.IdealRewards {
hr, err := strconv.ParseUint(r.Head, 10, 64)
require.NoError(t, err)
sr, err := strconv.ParseUint(r.Source, 10, 64)
require.NoError(t, err)
tr, err := strconv.ParseUint(r.Target, 10, 64)
require.NoError(t, err)
sum += hr + sr + tr
}
assert.Equal(t, uint64(20756849), sum)
})
t.Run("ok - filtered vals", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
pubkey := fmt.Sprintf("%#x", secretKeys[10].PublicKey().Marshal())
valIds, err := json.Marshal([]string{"20", pubkey})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data.TotalRewards))
sum := uint64(0)
for _, r := range resp.Data.TotalRewards {
hr, err := strconv.ParseUint(r.Head, 10, 64)
require.NoError(t, err)
sr, err := strconv.ParseUint(r.Source, 10, 64)
require.NoError(t, err)
tr, err := strconv.ParseUint(r.Target, 10, 64)
require.NoError(t, err)
sum += hr + sr + tr
}
assert.Equal(t, uint64(794265), sum)
})
t.Run("ok - all vals", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 64, len(resp.Data.TotalRewards))
sum := uint64(0)
for _, r := range resp.Data.TotalRewards {
hr, err := strconv.ParseUint(r.Head, 10, 64)
require.NoError(t, err)
sr, err := strconv.ParseUint(r.Source, 10, 64)
require.NoError(t, err)
tr, err := strconv.ParseUint(r.Target, 10, 64)
require.NoError(t, err)
sum += hr + sr + tr
}
assert.Equal(t, uint64(54221955), sum)
})
t.Run("ok - penalty", func(t *testing.T) {
st, err := util.NewBeaconStateCapella()
require.NoError(t, err)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*3-1))
validators := make([]*eth.Validator, 0, valCount)
balances := make([]uint64, 0, valCount)
secretKeys := make([]bls.SecretKey, 0, valCount)
for i := 0; i < valCount; i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
secretKeys = append(secretKeys, blsKey)
validators = append(validators, &eth.Validator{
PublicKey: blsKey.PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance / 64 * uint64(i),
})
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance/64*uint64(i))
}
validators[63].Slashed = true
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetBalances(balances))
require.NoError(t, st.SetInactivityScores(make([]uint64, len(validators))))
participation := make([]byte, len(validators))
for i := range participation {
participation[i] = 0b111
}
require.NoError(t, st.SetCurrentParticipationBits(participation))
require.NoError(t, st.SetPreviousParticipationBits(participation))
currentSlot := params.BeaconConfig().SlotsPerEpoch * 3
mockChainService := &mock.ChainService{Optimistic: true, Slot: &currentSlot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
params.BeaconConfig().SlotsPerEpoch*3 - 1: st,
}},
TimeFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
}
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
valIds, err := json.Marshal([]string{"63"})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "0", resp.Data.TotalRewards[0].Head)
assert.Equal(t, "-432270", resp.Data.TotalRewards[0].Source)
assert.Equal(t, "-802788", resp.Data.TotalRewards[0].Target)
})
t.Run("invalid validator index/pubkey", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
valIds, err := json.Marshal([]string{"10", "foo"})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "foo is not a validator index or pubkey", e.Message)
})
t.Run("unknown validator pubkey", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
privkey, err := bls.RandKey()
require.NoError(t, err)
pubkey := fmt.Sprintf("%#x", privkey.PublicKey().Marshal())
valIds, err := json.Marshal([]string{"10", pubkey})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "No validator index found for pubkey "+pubkey, e.Message)
})
t.Run("validator index too large", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
valIds, err := json.Marshal([]string{"10", "999"})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Validator index 999 is too large. Maximum allowed index is 63", e.Message)
})
t.Run("phase 0", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/0"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusNotFound, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusNotFound, e.Code)
assert.Equal(t, "Attestation rewards are not supported for Phase 0", e.Message)
})
t.Run("invalid epoch", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/foo"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "Could not decode epoch"))
})
t.Run("previous epoch", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/2"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusNotFound, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusNotFound, e.Code)
assert.Equal(t, "Attestation rewards are available after two epoch transitions to ensure all attestations have a chance of inclusion", e.Message)
assert.Equal(t, "block rewards are not supported for Phase 0 blocks", e.Message)
})
}

View File

@@ -11,8 +11,4 @@ type Server struct {
OptimisticModeFetcher blockchain.OptimisticModeFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ReplayerBuilder stategen.ReplayerBuilder
// TODO: Init
TimeFetcher blockchain.TimeFetcher
Stater lookup.Stater
HeadFetcher blockchain.HeadFetcher
}

View File

@@ -1,9 +1,9 @@
package rewards
type BlockRewardsResponse struct {
Data BlockRewards `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data *BlockRewards `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type BlockRewards struct {
@@ -14,29 +14,3 @@ type BlockRewards struct {
ProposerSlashings string `json:"proposer_slashings"`
AttesterSlashings string `json:"attester_slashings"`
}
type AttestationRewardsResponse struct {
Data AttestationRewards `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type AttestationRewards struct {
IdealRewards []IdealAttestationReward `json:"ideal_rewards"`
TotalRewards []TotalAttestationReward `json:"total_rewards"`
}
type IdealAttestationReward struct {
EffectiveBalance string `json:"effective_balance"`
Head string `json:"head"`
Target string `json:"target"`
Source string `json:"source"`
}
type TotalAttestationReward struct {
ValidatorIndex string `json:"validator_index"`
Head string `json:"head"`
Target string `json:"target"`
Source string `json:"source"`
InclusionDelay string `json:"inclusion_delay"`
}

View File

@@ -1,49 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"server.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/node",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/sync:go_default_library",
"//network:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"handlers_test.go",
"server_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//network:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/host/peerstore/test:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
],
)

View File

@@ -1,177 +0,0 @@
package node
import (
"encoding/json"
"io"
"net/http"
"strings"
corenet "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/network"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// ListTrustedPeer retrieves data about the node's trusted peers.
func (s *Server) ListTrustedPeer(w http.ResponseWriter, r *http.Request) {
peerStatus := s.PeersFetcher.Peers()
allIds := s.PeersFetcher.Peers().GetTrustedPeers()
allPeers := make([]*Peer, 0, len(allIds))
for _, id := range allIds {
p, err := httpPeerInfo(peerStatus, id)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not get peer info").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
// Peers added to the trusted set but never connected should also be listed.
if p == nil {
p = &Peer{
PeerID: id.String(),
Enr: "",
LastSeenP2PAddress: "",
State: eth.ConnectionState(corenet.NotConnected).String(),
Direction: eth.PeerDirection(corenet.DirUnknown).String(),
}
}
allPeers = append(allPeers, p)
}
response := &PeersResponse{Peers: allPeers}
network.WriteJson(w, response)
}
// AddTrustedPeer adds a new peer to the node's trusted peer set by multiaddress.
func (s *Server) AddTrustedPeer(w http.ResponseWriter, r *http.Request) {
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not read request body").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
var addrRequest *AddrRequest
err = json.Unmarshal(body, &addrRequest)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not decode request body into peer address").Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
info, err := peer.AddrInfoFromString(addrRequest.Addr)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not derive peer info from multiaddress").Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
// Also add the new peer's data to the peers store.
direction, err := s.PeersFetcher.Peers().Direction(info.ID)
if err != nil {
s.PeersFetcher.Peers().Add(nil, info.ID, info.Addrs[0], corenet.DirUnknown)
} else {
s.PeersFetcher.Peers().Add(nil, info.ID, info.Addrs[0], direction)
}
peers := []peer.ID{}
peers = append(peers, info.ID)
s.PeersFetcher.Peers().SetTrustedPeers(peers)
w.WriteHeader(http.StatusOK)
}
// RemoveTrustedPeer removes a peer from the node's trusted peer set but does not close the connection.
func (s *Server) RemoveTrustedPeer(w http.ResponseWriter, r *http.Request) {
segments := strings.Split(r.URL.Path, "/")
id := segments[len(segments)-1]
peerId, err := peer.Decode(id)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not decode peer id").Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
// If the peer is not a trusted peer, do nothing and return 200.
if !s.PeersFetcher.Peers().IsTrustedPeers(peerId) {
w.WriteHeader(http.StatusOK)
return
}
peers := []peer.ID{}
peers = append(peers, peerId)
s.PeersFetcher.Peers().DeleteTrustedPeers(peers)
w.WriteHeader(http.StatusOK)
}
// httpPeerInfo does the same thing as the peerInfo function in node.go but returns the
// HTTP peer response.
func httpPeerInfo(peerStatus *peers.Status, id peer.ID) (*Peer, error) {
enr, err := peerStatus.ENR(id)
if err != nil {
if errors.Is(err, peerdata.ErrPeerUnknown) {
return nil, nil
}
return nil, errors.Wrap(err, "could not obtain ENR")
}
var serializedEnr string
if enr != nil {
serializedEnr, err = p2p.SerializeENR(enr)
if err != nil {
return nil, errors.Wrap(err, "could not serialize ENR")
}
}
address, err := peerStatus.Address(id)
if err != nil {
if errors.Is(err, peerdata.ErrPeerUnknown) {
return nil, nil
}
return nil, errors.Wrap(err, "could not obtain address")
}
connectionState, err := peerStatus.ConnectionState(id)
if err != nil {
if errors.Is(err, peerdata.ErrPeerUnknown) {
return nil, nil
}
return nil, errors.Wrap(err, "could not obtain connection state")
}
direction, err := peerStatus.Direction(id)
if err != nil {
if errors.Is(err, peerdata.ErrPeerUnknown) {
return nil, nil
}
return nil, errors.Wrap(err, "could not obtain direction")
}
if eth.PeerDirection(direction) == eth.PeerDirection_UNKNOWN {
return nil, nil
}
v1ConnState := eth.ConnectionState(connectionState).String()
v1PeerDirection := eth.PeerDirection(direction).String()
p := Peer{
PeerID: id.String(),
State: v1ConnState,
Direction: v1PeerDirection,
}
if address != nil {
p.LastSeenP2PAddress = address.String()
}
if serializedEnr != "" {
p.Enr = "enr:" + serializedEnr
}
return &p, nil
}
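// Not part of the diff: a minimal sketch of how these handlers could be wired
// into a mux. The actual routes Prysm registers live elsewhere; the path below
// is illustrative only.
func registerTrustedPeerRoutes(mux *http.ServeMux, s *Server) {
	mux.HandleFunc("/prysm/node/trusted_peers", func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case http.MethodGet:
			s.ListTrustedPeer(w, r)
		case http.MethodPost:
			s.AddTrustedPeer(w, r)
		case http.MethodDelete:
			// RemoveTrustedPeer reads the peer ID from the last path segment.
			s.RemoveTrustedPeer(w, r)
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
		}
	})
}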

View File

@@ -1,250 +0,0 @@
package node
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
corenet "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
libp2ptest "github.com/libp2p/go-libp2p/p2p/host/peerstore/test"
ma "github.com/multiformats/go-multiaddr"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
type testIdentity enode.ID
func (_ testIdentity) Verify(_ *enr.Record, _ []byte) error { return nil }
func (id testIdentity) NodeAddr(_ *enr.Record) []byte { return id[:] }
func TestListTrustedPeer(t *testing.T) {
ids := libp2ptest.GeneratePeerIDs(9)
peerFetcher := &mockp2p.MockPeersProvider{}
peerFetcher.ClearPeers()
peerStatus := peerFetcher.Peers()
for i, id := range ids {
if i == len(ids)-1 {
var p2pAddr = "/ip4/127.0.0." + strconv.Itoa(i) + "/udp/12000/p2p/16Uiu2HAm7yD5fhhw1Kihg5pffaGbvKV3k7sqxRGHMZzkb7u9UUxQ"
p2pMultiAddr, err := ma.NewMultiaddr(p2pAddr)
require.NoError(t, err)
peerStatus.Add(nil, id, p2pMultiAddr, corenet.DirUnknown)
continue
}
enrRecord := &enr.Record{}
err := enrRecord.SetSig(testIdentity{1}, []byte{42})
require.NoError(t, err)
enrRecord.Set(enr.IPv4{127, 0, 0, byte(i)})
err = enrRecord.SetSig(testIdentity{}, []byte{})
require.NoError(t, err)
var p2pAddr = "/ip4/127.0.0." + strconv.Itoa(i) + "/udp/12000/p2p/16Uiu2HAm7yD5fhhw1Kihg5pffaGbvKV3k7sqxRGHMZzkb7u9UUxQ"
p2pMultiAddr, err := ma.NewMultiaddr(p2pAddr)
require.NoError(t, err)
var direction corenet.Direction
if i%2 == 0 {
direction = corenet.DirInbound
} else {
direction = corenet.DirOutbound
}
peerStatus.Add(enrRecord, id, p2pMultiAddr, direction)
switch i {
case 0, 1:
peerStatus.SetConnectionState(id, peers.PeerConnecting)
case 2, 3:
peerStatus.SetConnectionState(id, peers.PeerConnected)
case 4, 5:
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
case 6, 7:
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
default:
t.Fatalf("Failed to set connection state for peer")
}
}
s := Server{PeersFetcher: peerFetcher}
// set all peers as trusted peers
s.PeersFetcher.Peers().SetTrustedPeers(ids)
t.Run("Peer data OK", func(t *testing.T) {
url := "http://anything.is.fine"
request := httptest.NewRequest("GET", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListTrustedPeer(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &PeersResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
peers := resp.Peers
// assert number of trusted peer is right
assert.Equal(t, 9, len(peers))
for i := 0; i < 9; i++ {
pid, err := peer.Decode(peers[i].PeerID)
require.NoError(t, err)
if pid == ids[8] {
assert.Equal(t, "", peers[i].Enr)
assert.Equal(t, "", peers[i].LastSeenP2PAddress)
assert.Equal(t, "DISCONNECTED", peers[i].State)
assert.Equal(t, "UNKNOWN", peers[i].Direction)
continue
}
expectedEnr, err := peerStatus.ENR(pid)
require.NoError(t, err)
serializeENR, err := p2p.SerializeENR(expectedEnr)
require.NoError(t, err)
assert.Equal(t, "enr:"+serializeENR, peers[i].Enr)
expectedP2PAddr, err := peerStatus.Address(pid)
require.NoError(t, err)
assert.Equal(t, expectedP2PAddr.String(), peers[i].LastSeenP2PAddress)
switch pid {
case ids[0]:
assert.Equal(t, "CONNECTING", peers[i].State)
assert.Equal(t, "INBOUND", peers[i].Direction)
case ids[1]:
assert.Equal(t, "CONNECTING", peers[i].State)
assert.Equal(t, "OUTBOUND", peers[i].Direction)
case ids[2]:
assert.Equal(t, "CONNECTED", peers[i].State)
assert.Equal(t, "INBOUND", peers[i].Direction)
case ids[3]:
assert.Equal(t, "CONNECTED", peers[i].State)
assert.Equal(t, "OUTBOUND", peers[i].Direction)
case ids[4]:
assert.Equal(t, "DISCONNECTING", peers[i].State)
assert.Equal(t, "INBOUND", peers[i].Direction)
case ids[5]:
assert.Equal(t, "DISCONNECTING", peers[i].State)
assert.Equal(t, "OUTBOUND", peers[i].Direction)
case ids[6]:
assert.Equal(t, "DISCONNECTED", peers[i].State)
assert.Equal(t, "INBOUND", peers[i].Direction)
case ids[7]:
assert.Equal(t, "DISCONNECTED", peers[i].State)
assert.Equal(t, "OUTBOUND", peers[i].Direction)
default:
t.Fatalf("Failed to get connection state and direction for peer")
}
}
})
}
func TestListTrustedPeers_NoPeersReturnsEmptyArray(t *testing.T) {
peerFetcher := &mockp2p.MockPeersProvider{}
peerFetcher.ClearPeers()
s := Server{PeersFetcher: peerFetcher}
url := "http://anything.is.fine"
request := httptest.NewRequest("GET", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListTrustedPeer(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &PeersResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
peers := resp.Peers
assert.Equal(t, 0, len(peers))
}
func TestAddTrustedPeer(t *testing.T) {
peerFetcher := &mockp2p.MockPeersProvider{}
peerFetcher.ClearPeers()
s := Server{PeersFetcher: peerFetcher}
url := "http://anything.is.fine"
addr := &AddrRequest{
Addr: "/ip4/127.0.0.1/tcp/30303/p2p/16Uiu2HAm1n583t4huDMMqEUUBuQs6bLts21mxCfX3tiqu9JfHvRJ",
}
addrJson, err := json.Marshal(addr)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(addrJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AddTrustedPeer(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
}
func TestAddTrustedPeer_EmptyBody(t *testing.T) {
peerFetcher := &mockp2p.MockPeersProvider{}
peerFetcher.ClearPeers()
s := Server{PeersFetcher: peerFetcher}
url := "http://anything.is.fine"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AddTrustedPeer(writer, request)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.Equal(t, "Could not decode request body into peer address: unexpected end of JSON input", e.Message)
}
func TestAddTrustedPeer_BadAddress(t *testing.T) {
peerFetcher := &mockp2p.MockPeersProvider{}
peerFetcher.ClearPeers()
s := Server{PeersFetcher: peerFetcher}
url := "http://anything.is.fine"
addr := &AddrRequest{
Addr: "anything/but/not/an/address",
}
addrJson, err := json.Marshal(addr)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(addrJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AddTrustedPeer(writer, request)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.StringContains(t, "Could not derive peer info from multiaddress", e.Message)
}
func TestRemoveTrustedPeer(t *testing.T) {
peerFetcher := &mockp2p.MockPeersProvider{}
peerFetcher.ClearPeers()
s := Server{PeersFetcher: peerFetcher}
url := "http://anything.is.fine.but.last.is.important/16Uiu2HAm1n583t4huDMMqEUUBuQs6bLts21mxCfX3tiqu9JfHvRJ"
request := httptest.NewRequest("DELETE", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.RemoveTrustedPeer(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
}
func TestRemoveTrustedPeer_EmptyParameter(t *testing.T) {
peerFetcher := &mockp2p.MockPeersProvider{}
peerFetcher.ClearPeers()
s := Server{PeersFetcher: peerFetcher}
url := "http://anything.is.fine"
request := httptest.NewRequest("DELETE", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.RemoveTrustedPeer(writer, request)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.Equal(t, "Could not decode peer id: failed to parse peer ID: invalid cid: cid too short", e.Message)
}

View File

@@ -1,21 +0,0 @@
package node
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)
type Server struct {
SyncChecker sync.Checker
OptimisticModeFetcher blockchain.OptimisticModeFetcher
BeaconDB db.ReadOnlyDatabase
PeersFetcher p2p.PeersProvider
PeerManager p2p.PeerManager
MetadataProvider p2p.MetadataProvider
GenesisTimeFetcher blockchain.TimeFetcher
HeadFetcher blockchain.HeadFetcher
ExecutionChainInfoFetcher execution.ChainInfoFetcher
}

View File

@@ -1 +0,0 @@
package node

View File

@@ -1,17 +0,0 @@
package node
type AddrRequest struct {
Addr string `json:"addr"`
}
type PeersResponse struct {
Peers []*Peer `json:"Peers"`
}
type Peer struct {
PeerID string `json:"peer_id"`
Enr string `json:"enr"`
LastSeenP2PAddress string `json:"last_seen_p2p_address"`
State string `json:"state"`
Direction string `json:"direction"`
}

View File

@@ -38,7 +38,6 @@ go_library(
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",

View File

@@ -13,7 +13,6 @@ import (
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/cmd"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -660,14 +659,153 @@ func (bs *Server) GetValidatorPerformance(
ctx context.Context, req *ethpb.ValidatorPerformanceRequest,
) (*ethpb.ValidatorPerformanceResponse, error) {
if bs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
headState, err := bs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
currSlot := bs.GenesisTimeFetcher.CurrentSlot()
if currSlot > headState.Slot() {
headRoot, err := bs.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head root: %v", err)
}
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, currSlot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", currSlot, err)
}
}
var validatorSummary []*precompute.Validator
if headState.Version() == version.Phase0 {
vp, bp, err := precompute.New(ctx, headState)
if err != nil {
return nil, err
}
vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
if err != nil {
return nil, err
}
headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
if err != nil {
return nil, err
}
validatorSummary = vp
} else if headState.Version() >= version.Altair {
vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return nil, err
}
vp, bp, err = altair.ProcessEpochParticipation(ctx, headState, bp, vp)
if err != nil {
return nil, err
}
headState, vp, err = altair.ProcessInactivityScores(ctx, headState, vp)
if err != nil {
return nil, err
}
headState, err = altair.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp)
if err != nil {
return nil, err
}
validatorSummary = vp
} else {
return nil, status.Errorf(codes.Internal, "Head state version %d not supported", headState.Version())
}
responseCap := len(req.Indices) + len(req.PublicKeys)
validatorIndices := make([]primitives.ValidatorIndex, 0, responseCap)
missingValidators := make([][]byte, 0, responseCap)
filtered := map[primitives.ValidatorIndex]bool{} // Track filtered validators to prevent duplication in the response.
// Convert the list of validator public keys to validator indices and add to the indices set.
for _, pubKey := range req.PublicKeys {
// Skip empty public key.
if len(pubKey) == 0 {
continue
}
pubkeyBytes := bytesutil.ToBytes48(pubKey)
idx, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
if !ok {
// Validator index not found, track as missing.
missingValidators = append(missingValidators, pubKey)
continue
}
if !filtered[idx] {
validatorIndices = append(validatorIndices, idx)
filtered[idx] = true
}
}
// Add provided indices to the indices set.
for _, idx := range req.Indices {
if !filtered[idx] {
validatorIndices = append(validatorIndices, idx)
filtered[idx] = true
}
}
// Depending on the indices and public keys given, results might not be sorted.
sort.Slice(validatorIndices, func(i, j int) bool {
return validatorIndices[i] < validatorIndices[j]
})
currentEpoch := coreTime.CurrentEpoch(headState)
responseCap = len(validatorIndices)
pubKeys := make([][]byte, 0, responseCap)
beforeTransitionBalances := make([]uint64, 0, responseCap)
afterTransitionBalances := make([]uint64, 0, responseCap)
effectiveBalances := make([]uint64, 0, responseCap)
correctlyVotedSource := make([]bool, 0, responseCap)
correctlyVotedTarget := make([]bool, 0, responseCap)
correctlyVotedHead := make([]bool, 0, responseCap)
inactivityScores := make([]uint64, 0, responseCap)
// Append performance summaries.
// Also track missing validators using public keys.
for _, idx := range validatorIndices {
val, err := headState.ValidatorAtIndexReadOnly(idx)
if err != nil {
return nil, status.Errorf(codes.Internal, "could not get validator: %v", err)
}
pubKey := val.PublicKey()
if uint64(idx) >= uint64(len(validatorSummary)) {
// Not listed in validator summary yet; treat it as missing.
missingValidators = append(missingValidators, pubKey[:])
continue
}
if !helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
// Inactive validator; treat it as missing.
missingValidators = append(missingValidators, pubKey[:])
continue
}
summary := validatorSummary[idx]
pubKeys = append(pubKeys, pubKey[:])
effectiveBalances = append(effectiveBalances, summary.CurrentEpochEffectiveBalance)
beforeTransitionBalances = append(beforeTransitionBalances, summary.BeforeEpochTransitionBalance)
afterTransitionBalances = append(afterTransitionBalances, summary.AfterEpochTransitionBalance)
correctlyVotedTarget = append(correctlyVotedTarget, summary.IsPrevEpochTargetAttester)
correctlyVotedHead = append(correctlyVotedHead, summary.IsPrevEpochHeadAttester)
if headState.Version() == version.Phase0 {
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
} else {
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
inactivityScores = append(inactivityScores, summary.InactivityScore)
}
}
return &ethpb.ValidatorPerformanceResponse{
PublicKeys: pubKeys,
CorrectlyVotedSource: correctlyVotedSource,
CorrectlyVotedTarget: correctlyVotedTarget, // In Altair, when this is true the attestation was definitely included.
CorrectlyVotedHead: correctlyVotedHead,
CurrentEffectiveBalances: effectiveBalances,
BalancesBeforeEpochTransition: beforeTransitionBalances,
BalancesAfterEpochTransition: afterTransitionBalances,
MissingValidators: missingValidators,
InactivityScores: inactivityScores, // Only populated in Altair
}, nil
}
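// Sketch (not from the diff): a hypothetical caller mixing indices and public
// keys; the handler above de-duplicates across both fields and returns results
// sorted by validator index.
func examplePerformanceRequest(pubkey [48]byte) *ethpb.ValidatorPerformanceRequest {
	return &ethpb.ValidatorPerformanceRequest{
		Indices:    []primitives.ValidatorIndex{0, 5},
		PublicKeys: [][]byte{pubkey[:]},
	}
}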
// GetIndividualVotes retrieves individual voting status of validators.

View File

@@ -66,7 +66,11 @@ func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
return nil, status.Errorf(codes.InvalidArgument, "Validator is not an aggregator")
}
if err := vs.AttPool.AggregateUnaggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex); err != nil {
return nil, status.Errorf(codes.Internal, "Could not aggregate unaggregated attestations")
}
aggregatedAtts := vs.AttPool.AggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
// Select the best aggregated attestation (i.e. the one with the most aggregated bits).
if len(aggregatedAtts) == 0 {
aggregatedAtts = vs.AttPool.UnaggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)

View File

@@ -63,13 +63,16 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
curr := time.Now()
// process attestations and update head in forkchoice
vs.ForkchoiceFetcher.UpdateHead(ctx, vs.TimeFetcher.CurrentSlot())
log.Infof("proposer_mocker: update head in rpc took %s", time.Since(curr).String())
headRoot := vs.ForkchoiceFetcher.CachedHeadRoot()
parentRoot := vs.ForkchoiceFetcher.GetProposerHead()
if parentRoot != headRoot {
blockchain.LateBlockAttemptedReorgCount.Inc()
}
log.Infof("proposer_mocker: fetching head root in rpc took %s", time.Since(curr).String())
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
@@ -90,6 +93,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", req.Slot, err)
}
log.Infof("proposer_mocker: fetching head state rpc took %s", time.Since(curr).String())
// Set slot, graffiti, randao reveal, and parent root.
sBlk.SetSlot(req.Slot)
@@ -103,9 +107,10 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
return nil, fmt.Errorf("could not calculate proposer index: %v", err)
}
sBlk.SetProposerIndex(idx)
log.Infof("proposer_mocker: setting proposer index took %s", time.Since(curr).String())
if features.Get().BuildBlockParallel {
if err := vs.BuildBlockParallel(ctx, sBlk, head); err != nil {
if err := vs.BuildBlockParallel(ctx, sBlk, head, curr); err != nil {
return nil, errors.Wrap(err, "could not build block in parallel")
}
} else {
@@ -191,7 +196,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Phase0{Phase0: pb.(*ethpb.BeaconBlock)}}, nil
}
func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.SignedBeaconBlock, head state.BeaconState) error {
func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.SignedBeaconBlock, head state.BeaconState, curr time.Time) error {
// Build consensus fields in background
var wg sync.WaitGroup
wg.Add(1)
@@ -205,6 +210,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
log.WithError(err).Error("Could not get eth1data")
}
sBlk.SetEth1Data(eth1Data)
log.Infof("proposer_mocker: setting eth1data took %s", time.Since(curr).String())
// Set deposits and attestations.
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data) // TODO: split attestations and deposits
@@ -216,20 +222,26 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
sBlk.SetDeposits(deposits)
sBlk.SetAttestations(atts)
}
log.Infof("proposer_mocker: setting deposits and atts took %s", time.Since(curr).String())
// Set slashings.
validProposerSlashings, validAttSlashings := vs.getSlashings(ctx, head)
sBlk.SetProposerSlashings(validProposerSlashings)
sBlk.SetAttesterSlashings(validAttSlashings)
log.Infof("proposer_mocker: setting slashings took %s", time.Since(curr).String())
// Set exits.
sBlk.SetVoluntaryExits(vs.getExits(head, sBlk.Block().Slot()))
log.Infof("proposer_mocker: setting exits took %s", time.Since(curr).String())
// Set sync aggregate. New in Altair.
vs.setSyncAggregate(ctx, sBlk)
log.Infof("proposer_mocker: setting sync aggs took %s", time.Since(curr).String())
// Set bls to execution change. New in Capella.
vs.setBlsToExecData(sBlk, head)
log.Infof("proposer_mocker: setting bls data took %s", time.Since(curr).String())
}()
localPayload, err := vs.getLocalPayload(ctx, sBlk.Block(), head)
@@ -246,6 +258,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
if err := setExecutionData(ctx, sBlk, localPayload, builderPayload); err != nil {
return status.Errorf(codes.Internal, "Could not set execution data: %v", err)
}
log.Infof("proposer_mocker: setting execution data took %s", time.Since(curr).String())
wg.Wait() // Wait until block is built via consensus and execution fields.
@@ -392,10 +405,12 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
// computeStateRoot computes the state root after a block has been processed through a state transition and
// returns it to the validator client.
func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
curr := time.Now()
beaconState, err := vs.StateGen.StateByRoot(ctx, block.Block().ParentRoot())
if err != nil {
return nil, errors.Wrap(err, "could not retrieve beacon state")
}
log.Infof("proposer_mocker: fetching parent state took %s", time.Since(curr).String())
root, err := transition.CalculateStateRoot(
ctx,
beaconState,
@@ -404,6 +419,7 @@ func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.ReadOnl
if err != nil {
return nil, errors.Wrapf(err, "could not calculate state root at slot %d", beaconState.Slot())
}
log.Infof("proposer_mocker: calculating state root took %s", time.Since(curr).String())
log.WithField("beaconStateRoot", fmt.Sprintf("%#x", root)).Debugf("Computed state root")
return root[:], nil

View File

@@ -30,6 +30,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
@@ -184,3 +185,32 @@ func (vs *Server) WaitForChainStart(_ *emptypb.Empty, stream ethpb.BeaconNodeVal
}
return stream.Send(res)
}
func (vs *Server) RandomStuff() {
for vs.TimeFetcher.GenesisTime().IsZero() {
time.Sleep(5 * time.Second)
}
genTime := vs.TimeFetcher.GenesisTime()
ticker := slots.NewSlotTicker(genTime, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-vs.Ctx.Done():
ticker.Done()
return
case slot := <-ticker.C():
curr := time.Now()
_, err := vs.GetBeaconBlock(context.Background(), &ethpb.BlockRequest{
Slot: slot,
Graffiti: make([]byte, 32),
RandaoReveal: make([]byte, 96),
})
if err != nil {
log.Error(err)
continue
}
log.Infof("proposer_mocker: successfully produced block %d in %s", slot, time.Since(curr).String())
}
}
}
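// Note (not part of the diff): RandomStuff above is debug scaffolding that
// dry-runs block production once per slot. The same ticker pattern in
// isolation, as a minimal sketch (primitives is assumed to be imported):
func tickEverySlot(ctx context.Context, genesis time.Time, secondsPerSlot uint64, f func(primitives.Slot)) {
	ticker := slots.NewSlotTicker(genesis, secondsPerSlot)
	defer ticker.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case slot := <-ticker.C():
			f(slot) // invoked at the start of each slot
		}
	}
}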

View File

@@ -1,40 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"server.go",
"validator_performance.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/validator",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/sync:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["validator_performance_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -1,14 +0,0 @@
package validator
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)
// Server defines a server implementation for HTTP endpoints, providing
// access to data relevant to the Ethereum Beacon Chain.
type Server struct {
GenesisTimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
HeadFetcher blockchain.HeadFetcher
}

View File

@@ -1,78 +0,0 @@
package validator
import (
"encoding/json"
"net/http"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/network"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
type ValidatorPerformanceRequest struct {
PublicKeys [][]byte `json:"public_keys,omitempty"`
Indices []primitives.ValidatorIndex `json:"indices,omitempty"`
}
type ValidatorPerformanceResponse struct {
PublicKeys [][]byte `json:"public_keys,omitempty"`
CorrectlyVotedSource []bool `json:"correctly_voted_source,omitempty"`
CorrectlyVotedTarget []bool `json:"correctly_voted_target,omitempty"`
CorrectlyVotedHead []bool `json:"correctly_voted_head,omitempty"`
CurrentEffectiveBalances []uint64 `json:"current_effective_balances,omitempty"`
BalancesBeforeEpochTransition []uint64 `json:"balances_before_epoch_transition,omitempty"`
BalancesAfterEpochTransition []uint64 `json:"balances_after_epoch_transition,omitempty"`
MissingValidators [][]byte `json:"missing_validators,omitempty"`
InactivityScores []uint64 `json:"inactivity_scores,omitempty"`
}
// GetValidatorPerformance is an HTTP handler for GetValidatorPerformance.
func (vs *Server) GetValidatorPerformance(w http.ResponseWriter, r *http.Request) {
if vs.SyncChecker.Syncing() {
handleHTTPError(w, "Syncing", http.StatusServiceUnavailable)
return
}
ctx := r.Context()
currSlot := vs.GenesisTimeFetcher.CurrentSlot()
var req ValidatorPerformanceRequest
if r.Body != http.NoBody {
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
handleHTTPError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
}
computed, err := core.ComputeValidatorPerformance(
ctx,
&ethpb.ValidatorPerformanceRequest{
PublicKeys: req.PublicKeys,
Indices: req.Indices,
},
vs.HeadFetcher,
currSlot,
)
if err != nil {
handleHTTPError(w, "Could not compute validator performance: "+err.Err.Error(), core.ErrorReasonToHTTP(err.Reason))
return
}
response := &ValidatorPerformanceResponse{
PublicKeys: computed.PublicKeys,
CorrectlyVotedSource: computed.CorrectlyVotedSource,
CorrectlyVotedTarget: computed.CorrectlyVotedTarget, // In Altair, when this is true the attestation was definitely included.
CorrectlyVotedHead: computed.CorrectlyVotedHead,
CurrentEffectiveBalances: computed.CurrentEffectiveBalances,
BalancesBeforeEpochTransition: computed.BalancesBeforeEpochTransition,
BalancesAfterEpochTransition: computed.BalancesAfterEpochTransition,
MissingValidators: computed.MissingValidators,
InactivityScores: computed.InactivityScores, // Only populated in Altair
}
network.WriteJson(w, response)
}
func handleHTTPError(w http.ResponseWriter, message string, code int) {
errJson := &network.DefaultErrorJson{
Message: message,
Code: code,
}
network.WriteError(w, errJson)
}
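// Illustrative only: the HTTP variant accepts the same shape as the gRPC
// request, as a JSON body. A hypothetical client-side construction:
func examplePerformanceBody(indices []primitives.ValidatorIndex) ([]byte, error) {
	return json.Marshal(&ValidatorPerformanceRequest{Indices: indices})
}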

View File

@@ -1,453 +0,0 @@
package validator
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestServer_GetValidatorPerformance(t *testing.T) {
t.Run("Syncing", func(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
}
var buf bytes.Buffer
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, rawResp.StatusCode)
})
t.Run("OK", func(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
publicKeys := [][48]byte{
bytesutil.ToBytes48([]byte{1}),
bytesutil.ToBytes48([]byte{2}),
bytesutil.ToBytes48([]byte{3}),
}
headState, err := util.NewBeaconState()
require.NoError(t, err)
headState = setHeadState(t, headState, publicKeys)
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
BalancesBeforeEpochTransition: []uint64{101, 102},
BalancesAfterEpochTransition: []uint64{0, 0},
MissingValidators: [][]byte{publicKeys[0][:]},
}
request := &ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
}
var buf bytes.Buffer
err = json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
require.NoError(t, err)
defer func() {
if err := rawResp.Body.Close(); err != nil {
t.Fatal(err)
}
}()
body, err := io.ReadAll(rawResp.Body)
require.NoError(t, err)
response := &ValidatorPerformanceResponse{}
require.NoError(t, json.Unmarshal(body, response))
require.DeepEqual(t, want, response)
})
t.Run("Indices", func(t *testing.T) {
ctx := context.Background()
publicKeys := [][48]byte{
bytesutil.ToBytes48([]byte{1}),
bytesutil.ToBytes48([]byte{2}),
bytesutil.ToBytes48([]byte{3}),
}
headState, err := util.NewBeaconState()
require.NoError(t, err)
headState = setHeadState(t, headState, publicKeys)
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
require.NoError(t, err)
vp, bp, err = precompute.ProcessAttestations(ctx, c, vp, bp)
require.NoError(t, err)
_, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
require.NoError(t, err)
extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
BalancesBeforeEpochTransition: []uint64{extraBal, extraBal + params.BeaconConfig().GweiPerEth},
BalancesAfterEpochTransition: []uint64{vp[1].AfterEpochTransitionBalance, vp[2].AfterEpochTransitionBalance},
MissingValidators: [][]byte{publicKeys[0][:]},
}
request := &ValidatorPerformanceRequest{
Indices: []primitives.ValidatorIndex{2, 1, 0},
}
var buf bytes.Buffer
err = json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
require.NoError(t, err)
defer func() {
if err := rawResp.Body.Close(); err != nil {
t.Fatal(err)
}
}()
body, err := io.ReadAll(rawResp.Body)
require.NoError(t, err)
response := &ValidatorPerformanceResponse{}
require.NoError(t, json.Unmarshal(body, response))
require.DeepEqual(t, want, response)
})
t.Run("Indices Pubkeys", func(t *testing.T) {
ctx := context.Background()
publicKeys := [][48]byte{
bytesutil.ToBytes48([]byte{1}),
bytesutil.ToBytes48([]byte{2}),
bytesutil.ToBytes48([]byte{3}),
}
headState, err := util.NewBeaconState()
require.NoError(t, err)
headState = setHeadState(t, headState, publicKeys)
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
require.NoError(t, err)
vp, bp, err = precompute.ProcessAttestations(ctx, c, vp, bp)
require.NoError(t, err)
_, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
require.NoError(t, err)
extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
BalancesBeforeEpochTransition: []uint64{extraBal, extraBal + params.BeaconConfig().GweiPerEth},
BalancesAfterEpochTransition: []uint64{vp[1].AfterEpochTransitionBalance, vp[2].AfterEpochTransitionBalance},
MissingValidators: [][]byte{publicKeys[0][:]},
}
request := &ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:]},
Indices:    []primitives.ValidatorIndex{1, 2},
}
var buf bytes.Buffer
err = json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
require.NoError(t, err)
defer func() {
if err := rawResp.Body.Close(); err != nil {
t.Fatal(err)
}
}()
body, err := io.ReadAll(rawResp.Body)
require.NoError(t, err)
response := &ValidatorPerformanceResponse{}
require.NoError(t, json.Unmarshal(body, response))
require.DeepEqual(t, want, response)
})
t.Run("Altair OK", func(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
publicKeys := [][48]byte{
bytesutil.ToBytes48([]byte{1}),
bytesutil.ToBytes48([]byte{2}),
bytesutil.ToBytes48([]byte{3}),
}
epoch := primitives.Epoch(1)
headState, _ := util.DeterministicGenesisStateAltair(t, 32)
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
headState = setHeadState(t, headState, publicKeys)
require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
BalancesBeforeEpochTransition: []uint64{101, 102},
BalancesAfterEpochTransition: []uint64{0, 0},
MissingValidators: [][]byte{publicKeys[0][:]},
InactivityScores: []uint64{0, 0},
}
request := &ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
require.NoError(t, err)
defer func() {
if err := rawResp.Body.Close(); err != nil {
t.Fatal(err)
}
}()
body, err := io.ReadAll(rawResp.Body)
require.NoError(t, err)
response := &ValidatorPerformanceResponse{}
require.NoError(t, json.Unmarshal(body, response))
require.DeepEqual(t, want, response)
})
t.Run("Bellatrix OK", func(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
publicKeys := [][48]byte{
bytesutil.ToBytes48([]byte{1}),
bytesutil.ToBytes48([]byte{2}),
bytesutil.ToBytes48([]byte{3}),
}
epoch := primitives.Epoch(1)
headState, _ := util.DeterministicGenesisStateBellatrix(t, 32)
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
headState = setHeadState(t, headState, publicKeys)
require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
BalancesBeforeEpochTransition: []uint64{101, 102},
BalancesAfterEpochTransition: []uint64{0, 0},
MissingValidators: [][]byte{publicKeys[0][:]},
InactivityScores: []uint64{0, 0},
}
request := &ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
require.NoError(t, err)
defer func() {
if err := rawResp.Body.Close(); err != nil {
t.Fatal(err)
}
}()
body, err := io.ReadAll(rawResp.Body)
require.NoError(t, err)
response := &ValidatorPerformanceResponse{}
require.NoError(t, json.Unmarshal(body, response))
require.DeepEqual(t, want, response)
})
t.Run("Capella OK", func(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
publicKeys := [][48]byte{
bytesutil.ToBytes48([]byte{1}),
bytesutil.ToBytes48([]byte{2}),
bytesutil.ToBytes48([]byte{3}),
}
epoch := primitives.Epoch(1)
headState, _ := util.DeterministicGenesisStateCapella(t, 32)
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
headState = setHeadState(t, headState, publicKeys)
require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
BalancesBeforeEpochTransition: []uint64{101, 102},
BalancesAfterEpochTransition: []uint64{0, 0},
MissingValidators: [][]byte{publicKeys[0][:]},
InactivityScores: []uint64{0, 0},
}
request := &ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
require.NoError(t, err)
defer func() {
if err := rawResp.Body.Close(); err != nil {
t.Fatal(err)
}
}()
body, err := io.ReadAll(rawResp.Body)
require.NoError(t, err)
response := &ValidatorPerformanceResponse{}
require.NoError(t, json.Unmarshal(body, response))
require.DeepEqual(t, want, response)
})
}
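// setHeadState moves the given state to the first slot of epoch 2, seeds
// balances and three validators (the first not yet activated, so the handler
// reports it as missing), and, pre-Altair, appends previous-epoch
// attestations for the precompute routines to process.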
func setHeadState(t *testing.T, headState state.BeaconState, publicKeys [][48]byte) state.BeaconState {
epoch := primitives.Epoch(1)
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
if headState.Version() < version.Altair {
atts := make([]*ethpb.PendingAttestation, 3)
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
AggregationBits: bitfield.Bitlist{},
InclusionDelay: 1,
}
require.NoError(t, headState.AppendPreviousEpochAttestations(atts[i]))
}
}
defaultBal := params.BeaconConfig().MaxEffectiveBalance
extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth
balances := []uint64{defaultBal, extraBal, extraBal + params.BeaconConfig().GweiPerEth}
require.NoError(t, headState.SetBalances(balances))
validators := []*ethpb.Validator{
{
PublicKey: publicKeys[0][:],
ActivationEpoch: 5,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: publicKeys[1][:],
EffectiveBalance: defaultBal,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: publicKeys[2][:],
EffectiveBalance: defaultBal,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
require.NoError(t, headState.SetValidators(validators))
return headState
}
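For reference, every subtest above drives the handler the same way: encode a ValidatorPerformanceRequest as JSON and POST it to the endpoint this diff later removes from the router. Below is a minimal standalone client sketch of that call; the listen address and the JSON field tag are assumptions, not taken from this diff.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// performanceRequest mirrors a subset of ValidatorPerformanceRequest; the
// JSON tag is assumed, not confirmed by this diff.
type performanceRequest struct {
	Indices []uint64 `json:"indices,omitempty"`
}

func main() {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(performanceRequest{Indices: []uint64{0, 1, 2}}); err != nil {
		panic(err)
	}
	// localhost:3500 is a hypothetical HTTP gateway address; adjust for your node.
	resp, err := http.Post("http://localhost:3500/prysm/validators/performance", "application/json", &buf)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := resp.Body.Close(); err != nil {
			panic(err)
		}
	}()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}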

View File

@@ -30,19 +30,16 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
rpcBuilder "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/debug"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/node"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/rewards"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/validator"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
nodeprysm "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/node"
beaconv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/beacon"
debugv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/debug"
nodev1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/node"
validatorv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
httpserver "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/validator"
slasherservice "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
@@ -214,19 +211,8 @@ func (s *Service) Start() {
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ReplayerBuilder: ch,
TimeFetcher: s.cfg.GenesisTimeFetcher,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
}
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", rewardsServer.BlockRewards)
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/attestations/{epoch}", rewardsServer.AttestationRewards)
builderServer := &rpcBuilder.Server{
FinalizationFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
Stater: stater,
}
s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals)
validatorServer := &validatorv1alpha1.Server{
Ctx: s.ctx,
@@ -264,6 +250,7 @@ func (s *Service) Start() {
BLSChangesPool: s.cfg.BLSChangesPool,
ClockWaiter: s.cfg.ClockWaiter,
}
go validatorServer.RandomStuff()
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
@@ -308,22 +295,6 @@ func (s *Service) Start() {
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
}
nodeServerPrysm := &nodeprysm.Server{
BeaconDB: s.cfg.BeaconDB,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
}
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.ListTrustedPeer).Methods("GET")
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.AddTrustedPeer).Methods("POST")
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers/{peer_id}", nodeServerPrysm.RemoveTrustedPeer).Methods("Delete")
beaconChainServer := &beaconv1alpha1.Server{
Ctx: s.ctx,
BeaconDB: s.cfg.BeaconDB,
@@ -372,12 +343,6 @@ func (s *Service) Start() {
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
}
httpServer := &httpserver.Server{
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
SyncChecker: s.cfg.SyncService,
}
s.cfg.Router.HandleFunc("/prysm/validators/performance", httpServer.GetValidatorPerformance)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconChainServerV1.PublishBlockV2)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconChainServerV1.PublishBlindedBlockV2)
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
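The hunks above unhook several HTTP routes (rewards, builder withdrawals, trusted peers, validator performance) from the node's gorilla/mux router. For orientation, here is a minimal sketch of the registration pattern they use, with a stand-in handler and a hypothetical listen address:
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

// getValidatorPerformance stands in for httpserver.Server.GetValidatorPerformance.
func getValidatorPerformance(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusOK)
}

func main() {
	router := mux.NewRouter()
	// Same shape as the HandleFunc calls in the service above; Methods
	// restricts the route to a single HTTP verb.
	router.HandleFunc("/prysm/validators/performance", getValidatorPerformance).Methods("POST")
	// :8080 is a hypothetical address; the real service reuses its configured router.
	_ = http.ListenAndServe(":8080", router)
}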

View File

@@ -207,7 +207,7 @@ func handle32ByteArrays(val [][32]byte, indices []uint64, convertAll bool) ([][3
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
return stateutil.OptimizedValidatorRoots(val)
length = len(val)
}
roots := make([][32]byte, 0, length)
rootCreator := func(input *ethpb.Validator) error {
@@ -218,6 +218,15 @@ func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll b
roots = append(roots, newRoot)
return nil
}
if convertAll {
for i := range val {
err := rootCreator(val[i])
if err != nil {
return nil, err
}
}
return roots, nil
}
if len(val) > 0 {
for _, idx := range indices {
if idx > uint64(len(val))-1 {
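With this change, handleValidatorSlice iterates the full validator set itself when convertAll is set, instead of delegating to stateutil.OptimizedValidatorRoots. A toy, self-contained version of that selection contract follows, with a fake per-validator root standing in for the real hashing:
package main

import "fmt"

// toyRoots mirrors the selection contract above: convertAll derives a root
// for every element, otherwise only for the requested indices, and an
// out-of-range index is an error.
func toyRoots(vals []string, indices []uint64, convertAll bool) ([][32]byte, error) {
	root := func(s string) [32]byte { // stand-in for the per-validator hash
		var r [32]byte
		copy(r[:], s)
		return r
	}
	length := len(indices)
	if convertAll {
		length = len(vals)
	}
	roots := make([][32]byte, 0, length)
	if convertAll {
		for _, v := range vals {
			roots = append(roots, root(v))
		}
		return roots, nil
	}
	for _, idx := range indices {
		if idx >= uint64(len(vals)) {
			return nil, fmt.Errorf("index %d out of range", idx)
		}
		roots = append(roots, root(vals[idx]))
	}
	return roots, nil
}

func main() {
	all, _ := toyRoots([]string{"a", "b", "c"}, nil, true)
	some, _ := toyRoots([]string{"a", "b", "c"}, []uint64{2, 0}, false)
	fmt.Println(len(all), len(some)) // 3 2
}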

View File

@@ -117,7 +117,6 @@ type ReadOnlyValidators interface {
ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error)
ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (ReadOnlyValidator, error)
ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
PublicKeys() [][fieldparams.BLSPubkeyLength]byte
PubkeyAtIndex(idx primitives.ValidatorIndex) [fieldparams.BLSPubkeyLength]byte
NumValidators() int
ReadFromEveryValidator(f func(idx int, val ReadOnlyValidator) error) error

View File

@@ -145,22 +145,6 @@ func (b *BeaconState) PubkeyAtIndex(idx primitives.ValidatorIndex) [fieldparams.
return bytesutil.ToBytes48(b.validators[idx].PublicKey)
}
// PublicKeys builds a list of all validator public keys, with each key's index aligned to its validator index.
func (b *BeaconState) PublicKeys() [][fieldparams.BLSPubkeyLength]byte {
b.lock.RLock()
defer b.lock.RUnlock()
res := make([][fieldparams.BLSPubkeyLength]byte, len(b.validators))
for i := 0; i < len(b.validators); i++ {
val := b.validators[i]
if val == nil {
continue
}
res[i] = bytesutil.ToBytes48(val.PublicKey)
}
return res
}
// NumValidators returns the size of the validator registry.
func (b *BeaconState) NumValidators() int {
b.lock.RLock()

View File

@@ -30,7 +30,7 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//beacon-chain/sync/backfill:go_default_library",
"//cache/lru:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -155,20 +155,6 @@ func (e *epochBoundaryState) put(blockRoot [32]byte, s state.BeaconState) error
func (e *epochBoundaryState) delete(blockRoot [32]byte) error {
e.lock.Lock()
defer e.lock.Unlock()
rInfo, ok, err := e.getByBlockRootLockFree(blockRoot)
if err != nil {
return err
}
if !ok {
return nil
}
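	// Derive the slot key so both indexes can be cleared: slot->root first, then root->state.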
slotInfo := &slotRootInfo{
slot: rInfo.state.Slot(),
blockRoot: blockRoot,
}
if err = e.slotRootCache.Delete(slotInfo); err != nil {
return err
}
return e.rootStateCache.Delete(&rootStateInfo{
root: blockRoot,
})

View File

@@ -331,8 +331,9 @@ func (s *State) CombinedCache() *CombinedCache {
}
func (s *State) slotAvailable(slot primitives.Slot) bool {
if s.avb == nil {
return false
// Default to assuming the node was initialized from genesis; backfill status only needs to be provided for checkpoint sync.
if s.backfillStatus == nil {
return true
}
return s.avb.AvailableBlock(slot)
return s.backfillStatus.SlotCovered(slot)
}

View File

@@ -36,12 +36,12 @@ func (_ *State) replayBlocks(
var err error
start := time.Now()
rLog := log.WithFields(logrus.Fields{
log = log.WithFields(logrus.Fields{
"startSlot": state.Slot(),
"endSlot": targetSlot,
"diff": targetSlot - state.Slot(),
})
rLog.Debug("Replaying state")
log.Debug("Replaying state")
// The input block list is sorted in decreasing slot order.
if len(signed) > 0 {
for i := len(signed) - 1; i >= 0; i-- {
@@ -71,7 +71,7 @@ func (_ *State) replayBlocks(
}
duration := time.Since(start)
rLog.WithFields(logrus.Fields{
log.WithFields(logrus.Fields{
"duration": duration,
}).Debug("Replayed state")

View File

@@ -12,7 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
@@ -50,7 +50,7 @@ type State struct {
finalizedInfo *finalizedInfo
epochBoundaryStateCache *epochBoundaryState
saveHotStateDB *saveHotStateDbConfig
avb coverage.AvailableBlocker
backfillStatus *backfill.Status
migrationLock *sync.Mutex
fc forkchoice.ForkChoicer
}
@@ -77,9 +77,9 @@ type finalizedInfo struct {
// StateGenOption is a functional option for controlling the initialization of a *State value
type StateGenOption func(*State)
func WithAvailableBlocker(avb coverage.AvailableBlocker) StateGenOption {
func WithBackfillStatus(bfs *backfill.Status) StateGenOption {
return func(sg *State) {
sg.avb = avb
sg.backfillStatus = bfs
}
}
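WithBackfillStatus uses the functional-option pattern declared just above and pairs with the slotAvailable change earlier in this diff: a nil status means a genesis-synced node where every slot is available. Here is a self-contained illustration with stand-in types; none of these names are the real stategen or backfill identifiers.
package main

import "fmt"

// status and state stand in for backfill.Status and stategen.State.
type status struct{ low, high int }

func (s *status) SlotCovered(slot int) bool { return slot >= s.low && slot <= s.high }

type state struct{ backfillStatus *status }

type option func(*state)

func withBackfillStatus(bfs *status) option {
	return func(sg *state) { sg.backfillStatus = bfs }
}

func newState(opts ...option) *state {
	s := &state{}
	for _, o := range opts {
		o(s)
	}
	return s
}

func (s *state) slotAvailable(slot int) bool {
	// Mirrors the diff: a nil status implies genesis sync, so every slot is available.
	if s.backfillStatus == nil {
		return true
	}
	return s.backfillStatus.SlotCovered(slot)
}

func main() {
	genesis := newState()
	checkpoint := newState(withBackfillStatus(&status{low: 100, high: 200}))
	fmt.Println(genesis.slotAvailable(5), checkpoint.slotAvailable(5)) // true false
}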

View File

@@ -2,10 +2,8 @@ package stategen
import (
"context"
"fmt"
"math"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -81,41 +79,6 @@ func (s *State) saveStateByRoot(ctx context.Context, blockRoot [32]byte, st stat
if err := s.epochBoundaryStateCache.put(blockRoot, st); err != nil {
return err
}
} else {
// Always check that the correct epoch boundary states have been saved
// for the current epoch.
epochStart, err := slots.EpochStart(slots.ToEpoch(st.Slot()))
if err != nil {
return err
}
bRoot, err := helpers.BlockRootAtSlot(st, epochStart)
if err != nil {
return err
}
_, ok, err := s.epochBoundaryStateCache.getByBlockRoot([32]byte(bRoot))
if err != nil {
return err
}
// We only recover the boundary state under one condition:
//
// 1) The epoch boundary was skipped due to a missed slot; we recover by
// saving the state at that slot here.
if !ok {
// Only recover the state if it is in our hot state cache, otherwise we
// simply skip this step.
if s.hotStateCache.has([32]byte(bRoot)) {
log.WithFields(logrus.Fields{
"slot": epochStart,
"root": fmt.Sprintf("%#x", bRoot),
}).Debug("Recovering state for epoch boundary cache")
hState := s.hotStateCache.get([32]byte(bRoot))
if err := s.epochBoundaryStateCache.put([32]byte(bRoot), hState); err != nil {
return err
}
}
}
}
// On an intermediate slot, save state summary.

View File

@@ -7,7 +7,6 @@ import (
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
@@ -138,34 +137,6 @@ func TestSaveState_NoSaveNotEpochBoundary(t *testing.T) {
require.Equal(t, false, beaconDB.HasState(ctx, r))
}
func TestSaveState_RecoverForEpochBoundary(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := New(beaconDB, doublylinkedtree.New())
beaconState, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
r := [32]byte{'A'}
boundaryRoot := [32]byte{'B'}
require.NoError(t, beaconState.UpdateBlockRootAtIndex(0, boundaryRoot))
b := util.NewBeaconBlock()
util.SaveBlock(t, ctx, beaconDB, b)
gRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
// Save boundary state to the hot state cache.
boundaryState, _ := util.DeterministicGenesisState(t, 32)
service.hotStateCache.put(boundaryRoot, boundaryState)
require.NoError(t, service.SaveState(ctx, r, beaconState))
rInfo, ok, err := service.epochBoundaryStateCache.getByBlockRoot(boundaryRoot)
assert.NoError(t, err)
assert.Equal(t, true, ok, "state does not exist in cache")
assert.Equal(t, rInfo.root, boundaryRoot, "incorrect root of root state info")
assert.Equal(t, rInfo.state.Slot(), primitives.Slot(0), "incorrect slot of state")
}
func TestSaveState_CanSaveHotStateToDB(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()

View File

@@ -30,7 +30,7 @@ func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
}
func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
roots, err := OptimizedValidatorRoots(validators)
roots, err := optimizedValidatorRoots(validators)
if err != nil {
return [32]byte{}, err
}
@@ -51,9 +51,7 @@ func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
return res, nil
}
// OptimizedValidatorRoots uses an optimized routine with gohashtree in order to
// derive a list of validator roots from a list of validator objects.
func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
func optimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
// Exit early if no validators are provided.
if len(validators) == 0 {
return [][32]byte{}, nil

View File

@@ -2,68 +2,30 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"batch.go",
"batcher.go",
"pool.go",
"service.go",
"status.go",
"verify.go",
"worker.go",
],
srcs = ["status.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime:go_default_library",
"//time/slots:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"batcher_test.go",
"pool_test.go",
"service_test.go",
"status_test.go",
],
srcs = ["status_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/dbval:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -1,124 +0,0 @@
package backfill
import (
"fmt"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
var ErrChainBroken = errors.New("batch is not the ancestor of backfilled batch")
type batchState int
func (s batchState) String() string {
switch s {
case batchNil:
return "nil"
case batchInit:
return "init"
case batchSequenced:
return "sequenced"
case batchErrRetryable:
return "error_retryable"
case batchImportable:
return "importable"
case batchImportComplete:
return "import_complete"
case batchEndSequence:
return "end_sequence"
default:
return "unknown"
}
}
const (
batchNil batchState = iota
batchInit
batchSequenced
batchErrRetryable
batchImportable
batchImportComplete
batchEndSequence
)
type batchId string
type batch struct {
scheduled time.Time
seq int // sequence identifier, i.e. how many times the sequence() method has served this batch
retries int
begin primitives.Slot
end primitives.Slot // half-open interval, [begin, end), i.e. >= begin, < end.
results VerifiedROBlocks
err error
state batchState
pid peer.ID
}
func (b batch) logFields() log.Fields {
return map[string]interface{}{
"batch_id": b.id(),
"state": b.state.String(),
"scheduled": b.scheduled.String(),
"seq": b.seq,
"retries": b.retries,
"begin": b.begin,
"end": b.end,
"pid": b.pid,
}
}
func (b *batch) inc() {
b.seq += 1
}
func (b batch) replaces(r batch) bool {
if b.begin != r.begin {
return false
}
if b.end != r.end {
return false
}
return b.seq >= r.seq
}
func (b batch) id() batchId {
return batchId(fmt.Sprintf("%d:%d", b.begin, b.end))
}
func (b batch) size() primitives.Slot {
return b.end - b.begin
}
func (b batch) ensureParent(expected [32]byte) error {
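	// Indexes the last fetched block, so callers must guarantee b.results is non-empty.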
tail := b.results[len(b.results)-1]
if tail.Root() != expected {
return errors.Wrapf(ErrChainBroken, "last parent_root=%#x, tail root=%#x", expected, tail.Root())
}
return nil
}
func (b batch) lowest() blocks.ROBlock {
return b.results[0]
}
func (b batch) request() *eth.BeaconBlocksByRangeRequest {
return &eth.BeaconBlocksByRangeRequest{
StartSlot: b.begin,
Count: uint64(b.end - b.begin),
Step: 1,
}
}
func (b batch) withRetryableError(err error) batch {
b.retries += 1
b.err = err
b.state = batchErrRetryable
return b
}
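batch.go is among the backfill files this diff deletes; its value-receiver methods make every state transition copy-on-write. A toy trace of one retry cycle, using stripped-down stand-ins for the types above:
package main

import (
	"errors"
	"fmt"
)

// Minimal stand-ins for the batch state machine, enough to trace one retry;
// the real type also tracks peers, results, and timestamps.
type batchState int

const (
	batchInit batchState = iota + 1
	batchSequenced
	batchErrRetryable
)

func (s batchState) String() string {
	return [...]string{"", "init", "sequenced", "error_retryable"}[s]
}

type batch struct {
	begin, end uint64
	retries    int
	err        error
	state      batchState
}

func (b batch) id() string { return fmt.Sprintf("%d:%d", b.begin, b.end) }

// withRetryableError returns a copy with the error recorded, matching the
// copy-on-write style of the deleted file.
func (b batch) withRetryableError(err error) batch {
	b.retries++
	b.err = err
	b.state = batchErrRetryable
	return b
}

func main() {
	b := batch{begin: 100, end: 164, state: batchInit}
	b.state = batchSequenced
	b = b.withRetryableError(errors.New("peer timeout"))
	fmt.Println(b.id(), b.state, b.retries) // 100:164 error_retryable 1
}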

Some files were not shown because too many files have changed in this diff.