Compare commits

...

25 Commits

Author SHA1 Message Date
nisdas
7dbc6ef309 add loggin 2023-07-06 11:43:54 +08:00
nisdas
29df5a0c9a make it even worse 2023-07-05 17:48:57 +08:00
nisdas
c3f5844679 reduce cores 2023-07-05 17:41:08 +08:00
Nishant Das
2a4441762e Handle Epoch Boundary Misses (#12579)
* add changes

* fix tests

* fix edge case

* fix logging
2023-07-05 09:23:51 +00:00
Nishant Das
401fccc723 Log Finalized Deposit Insertion (#12593)
* add log

* update key

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-07-05 08:07:38 +00:00
Radosław Kapka
c80f88fc07 Rename payloadHash to lastValidHash in setOptimisticToInvalid (#12592) 2023-07-04 17:03:45 +00:00
Kevin Wood
faa0a2c4cf Correct log level for 'Could not send a chunked response' (#12562)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-07-02 22:58:12 +00:00
Nishant Das
c45cb7e188 Optimize Validator Roots Computation (#12585)
* add changes

* change it
2023-07-01 02:23:25 +00:00
0xalex88
0b10263dd5 Increase validator client startup proposer settings deadline (#12533)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-06-30 21:37:39 +00:00
Anukul Sangwan
3bc808352f run ineffassign for all code (#12578)
* run `ineffassign` for all code

* fix reported ineffassign errors

* remove redundant changes

* fix remaining ineffassign errors

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-06-29 15:38:26 +00:00
james-prysm
d0c740f477 Registration Cache used by default and other UX changes for Proposer settings (#12456)
* WIP

* WIP

* adding in migration function

* updating mock validator and gaz

* adding descriptive logs

* fixing mocking

* fixing tests

* fixing mock

* adding changes to handle enable builder settings

* fixing tests and edge case

* reduce cognitive complexity of function

* further reducing cognitive complexity on function

* WIP

* fixing unit test on migration

* adding more tests

* gaz and fix unit test

* fixing deepsource issues

* fixing more deepsource issues missed previously

* removing unused receiver name

* WIP fix to migration logic

* fixing logging info

* reverting migration logic, converting logic to address issues discussed on slack, adding unit tests

* adding test for builder setting only not saved to db

* addressing comment

* fixing flag

* removing accidentally missed deprecated flags

* rolling back mock on pr

* fixing fmt linting

* updating comments based on feedback

* Update config/features/flags.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* fixing based on feedback on PR

* Update config/validator/service/proposer_settings.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update validator/client/runner.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update validator/db/kv/proposer_settings.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* adding additional logs to clear up some steps based on feedback

* fixing log

* deepsource

* adding comments based on review feedback

---------

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-06-29 02:49:21 +00:00
Preston Van Loon
cbe67f1970 Update protobuf and protobuf deps (#12569)
* Update protobuf and protobuf deps

* gazelle

* enforce c++14

* bump to c++17 since practically all modern compilers support it

* update protobuf again to resolve mac issues, bump c++20

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-28 14:50:43 +00:00
Potuz
5bb482e5d6 Remove forkchoice call from notify new payload (#12560)
* Remove forkchoice call from notify new payload

* add unit test
2023-06-28 13:38:24 +00:00
terencechain
83494c5b23 fix: use diff context to update proposer cache background (#12571) 2023-06-27 20:31:54 +00:00
terencechain
a10ffa9c0e Cache next epoch proposers at epoch boundary (#12484)
* Cache next epoch proposers at epoch boundary

* Fix new lines

* Use UpdateProposerIndicesInCache

* dont set state slot

* Update beacon_committee.go

* dont set state slot

* genesis epoch check

* Rm check

* fix: rm logging ctx

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* feat: move update to background

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-06-27 14:41:24 +00:00
Radosław Kapka
e545b57f26 Deflake cloners_test.go (#12566) 2023-06-26 15:43:00 +00:00
Preston Van Loon
c026b9e897 Set blst_modern=true to be the bazel default build (#12564)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-26 15:06:33 +00:00
Preston Van Loon
a19044051f Add hermetic_cc_toolchain for a hermetic cc toolchain (#12135)
* Add bazel-zig-cc for a hermetic cc toolchain

* gazelle

* Remove llvm

* remove wl

* Add new URLs for renamed repo

* gazelle

* Update to v2.0.0-rc1

* bump to rc2

* Some PR feedback

* use v2.0.0 from rc2

* Disable hermetic builds for mac and windows.

* bump bazel version, add darwin hack

* fix

* Add the no-op empty cc toolchain code

* typo and additional copy

* update protobuf and fix vaticle warning

* Revert "update protobuf and fix vaticle warning"

This reverts commit 7bb4b6b564.

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-26 14:31:40 +00:00
Potuz
1ebef16196 use the incoming payload status instead of calling forkchoice (#12559)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-22 18:09:02 +00:00
terencechain
8af634a6a0 feat: aggregate atts using fixed pool of go routines (#12553)
* feat: aggregate atts using fixed pool of go routines

* fix: deepsrc complains

* style: comment

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* feat: aggregate atts using fixed pool of go routines

* fix: deepsrc complains

* style: comment

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-06-22 17:48:42 +00:00
Potuz
884ba4959a Remove unneded helper (#12558)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-22 16:54:52 +00:00
terencechain
75e94120b4 fix(aggregator): remove single bit aggregation (#12555) 2023-06-22 09:34:25 -07:00
Sammy Rosso
20f4d21b83 Keymanager API: Add validator voluntary exit endpoint (#12299)
* Initial setup

* Fix + Cleanup

* Add query

* Fix

* Add epoch

* James' review part 1

* James' review part 2

* James' review part 3

* Radek' review

* Gazelle

* Fix cycle

* Start unit test

* fixing part of the test

* Mostly fix test

* Fix tests

* Cleanup

* Handle error

* Remove times

* Fix all tests

* Fix accidental deletion

* Unmarshal epoch

* Add custom_type

* Small fix

* Fix epoch

* Lint fix

* Add test + fix empty query panic

* Add comment

* Fix regex

* Add correct error message

* Change current epoch to use slot

* Return error if incorrect epoch passed

* Remove redundant type conversion

* Fix tests

* gaz

* Remove nodeClient + pass slot

* Remove slot from parameters

* Fix tests

* Fix test attempt 2

* Fix test attempt 2

* Remove nodeClient from ProposeExit

* Fix

* Fix tests

---------

Co-authored-by: james-prysm <james@prysmaticlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-06-21 14:06:16 -05:00
Bryce T
c018981951 Add expected withdrawals API (#12519)
* add structs for expected-withdrawals-api

* add server handler

* add tests

* add bazel file

* register api in service

* remove get prefix for endpoint

* fix review comments

* Update beacon-chain/rpc/eth/builder/handlers.go

* use goimports sorting type

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-06-21 14:36:47 +00:00
Radosław Kapka
b92226bedb getAttestationRewards API endpoint (#12480)
* handler

* very much work in progress

* remove Polish

* thinking

* working but differs from LH

* remove old stuff

* review from Potuz

* validator performance beacon server

* Revert "validator performance beacon server"

This reverts commit 42464cc6d3.

* reuse precompute calculations

* todos

* production quality

* add json tags to AttestationRewards

* Potuz's review

* extract vars

---------

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-06-21 13:16:53 +00:00
98 changed files with 3358 additions and 884 deletions

View File

@@ -3,6 +3,7 @@ import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# E2E run with debug gotag
@@ -14,7 +15,7 @@ coverage --define=coverage_enabled=1
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false
build --define blst_disabled=false --define blst_modern=true
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true
@@ -27,30 +28,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
build:release --compilation_mode=opt
build:release --stamp
# LLVM compiler for building C/C++ dependencies.
build:llvm --define compiler=llvm
build:llvm --copt -fno-sanitize=vptr,function
build:llvm --linkopt -fno-sanitize=vptr,function
# --incompatible_enable_cc_toolchain_resolution not needed after this issue is closed https://github.com/bazelbuild/bazel/issues/7260
build:llvm --incompatible_enable_cc_toolchain_resolution
build:asan --copt -fsanitize=address,undefined
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address,undefined
build:asan --copt -fno-sanitize=vptr,function
build:asan --linkopt -fno-sanitize=vptr,function
build:asan --copt -DADDRESS_SANITIZER=1
build:asan --copt -D__SANITIZE_ADDRESS__
build:asan --linkopt -ldl
build:llvm-asan --config=llvm
build:llvm-asan --config=asan
build:llvm-asan --linkopt -fuse-ld=ld.lld
build:fuzz --@io_bazel_rules_go//go/config:tags=fuzz
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --config=llvm
build:cgo_symbolizer --copt=-g
build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
@@ -59,9 +37,13 @@ build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
build:debug --toolchain_resolution_debug
build:debug --toolchain_resolution_debug=".*"
build:debug --verbose_failures
build:debug -s
# Set bazel gotag
build --define gotags=bazel
# Abseil requires c++14 or greater.
build --cxxopt=-std=c++20
build --host_cxxopt=-std=c++20

View File

@@ -1 +1 @@
6.1.0
6.2.1

View File

@@ -3,7 +3,6 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
prefix = "github.com/prysmaticlabs/prysm"

View File

@@ -16,27 +16,37 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
HERMETIC_CC_TOOLCHAIN_VERSION = "v2.0.0"
http_archive(
name = "com_grail_bazel_toolchain",
sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
strip_prefix = "bazel-toolchain-83e69ba9e4b4fdad0d1d057fcb87addf77c281c9",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/83e69ba9e4b4fdad0d1d057fcb87addf77c281c9.tar.gz"],
name = "hermetic_cc_toolchain",
sha256 = "57f03a6c29793e8add7bd64186fc8066d23b5ffd06fe9cc6b0b8c499914d3a65",
urls = [
"https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
],
)
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
bazel_toolchain_dependencies()
zig_toolchains()
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "13.0.1",
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
register_toolchains(
"@zig_sdk//toolchain:linux_amd64_gnu.2.31",
"@zig_sdk//toolchain:linux_arm64_gnu.2.31",
# Hermetic cc toolchain is not yet supported on darwin. Sysroot needs to be provided.
# See https://github.com/uber/hermetic_cc_toolchain#osx-sysroot
# "@zig_sdk//toolchain:darwin_amd64",
# "@zig_sdk//toolchain:darwin_arm64",
# Windows builds are not supported yet.
# "@zig_sdk//toolchain:windows_amd64",
)
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
load("@prysm//tools/cross-toolchain:darwin_cc_hack.bzl", "configure_nonhermetic_darwin")
llvm_register_toolchains()
configure_nonhermetic_darwin()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
@@ -311,11 +321,13 @@ http_archive(
url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
)
git_repository(
http_archive(
name = "com_google_protobuf",
commit = "436bd7880e458532901c58f4d9d1ea23fa7edd52",
remote = "https://github.com/protocolbuffers/protobuf",
shallow_since = "1617835118 -0700",
sha256 = "4e176116949be52b0408dfd24f8925d1eb674a781ae242a75296b17a1c721395",
strip_prefix = "protobuf-23.3",
urls = [
"https://github.com/protocolbuffers/protobuf/archive/v23.3.tar.gz",
],
)
# Group the sources of the library so that CMake rule have access to it

View File

@@ -128,6 +128,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
@@ -226,6 +227,7 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
@@ -399,6 +401,7 @@ func TestDownloadFinalizedData(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))

View File

@@ -41,13 +41,15 @@ var (
type invalidBlock struct {
invalidAncestorRoots [][32]byte
error
root [32]byte
root [32]byte
lastValidHash [32]byte
}
type invalidBlockError interface {
Error() string
InvalidAncestorRoots() [][32]byte
BlockRoot() [32]byte
LastValidHash() [32]byte
}
// BlockRoot returns the invalid block root.
@@ -55,6 +57,11 @@ func (e invalidBlock) BlockRoot() [32]byte {
return e.root
}
// LastValidHash returns the last valid hash root.
func (e invalidBlock) LastValidHash() [32]byte {
return e.lastValidHash
}
// InvalidAncestorRoots returns an optional list of invalid roots of the invalid block which leads up last valid root.
func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
return e.invalidAncestorRoots
@@ -72,6 +79,19 @@ func IsInvalidBlock(e error) bool {
return true
}
// InvalidBlockLVH returns the invalid block last valid hash root. If the error
// doesn't have a last valid hash, [32]byte{} is returned.
func InvalidBlockLVH(e error) [32]byte {
if e == nil {
return [32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [32]byte{}
}
return d.LastValidHash()
}
// InvalidBlockRoot returns the invalid block root. If the error
// doesn't have an invalid blockroot. [32]byte{} is returned.
func InvalidBlockRoot(e error) [32]byte {
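A minimal sketch of how a caller might consume this error type through the package-level helpers above (IsInvalidBlock, InvalidBlockRoot, InvalidBlockLVH, InvalidAncestorRoots); the handlePayloadError name and the import path are illustrative assumptions, not part of this change.

package example

import (
	"log"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
)

// handlePayloadError (hypothetical) inspects an error returned from payload
// processing. If the execution layer reported the block invalid, the typed
// error carries the invalid root, the last valid hash, and any invalid ancestors.
func handlePayloadError(err error) {
	if err == nil {
		return
	}
	if blockchain.IsInvalidBlock(err) {
		root := blockchain.InvalidBlockRoot(err) // root of the invalid block
		lvh := blockchain.InvalidBlockLVH(err)   // zero value when no LVH was reported
		ancestors := blockchain.InvalidAncestorRoots(err)
		log.Printf("invalid block %#x, last valid hash %#x, %d invalid ancestors", root, lvh, len(ancestors))
		return
	}
	log.Printf("payload processing failed: %v", err)
}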

View File

@@ -220,35 +220,37 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
}).Info("Called new payload with optimistic block")
return false, nil
case execution.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
error: ErrInvalidPayload,
lastValidHash: lvh,
}
case execution.ErrInvalidBlockHashPayloadStatus:
newPayloadInvalidNodeCount.Inc()
return false, ErrInvalidBlockHashPayloadStatus
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
}
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) reportInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return err
}
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
lastValidHash: lvh,
}
}
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {

View File

@@ -743,6 +743,37 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
require.Equal(t, true, validated)
}
func Test_reportInvalidBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
service, tr := minimalTestService(t)
ctx, _, fcs := tr.ctx, tr.db, tr.fcs
jcp := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'A'}, [32]byte{}, [32]byte{'a'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 1, [32]byte{'B'}, [32]byte{'A'}, [32]byte{'b'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'C'}, [32]byte{'B'}, [32]byte{'c'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'d'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
err = service.reportInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
require.Equal(t, IsInvalidBlock(err), true)
require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
invalidRoots := InvalidAncestorRoots(err)
require.Equal(t, 3, len(invalidRoots))
require.Equal(t, [32]byte{'D'}, invalidRoots[0])
require.Equal(t, [32]byte{'C'}, invalidRoots[1])
require.Equal(t, [32]byte{'B'}, invalidRoots[2])
}
func Test_GetPayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

View File

@@ -107,7 +107,8 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}
// Verify that the parent block is in forkchoice
if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
parentRoot := b.ParentRoot()
if !s.cfg.ForkChoiceStore.HasNode(parentRoot) {
return ErrNotDescendantOfFinalized
}
@@ -134,6 +135,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
if err != nil {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.reportInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
}
return errors.Wrap(err, "could not validate new payload")
}
if signed.Version() < version.Capella && isValidPayload {
@@ -145,8 +149,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, postState, blockRoot); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
@@ -186,7 +189,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}
}()
}
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
@@ -257,10 +259,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return err
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(finalized.Root)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
go func() {
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
@@ -270,7 +268,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
ExecutionOptimistic: isOptimistic,
ExecutionOptimistic: isValidPayload,
},
})
@@ -498,9 +496,20 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
e := coreTime.CurrentEpoch(copied)
if err := helpers.UpdateProposerIndicesInCache(ctx, copied, e); err != nil {
return err
}
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := helpers.UpdateProposerIndicesInCache(slotCtx, copied, e+1); err != nil {
log.WithError(err).Warn("Failed to cache next epoch proposers")
}
}()
} else if postState.Slot() >= s.nextEpochBoundarySlot {
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
@@ -512,7 +521,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
if err := helpers.UpdateProposerIndicesInCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
@@ -524,27 +533,9 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
return err
}
}
return nil
}
// This feeds in the block to fork choice store. It's allows fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, root [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockToForkchoiceStore")
defer span.End()
if !s.cfg.ForkChoiceStore.HasNode(blk.ParentRoot()) {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
}
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// This feeds in the attestations included in the block to fork choice store. It's allows fork choice store
// to gain information on the most current chain.
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
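The goroutine added in handleEpochBoundary intentionally detaches from the caller's context: because the next-epoch proposer warm-up runs asynchronously, it derives a fresh deadline from context.Background() instead of inheriting a context that is cancelled when the parent returns. A stripped-down sketch of that pattern, where warmCacheAsync and the 12-second deadline are illustrative assumptions:

package example

import (
	"context"
	"log"
	"time"
)

// warmCacheAsync runs fn in the background on a detached context so the work
// is not cancelled when the caller returns, while still bounding it with its
// own deadline, mirroring the epoch-boundary proposer-cache update above.
func warmCacheAsync(fn func(ctx context.Context) error) {
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), 12*time.Second) // assumed slot deadline
		defer cancel()
		if err := fn(ctx); err != nil {
			log.Printf("failed to cache next epoch proposers: %v", err)
		}
	}()
}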

View File

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
)
@@ -213,6 +214,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
defer span.End()
startTime := time.Now()
// Update deposit cache.
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
@@ -237,6 +239,7 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
log.WithField("duration", time.Since(startTime).String()).Debug("Finalized deposit insertion completed")
return nil
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
@@ -408,7 +409,7 @@ func (s *Service) initializeBeaconChain(
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState, coreTime.CurrentEpoch(genesisState)); err != nil {
return nil, err
}

View File

@@ -377,9 +377,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -453,9 +451,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := state_native.InitializeFromProtoPhase0(bs)
require.NoError(b, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -13,6 +13,15 @@ import (
"go.opencensus.io/trace"
)
// AttDelta contains rewards and penalties for a single attestation.
type AttDelta struct {
HeadReward uint64
SourceReward uint64
SourcePenalty uint64
TargetReward uint64
TargetPenalty uint64
}
// InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
@@ -226,7 +235,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
return beaconState, errors.New("validator registries not the same length as state's validator registries")
}
attsRewards, attsPenalties, err := AttestationsDelta(beaconState, bal, vals)
attDeltas, err := AttestationsDelta(beaconState, bal, vals)
if err != nil {
return nil, errors.Wrap(err, "could not get attestation delta")
}
@@ -237,11 +246,12 @@ func ProcessRewardsAndPenaltiesPrecompute(
// Compute the post balance of the validator after accounting for the
// attester and proposer rewards and penalties.
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], attsRewards[i])
delta := attDeltas[i]
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], delta.HeadReward+delta.SourceReward+delta.TargetReward)
if err != nil {
return nil, err
}
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], attsPenalties[i])
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], delta.SourcePenalty+delta.TargetPenalty)
vals[i].AfterEpochTransitionBalance = balances[i]
}
@@ -255,10 +265,8 @@ func ProcessRewardsAndPenaltiesPrecompute(
// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
// voting records.
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) (rewards, penalties []uint64, err error) {
numOfVals := beaconState.NumValidators()
rewards = make([]uint64, numOfVals)
penalties = make([]uint64, numOfVals)
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) ([]*AttDelta, error) {
attDeltas := make([]*AttDelta, len(vals))
cfg := params.BeaconConfig()
prevEpoch := time.PrevEpoch(beaconState)
@@ -272,29 +280,29 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
bias := cfg.InactivityScoreBias
inactivityPenaltyQuotient, err := beaconState.InactivityPenaltyQuotient()
if err != nil {
return nil, nil, err
return nil, err
}
inactivityDenominator := bias * inactivityPenaltyQuotient
for i, v := range vals {
rewards[i], penalties[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
attDeltas[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
if err != nil {
return nil, nil, err
return nil, err
}
}
return rewards, penalties, nil
return attDeltas, nil
}
func attestationDelta(
bal *precompute.Balance,
val *precompute.Validator,
baseRewardMultiplier, inactivityDenominator uint64,
inactivityLeak bool) (reward, penalty uint64, err error) {
inactivityLeak bool) (*AttDelta, error) {
eligible := val.IsActivePrevEpoch || (val.IsSlashed && !val.IsWithdrawableCurrentEpoch)
// Per spec `ActiveCurrentEpoch` can't be 0 to process attestation delta.
if !eligible || bal.ActiveCurrentEpoch == 0 {
return 0, 0, nil
return &AttDelta{}, nil
}
cfg := params.BeaconConfig()
@@ -307,32 +315,32 @@ func attestationDelta(
srcWeight := cfg.TimelySourceWeight
tgtWeight := cfg.TimelyTargetWeight
headWeight := cfg.TimelyHeadWeight
reward, penalty = uint64(0), uint64(0)
attDelta := &AttDelta{}
// Process source reward / penalty
if val.IsPrevEpochSourceAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * srcWeight * (bal.PrevEpochAttested / increment)
reward += n / (activeIncrement * weightDenominator)
attDelta.SourceReward += n / (activeIncrement * weightDenominator)
}
} else {
penalty += baseReward * srcWeight / weightDenominator
attDelta.SourcePenalty += baseReward * srcWeight / weightDenominator
}
// Process target reward / penalty
if val.IsPrevEpochTargetAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * tgtWeight * (bal.PrevEpochTargetAttested / increment)
reward += n / (activeIncrement * weightDenominator)
attDelta.TargetReward += n / (activeIncrement * weightDenominator)
}
} else {
penalty += baseReward * tgtWeight / weightDenominator
attDelta.TargetPenalty += baseReward * tgtWeight / weightDenominator
}
// Process head reward / penalty
if val.IsPrevEpochHeadAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * headWeight * (bal.PrevEpochHeadAttested / increment)
reward += n / (activeIncrement * weightDenominator)
attDelta.HeadReward += n / (activeIncrement * weightDenominator)
}
}
@@ -341,10 +349,10 @@ func attestationDelta(
if !val.IsPrevEpochTargetAttester || val.IsSlashed {
n, err := math.Mul64(effectiveBalance, val.InactivityScore)
if err != nil {
return 0, 0, err
return &AttDelta{}, err
}
penalty += n / inactivityDenominator
attDelta.TargetPenalty += n / inactivityDenominator
}
return reward, penalty, nil
return attDelta, nil
}
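Since AttestationsDelta no longer returns flat reward and penalty slices, callers recompose them from the AttDelta components, exactly as the balance update above does (rewards are Head + Source + Target, penalties are Source + Target). A small sketch, where sumDelta is an illustrative helper rather than part of this change:

package example

// AttDelta mirrors the struct introduced above: per-validator reward and
// penalty components for a single epoch's attestations.
type AttDelta struct {
	HeadReward    uint64
	SourceReward  uint64
	SourcePenalty uint64
	TargetReward  uint64
	TargetPenalty uint64
}

// sumDelta recomposes the flat (reward, penalty) pair that the previous
// AttestationsDelta signature returned for one validator.
func sumDelta(d *AttDelta) (reward, penalty uint64) {
	reward = d.HeadReward + d.SourceReward + d.TargetReward
	penalty = d.SourcePenalty + d.TargetPenalty
	return reward, penalty
}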

View File

@@ -213,9 +213,16 @@ func TestAttestationsDelta(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
deltas, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -244,9 +251,16 @@ func TestAttestationsDeltaBellatrix(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
deltas, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -285,8 +299,15 @@ func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
}
wanted := make([]uint64, s.NumValidators())
rewards, penalties, err := AttestationsDelta(s, balance, validators)
deltas, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
for i := range rewards {
wanted[i] += rewards[i]
}

View File

@@ -336,20 +336,21 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
}
// UpdateProposerIndicesInCache updates proposer indices entry of the committee cache.
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState) error {
// Input state is used to retrieve active validator indices.
// Input epoch is the epoch to retrieve proposer indices for.
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
// The cache uses the state root at the (current epoch - 1)'s slot as key. (e.g. for epoch 2, the key is root at slot 63)
// Which is the reason why we skip genesis epoch.
if time.CurrentEpoch(state) <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
return nil
}
// Use state root from (current_epoch - 1))
wantedEpoch := time.PrevEpoch(state)
s, err := slots.EpochEnd(wantedEpoch)
s, err := slots.EpochEnd(epoch - 1)
if err != nil {
return err
}
r, err := StateRootAtSlot(state, s)
r, err := state.StateRootAtIndex(uint64(s % params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
return err
}
@@ -366,11 +367,11 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
return nil
}
indices, err := ActiveValidatorIndices(ctx, state, time.CurrentEpoch(state))
indices, err := ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices)
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
if err != nil {
return err
}
@@ -432,11 +433,10 @@ func computeCommittee(
// This computes proposer indices of the current epoch and returns a list of proposer indices,
// the index of the list represents the slot number.
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex) ([]primitives.ValidatorIndex, error) {
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
hashFunc := hash.CustomSHA256Hasher()
proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)
e := time.CurrentEpoch(state)
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return nil, errors.Wrap(err, "could not generate seed")
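A short worked example of the cache key described in the comment above: the proposer-indices cache is keyed by the state root at the last slot of epoch - 1, i.e. slots.EpochEnd(epoch - 1), so for epoch 2 on mainnet (32 slots per epoch, an assumed configuration value here) the key slot is 63. The keySlot helper below is illustrative only:

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, assumed for illustration

// keySlot returns the slot whose state root keys the proposer-indices cache
// for the given epoch: the last slot of (epoch - 1).
func keySlot(epoch uint64) uint64 {
	return epoch*slotsPerEpoch - 1
}

func main() {
	fmt.Println(keySlot(2)) // 63, matching the example in the comment above
	// The root itself is then read via state.StateRootAtIndex(keySlot(e) % SlotsPerHistoricalRoot).
}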

View File

@@ -639,7 +639,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
proposerIndices, err := precomputeProposerIndices(state, indices)
proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
require.NoError(t, err)
var wantedProposerIndices []primitives.ValidatorIndex

View File

@@ -262,7 +262,7 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
if err := UpdateProposerIndicesInCache(ctx, state); err != nil {
if err := UpdateProposerIndicesInCache(ctx, state, time.CurrentEpoch(state)); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
}

View File

@@ -7,7 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
)
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [32]byte) ([][32]byte, error) {
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok {
@@ -16,7 +16,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
// return early if the parent is LVH
if node.payloadHash == payloadHash {
if node.payloadHash == lastValidHash {
return invalidRoots, nil
}
} else {
@@ -28,7 +28,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
}
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}

View File

@@ -987,7 +987,8 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
if cliCtx.Bool(flags.EnableRegistrationCache.Name) {
// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())
}
svc, err := builder.NewService(b.ctx, opts...)

View File

@@ -47,6 +47,7 @@ go_test(
deps = [
"//async:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",

View File

@@ -14,6 +14,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
@@ -39,8 +40,8 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -2,9 +2,12 @@ package kv
import (
"context"
"runtime"
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -23,21 +26,11 @@ func (c *AttCaches) AggregateUnaggregatedAttestations(ctx context.Context) error
if err != nil {
return err
}
return c.aggregateUnaggregatedAttestations(ctx, unaggregatedAtts)
return c.aggregateUnaggregatedAtts(ctx, unaggregatedAtts)
}
// AggregateUnaggregatedAttestationsBySlotIndex aggregates the unaggregated attestations and saves
// newly aggregated attestations in the pool. Unaggregated attestations are filtered by slot and
// committee index.
func (c *AttCaches) AggregateUnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) error {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregateUnaggregatedAttestationsBySlotIndex")
defer span.End()
unaggregatedAtts := c.UnaggregatedAttestationsBySlotIndex(ctx, slot, committeeIndex)
return c.aggregateUnaggregatedAttestations(ctx, unaggregatedAtts)
}
func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAttestations")
func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
defer span.End()
attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation, len(unaggregatedAtts))
@@ -52,26 +45,32 @@ func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unagg
// Aggregate unaggregated attestations from the pool and save them in the pool.
// Track the unaggregated attestations that aren't able to aggregate.
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
for _, atts := range attsByDataRoot {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
if err != nil {
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
} else {
h, err := hashFn(aggregated)
if features.Get().AggregateParallel {
leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt)
} else {
for _, atts := range attsByDataRoot {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
if err != nil {
return err
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
} else {
h, err := hashFn(aggregated)
if err != nil {
return err
}
leftOverUnaggregatedAtt[h] = true
}
leftOverUnaggregatedAtt[h] = true
}
}
// Remove the unaggregated attestations from the pool that were successfully aggregated.
for _, att := range unaggregatedAtts {
h, err := hashFn(att)
@@ -88,6 +87,58 @@ func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unagg
return nil
}
// aggregateParallel aggregates attestations in parallel for `atts` and saves them in the pool,
// returns the unaggregated attestations that weren't able to aggregate.
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]*ethpb.Attestation, leftOver map[[32]byte]bool) map[[32]byte]bool {
var leftoverLock sync.Mutex
wg := sync.WaitGroup{}
n := runtime.GOMAXPROCS(0) // defaults to the value of runtime.NumCPU
ch := make(chan []*ethpb.Attestation, n)
wg.Add(n)
for i := 0; i < n; i++ {
go func() {
defer wg.Done()
for as := range ch {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(as)
if err != nil {
log.WithError(err).Error("could not aggregate unaggregated attestations")
continue
}
if aggregated == nil {
log.Error("nil aggregated attestation")
continue
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
continue
}
} else {
h, err := hashFn(aggregated)
if err != nil {
log.WithError(err).Error("could not hash attestation")
continue
}
leftoverLock.Lock()
leftOver[h] = true
leftoverLock.Unlock()
}
}
}()
}
for _, as := range atts {
ch <- as
}
close(ch)
wg.Wait()
return leftOver
}
// SaveAggregatedAttestation saves an aggregated attestation in cache.
func (c *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
@@ -165,7 +216,7 @@ func (c *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
defer span.End()
atts := make([]*ethpb.Attestation, 0)
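The new aggregateParallel follows a standard bounded fan-out: one buffered channel of attestation batches, GOMAXPROCS(0) worker goroutines, and a mutex guarding the shared left-over map. A stripped-down sketch of the same pattern with illustrative names, independent of the attestation types above:

package example

import (
	"runtime"
	"sync"
)

// fanOut processes each batch on a fixed pool of workers sized to the number
// of usable CPUs and collects results under a mutex, mirroring the shape of
// aggregateParallel above.
func fanOut(batches [][]int, process func([]int) int) []int {
	var (
		mu      sync.Mutex
		results []int
		wg      sync.WaitGroup
	)
	n := runtime.GOMAXPROCS(0) // defaults to the number of CPUs
	ch := make(chan []int, n)
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for b := range ch {
				r := process(b)
				mu.Lock()
				results = append(results, r)
				mu.Unlock()
			}
		}()
	}
	for _, b := range batches {
		ch <- b
	}
	close(ch)
	wg.Wait()
	return results
}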

View File

@@ -9,7 +9,7 @@ import (
"github.com/pkg/errors"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -18,6 +18,11 @@ import (
)
func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
cache := NewAttCaches()
priv, err := bls.RandKey()
require.NoError(t, err)
@@ -39,61 +44,6 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(context.Background(), 2, 0)), "Did not aggregate correctly")
}
func TestKV_Aggregated_AggregateUnaggregatedAttestationsBySlotIndex(t *testing.T) {
cache := NewAttCaches()
genData := func(slot primitives.Slot, committeeIndex primitives.CommitteeIndex) *ethpb.AttestationData {
return util.HydrateAttestationData(&ethpb.AttestationData{
Slot: slot,
CommitteeIndex: committeeIndex,
})
}
genSign := func() []byte {
priv, err := bls.RandKey()
require.NoError(t, err)
return priv.Sign([]byte{'a'}).Marshal()
}
atts := []*ethpb.Attestation{
// The first slot.
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1010}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(1, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(1, 3), Signature: genSign()},
// The second slot.
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(2, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1010}, Data: genData(2, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(2, 4), Signature: genSign()},
}
ctx := context.Background()
// Make sure that no error is produced if aggregation is requested on empty unaggregated list.
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 1, 2))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 2, 3))
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)), "Did not aggregate correctly")
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 3)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 3)), "Did not aggregate correctly")
// Persist unaggregated attestations, and aggregate on per slot/committee index base.
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 1, 2))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 2, 3))
// Committee attestations at a slot should be aggregated.
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)))
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)), "Did not aggregate correctly")
// Committee attestations haven't been aggregated.
require.Equal(t, 2, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 3)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 3)), "Did not aggregate correctly")
// Committee at a second slot is aggregated.
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 3)))
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(ctx, 2, 3)), "Did not aggregate correctly")
// The second committee at second slot is not aggregated.
require.Equal(t, 1, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 4)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 2, 4)), "Did not aggregate correctly")
}
func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
tests := []struct {
name string

View File

@@ -15,7 +15,6 @@ import (
type Pool interface {
// For Aggregated attestations
AggregateUnaggregatedAttestations(ctx context.Context) error
AggregateUnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) error
SaveAggregatedAttestation(att *ethpb.Attestation) error
SaveAggregatedAttestations(atts []*ethpb.Attestation) error
AggregatedAttestations() []*ethpb.Attestation

View File

@@ -7,6 +7,7 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -17,6 +18,11 @@ import (
)
func TestBatchAttestations_Multiple(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)

View File

@@ -25,6 +25,7 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/builder:go_default_library",
"//beacon-chain/rpc/eth/debug:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/node:go_default_library",

View File

@@ -0,0 +1,48 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"server.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/builder",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//proto/engine/v1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//network:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
],
)

View File

@@ -0,0 +1,131 @@
package builder
import (
"fmt"
"net/http"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/network"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
// ExpectedWithdrawals get the withdrawals computed from the specified state, that will be included in the block that gets built on the specified state.
func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
// Retrieve beacon state
stateId := mux.Vars(r)["state_id"]
if stateId == "" {
network.WriteError(w, &network.DefaultErrorJson{
Message: "state_id is required in URL params",
Code: http.StatusBadRequest,
})
return
}
st, err := s.Stater.State(r.Context(), []byte(stateId))
if err != nil {
network.WriteError(w, handleWrapError(err, "could not retrieve state", http.StatusNotFound))
return
}
queryParam := r.URL.Query().Get("proposal_slot")
var proposalSlot primitives.Slot
if queryParam != "" {
pSlot, err := strconv.ParseUint(queryParam, 10, 64)
if err != nil {
network.WriteError(w, handleWrapError(err, "invalid proposal slot value", http.StatusBadRequest))
return
}
proposalSlot = primitives.Slot(pSlot)
} else {
proposalSlot = st.Slot() + 1
}
// Perform sanity checks on proposal slot before computing state
capellaStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
if err != nil {
network.WriteError(w, handleWrapError(err, "could not calculate Capella start slot", http.StatusInternalServerError))
return
}
if proposalSlot < capellaStart {
network.WriteError(w, &network.DefaultErrorJson{
Message: "expected withdrawals are not supported before Capella fork",
Code: http.StatusBadRequest,
})
return
}
if proposalSlot <= st.Slot() {
network.WriteError(w, &network.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot must be bigger than state slot. proposal slot: %d, state slot: %d", proposalSlot, st.Slot()),
Code: http.StatusBadRequest,
})
return
}
lookAheadLimit := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().MaxSeedLookahead)))
if st.Slot().Add(lookAheadLimit) <= proposalSlot {
network.WriteError(w, &network.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot cannot be >= %d slots ahead of state slot", lookAheadLimit),
Code: http.StatusBadRequest,
})
return
}
// Get metadata for response
isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
network.WriteError(w, handleWrapError(err, "could not get optimistic mode info", http.StatusInternalServerError))
return
}
root, err := helpers.BlockRootAtSlot(st, st.Slot()-1)
if err != nil {
network.WriteError(w, handleWrapError(err, "could not get block root", http.StatusInternalServerError))
return
}
var blockRoot = [32]byte(root)
isFinalized := s.FinalizationFetcher.IsFinalized(r.Context(), blockRoot)
// Advance state forward to proposal slot
st, err = transition.ProcessSlots(r.Context(), st, proposalSlot)
if err != nil {
network.WriteError(w, &network.DefaultErrorJson{
Message: "could not process slots",
Code: http.StatusInternalServerError,
})
return
}
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
network.WriteError(w, &network.DefaultErrorJson{
Message: "could not get expected withdrawals",
Code: http.StatusInternalServerError,
})
return
}
network.WriteJson(w, &ExpectedWithdrawalsResponse{
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
Data: buildExpectedWithdrawalsData(withdrawals),
})
}
func buildExpectedWithdrawalsData(withdrawals []*enginev1.Withdrawal) []*ExpectedWithdrawal {
data := make([]*ExpectedWithdrawal, len(withdrawals))
for i, withdrawal := range withdrawals {
data[i] = &ExpectedWithdrawal{
Address: hexutil.Encode(withdrawal.Address),
Amount: strconv.FormatUint(withdrawal.Amount, 10),
Index: strconv.FormatUint(withdrawal.Index, 10),
ValidatorIndex: strconv.FormatUint(uint64(withdrawal.ValidatorIndex), 10),
}
}
return data
}
func handleWrapError(err error, message string, code int) *network.DefaultErrorJson {
return &network.DefaultErrorJson{
Message: errors.Wrapf(err, message).Error(),
Code: code,
}
}
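As a quick worked check on the look-ahead bound above: with mainnet presets (SLOTS_PER_EPOCH = 32, MAX_SEED_LOOKAHEAD = 4) the limit comes out to 128 slots, which is the figure the handler tests below assert. A minimal standalone sketch, with those values hard-coded as an assumption (the handler itself reads them from params.BeaconConfig()):

package main

import "fmt"

func main() {
	// Mainnet preset values, hard-coded here only for illustration.
	slotsPerEpoch := uint64(32)
	maxSeedLookahead := uint64(4)

	lookAheadLimit := slotsPerEpoch * maxSeedLookahead
	// The handler rejects any proposal_slot >= state slot + lookAheadLimit.
	fmt.Println(lookAheadLimit) // 128
}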

View File

@@ -0,0 +1,210 @@
package builder
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/network"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
func TestExpectedWithdrawals_BadRequest(t *testing.T) {
st, err := util.NewBeaconStateCapella()
slotsAhead := 5000
require.NoError(t, err)
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
currentSlot := capellaSlot + primitives.Slot(slotsAhead)
require.NoError(t, st.SetSlot(currentSlot))
mockChainService := &mock.ChainService{Optimistic: true}
testCases := []struct {
name string
path string
urlParams map[string]string
state state.BeaconState
errorMessage string
}{
{
name: "no state_id url params",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot" +
strconv.FormatUint(uint64(currentSlot), 10),
urlParams: map[string]string{},
state: nil,
errorMessage: "state_id is required in URL params",
},
{
name: "invalid proposal slot value",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=aaa",
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "invalid proposal slot value",
},
{
name: "proposal slot < Capella start slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(capellaSlot)-1, 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "expected withdrawals are not supported before Capella fork",
},
{
name: "proposal slot == Capella start slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(capellaSlot), 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "proposal slot must be bigger than state slot",
},
{
name: "Proposal slot >= 128 slots ahead of state slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(currentSlot+128), 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "proposal slot cannot be >= 128 slots ahead of state slot",
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
s := &Server{
FinalizationFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
Stater: &testutil.MockStater{BeaconState: testCase.state},
}
request := httptest.NewRequest("GET", testCase.path, nil)
request = mux.SetURLVars(request, testCase.urlParams)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, testCase.errorMessage, e.Message)
})
}
}
func TestExpectedWithdrawals(t *testing.T) {
st, err := util.NewBeaconStateCapella()
slotsAhead := 5000
require.NoError(t, err)
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
currentSlot := capellaSlot + primitives.Slot(slotsAhead)
require.NoError(t, st.SetSlot(currentSlot))
mockChainService := &mock.ChainService{Optimistic: true}
t.Run("get correct expected withdrawals", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.MaxValidatorsPerWithdrawalsSweep = 16
params.OverrideBeaconConfig(cfg)
// Update state with updated validator fields
valCount := 17
validators := make([]*eth.Validator, 0, valCount)
balances := make([]uint64, 0, valCount)
for i := 0; i < valCount; i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
val := &eth.Validator{
PublicKey: blsKey.PublicKey().Marshal(),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
validators = append(validators, val)
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}
epoch := slots.ToEpoch(st.Slot())
// Fully withdrawable now with more than 0 balance
validators[5].WithdrawableEpoch = epoch
// Fully withdrawable now but 0 balance
validators[10].WithdrawableEpoch = epoch
balances[10] = 0
// Partially withdrawable now but fully withdrawable after 1 epoch
validators[14].WithdrawableEpoch = epoch + 1
balances[14] += params.BeaconConfig().MinDepositAmount
// Partially withdrawable
validators[15].WithdrawableEpoch = epoch + 2
balances[15] += params.BeaconConfig().MinDepositAmount
// Above sweep bound
validators[16].WithdrawableEpoch = epoch + 1
balances[16] += params.BeaconConfig().MinDepositAmount
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetBalances(balances))
inactivityScores := make([]uint64, valCount)
for i := range inactivityScores {
inactivityScores[i] = 10
}
require.NoError(t, st.SetInactivityScores(inactivityScores))
s := &Server{
FinalizationFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
Stater: &testutil.MockStater{BeaconState: st},
}
request := httptest.NewRequest(
"GET", "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot="+
strconv.FormatUint(uint64(currentSlot+params.BeaconConfig().SlotsPerEpoch), 10), nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ExpectedWithdrawalsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
assert.Equal(t, false, resp.Finalized)
assert.Equal(t, 3, len(resp.Data))
expectedWithdrawal1 := &ExpectedWithdrawal{
Index: strconv.FormatUint(0, 10),
ValidatorIndex: strconv.FormatUint(5, 10),
Address: hexutil.Encode(validators[5].WithdrawalCredentials[12:]),
// Decreased due to epoch processing when state advanced forward
Amount: strconv.FormatUint(31998257885, 10),
}
expectedWithdrawal2 := &ExpectedWithdrawal{
Index: strconv.FormatUint(1, 10),
ValidatorIndex: strconv.FormatUint(14, 10),
Address: hexutil.Encode(validators[14].WithdrawalCredentials[12:]),
// MaxEffectiveBalance + MinDepositAmount + decrease after epoch processing
Amount: strconv.FormatUint(32998257885, 10),
}
expectedWithdrawal3 := &ExpectedWithdrawal{
Index: strconv.FormatUint(2, 10),
ValidatorIndex: strconv.FormatUint(15, 10),
Address: hexutil.Encode(validators[15].WithdrawalCredentials[12:]),
// MinDepositAmount + decrease after epoch processing
Amount: strconv.FormatUint(998257885, 10),
}
require.DeepEqual(t, expectedWithdrawal1, resp.Data[0])
require.DeepEqual(t, expectedWithdrawal2, resp.Data[1])
require.DeepEqual(t, expectedWithdrawal3, resp.Data[2])
})
}

View File

@@ -0,0 +1,12 @@
package builder
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
)
type Server struct {
FinalizationFetcher blockchain.FinalizationFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
Stater lookup.Stater
}

View File

@@ -0,0 +1,14 @@
package builder
type ExpectedWithdrawalsResponse struct {
Data []*ExpectedWithdrawal `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type ExpectedWithdrawal struct {
Address string `json:"address" hex:"true"`
Amount string `json:"amount"`
Index string `json:"index"`
ValidatorIndex string `json:"validator_index"`
}
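To show how these types appear on the wire, here is a minimal client sketch for the new endpoint. The node URL, port, and proposal_slot value are placeholder assumptions, and the response types are re-declared locally so the snippet stands alone:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Local copies of the response types so the sketch is self-contained.
type ExpectedWithdrawal struct {
	Address        string `json:"address"`
	Amount         string `json:"amount"`
	Index          string `json:"index"`
	ValidatorIndex string `json:"validator_index"`
}

type ExpectedWithdrawalsResponse struct {
	Data                []*ExpectedWithdrawal `json:"data"`
	ExecutionOptimistic bool                  `json:"execution_optimistic"`
	Finalized           bool                  `json:"finalized"`
}

func main() {
	// Placeholder node address and proposal slot.
	url := "http://localhost:3500/eth/v1/builder/states/head/expected_withdrawals?proposal_slot=6415"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out := &ExpectedWithdrawalsResponse{}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		panic(err)
	}
	for _, w := range out.Data {
		fmt.Println(w.ValidatorIndex, w.Address, w.Amount)
	}
}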

View File

@@ -13,14 +13,21 @@ go_library(
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
],
)
@@ -33,6 +40,7 @@ go_test(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

View File

@@ -1,6 +1,8 @@
package rewards
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
@@ -8,12 +10,19 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
coreblocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/wealdtech/go-bytesutil"
)
// BlockRewards is an HTTP handler for Beacon API getBlockRewards.
@@ -28,7 +37,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
}
if blk.Version() == version.Phase0 {
errJson := &network.DefaultErrorJson{
Message: "block rewards are not supported for Phase 0 blocks",
Message: "Block rewards are not supported for Phase 0 blocks",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
@@ -41,7 +50,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
st, err := s.ReplayerBuilder.ReplayerForSlot(blk.Block().Slot()-1).ReplayToSlot(r.Context(), blk.Block().Slot())
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get state").Error(),
Message: "Could not get state: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -52,7 +61,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
initBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
Message: "Could not get proposer's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -61,7 +70,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
st, err = altair.ProcessAttestationsNoVerifySignature(r.Context(), st, blk)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get attestation rewards").Error(),
Message: "Could not get attestation rewards" + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -70,7 +79,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
attBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
Message: "Could not get proposer's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -79,7 +88,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
st, err = coreblocks.ProcessAttesterSlashings(r.Context(), st, blk.Block().Body().AttesterSlashings(), validators.SlashValidator)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get attester slashing rewards").Error(),
Message: "Could not get attester slashing rewards: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -88,7 +97,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
attSlashingsBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
Message: "Could not get proposer's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -97,7 +106,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
st, err = coreblocks.ProcessProposerSlashings(r.Context(), st, blk.Block().Body().ProposerSlashings(), validators.SlashValidator)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get proposer slashing rewards").Error(),
Message: "Could not get proposer slashing rewards" + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -106,7 +115,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
proposerSlashingsBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
Message: "Could not get proposer's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -115,7 +124,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
sa, err := blk.Block().Body().SyncAggregate()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get sync aggregate").Error(),
Message: "Could not get sync aggregate: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -125,7 +134,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
_, syncCommitteeReward, err = altair.ProcessSyncAggregate(r.Context(), st, sa)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get sync aggregate rewards").Error(),
Message: "Could not get sync aggregate rewards: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -135,7 +144,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get optimistic mode info").Error(),
Message: "Could not get optimistic mode info: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -144,7 +153,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get block root").Error(),
Message: "Could not get block root: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
@@ -152,7 +161,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
}
response := &BlockRewardsResponse{
Data: &BlockRewards{
Data: BlockRewards{
ProposerIndex: strconv.FormatUint(uint64(proposerIndex), 10),
Total: strconv.FormatUint(proposerSlashingsBalance-initBalance+syncCommitteeReward, 10),
Attestations: strconv.FormatUint(attBalance-initBalance, 10),
@@ -166,22 +175,300 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
network.WriteJson(w, response)
}
// AttestationRewards retrieves attestation reward info for validators specified by an array of public keys or validator indices.
// If no array is provided, it returns reward info for every validator.
// TODO: Inclusion delay
func (s *Server) AttestationRewards(w http.ResponseWriter, r *http.Request) {
st, ok := s.attRewardsState(w, r)
if !ok {
return
}
bal, vals, valIndices, ok := attRewardsBalancesAndVals(w, r, st)
if !ok {
return
}
totalRewards, ok := totalAttRewards(w, st, bal, vals, valIndices)
if !ok {
return
}
idealRewards, ok := idealAttRewards(w, st, bal, vals)
if !ok {
return
}
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get optimistic mode info: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
blkRoot, err := st.LatestBlockHeader().HashTreeRoot()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get block root: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
resp := &AttestationRewardsResponse{
Data: AttestationRewards{
IdealRewards: idealRewards,
TotalRewards: totalRewards,
},
ExecutionOptimistic: optimistic,
Finalized: s.FinalizationFetcher.IsFinalized(r.Context(), blkRoot),
}
network.WriteJson(w, resp)
}
func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.BeaconState, bool) {
segments := strings.Split(r.URL.Path, "/")
requestedEpoch, err := strconv.ParseUint(segments[len(segments)-1], 10, 64)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode epoch: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, false
}
if primitives.Epoch(requestedEpoch) < params.BeaconConfig().AltairForkEpoch {
errJson := &network.DefaultErrorJson{
Message: "Attestation rewards are not supported for Phase 0",
Code: http.StatusNotFound,
}
network.WriteError(w, errJson)
return nil, false
}
currentEpoch := uint64(slots.ToEpoch(s.TimeFetcher.CurrentSlot()))
if requestedEpoch+1 >= currentEpoch {
errJson := &network.DefaultErrorJson{
Code: http.StatusNotFound,
Message: "Attestation rewards are available after two epoch transitions to ensure all attestations have a chance of inclusion",
}
network.WriteError(w, errJson)
return nil, false
}
nextEpochEnd, err := slots.EpochEnd(primitives.Epoch(requestedEpoch + 1))
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get next epoch's ending slot: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return nil, false
}
st, err := s.Stater.StateBySlot(r.Context(), nextEpochEnd)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get state for epoch's starting slot: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return nil, false
}
return st, true
}
func attRewardsBalancesAndVals(
w http.ResponseWriter,
r *http.Request,
st state.BeaconState,
) (*precompute.Balance, []*precompute.Validator, []primitives.ValidatorIndex, bool) {
allVals, bal, err := altair.InitializePrecomputeValidators(r.Context(), st)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not initialize precompute validators: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
allVals, bal, err = altair.ProcessEpochParticipation(r.Context(), st, bal, allVals)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not process epoch participation: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
var rawValIds []string
if r.Body != http.NoBody {
if err = json.NewDecoder(r.Body).Decode(&rawValIds); err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode validators: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
}
valIndices := make([]primitives.ValidatorIndex, len(rawValIds))
for i, v := range rawValIds {
index, err := strconv.ParseUint(v, 10, 64)
if err != nil {
pubkey, err := bytesutil.FromHexString(v)
if err != nil || len(pubkey) != fieldparams.BLSPubkeyLength {
errJson := &network.DefaultErrorJson{
Message: fmt.Sprintf("%s is not a validator index or pubkey", v),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
var ok bool
valIndices[i], ok = st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
if !ok {
errJson := &network.DefaultErrorJson{
Message: fmt.Sprintf("No validator index found for pubkey %#x", pubkey),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
} else {
if index >= uint64(st.NumValidators()) {
errJson := &network.DefaultErrorJson{
Message: fmt.Sprintf("Validator index %d is too large. Maximum allowed index is %d", index, st.NumValidators()-1),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return nil, nil, nil, false
}
valIndices[i] = primitives.ValidatorIndex(index)
}
}
if len(valIndices) == 0 {
valIndices = make([]primitives.ValidatorIndex, len(allVals))
for i := 0; i < len(allVals); i++ {
valIndices[i] = primitives.ValidatorIndex(i)
}
}
if len(valIndices) == len(allVals) {
return bal, allVals, valIndices, true
} else {
filteredVals := make([]*precompute.Validator, len(valIndices))
for i, valIx := range valIndices {
filteredVals[i] = allVals[valIx]
}
return bal, filteredVals, valIndices, true
}
}
// idealAttRewards returns rewards for hypothetical, perfectly voting validators
// whose effective balances are over EJECTION_BALANCE and match the balances of the passed-in validators.
func idealAttRewards(
w http.ResponseWriter,
st state.BeaconState,
bal *precompute.Balance,
vals []*precompute.Validator,
) ([]IdealAttestationReward, bool) {
idealValsCount := uint64(16)
minIdealBalance := uint64(17)
maxIdealBalance := minIdealBalance + idealValsCount - 1
idealRewards := make([]IdealAttestationReward, 0, idealValsCount)
idealVals := make([]*precompute.Validator, 0, idealValsCount)
increment := params.BeaconConfig().EffectiveBalanceIncrement
for i := minIdealBalance; i <= maxIdealBalance; i++ {
for _, v := range vals {
if v.CurrentEpochEffectiveBalance/1e9 == i {
effectiveBalance := i * increment
idealVals = append(idealVals, &precompute.Validator{
IsActivePrevEpoch: true,
IsSlashed: false,
CurrentEpochEffectiveBalance: effectiveBalance,
IsPrevEpochSourceAttester: true,
IsPrevEpochTargetAttester: true,
IsPrevEpochHeadAttester: true,
})
idealRewards = append(idealRewards, IdealAttestationReward{EffectiveBalance: strconv.FormatUint(effectiveBalance, 10)})
break
}
}
}
deltas, err := altair.AttestationsDelta(st, bal, idealVals)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get attestations delta: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return nil, false
}
for i, d := range deltas {
idealRewards[i].Head = strconv.FormatUint(d.HeadReward, 10)
if d.SourcePenalty > 0 {
idealRewards[i].Source = fmt.Sprintf("-%s", strconv.FormatUint(d.SourcePenalty, 10))
} else {
idealRewards[i].Source = strconv.FormatUint(d.SourceReward, 10)
}
if d.TargetPenalty > 0 {
idealRewards[i].Target = fmt.Sprintf("-%s", strconv.FormatUint(d.TargetPenalty, 10))
} else {
idealRewards[i].Target = strconv.FormatUint(d.TargetReward, 10)
}
}
return idealRewards, true
}
func totalAttRewards(
w http.ResponseWriter,
st state.BeaconState,
bal *precompute.Balance,
vals []*precompute.Validator,
valIndices []primitives.ValidatorIndex,
) ([]TotalAttestationReward, bool) {
totalRewards := make([]TotalAttestationReward, len(valIndices))
for i, v := range valIndices {
totalRewards[i] = TotalAttestationReward{ValidatorIndex: strconv.FormatUint(uint64(v), 10)}
}
deltas, err := altair.AttestationsDelta(st, bal, vals)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not get attestations delta: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return nil, false
}
for i, d := range deltas {
totalRewards[i].Head = strconv.FormatUint(d.HeadReward, 10)
if d.SourcePenalty > 0 {
totalRewards[i].Source = fmt.Sprintf("-%s", strconv.FormatUint(d.SourcePenalty, 10))
} else {
totalRewards[i].Source = strconv.FormatUint(d.SourceReward, 10)
}
if d.TargetPenalty > 0 {
totalRewards[i].Target = fmt.Sprintf("-%s", strconv.FormatUint(d.TargetPenalty, 10))
} else {
totalRewards[i].Target = strconv.FormatUint(d.TargetReward, 10)
}
}
return totalRewards, true
}
func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) *network.DefaultErrorJson {
if errors.Is(err, lookup.BlockIdParseError{}) {
return &network.DefaultErrorJson{
Message: errors.Wrapf(err, "invalid block ID").Error(),
Message: "Invalid block ID: " + err.Error(),
Code: http.StatusBadRequest,
}
}
if err != nil {
return &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not get block from block ID").Error(),
Message: "Could not get block from block ID: " + err.Error(),
Code: http.StatusInternalServerError,
}
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
return &network.DefaultErrorJson{
Message: errors.Wrapf(err, "could not find requested block").Error(),
Message: "Could not find requested block" + err.Error(),
Code: http.StatusNotFound,
}
}
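For orientation, a minimal client sketch for the attestation rewards endpoint (/eth/v1/beacon/rewards/attestations/{epoch}, registered later in the RPC service). The node URL and the validator ids in the request body are placeholder assumptions, and only the fields the sketch reads are re-declared locally:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Minimal local copies of the response types.
type TotalAttestationReward struct {
	ValidatorIndex string `json:"validator_index"`
	Head           string `json:"head"`
	Target         string `json:"target"`
	Source         string `json:"source"`
}

type AttestationRewards struct {
	TotalRewards []TotalAttestationReward `json:"total_rewards"`
}

type AttestationRewardsResponse struct {
	Data AttestationRewards `json:"data"`
}

func main() {
	// Optional body: a JSON array of validator indices or pubkeys. Placeholder values here.
	body, err := json.Marshal([]string{"20", "21"})
	if err != nil {
		panic(err)
	}
	// Placeholder node address; the epoch is the last path segment.
	resp, err := http.Post("http://localhost:3500/eth/v1/beacon/rewards/attestations/1", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out := &AttestationRewardsResponse{}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		panic(err)
	}
	for _, r := range out.Data.TotalRewards {
		fmt.Println(r.ValidatorIndex, r.Head, r.Source, r.Target)
	}
}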

View File

@@ -4,8 +4,11 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/prysmaticlabs/go-bitfield"
@@ -13,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
mockstategen "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen/mock"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -32,7 +36,7 @@ import (
func TestBlockRewards(t *testing.T) {
valCount := 64
st, err := util.NewBeaconStateAltair()
st, err := util.NewBeaconStateCapella()
require.NoError(t, st.SetSlot(1))
require.NoError(t, err)
validators := make([]*eth.Validator, 0, valCount)
@@ -193,6 +197,285 @@ func TestBlockRewards(t *testing.T) {
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "block rewards are not supported for Phase 0 blocks", e.Message)
assert.Equal(t, "Block rewards are not supported for Phase 0 blocks", e.Message)
})
}
func TestAttestationRewards(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 1
params.OverrideBeaconConfig(cfg)
valCount := 64
st, err := util.NewBeaconStateCapella()
require.NoError(t, err)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*3-1))
validators := make([]*eth.Validator, 0, valCount)
balances := make([]uint64, 0, valCount)
secretKeys := make([]bls.SecretKey, 0, valCount)
for i := 0; i < valCount; i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
secretKeys = append(secretKeys, blsKey)
validators = append(validators, &eth.Validator{
PublicKey: blsKey.PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance / 64 * uint64(i+1),
})
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance/64*uint64(i+1))
}
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetBalances(balances))
require.NoError(t, st.SetInactivityScores(make([]uint64, len(validators))))
participation := make([]byte, len(validators))
for i := range participation {
participation[i] = 0b111
}
require.NoError(t, st.SetCurrentParticipationBits(participation))
require.NoError(t, st.SetPreviousParticipationBits(participation))
currentSlot := params.BeaconConfig().SlotsPerEpoch * 3
mockChainService := &mock.ChainService{Optimistic: true, Slot: &currentSlot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
params.BeaconConfig().SlotsPerEpoch*3 - 1: st,
}},
TimeFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
}
t.Run("ok - ideal rewards", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 16, len(resp.Data.IdealRewards))
sum := uint64(0)
for _, r := range resp.Data.IdealRewards {
hr, err := strconv.ParseUint(r.Head, 10, 64)
require.NoError(t, err)
sr, err := strconv.ParseUint(r.Source, 10, 64)
require.NoError(t, err)
tr, err := strconv.ParseUint(r.Target, 10, 64)
require.NoError(t, err)
sum += hr + sr + tr
}
assert.Equal(t, uint64(20756849), sum)
})
t.Run("ok - filtered vals", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
pubkey := fmt.Sprintf("%#x", secretKeys[10].PublicKey().Marshal())
valIds, err := json.Marshal([]string{"20", pubkey})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data.TotalRewards))
sum := uint64(0)
for _, r := range resp.Data.TotalRewards {
hr, err := strconv.ParseUint(r.Head, 10, 64)
require.NoError(t, err)
sr, err := strconv.ParseUint(r.Source, 10, 64)
require.NoError(t, err)
tr, err := strconv.ParseUint(r.Target, 10, 64)
require.NoError(t, err)
sum += hr + sr + tr
}
assert.Equal(t, uint64(794265), sum)
})
t.Run("ok - all vals", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 64, len(resp.Data.TotalRewards))
sum := uint64(0)
for _, r := range resp.Data.TotalRewards {
hr, err := strconv.ParseUint(r.Head, 10, 64)
require.NoError(t, err)
sr, err := strconv.ParseUint(r.Source, 10, 64)
require.NoError(t, err)
tr, err := strconv.ParseUint(r.Target, 10, 64)
require.NoError(t, err)
sum += hr + sr + tr
}
assert.Equal(t, uint64(54221955), sum)
})
t.Run("ok - penalty", func(t *testing.T) {
st, err := util.NewBeaconStateCapella()
require.NoError(t, err)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*3-1))
validators := make([]*eth.Validator, 0, valCount)
balances := make([]uint64, 0, valCount)
secretKeys := make([]bls.SecretKey, 0, valCount)
for i := 0; i < valCount; i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
secretKeys = append(secretKeys, blsKey)
validators = append(validators, &eth.Validator{
PublicKey: blsKey.PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance / 64 * uint64(i),
})
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance/64*uint64(i))
}
validators[63].Slashed = true
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetBalances(balances))
require.NoError(t, st.SetInactivityScores(make([]uint64, len(validators))))
participation := make([]byte, len(validators))
for i := range participation {
participation[i] = 0b111
}
require.NoError(t, st.SetCurrentParticipationBits(participation))
require.NoError(t, st.SetPreviousParticipationBits(participation))
currentSlot := params.BeaconConfig().SlotsPerEpoch * 3
mockChainService := &mock.ChainService{Optimistic: true, Slot: &currentSlot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
params.BeaconConfig().SlotsPerEpoch*3 - 1: st,
}},
TimeFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
}
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
valIds, err := json.Marshal([]string{"63"})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "0", resp.Data.TotalRewards[0].Head)
assert.Equal(t, "-432270", resp.Data.TotalRewards[0].Source)
assert.Equal(t, "-802788", resp.Data.TotalRewards[0].Target)
})
t.Run("invalid validator index/pubkey", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
valIds, err := json.Marshal([]string{"10", "foo"})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "foo is not a validator index or pubkey", e.Message)
})
t.Run("unknown validator pubkey", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
privkey, err := bls.RandKey()
require.NoError(t, err)
pubkey := fmt.Sprintf("%#x", privkey.PublicKey().Marshal())
valIds, err := json.Marshal([]string{"10", pubkey})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "No validator index found for pubkey "+pubkey, e.Message)
})
t.Run("validator index too large", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
valIds, err := json.Marshal([]string{"10", "999"})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Validator index 999 is too large. Maximum allowed index is 63", e.Message)
})
t.Run("phase 0", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/0"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusNotFound, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusNotFound, e.Code)
assert.Equal(t, "Attestation rewards are not supported for Phase 0", e.Message)
})
t.Run("invalid epoch", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/foo"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "Could not decode epoch"))
})
t.Run("previous epoch", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/2"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusNotFound, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusNotFound, e.Code)
assert.Equal(t, "Attestation rewards are available after two epoch transitions to ensure all attestations have a chance of inclusion", e.Message)
})
}

View File

@@ -11,4 +11,8 @@ type Server struct {
OptimisticModeFetcher blockchain.OptimisticModeFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ReplayerBuilder stategen.ReplayerBuilder
// TODO: Init
TimeFetcher blockchain.TimeFetcher
Stater lookup.Stater
HeadFetcher blockchain.HeadFetcher
}

View File

@@ -1,9 +1,9 @@
package rewards
type BlockRewardsResponse struct {
Data *BlockRewards `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data BlockRewards `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type BlockRewards struct {
@@ -14,3 +14,29 @@ type BlockRewards struct {
ProposerSlashings string `json:"proposer_slashings"`
AttesterSlashings string `json:"attester_slashings"`
}
type AttestationRewardsResponse struct {
Data AttestationRewards `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type AttestationRewards struct {
IdealRewards []IdealAttestationReward `json:"ideal_rewards"`
TotalRewards []TotalAttestationReward `json:"total_rewards"`
}
type IdealAttestationReward struct {
EffectiveBalance string `json:"effective_balance"`
Head string `json:"head"`
Target string `json:"target"`
Source string `json:"source"`
}
type TotalAttestationReward struct {
ValidatorIndex string `json:"validator_index"`
Head string `json:"head"`
Target string `json:"target"`
Source string `json:"source"`
InclusionDelay string `json:"inclusion_delay"`
}

View File

@@ -66,11 +66,7 @@ func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
return nil, status.Errorf(codes.InvalidArgument, "Validator is not an aggregator")
}
if err := vs.AttPool.AggregateUnaggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex); err != nil {
return nil, status.Errorf(codes.Internal, "Could not aggregate unaggregated attestations")
}
aggregatedAtts := vs.AttPool.AggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
// Filter out the best aggregated attestation (ie. the one with the most aggregated bits).
if len(aggregatedAtts) == 0 {
aggregatedAtts = vs.AttPool.UnaggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)

View File

@@ -71,6 +71,8 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
blockchain.LateBlockAttemptedReorgCount.Inc()
}
log.Infof("Updated head at %s", time.Since(t))
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
if err := vs.optimisticStatus(ctx); err != nil {
@@ -91,6 +93,8 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", req.Slot, err)
}
log.Infof("Processed slots at %s", time.Since(t))
// Set slot, graffiti, randao reveal, and parent root.
sBlk.SetSlot(req.Slot)
sBlk.SetGraffiti(req.Graffiti)
@@ -104,8 +108,10 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}
sBlk.SetProposerIndex(idx)
log.Infof("Retrieved proposer index at %s", time.Since(t))
if features.Get().BuildBlockParallel {
if err := vs.BuildBlockParallel(ctx, sBlk, head); err != nil {
if err := vs.BuildBlockParallel(ctx, sBlk, head, t); err != nil {
return nil, errors.Wrap(err, "could not build block in parallel")
}
} else {
@@ -162,6 +168,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
}
sBlk.SetStateRoot(sr)
log.Infof("Computed state root at %s", time.Since(t))
log.WithFields(logrus.Fields{
"slot": req.Slot,
@@ -191,7 +198,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Phase0{Phase0: pb.(*ethpb.BeaconBlock)}}, nil
}
func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.SignedBeaconBlock, head state.BeaconState) error {
func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.SignedBeaconBlock, head state.BeaconState, startTime time.Time) error {
// Build consensus fields in background
var wg sync.WaitGroup
wg.Add(1)
@@ -205,6 +212,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
log.WithError(err).Error("Could not get eth1data")
}
sBlk.SetEth1Data(eth1Data)
log.Infof("checked eth1data majority at %s", time.Since(startTime))
// Set deposit and attestation.
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data) // TODO: split attestations and deposits
@@ -216,20 +224,27 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
sBlk.SetDeposits(deposits)
sBlk.SetAttestations(atts)
}
log.Infof("packed deposits and attestations at %s", time.Since(startTime))
// Set slashings.
validProposerSlashings, validAttSlashings := vs.getSlashings(ctx, head)
sBlk.SetProposerSlashings(validProposerSlashings)
sBlk.SetAttesterSlashings(validAttSlashings)
log.Infof("got slashings at %s", time.Since(startTime))
// Set exits.
sBlk.SetVoluntaryExits(vs.getExits(head, sBlk.Block().Slot()))
log.Infof("set voluntary exits at %s", time.Since(startTime))
// Set sync aggregate. New in Altair.
vs.setSyncAggregate(ctx, sBlk)
log.Infof("set sync aggregate at %s", time.Since(startTime))
// Set bls to execution change. New in Capella.
vs.setBlsToExecData(sBlk, head)
log.Infof("set bls data at %s", time.Since(startTime))
}()
localPayload, err := vs.getLocalPayload(ctx, sBlk.Block(), head)
@@ -246,6 +261,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
if err := setExecutionData(ctx, sBlk, localPayload, builderPayload); err != nil {
return status.Errorf(codes.Internal, "Could not set execution data: %v", err)
}
log.Infof("set execution data at %s", time.Since(startTime))
wg.Wait() // Wait until block is built via consensus and execution fields.

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"math/big"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -23,6 +24,7 @@ func (vs *Server) packDepositsAndAttestations(ctx context.Context, head state.Be
var atts []*ethpb.Attestation
eg.Go(func() error {
stTime := time.Now()
// Pack ETH1 deposits which have not been included in the beacon chain.
localDeposits, err := vs.deposits(egctx, head, eth1Data)
if err != nil {
@@ -35,10 +37,13 @@ func (vs *Server) packDepositsAndAttestations(ctx context.Context, head state.Be
default:
}
deposits = localDeposits
log.Infof("took %s to retrieve deposits", time.Since(stTime))
return nil
})
eg.Go(func() error {
stTime := time.Now()
// Pack aggregated attestations which have not been included in the beacon chain.
localAtts, err := vs.packAttestations(egctx, head)
if err != nil {
@@ -51,6 +56,7 @@ func (vs *Server) packDepositsAndAttestations(ctx context.Context, head state.Be
default:
}
atts = localAtts
log.Infof("took %s to retrieve attestations", time.Since(stTime))
return nil
})

View File

@@ -30,6 +30,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
rpcBuilder "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/debug"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/node"
@@ -211,8 +212,19 @@ func (s *Service) Start() {
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ReplayerBuilder: ch,
TimeFetcher: s.cfg.GenesisTimeFetcher,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
}
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", rewardsServer.BlockRewards)
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/attestations/{epoch}", rewardsServer.AttestationRewards)
builderServer := &rpcBuilder.Server{
FinalizationFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
Stater: stater,
}
s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals)
validatorServer := &validatorv1alpha1.Server{
Ctx: s.ctx,

View File

@@ -207,7 +207,7 @@ func handle32ByteArrays(val [][32]byte, indices []uint64, convertAll bool) ([][3
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
length = len(val)
return stateutil.OptimizedValidatorRoots(val)
}
roots := make([][32]byte, 0, length)
rootCreator := func(input *ethpb.Validator) error {
@@ -218,15 +218,6 @@ func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll b
roots = append(roots, newRoot)
return nil
}
if convertAll {
for i := range val {
err := rootCreator(val[i])
if err != nil {
return nil, err
}
}
return roots, nil
}
if len(val) > 0 {
for _, idx := range indices {
if idx > uint64(len(val))-1 {

View File

@@ -155,6 +155,20 @@ func (e *epochBoundaryState) put(blockRoot [32]byte, s state.BeaconState) error
func (e *epochBoundaryState) delete(blockRoot [32]byte) error {
e.lock.Lock()
defer e.lock.Unlock()
rInfo, ok, err := e.getByBlockRootLockFree(blockRoot)
if err != nil {
return err
}
if !ok {
return nil
}
slotInfo := &slotRootInfo{
slot: rInfo.state.Slot(),
blockRoot: blockRoot,
}
if err = e.slotRootCache.Delete(slotInfo); err != nil {
return err
}
return e.rootStateCache.Delete(&rootStateInfo{
root: blockRoot,
})

View File

@@ -36,12 +36,12 @@ func (_ *State) replayBlocks(
var err error
start := time.Now()
log = log.WithFields(logrus.Fields{
rLog := log.WithFields(logrus.Fields{
"startSlot": state.Slot(),
"endSlot": targetSlot,
"diff": targetSlot - state.Slot(),
})
log.Debug("Replaying state")
rLog.Debug("Replaying state")
// The input block list is sorted in decreasing slots order.
if len(signed) > 0 {
for i := len(signed) - 1; i >= 0; i-- {
@@ -71,7 +71,7 @@ func (_ *State) replayBlocks(
}
duration := time.Since(start)
log.WithFields(logrus.Fields{
rLog.WithFields(logrus.Fields{
"duration": duration,
}).Debug("Replayed state")

View File

@@ -2,8 +2,10 @@ package stategen
import (
"context"
"fmt"
"math"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -79,6 +81,41 @@ func (s *State) saveStateByRoot(ctx context.Context, blockRoot [32]byte, st stat
if err := s.epochBoundaryStateCache.put(blockRoot, st); err != nil {
return err
}
} else {
// Always check that the correct epoch boundary states have been saved
// for the current epoch.
epochStart, err := slots.EpochStart(slots.ToEpoch(st.Slot()))
if err != nil {
return err
}
bRoot, err := helpers.BlockRootAtSlot(st, epochStart)
if err != nil {
return err
}
_, ok, err := s.epochBoundaryStateCache.getByBlockRoot([32]byte(bRoot))
if err != nil {
return err
}
// We only recover the boundary state under this condition:
//
// 1) The lookup above failed, which indicates that the epoch boundary was skipped
// due to a missed slot; we then recover by saving the state at that slot here.
if !ok {
// Only recover the state if it is in our hot state cache, otherwise we
// simply skip this step.
if s.hotStateCache.has([32]byte(bRoot)) {
log.WithFields(logrus.Fields{
"slot": epochStart,
"root": fmt.Sprintf("%#x", bRoot),
}).Debug("Recovering state for epoch boundary cache")
hState := s.hotStateCache.get([32]byte(bRoot))
if err := s.epochBoundaryStateCache.put([32]byte(bRoot), hState); err != nil {
return err
}
}
}
}
// On an intermediate slot, save state summary.
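To make the recovery condition above concrete, a small standalone sketch of the boundary-slot arithmetic, with mainnet's 32 slots per epoch assumed (the real code uses slots.ToEpoch, slots.EpochStart, and helpers.BlockRootAtSlot for the lookup):

package main

import "fmt"

func main() {
	// Assumption: mainnet preset of 32 slots per epoch.
	const slotsPerEpoch = uint64(32)

	// A state being saved at slot 37 sits in epoch 1, whose boundary slot is 32.
	stateSlot := uint64(37)
	epoch := stateSlot / slotsPerEpoch  // slots.ToEpoch(37) == 1
	epochStart := epoch * slotsPerEpoch // slots.EpochStart(1) == 32

	// If the block root at slot 32 is missing from the epoch boundary cache
	// (the boundary slot was skipped), the state is re-inserted from the hot
	// state cache so later replays can still start from an epoch boundary.
	fmt.Println(epoch, epochStart) // 1 32
}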

View File

@@ -7,6 +7,7 @@ import (
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
@@ -137,6 +138,34 @@ func TestSaveState_NoSaveNotEpochBoundary(t *testing.T) {
require.Equal(t, false, beaconDB.HasState(ctx, r))
}
func TestSaveState_RecoverForEpochBoundary(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := New(beaconDB, doublylinkedtree.New())
beaconState, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
r := [32]byte{'A'}
boundaryRoot := [32]byte{'B'}
require.NoError(t, beaconState.UpdateBlockRootAtIndex(0, boundaryRoot))
b := util.NewBeaconBlock()
util.SaveBlock(t, ctx, beaconDB, b)
gRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
// Save boundary state to the hot state cache.
boundaryState, _ := util.DeterministicGenesisState(t, 32)
service.hotStateCache.put(boundaryRoot, boundaryState)
require.NoError(t, service.SaveState(ctx, r, beaconState))
rInfo, ok, err := service.epochBoundaryStateCache.getByBlockRoot(boundaryRoot)
assert.NoError(t, err)
assert.Equal(t, true, ok, "state does not exist in cache")
assert.Equal(t, rInfo.root, boundaryRoot, "incorrect root of root state info")
assert.Equal(t, rInfo.state.Slot(), primitives.Slot(0), "incorrect slot of state")
}
func TestSaveState_CanSaveHotStateToDB(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()

View File

@@ -30,7 +30,7 @@ func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
}
func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
roots, err := optimizedValidatorRoots(validators)
roots, err := OptimizedValidatorRoots(validators)
if err != nil {
return [32]byte{}, err
}
@@ -51,7 +51,9 @@ func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
return res, nil
}
func optimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
// OptimizedValidatorRoots uses an optimized routine with gohashtree in order to
// derive a list of validator roots from a list of validator objects.
func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
// Exit early if no validators are provided.
if len(validators) == 0 {
return [][32]byte{}, nil

View File

@@ -139,7 +139,7 @@ func (s *Service) writeBlockBatchToStream(ctx context.Context, batch blockBatch,
continue
}
if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
log.WithError(chunkErr).Error("Could not send a chunked response")
log.WithError(chunkErr).Debug("Could not send a chunked response")
return chunkErr
}
}

View File

@@ -0,0 +1,19 @@
# From Bazel's perspective, this is almost equivalent to always specifying
# --extra_toolchains on every bazel <...> command-line invocation. It also
# means there is no way to disable the toolchain with the command line. This is
# useful if you find bazel-hermetic-cc useful enough to compile for all of your
# targets and tools.
#
# With BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 Bazel stops detecting the default
# host toolchain. Configuring toolchains is complicated enough, and the
# auto-detection (read: fallback to non-hermetic toolchain) is a footgun best
# avoided. This option is not documented in bazel, so may break. If you intend
# to use the hermetic toolchain exclusively, it won't hurt.
build --action_env BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
# This snippet instructs Bazel to use the registered "new kinds of toolchains".
# This flag is not needed after this issue is closed: https://github.com/bazelbuild/bazel/issues/7260
build --incompatible_enable_cc_toolchain_resolution
# Add a no-op warning for users still using --config=llvm
build:llvm --unconditional_warning="llvm config is no longer used as clang is now the default compiler"

View File

@@ -202,11 +202,7 @@ var (
Usage: "Sets the maximum number of headers that a deposit log query can fetch.",
Value: uint64(1000),
}
// EnableRegistrationCache a temporary flag for enabling the validator registration cache instead of db.
EnableRegistrationCache = &cli.BoolFlag{
Name: "enable-registration-cache",
Usage: "A temporary flag for enabling the validator registration cache instead of persisting in db. The cache will clear on restart.",
}
// WeakSubjectivityCheckpoint defines the weak subjectivity checkpoint the node must sync through to defend against long range attacks.
WeakSubjectivityCheckpoint = &cli.StringFlag{
Name: "weak-subjectivity-checkpoint",

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"path/filepath"
"runtime"
runtimeDebug "runtime/debug"
gethlog "github.com/ethereum/go-ethereum/log"
@@ -58,7 +59,6 @@ var appFlags = []cli.Flag{
flags.InteropGenesisTimeFlag,
flags.SlotsPerArchivedPoint,
flags.EnableDebugRPCEndpoints,
flags.EnableRegistrationCache,
flags.SubscribeToAllSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,
@@ -204,6 +204,7 @@ func main() {
if err := fdlimits.SetMaxFdLimits(); err != nil {
return err
}
runtime.GOMAXPROCS(2)
return cmd.ValidateNoArgs(ctx)
}

View File

@@ -113,7 +113,6 @@ var appHelpFlagGroups = []flagGroup{
flags.BlockBatchLimit,
flags.BlockBatchLimitBurstFactor,
flags.EnableDebugRPCEndpoints,
flags.EnableRegistrationCache,
flags.SubscribeToAllSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,

View File

@@ -134,7 +134,6 @@ func TestExitAccountsCli_OK_AllPublicKeys(t *testing.T) {
mockNodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Times(2).
Return(&ethpb.Genesis{GenesisTime: genesisTime}, nil)
mockValidatorClient.EXPECT().

View File

@@ -70,6 +70,7 @@ type Flags struct {
PrepareAllPayloads bool // PrepareAllPayloads informs the engine to prepare a block on every slot.
BuildBlockParallel bool // BuildBlockParallel builds beacon block for proposer in parallel.
AggregateParallel bool // AggregateParallel aggregates attestations in parallel.
// KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have
// changed on disk. This feature is for advanced use cases only.
@@ -229,6 +230,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logEnabled(disableBuildBlockParallel)
cfg.BuildBlockParallel = false
}
if ctx.IsSet(aggregateParallel.Name) {
logEnabled(aggregateParallel)
cfg.AggregateParallel = true
}
if ctx.IsSet(disableResourceManager.Name) {
logEnabled(disableResourceManager)
cfg.DisableResourceManager = true

View File

@@ -32,6 +32,12 @@ var (
Usage: deprecatedUsage,
Hidden: true,
}
deprecatedEnableRegistrationCache = &cli.BoolFlag{
Name: "enable-registration-cache",
Usage: deprecatedUsage,
Hidden: true,
}
)
// Deprecated flags for both the beacon node and validator client.
@@ -41,6 +47,7 @@ var deprecatedFlags = []cli.Flag{
deprecatedEnableReorgLateBlocks,
deprecatedDisableGossipBatchAggregation,
deprecatedBuildBlockParallel,
deprecatedEnableRegistrationCache,
}
// deprecatedBeaconFlags contains flags that are still used by other components

View File

@@ -148,6 +148,17 @@ var (
Name: "disable-resource-manager",
Usage: "Disables running the libp2p resource manager",
}
// DisableRegistrationCache is a flag for disabling the validator registration cache and using the db instead.
DisableRegistrationCache = &cli.BoolFlag{
Name: "disable-registration-cache",
Usage: "A temporary flag for disabling the validator registration cache and using the db instead. Note: registrations do not clear on restart while using the db.",
}
aggregateParallel = &cli.BoolFlag{
Name: "aggregate-parallel",
Usage: "Enables parallel aggregation of attestations",
}
)
// devModeFlags holds list of flags that are set when development mode is on.
@@ -200,6 +211,8 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
aggregateSecondInterval,
aggregateThirdInterval,
disableResourceManager,
DisableRegistrationCache,
aggregateParallel,
}...)...)
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.

View File

@@ -84,6 +84,15 @@ type ProposerSettings struct {
DefaultConfig *ProposerOption
}
// ShouldBeSaved checks whether the settings are worth persisting to the database.
// Pseudocode: conditions for being saved into the database
// 1. settings are not nil
// 2. ProposeConfig is not nil (specific settings for each validator key); DefaultConfig can be nil in this case and the beacon node settings are used as a fallback
// 3. DefaultConfig is not nil and has at least fee recipient settings (general settings for all validator keys, overridden per key by ProposeConfig when available); ProposeConfig can be nil in this case
func (settings *ProposerSettings) ShouldBeSaved() bool {
return settings != nil && (settings.ProposeConfig != nil || settings.DefaultConfig != nil && settings.DefaultConfig.FeeRecipientConfig != nil)
}
// ToPayload converts struct to ProposerSettingsPayload
func (ps *ProposerSettings) ToPayload() *validatorpb.ProposerSettingsPayload {
if ps == nil {

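To make the conditions above concrete, a small sketch assuming it sits alongside the types in this package and imports go-ethereum's common package (values are hypothetical; the test file below exercises the same cases exhaustively):

// Sketch: which settings are considered worth persisting.
func exampleShouldBeSaved() {
	var empty *ProposerSettings
	_ = empty.ShouldBeSaved() // false: nil settings

	withDefault := &ProposerSettings{
		DefaultConfig: &ProposerOption{
			FeeRecipientConfig: &FeeRecipientConfig{FeeRecipient: common.Address{}},
		},
	}
	_ = withDefault.ShouldBeSaved() // true: default fee recipient is set

	builderOnly := &ProposerSettings{
		DefaultConfig: &ProposerOption{BuilderConfig: &BuilderConfig{Enabled: true}},
	}
	_ = builderOnly.ShouldBeSaved() // false: builder data only, no fee recipient
}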
View File

@@ -98,3 +98,117 @@ func Test_Proposer_Setting_Cloning(t *testing.T) {
})
}
func TestProposerSettings_ShouldBeSaved(t *testing.T) {
key1hex := "0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a"
key1, err := hexutil.Decode(key1hex)
require.NoError(t, err)
type fields struct {
ProposeConfig map[[fieldparams.BLSPubkeyLength]byte]*ProposerOption
DefaultConfig *ProposerOption
}
tests := []struct {
name string
fields fields
want bool
}{
{
name: "Should be saved, proposeconfig populated and no default config",
fields: fields{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*ProposerOption{
bytesutil.ToBytes48(key1): {
FeeRecipientConfig: &FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
BuilderConfig: &BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(40000000),
Relays: []string{"https://example-relay.com"},
},
},
},
DefaultConfig: nil,
},
want: true,
},
{
name: "Should be saved, default populated and no proposeconfig ",
fields: fields{
ProposeConfig: nil,
DefaultConfig: &ProposerOption{
FeeRecipientConfig: &FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
BuilderConfig: &BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(40000000),
Relays: []string{"https://example-relay.com"},
},
},
},
want: true,
},
{
name: "Should be saved, all populated",
fields: fields{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*ProposerOption{
bytesutil.ToBytes48(key1): {
FeeRecipientConfig: &FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
BuilderConfig: &BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(40000000),
Relays: []string{"https://example-relay.com"},
},
},
},
DefaultConfig: &ProposerOption{
FeeRecipientConfig: &FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
BuilderConfig: &BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(40000000),
Relays: []string{"https://example-relay.com"},
},
},
},
want: true,
},
{
name: "Should not be saved, proposeconfig not populated and default not populated",
fields: fields{
ProposeConfig: nil,
DefaultConfig: nil,
},
want: false,
},
{
name: "Should not be saved, builder data only",
fields: fields{
ProposeConfig: nil,
DefaultConfig: &ProposerOption{
BuilderConfig: &BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(40000000),
Relays: []string{"https://example-relay.com"},
},
},
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
settings := &ProposerSettings{
ProposeConfig: tt.fields.ProposeConfig,
DefaultConfig: tt.fields.DefaultConfig,
}
if got := settings.ShouldBeSaved(); got != tt.want {
t.Errorf("ShouldBeSaved() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -3,16 +3,11 @@
package blst
import (
"runtime"
blst "github.com/supranational/blst/bindings/go"
)
func init() {
// Reserve 1 core for general application work
maxProcs := runtime.GOMAXPROCS(0) - 1
if maxProcs <= 0 {
maxProcs = 1
}
maxProcs := 1
blst.SetMaxProcs(maxProcs)
}

View File

@@ -4867,12 +4867,3 @@ def prysm_deps():
sum = "h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=",
version = "v1.24.0",
)
# Note: go_repository is already wrapped with maybe!
maybe(
git_repository,
name = "vaticle_bazel_distribution",
commit = "96424c85195a97dad81f69fdbbef2e1574bf8801",
remote = "https://github.com/vaticle/bazel-distribution",
shallow_since = "1569509514 +0300",
)

View File

@@ -134,6 +134,7 @@ func TestUnmarshalState(t *testing.T) {
}()
bc := params.BeaconConfig()
altairSlot, err := slots.EpochStart(bc.AltairForkEpoch)
require.NoError(t, err)
bellaSlot, err := slots.EpochStart(bc.BellatrixForkEpoch)
require.NoError(t, err)
cases := []struct {
@@ -205,6 +206,7 @@ func TestUnmarshalBlock(t *testing.T) {
altairv := bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion)
bellav := bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion)
altairS, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
require.NoError(t, err)
bellaS, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
cases := []struct {
@@ -296,6 +298,7 @@ func TestUnmarshalBlindedBlock(t *testing.T) {
altairv := bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion)
bellav := bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion)
altairS, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
require.NoError(t, err)
bellaS, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
cases := []struct {

View File

@@ -147,11 +147,11 @@
}
},
"ineffassign": {
"only_files": {
"beacon-chain/.*": "",
"shared/.*": "",
"slasher/.*": "",
"validator/.*": ""
"exclude_files": {
"external/.*": "Third party code",
"rules_go_work-.*": "Third party code",
".*\\.pb.*.go": "Generated code is ok",
".*generated\\.ssz\\.go": "Generated code is ok"
}
},
"properpermissions": {

File diff suppressed because it is too large

View File

@@ -597,6 +597,76 @@ func local_request_KeyManagement_DeleteGasLimit_0(ctx context.Context, marshaler
}
func request_KeyManagement_SetVoluntaryExit_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq SetVoluntaryExitRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["pubkey"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey")
}
pubkey, err := runtime.Bytes(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err)
}
protoReq.Pubkey = (pubkey)
msg, err := client.SetVoluntaryExit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_KeyManagement_SetVoluntaryExit_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq SetVoluntaryExitRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["pubkey"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey")
}
pubkey, err := runtime.Bytes(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err)
}
protoReq.Pubkey = (pubkey)
msg, err := server.SetVoluntaryExit(ctx, &protoReq)
return msg, metadata, err
}
// RegisterKeyManagementHandlerServer registers the http handlers for service KeyManagement to "mux".
// UnaryRPC :call KeyManagementServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -879,6 +949,29 @@ func RegisterKeyManagementHandlerServer(ctx context.Context, mux *runtime.ServeM
})
mux.Handle("POST", pattern_KeyManagement_SetVoluntaryExit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/SetVoluntaryExit")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_SetVoluntaryExit_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_SetVoluntaryExit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -1160,6 +1253,26 @@ func RegisterKeyManagementHandlerClient(ctx context.Context, mux *runtime.ServeM
})
mux.Handle("POST", pattern_KeyManagement_SetVoluntaryExit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/SetVoluntaryExit")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_SetVoluntaryExit_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_SetVoluntaryExit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -1187,6 +1300,8 @@ var (
pattern_KeyManagement_SetGasLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"internal", "eth", "v1", "validator", "pubkey", "gas_limit"}, ""))
pattern_KeyManagement_DeleteGasLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"internal", "eth", "v1", "validator", "pubkey", "gas_limit"}, ""))
pattern_KeyManagement_SetVoluntaryExit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"internal", "eth", "v1", "validator", "pubkey", "voluntary_exit"}, ""))
)
var (
@@ -1213,4 +1328,6 @@ var (
forward_KeyManagement_SetGasLimit_0 = runtime.ForwardResponseMessage
forward_KeyManagement_DeleteGasLimit_0 = runtime.ForwardResponseMessage
forward_KeyManagement_SetVoluntaryExit_0 = runtime.ForwardResponseMessage
)

View File

@@ -18,6 +18,7 @@ package ethereum.eth.service;
import "google/api/annotations.proto";
import "google/protobuf/descriptor.proto";
import "google/protobuf/empty.proto";
import "proto/eth/ext/options.proto";
option csharp_namespace = "Ethereum.Eth.Service";
option go_package = "github.com/prysmaticlabs/prysm/v4/proto/eth/service";
@@ -217,6 +218,24 @@ service KeyManagement {
};
}
// SetVoluntaryExit creates a signed voluntary exit message and returns a VoluntaryExit object.
//
// Spec page: https://ethereum.github.io/keymanager-APIs/?urls.primaryName=dev#/Voluntary%20Exit/signVoluntaryExit
//
// HTTP response status codes:
// - 200: Successfully created and signed voluntary exit
// - 400: Bad request, malformed request
// - 401: Unauthorized, no token is found.
// - 403: Forbidden, a token is found but is invalid
// - 404: Path not found
// - 500: Validator internal error
rpc SetVoluntaryExit(SetVoluntaryExitRequest) returns (SetVoluntaryExitResponse) {
option (google.api.http) = {
post: "/internal/eth/v1/validator/{pubkey}/voluntary_exit",
body: "*"
};
}
}
message ListKeystoresResponse {
@@ -352,3 +371,20 @@ message SetGasLimitRequest {
message DeleteGasLimitRequest {
bytes pubkey = 1;
}
message SetVoluntaryExitRequest {
bytes pubkey = 1 [(ethereum.eth.ext.ssz_size) = "48"];
uint64 epoch = 2 [(ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives.Epoch"];
}
message SetVoluntaryExitResponse {
message SignedVoluntaryExit {
message VoluntaryExit {
uint64 epoch = 1 [(ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives.Epoch"];
uint64 validator_index = 2 [(ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives.ValidatorIndex"];
}
VoluntaryExit message = 1;
bytes signature = 2;
}
SignedVoluntaryExit data = 1;
}

View File

@@ -451,14 +451,8 @@ func TestCopyBlindedBeaconBlockBodyCapella(t *testing.T) {
func bytes(length int) []byte {
b := make([]byte, length)
_, err := rand.Read(b)
if err != nil {
panic(err)
}
for i := 0; i < length; i++ {
if b[i] == 0x00 {
b[i] = uint8(rand.Int())
}
b[i] = uint8(rand.Int31n(255) + 1)
}
return b
}

View File

@@ -256,7 +256,7 @@ func (c *componentHandler) required() []e2etypes.ComponentRunner {
func (c *componentHandler) printPIDs(logger func(string, ...interface{})) {
msg := "\nPID of components. Attach a debugger... if you dare!\n\n"
msg = "This test PID: " + strconv.Itoa(os.Getpid()) + " (parent=" + strconv.Itoa(os.Getppid()) + ")\n"
msg += "This test PID: " + strconv.Itoa(os.Getpid()) + " (parent=" + strconv.Itoa(os.Getppid()) + ")\n"
// Beacon chain nodes
msg += fmt.Sprintf("Beacon chain nodes: %v\n", PIDsFromMultiComponentRunner(c.beaconNodes))

View File

@@ -114,7 +114,7 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler
for i := uint64(0); i < N; i++ {
index := i
g.Go(func() error {
tx, err := txfuzz.RandomValidTx(client, f, sender, nonce+index, expectedPrice, nil, al)
tx, err := txfuzz.RandomValidTx(client, f, sender, nonce+index, gasPrice, nil, al)
if err != nil {
// In the event the transaction constructed is not valid, we continue with the routine
// rather than complete stop it.
@@ -129,9 +129,12 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler
return nil
}
err = backend.SendTransaction(context.Background(), signedTx)
// We continue on if the constructed transaction is invalid
// and can't be submitted on chain.
//nolint:nilerr
if err != nil {
// We continue on if the constructed transaction is invalid
// and can't be submitted on chain.
//nolint:nilerr
return nil
}
return nil
})
}

View File

@@ -172,11 +172,6 @@ func (v *LighthouseValidatorNode) Start(ctx context.Context) error {
}
_, _, index, _ := v.config, v.validatorNum, v.index, v.offset
beaconRPCPort := e2e.TestParams.Ports.PrysmBeaconNodeRPCPort + index
if beaconRPCPort >= e2e.TestParams.Ports.PrysmBeaconNodeRPCPort+e2e.TestParams.BeaconNodeCount {
// Point any extra validator clients to a node we know is running.
beaconRPCPort = e2e.TestParams.Ports.PrysmBeaconNodeRPCPort
}
kPath := e2e.TestParams.TestPath + fmt.Sprintf("/lighthouse-validator-%d", index)
testNetDir := e2e.TestParams.TestPath + fmt.Sprintf("/lighthouse-testnet-%d", index)
httpPort := e2e.TestParams.Ports.LighthouseBeaconNodeHTTPPort

View File

@@ -516,7 +516,6 @@ func (r *testRunner) defaultEndToEndRun() error {
if err := r.testCheckpointSync(ctx, g, index, conns, httpEndpoints[0], benr, menr); err != nil {
return errors.Wrap(err, "checkpoint sync test failed")
}
index += 1
}
if config.ExtraEpochs > 0 {

View File

@@ -76,9 +76,16 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
require.NoError(t, err)
vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
require.NoError(t, err)
rewards, penalties, err := altair.AttestationsDelta(preBeaconState, bp, vp)
deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
totalSpecTestRewards := make([]uint64, len(rewards))
totalSpecTestPenalties := make([]uint64, len(penalties))

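A tiny, self-contained numeric illustration of the aggregation above (the field names mirror the deltas used here; the numbers are made up):

package main

import "fmt"

// delta mirrors the per-validator reward and penalty components summed above.
type delta struct {
	HeadReward, SourceReward, TargetReward uint64
	SourcePenalty, TargetPenalty           uint64
}

func main() {
	d := delta{HeadReward: 3, SourceReward: 2, TargetReward: 5, SourcePenalty: 1, TargetPenalty: 4}
	reward := d.HeadReward + d.SourceReward + d.TargetReward // 10
	penalty := d.SourcePenalty + d.TargetPenalty             // 5
	fmt.Println(reward, penalty)
}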
View File

@@ -80,9 +80,16 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
require.NoError(t, err)
vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
require.NoError(t, err)
rewards, penalties, err := altair.AttestationsDelta(preBeaconState, bp, vp)
deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
totalSpecTestRewards := make([]uint64, len(rewards))
totalSpecTestPenalties := make([]uint64, len(penalties))

View File

@@ -80,9 +80,16 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
require.NoError(t, err)
vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
require.NoError(t, err)
rewards, penalties, err := altair.AttestationsDelta(preBeaconState, bp, vp)
deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
totalSpecTestRewards := make([]uint64, len(rewards))
totalSpecTestPenalties := make([]uint64, len(penalties))

View File

@@ -0,0 +1,114 @@
load("@bazel_tools//tools/cpp:unix_cc_configure.bzl", "configure_unix_toolchain")
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
"get_cpu_value",
"resolve_labels",
)
"""
This file is a copy of https://github.com/bazelbuild/bazel/blob/master/tools/cpp/cc_configure.bzl
with some minor changes. The original file is licensed under the Apache 2.0 license. The gist is
that we want darwin to register the local toolchain and disregard the
BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN environment variable. We must support a local toolchain for darwin until
hermetic_cc_toolchain supports darwin's sysroot in a hermetic way.
"""
def cc_autoconf_toolchains_impl(repository_ctx):
"""Generate BUILD file with 'toolchain' targets for the local host C++ toolchain.
Args:
repository_ctx: repository context
"""
cpu_value = get_cpu_value(repository_ctx)
if cpu_value.startswith("darwin"):
paths = resolve_labels(repository_ctx, [
"@bazel_tools//tools/cpp:BUILD.toolchains.tpl",
])
repository_ctx.template(
"BUILD",
paths["@bazel_tools//tools/cpp:BUILD.toolchains.tpl"],
{"%{name}": get_cpu_value(repository_ctx)},
)
else:
repository_ctx.file("BUILD", "# C++ toolchain autoconfiguration was disabled by BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN env variable.")
def cc_autoconf_impl(repository_ctx, overriden_tools = dict()):
"""Generate BUILD file with 'cc_toolchain' targets for the local host C++ toolchain.
Args:
repository_ctx: repository context
overriden_tools: dict of tool paths to use instead of autoconfigured tools
"""
cpu_value = get_cpu_value(repository_ctx)
if cpu_value.startswith("darwin"):
print("Configuring local C++ toolchain for Darwin. This is non-hermetic and builds may " +
"not be reproducible. Consider building on linux for a hermetic build.")
configure_unix_toolchain(repository_ctx, cpu_value, overriden_tools)
else:
paths = resolve_labels(repository_ctx, [
"@bazel_tools//tools/cpp:BUILD.empty.tpl",
"@bazel_tools//tools/cpp:empty_cc_toolchain_config.bzl",
])
repository_ctx.symlink(paths["@bazel_tools//tools/cpp:empty_cc_toolchain_config.bzl"], "cc_toolchain_config.bzl")
repository_ctx.template("BUILD", paths["@bazel_tools//tools/cpp:BUILD.empty.tpl"], {
"%{cpu}": get_cpu_value(repository_ctx),
})
cc_autoconf_toolchains = repository_rule(
environ = [
"BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN",
],
implementation = cc_autoconf_toolchains_impl,
configure = True,
)
cc_autoconf = repository_rule(
environ = [
"ABI_LIBC_VERSION",
"ABI_VERSION",
"BAZEL_COMPILER",
"BAZEL_HOST_SYSTEM",
"BAZEL_CONLYOPTS",
"BAZEL_CXXOPTS",
"BAZEL_LINKOPTS",
"BAZEL_LINKLIBS",
"BAZEL_LLVM_COV",
"BAZEL_LLVM_PROFDATA",
"BAZEL_PYTHON",
"BAZEL_SH",
"BAZEL_TARGET_CPU",
"BAZEL_TARGET_LIBC",
"BAZEL_TARGET_SYSTEM",
"BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN",
"BAZEL_USE_LLVM_NATIVE_COVERAGE",
"BAZEL_LLVM",
"BAZEL_IGNORE_SYSTEM_HEADERS_VERSIONS",
"USE_CLANG_CL",
"CC",
"CC_CONFIGURE_DEBUG",
"CC_TOOLCHAIN_NAME",
"CPLUS_INCLUDE_PATH",
"DEVELOPER_DIR",
"GCOV",
"LIBTOOL",
"HOMEBREW_RUBY_PATH",
"SYSTEMROOT",
"USER",
],
implementation = cc_autoconf_impl,
configure = True,
)
def configure_nonhermetic_darwin():
"""A C++ configuration rules that generate the crosstool file."""
cc_autoconf_toolchains(name = "local_config_cc_toolchains")
cc_autoconf(name = "local_config_cc")
native.bind(name = "cc_toolchain", actual = "@local_config_cc//:toolchain")
native.register_toolchains(
# Use register_toolchain's target pattern expansion to register all toolchains in the package.
"@local_config_cc_toolchains//:all",
)

View File

@@ -84,11 +84,19 @@ func PerformVoluntaryExit(
ctx context.Context, cfg PerformExitCfg,
) (rawExitedKeys [][]byte, formattedExitedKeys []string, err error) {
var rawNotExitedKeys [][]byte
genesisResponse, err := cfg.NodeClient.GetGenesis(ctx, &emptypb.Empty{})
if err != nil {
log.WithError(err).Errorf("voluntary exit failed: %v", err)
}
for i, key := range cfg.RawPubKeys {
// When output directory is present, only create the signed exit, but do not propose it.
// Otherwise, propose the exit immediately.
epoch, err := client.CurrentEpoch(genesisResponse.GenesisTime)
if err != nil {
log.WithError(err).Errorf("voluntary exit failed: %v", err)
}
if len(cfg.OutputDirectory) > 0 {
sve, err := client.CreateSignedVoluntaryExit(ctx, cfg.ValidatorClient, cfg.NodeClient, cfg.Keymanager.Sign, key)
sve, err := client.CreateSignedVoluntaryExit(ctx, cfg.ValidatorClient, cfg.Keymanager.Sign, key, epoch)
if err != nil {
rawNotExitedKeys = append(rawNotExitedKeys, key)
msg := err.Error()
@@ -101,7 +109,7 @@ func PerformVoluntaryExit(
} else if err := writeSignedVoluntaryExitJSON(ctx, sve, cfg.OutputDirectory); err != nil {
log.WithError(err).Error("failed to write voluntary exit")
}
} else if err := client.ProposeExit(ctx, cfg.ValidatorClient, cfg.NodeClient, cfg.Keymanager.Sign, key); err != nil {
} else if err := client.ProposeExit(ctx, cfg.ValidatorClient, cfg.Keymanager.Sign, key, epoch); err != nil {
rawNotExitedKeys = append(rawNotExitedKeys, key)
msg := err.Error()

View File

@@ -92,113 +92,113 @@ type MockValidator struct {
proposerSettings *validatorserviceconfig.ProposerSettings
}
func (_ MockValidator) LogSyncCommitteeMessagesSubmitted() {}
func (_ *MockValidator) LogSyncCommitteeMessagesSubmitted() {}
func (_ MockValidator) Done() {
func (_ *MockValidator) Done() {
panic("implement me")
}
func (_ MockValidator) WaitForChainStart(_ context.Context) error {
func (_ *MockValidator) WaitForChainStart(_ context.Context) error {
panic("implement me")
}
func (_ MockValidator) WaitForSync(_ context.Context) error {
func (_ *MockValidator) WaitForSync(_ context.Context) error {
panic("implement me")
}
func (_ MockValidator) WaitForActivation(_ context.Context, _ chan [][48]byte) error {
func (_ *MockValidator) WaitForActivation(_ context.Context, _ chan [][48]byte) error {
panic("implement me")
}
func (_ MockValidator) CanonicalHeadSlot(_ context.Context) (primitives.Slot, error) {
func (_ *MockValidator) CanonicalHeadSlot(_ context.Context) (primitives.Slot, error) {
panic("implement me")
}
func (_ MockValidator) NextSlot() <-chan primitives.Slot {
func (_ *MockValidator) NextSlot() <-chan primitives.Slot {
panic("implement me")
}
func (_ MockValidator) SlotDeadline(_ primitives.Slot) time.Time {
func (_ *MockValidator) SlotDeadline(_ primitives.Slot) time.Time {
panic("implement me")
}
func (_ MockValidator) LogValidatorGainsAndLosses(_ context.Context, _ primitives.Slot) error {
func (_ *MockValidator) LogValidatorGainsAndLosses(_ context.Context, _ primitives.Slot) error {
panic("implement me")
}
func (_ MockValidator) UpdateDuties(_ context.Context, _ primitives.Slot) error {
func (_ *MockValidator) UpdateDuties(_ context.Context, _ primitives.Slot) error {
panic("implement me")
}
func (_ MockValidator) RolesAt(_ context.Context, _ primitives.Slot) (map[[48]byte][]iface2.ValidatorRole, error) {
func (_ *MockValidator) RolesAt(_ context.Context, _ primitives.Slot) (map[[48]byte][]iface2.ValidatorRole, error) {
panic("implement me")
}
func (_ MockValidator) SubmitAttestation(_ context.Context, _ primitives.Slot, _ [48]byte) {
func (_ *MockValidator) SubmitAttestation(_ context.Context, _ primitives.Slot, _ [48]byte) {
panic("implement me")
}
func (_ MockValidator) ProposeBlock(_ context.Context, _ primitives.Slot, _ [48]byte) {
func (_ *MockValidator) ProposeBlock(_ context.Context, _ primitives.Slot, _ [48]byte) {
panic("implement me")
}
func (_ MockValidator) SubmitAggregateAndProof(_ context.Context, _ primitives.Slot, _ [48]byte) {
func (_ *MockValidator) SubmitAggregateAndProof(_ context.Context, _ primitives.Slot, _ [48]byte) {
panic("implement me")
}
func (_ MockValidator) SubmitSyncCommitteeMessage(_ context.Context, _ primitives.Slot, _ [48]byte) {
func (_ *MockValidator) SubmitSyncCommitteeMessage(_ context.Context, _ primitives.Slot, _ [48]byte) {
panic("implement me")
}
func (_ MockValidator) SubmitSignedContributionAndProof(_ context.Context, _ primitives.Slot, _ [48]byte) {
func (_ *MockValidator) SubmitSignedContributionAndProof(_ context.Context, _ primitives.Slot, _ [48]byte) {
panic("implement me")
}
func (_ MockValidator) LogAttestationsSubmitted() {
func (_ *MockValidator) LogAttestationsSubmitted() {
panic("implement me")
}
func (_ MockValidator) UpdateDomainDataCaches(_ context.Context, _ primitives.Slot) {
func (_ *MockValidator) UpdateDomainDataCaches(_ context.Context, _ primitives.Slot) {
panic("implement me")
}
func (_ MockValidator) WaitForKeymanagerInitialization(_ context.Context) error {
func (_ *MockValidator) WaitForKeymanagerInitialization(_ context.Context) error {
panic("implement me")
}
func (m MockValidator) Keymanager() (keymanager.IKeymanager, error) {
func (m *MockValidator) Keymanager() (keymanager.IKeymanager, error) {
return m.Km, nil
}
func (_ MockValidator) ReceiveBlocks(_ context.Context, _ chan<- error) {
func (_ *MockValidator) ReceiveBlocks(_ context.Context, _ chan<- error) {
panic("implement me")
}
func (_ MockValidator) HandleKeyReload(_ context.Context, _ [][48]byte) (bool, error) {
func (_ *MockValidator) HandleKeyReload(_ context.Context, _ [][48]byte) (bool, error) {
panic("implement me")
}
func (_ MockValidator) CheckDoppelGanger(_ context.Context) error {
func (_ *MockValidator) CheckDoppelGanger(_ context.Context) error {
panic("implement me")
}
// HasProposerSettings for mocking
func (MockValidator) HasProposerSettings() bool {
func (*MockValidator) HasProposerSettings() bool {
panic("implement me")
}
// PushProposerSettings for mocking
func (_ MockValidator) PushProposerSettings(_ context.Context, _ keymanager.IKeymanager, _ primitives.Slot, _ time.Time) error {
func (_ *MockValidator) PushProposerSettings(_ context.Context, _ keymanager.IKeymanager, _ primitives.Slot, _ time.Time) error {
panic("implement me")
}
// SetPubKeyToValidatorIndexMap for mocking
func (_ MockValidator) SetPubKeyToValidatorIndexMap(_ context.Context, _ keymanager.IKeymanager) error {
func (_ *MockValidator) SetPubKeyToValidatorIndexMap(_ context.Context, _ keymanager.IKeymanager) error {
panic("implement me")
}
// SignValidatorRegistrationRequest for mocking
func (_ MockValidator) SignValidatorRegistrationRequest(_ context.Context, _ iface2.SigningFunc, _ *ethpb.ValidatorRegistrationV1) (*ethpb.SignedValidatorRegistrationV1, error) {
func (_ *MockValidator) SignValidatorRegistrationRequest(_ context.Context, _ iface2.SigningFunc, _ *ethpb.ValidatorRegistrationV1) (*ethpb.SignedValidatorRegistrationV1, error) {
panic("implement me")
}
@@ -208,6 +208,7 @@ func (m *MockValidator) ProposerSettings() *validatorserviceconfig.ProposerSetti
}
// SetProposerSettings for mocking
func (m *MockValidator) SetProposerSettings(settings *validatorserviceconfig.ProposerSettings) {
func (m *MockValidator) SetProposerSettings(_ context.Context, settings *validatorserviceconfig.ProposerSettings) error {
m.proposerSettings = settings
return nil
}

View File

@@ -80,6 +80,7 @@ go_library(
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@io_opencensus_go//plugin/ocgrpc:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//:go_default_library",
@@ -173,6 +174,5 @@ go_test(
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
],
)

View File

@@ -63,7 +63,7 @@ type Validator interface {
PushProposerSettings(ctx context.Context, km keymanager.IKeymanager, slot primitives.Slot, deadline time.Time) error
SignValidatorRegistrationRequest(ctx context.Context, signer SigningFunc, newValidatorRegistration *ethpb.ValidatorRegistrationV1) (*ethpb.SignedValidatorRegistrationV1, error)
ProposerSettings() *validatorserviceconfig.ProposerSettings
SetProposerSettings(*validatorserviceconfig.ProposerSettings)
SetProposerSettings(context.Context, *validatorserviceconfig.ProposerSettings) error
}
// SigningFunc interface defines a type for the a function that signs a message

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"time"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
@@ -25,7 +26,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/validator/client/iface"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/types/known/emptypb"
)
const domainDataErr = "could not get domain data"
@@ -198,14 +198,14 @@ func (v *validator) ProposeBlock(ctx context.Context, slot primitives.Slot, pubK
func ProposeExit(
ctx context.Context,
validatorClient iface.ValidatorClient,
nodeClient iface.NodeClient,
signer iface.SigningFunc,
pubKey []byte,
epoch primitives.Epoch,
) error {
ctx, span := trace.StartSpan(ctx, "validator.ProposeExit")
defer span.End()
signedExit, err := CreateSignedVoluntaryExit(ctx, validatorClient, nodeClient, signer, pubKey)
signedExit, err := CreateSignedVoluntaryExit(ctx, validatorClient, signer, pubKey, epoch)
if err != nil {
return errors.Wrap(err, "failed to create signed voluntary exit")
}
@@ -217,16 +217,22 @@ func ProposeExit(
span.AddAttributes(
trace.StringAttribute("exitRoot", fmt.Sprintf("%#x", exitResp.ExitRoot)),
)
return nil
}
func CurrentEpoch(genesisTime *timestamp.Timestamp) (primitives.Epoch, error) {
totalSecondsPassed := prysmTime.Now().Unix() - genesisTime.Seconds
currentSlot := primitives.Slot((uint64(totalSecondsPassed)) / params.BeaconConfig().SecondsPerSlot)
currentEpoch := slots.ToEpoch(currentSlot)
return currentEpoch, nil
}
func CreateSignedVoluntaryExit(
ctx context.Context,
validatorClient iface.ValidatorClient,
nodeClient iface.NodeClient,
signer iface.SigningFunc,
pubKey []byte,
epoch primitives.Epoch,
) (*ethpb.SignedVoluntaryExit, error) {
ctx, span := trace.StartSpan(ctx, "validator.CreateSignedVoluntaryExit")
defer span.End()
@@ -235,15 +241,12 @@ func CreateSignedVoluntaryExit(
if err != nil {
return nil, errors.Wrap(err, "gRPC call to get validator index failed")
}
genesisResponse, err := nodeClient.GetGenesis(ctx, &emptypb.Empty{})
exit := &ethpb.VoluntaryExit{Epoch: epoch, ValidatorIndex: indexResponse.Index}
slot, err := slots.EpochStart(epoch)
if err != nil {
return nil, errors.Wrap(err, "gRPC call to get genesis time failed")
return nil, errors.Wrap(err, "failed to retrieve slot")
}
totalSecondsPassed := prysmTime.Now().Unix() - genesisResponse.GenesisTime.Seconds
currentEpoch := primitives.Epoch(uint64(totalSecondsPassed) / uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)))
currentSlot := slots.CurrentSlot(uint64(genesisResponse.GenesisTime.AsTime().Unix()))
exit := &ethpb.VoluntaryExit{Epoch: currentEpoch, ValidatorIndex: indexResponse.Index}
sig, err := signVoluntaryExit(ctx, validatorClient, signer, pubKey, exit, currentSlot)
sig, err := signVoluntaryExit(ctx, validatorClient, signer, pubKey, exit, slot)
if err != nil {
return nil, errors.Wrap(err, "failed to sign voluntary exit")
}

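A rough, self-contained illustration of the epoch arithmetic in CurrentEpoch above, assuming mainnet timing parameters (the real code reads them from params.BeaconConfig(); the elapsed time is hypothetical):

package main

import "fmt"

func main() {
	const secondsPerSlot = 12     // mainnet SecondsPerSlot
	const slotsPerEpoch = 32      // mainnet SlotsPerEpoch
	secondsPassed := int64(3840)  // hypothetical seconds since genesis
	currentSlot := secondsPassed / secondsPerSlot // 320
	currentEpoch := currentSlot / slotsPerEpoch   // 10
	fmt.Println("slot:", currentSlot, "epoch:", currentEpoch)
}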
View File

@@ -6,7 +6,6 @@ import (
"errors"
"strings"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
@@ -28,7 +27,6 @@ import (
testing2 "github.com/prysmaticlabs/prysm/v4/validator/db/testing"
"github.com/prysmaticlabs/prysm/v4/validator/graffiti"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/types/known/timestamppb"
)
type mocks struct {
@@ -648,39 +646,15 @@ func TestProposeExit_ValidatorIndexFailed(t *testing.T) {
err := ProposeExit(
context.Background(),
m.validatorClient,
m.nodeClient,
m.signfunc,
validatorKey.PublicKey().Marshal(),
params.BeaconConfig().GenesisEpoch,
)
assert.NotNil(t, err)
assert.ErrorContains(t, "uh oh", err)
assert.ErrorContains(t, "gRPC call to get validator index failed", err)
}
func TestProposeExit_GetGenesisFailed(t *testing.T) {
_, m, validatorKey, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().
ValidatorIndex(gomock.Any(), gomock.Any()).
Return(nil, nil)
m.nodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Return(nil, errors.New("uh oh"))
err := ProposeExit(
context.Background(),
m.validatorClient,
m.nodeClient,
m.signfunc,
validatorKey.PublicKey().Marshal(),
)
assert.NotNil(t, err)
assert.ErrorContains(t, "uh oh", err)
assert.ErrorContains(t, "gRPC call to get genesis time failed", err)
}
func TestProposeExit_DomainDataFailed(t *testing.T) {
_, m, validatorKey, finish := setup(t)
defer finish()
@@ -689,15 +663,6 @@ func TestProposeExit_DomainDataFailed(t *testing.T) {
ValidatorIndex(gomock.Any(), gomock.Any()).
Return(&ethpb.ValidatorIndexResponse{Index: 1}, nil)
// Any time in the past will suffice
genesisTime := &timestamppb.Timestamp{
Seconds: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
}
m.nodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Return(&ethpb.Genesis{GenesisTime: genesisTime}, nil)
m.validatorClient.EXPECT().
DomainData(gomock.Any(), gomock.Any()).
Return(nil, errors.New("uh oh"))
@@ -705,9 +670,9 @@ func TestProposeExit_DomainDataFailed(t *testing.T) {
err := ProposeExit(
context.Background(),
m.validatorClient,
m.nodeClient,
m.signfunc,
validatorKey.PublicKey().Marshal(),
params.BeaconConfig().GenesisEpoch,
)
assert.NotNil(t, err)
assert.ErrorContains(t, domainDataErr, err)
@@ -723,15 +688,6 @@ func TestProposeExit_DomainDataIsNil(t *testing.T) {
ValidatorIndex(gomock.Any(), gomock.Any()).
Return(&ethpb.ValidatorIndexResponse{Index: 1}, nil)
// Any time in the past will suffice
genesisTime := &timestamppb.Timestamp{
Seconds: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
}
m.nodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Return(&ethpb.Genesis{GenesisTime: genesisTime}, nil)
m.validatorClient.EXPECT().
DomainData(gomock.Any(), gomock.Any()).
Return(nil, nil)
@@ -739,9 +695,9 @@ func TestProposeExit_DomainDataIsNil(t *testing.T) {
err := ProposeExit(
context.Background(),
m.validatorClient,
m.nodeClient,
m.signfunc,
validatorKey.PublicKey().Marshal(),
params.BeaconConfig().GenesisEpoch,
)
assert.NotNil(t, err)
assert.ErrorContains(t, domainDataErr, err)
@@ -756,15 +712,6 @@ func TestProposeBlock_ProposeExitFailed(t *testing.T) {
ValidatorIndex(gomock.Any(), gomock.Any()).
Return(&ethpb.ValidatorIndexResponse{Index: 1}, nil)
// Any time in the past will suffice
genesisTime := &timestamppb.Timestamp{
Seconds: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
}
m.nodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Return(&ethpb.Genesis{GenesisTime: genesisTime}, nil)
m.validatorClient.EXPECT().
DomainData(gomock.Any(), gomock.Any()).
Return(&ethpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil)
@@ -776,9 +723,9 @@ func TestProposeBlock_ProposeExitFailed(t *testing.T) {
err := ProposeExit(
context.Background(),
m.validatorClient,
m.nodeClient,
m.signfunc,
validatorKey.PublicKey().Marshal(),
params.BeaconConfig().GenesisEpoch,
)
assert.NotNil(t, err)
assert.ErrorContains(t, "uh oh", err)
@@ -793,15 +740,6 @@ func TestProposeExit_BroadcastsBlock(t *testing.T) {
ValidatorIndex(gomock.Any(), gomock.Any()).
Return(&ethpb.ValidatorIndexResponse{Index: 1}, nil)
// Any time in the past will suffice
genesisTime := &timestamppb.Timestamp{
Seconds: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
}
m.nodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Return(&ethpb.Genesis{GenesisTime: genesisTime}, nil)
m.validatorClient.EXPECT().
DomainData(gomock.Any(), gomock.Any()).
Return(&ethpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil)
@@ -813,9 +751,9 @@ func TestProposeExit_BroadcastsBlock(t *testing.T) {
assert.NoError(t, ProposeExit(
context.Background(),
m.validatorClient,
m.nodeClient,
m.signfunc,
validatorKey.PublicKey().Marshal(),
params.BeaconConfig().GenesisEpoch,
))
}

View File

@@ -58,7 +58,7 @@ func run(ctx context.Context, v iface.Validator) {
if v.ProposerSettings() != nil {
log.Infof("Validator client started with provided proposer settings that sets options such as fee recipient"+
" and will periodically update the beacon node and custom builder (if --%s)", flags.EnableBuilderFlag.Name)
deadline := time.Now().Add(time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
deadline := time.Now().Add(5 * time.Minute)
if err := v.PushProposerSettings(ctx, km, headSlot, deadline); err != nil {
if errors.Is(err, ErrBuilderValidatorRegistration) {
log.WithError(err).Warn("Push proposer settings error")
@@ -67,7 +67,7 @@ func run(ctx context.Context, v iface.Validator) {
}
}
} else {
log.Warnln("Validator client started without proposer settings such as fee recipient" +
log.Warn("Validator client started without proposer settings such as fee recipient" +
" and will continue to use settings provided in the beacon node.")
}
@@ -108,9 +108,10 @@ func run(ctx context.Context, v iface.Validator) {
continue
}
if slots.IsEpochStart(slot) && v.ProposerSettings() != nil {
// Make this call on a separate goroutine to push proposer settings from the middle of the epoch.
if slots.SinceEpochStarts(slot) == params.BeaconConfig().SlotsPerEpoch/2 && v.ProposerSettings() != nil {
go func() {
// deadline set for end of epoch
// The deadline is set to one epoch from this call so that runs do not overlap.
epochDeadline := v.SlotDeadline(slot + params.BeaconConfig().SlotsPerEpoch - 1)
if err := v.PushProposerSettings(ctx, km, slot, epochDeadline); err != nil {
log.WithError(err).Warn("Failed to update proposer settings")

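For reference, a small self-contained sketch of when the mid-epoch push now fires, assuming mainnet's 32 slots per epoch (the real code uses slots.SinceEpochStarts and v.SlotDeadline; this only mimics the arithmetic):

package main

import "fmt"

func main() {
	const slotsPerEpoch = 32
	for slot := uint64(0); slot < 2*slotsPerEpoch; slot++ {
		if slot%slotsPerEpoch == slotsPerEpoch/2 {
			// Fires once per epoch (slots 16 and 48 here), with a deadline one
			// epoch away so consecutive runs do not overlap.
			fmt.Println("push proposer settings at slot", slot)
		}
	}
}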
View File

@@ -225,13 +225,14 @@ func notActive(t *testing.T) [fieldparams.BLSPubkeyLength]byte {
func TestUpdateProposerSettingsAt_EpochStart(t *testing.T) {
v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err := v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"),
},
},
})
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
hook := logTest.NewGlobal()
slot := params.BeaconConfig().SlotsPerEpoch
@@ -247,39 +248,16 @@ func TestUpdateProposerSettingsAt_EpochStart(t *testing.T) {
assert.LogsContain(t, hook, "updated proposer settings")
}
func TestUpdateProposerSettingsAt_EpochEndExceeded(t *testing.T) {
v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, ProposerSettingWait: time.Duration(params.BeaconConfig().SecondsPerSlot+1) * time.Second}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"),
},
},
})
ctx, cancel := context.WithCancel(context.Background())
hook := logTest.NewGlobal()
slot := params.BeaconConfig().SlotsPerEpoch - 1 //have it set close to the end of epoch
ticker := make(chan primitives.Slot)
v.NextSlotRet = ticker
go func() {
ticker <- slot
cancel()
}()
run(ctx, v)
// can't test "Failed to update proposer settings" because of log.fatal
assert.LogsContain(t, hook, "deadline exceeded")
}
func TestUpdateProposerSettingsAt_EpochEndOk(t *testing.T) {
v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, ProposerSettingWait: time.Duration(params.BeaconConfig().SecondsPerSlot-1) * time.Second}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err := v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"),
},
},
})
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
hook := logTest.NewGlobal()
slot := params.BeaconConfig().SlotsPerEpoch - 1 //have it set close to the end of epoch
@@ -301,13 +279,14 @@ func TestUpdateProposerSettings_ContinuesAfterValidatorRegistrationFails(t *test
ProposerSettingsErr: errors.Wrap(ErrBuilderValidatorRegistration, errSomeotherError.Error()),
Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}},
}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err := v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"),
},
},
})
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
hook := logTest.NewGlobal()
slot := params.BeaconConfig().SlotsPerEpoch

View File

@@ -188,14 +188,6 @@ func (v *ValidatorService) Start() {
return
}
// checks db if proposer settings exist if none is provided.
if v.proposerSettings == nil {
settings, err := v.db.ProposerSettings(v.ctx)
if err == nil {
v.proposerSettings = settings
}
}
validatorClient := validatorClientFactory.NewValidatorClient(v.conn)
beaconClient := beaconChainClientFactory.NewBeaconChainClient(v.conn)
@@ -283,15 +275,13 @@ func (v *ValidatorService) ProposerSettings() *validatorserviceconfig.ProposerSe
// SetProposerSettings sets the proposer settings on the validator service as well as the underlying validator
func (v *ValidatorService) SetProposerSettings(ctx context.Context, settings *validatorserviceconfig.ProposerSettings) error {
if v.db == nil {
return errors.New("db is not set")
}
if err := v.db.SaveProposerSettings(ctx, settings); err != nil {
return err
}
// validator service proposer settings is only used for pass through from node -> validator service -> validator.
// in memory use of proposer settings happens on validator.
v.proposerSettings = settings
v.validator.SetProposerSettings(settings)
return nil
// Passes settings down to be updated in the database and saved in memory.
// Updates to validator proposer settings live on the validator object, not on the validator service.
return v.validator.SetProposerSettings(ctx, settings)
}
// ConstructDialOptions constructs a list of grpc dial options

View File

@@ -281,6 +281,7 @@ func (f *FakeValidator) ProposerSettings() *validatorserviceconfig.ProposerSetti
}
// SetProposerSettings for mocking
func (f *FakeValidator) SetProposerSettings(settings *validatorserviceconfig.ProposerSettings) {
func (f *FakeValidator) SetProposerSettings(_ context.Context, settings *validatorserviceconfig.ProposerSettings) error {
f.proposerSettings = settings
return nil
}

View File

@@ -953,12 +953,21 @@ func (v *validator) logDuties(slot primitives.Slot, duties []*ethpb.DutiesRespon
}
}
// ProposerSettings gets the current proposer settings saved in memory on the validator.
func (v *validator) ProposerSettings() *validatorserviceconfig.ProposerSettings {
return v.proposerSettings
}
func (v *validator) SetProposerSettings(settings *validatorserviceconfig.ProposerSettings) {
// SetProposerSettings persists the passed-in proposer settings and overrides the in-memory copy.
func (v *validator) SetProposerSettings(ctx context.Context, settings *validatorserviceconfig.ProposerSettings) error {
if v.db == nil {
return errors.New("db is not set")
}
if err := v.db.SaveProposerSettings(ctx, settings); err != nil {
return err
}
v.proposerSettings = settings
return nil
}
// PushProposerSettings calls the prepareBeaconProposer RPC to set the fee recipient and also the register validator API if using a custom builder.

View File

@@ -1403,7 +1403,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
GasLimit: 40000000,
},
}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err = v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
ProposeConfig: config,
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
@@ -1415,6 +1415,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
},
},
})
require.NoError(t, err)
client.EXPECT().SubmitValidatorRegistrations(
gomock.Any(),
gomock.Any(),
@@ -1484,7 +1485,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
GasLimit: 40000000,
},
}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err = v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
ProposeConfig: config,
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
@@ -1496,6 +1497,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
},
},
})
require.NoError(t, err)
client.EXPECT().SubmitValidatorRegistrations(
gomock.Any(),
gomock.Any(),
@@ -1557,7 +1559,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
FeeRecipient: common.HexToAddress("0x055Fb65722E7b2455043BFEBf6177F1D2e9738D9"),
},
}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err = v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
ProposeConfig: config,
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
@@ -1565,6 +1567,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
},
},
})
require.NoError(t, err)
return &v
},
feeRecipientMap: map[primitives.ValidatorIndex]string{
@@ -1599,7 +1602,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
require.NoError(t, err)
keys, err := km.FetchValidatingPublicKeys(ctx)
require.NoError(t, err)
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err = v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
ProposeConfig: nil,
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
@@ -1611,6 +1614,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
},
},
})
require.NoError(t, err)
v.pubkeyToValidatorIndex[keys[0]] = primitives.ValidatorIndex(1)
client.EXPECT().MultipleValidatorStatus(
gomock.Any(),
@@ -1659,7 +1663,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
}
err := v.WaitForKeymanagerInitialization(ctx)
require.NoError(t, err)
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err = v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
ProposeConfig: nil,
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
@@ -1671,6 +1675,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
},
},
})
require.NoError(t, err)
km, err := v.Keymanager()
require.NoError(t, err)
keys, err := km.FetchValidatingPublicKeys(ctx)
@@ -1745,7 +1750,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
FeeRecipient: common.Address{},
},
}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err = v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
ProposeConfig: config,
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
@@ -1753,6 +1758,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
},
},
})
require.NoError(t, err)
return &v
},
},
@@ -1787,7 +1793,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455043BFEBf6177F1D2e9738D9"),
},
}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err = v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
ProposeConfig: config,
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
@@ -1795,6 +1801,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
},
},
})
require.NoError(t, err)
return &v
},
},
@@ -1838,7 +1845,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
GasLimit: 40000000,
},
}
v.SetProposerSettings(&validatorserviceconfig.ProposerSettings{
err = v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
ProposeConfig: config,
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
@@ -1850,6 +1857,7 @@ func TestValidator_PushProposerSettings(t *testing.T) {
},
},
})
require.NoError(t, err)
client.EXPECT().PrepareBeaconProposer(gomock.Any(), &ethpb.PrepareBeaconProposerRequest{
Recipients: []*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: common.HexToAddress("0x0").Bytes(), ValidatorIndex: 1},

View File

@@ -121,6 +121,11 @@ func (s *Store) ProposerSettingsExists(ctx context.Context) (bool, error) {
func (s *Store) SaveProposerSettings(ctx context.Context, settings *validatorServiceConfig.ProposerSettings) error {
_, span := trace.StartSpan(ctx, "validator.db.SaveProposerSettings")
defer span.End()
// nothing to save
if !settings.ShouldBeSaved() {
log.Warn("proposer settings are empty, nothing has been saved")
return nil
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(proposerSettingsBucket)
m, err := proto.Marshal(settings.ToPayload())

View File

@@ -16,6 +16,7 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//validator/accounts:go_default_library",
"//validator/db/iface:go_default_library",
"//validator/db/testing:go_default_library",
"//validator/keymanager:go_default_library",
"//validator/keymanager/remote-web3signer:go_default_library",

View File

@@ -227,6 +227,7 @@ func (c *ValidatorClient) initializeFromCLI(cliCtx *cli.Context) error {
if dataDir == "" && c.wallet != nil {
dataDir = c.wallet.AccountsDir()
if dataDir == "" {
// skipcq: RVV-A0003
log.Fatal(
"Could not determine your system'c HOME path, please specify a --datadir you wish " +
"to use for your validator data",
@@ -310,6 +311,7 @@ func (c *ValidatorClient) initializeForWeb(cliCtx *cli.Context) error {
if dataDir == "" {
dataDir = cmd.DefaultDataDir()
if dataDir == "" {
// skipcq: RVV-A0003
log.Fatal(
"Could not determine your system'c HOME path, please specify a --datadir you wish " +
"to use for your validator data",
@@ -496,21 +498,20 @@ func proposerSettings(cliCtx *cli.Context, db iface.ValidatorDB) (*validatorServ
if cliCtx.IsSet(flags.ProposerSettingsFlag.Name) && cliCtx.IsSet(flags.ProposerSettingsURLFlag.Name) {
return nil, errors.New("cannot specify both " + flags.ProposerSettingsFlag.Name + " and " + flags.ProposerSettingsURLFlag.Name)
}
builderConfigFromFlag, err := BuilderSettingsFromFlags(cliCtx)
if err != nil {
return nil, err
}
// The suggested fee recipient flag is overridden by the file and URL flags.
if cliCtx.IsSet(flags.SuggestedFeeRecipientFlag.Name) &&
!cliCtx.IsSet(flags.ProposerSettingsFlag.Name) &&
!cliCtx.IsSet(flags.ProposerSettingsURLFlag.Name) {
suggestedFee := cliCtx.String(flags.SuggestedFeeRecipientFlag.Name)
builderConfig, err := BuilderSettingsFromFlags(cliCtx)
if err != nil {
return nil, err
}
fileConfig = &validatorpb.ProposerSettingsPayload{
ProposerConfig: nil,
DefaultConfig: &validatorpb.ProposerOptionPayload{
FeeRecipient: suggestedFee,
Builder: builderConfig.ToPayload(),
Builder: builderConfigFromFlag.ToPayload(),
},
}
}
@@ -526,13 +527,13 @@ func proposerSettings(cliCtx *cli.Context, db iface.ValidatorDB) (*validatorServ
}
}
// nothing is set, so just return nil
// this condition triggers if SuggestedFeeRecipientFlag, ProposerSettingsFlag, or ProposerSettingsURLFlag did not create any settings
if fileConfig == nil {
if cliCtx.Bool(flags.EnableBuilderFlag.Name) {
return nil, fmt.Errorf("%s flag can only be used when a default fee recipient is present on the validator client", flags.EnableBuilderFlag.Name)
}
return nil, nil
// Check the db and the enable-builder flag before starting the node without proposer settings.
// Starting the node without proposer settings skips the API calls that push proposer settings and register the validator.
return handleNoProposerSettingsFlagsProvided(cliCtx, db, builderConfigFromFlag)
}
// convert file config to proposer config for internal use
vpSettings := &validatorServiceConfig.ProposerSettings{}
@@ -556,16 +557,13 @@ func proposerSettings(cliCtx *cli.Context, db iface.ValidatorDB) (*validatorServ
},
BuilderConfig: validatorServiceConfig.ToBuilderConfig(fileConfig.DefaultConfig.Builder),
}
if vpSettings.DefaultConfig.BuilderConfig == nil {
builderConfig, err := BuilderSettingsFromFlags(cliCtx)
if err != nil {
return nil, err
}
vpSettings.DefaultConfig.BuilderConfig = builderConfig
}
if vpSettings.DefaultConfig.BuilderConfig != nil {
vpSettings.DefaultConfig.BuilderConfig.GasLimit = reviewGasLimit(vpSettings.DefaultConfig.BuilderConfig.GasLimit)
if builderConfigFromFlag != nil {
config := builderConfigFromFlag
if config.GasLimit == validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit) && vpSettings.DefaultConfig.BuilderConfig != nil {
config.GasLimit = vpSettings.DefaultConfig.BuilderConfig.GasLimit
}
vpSettings.DefaultConfig.BuilderConfig = config
}
if psExists {
@@ -594,14 +592,14 @@ func proposerSettings(cliCtx *cli.Context, db iface.ValidatorDB) (*validatorServ
if err := warnNonChecksummedAddress(option.FeeRecipient); err != nil {
return nil, err
}
if option.Builder != nil {
option.Builder.GasLimit = reviewGasLimit(option.Builder.GasLimit)
} else {
builderConfig, err := BuilderSettingsFromFlags(cliCtx)
if err != nil {
return nil, err
if builderConfigFromFlag != nil {
config := builderConfigFromFlag.ToPayload()
if config.GasLimit == validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit) && option.Builder != nil {
config.GasLimit = option.Builder.GasLimit
}
option.Builder = builderConfig.ToPayload()
option.Builder = config
} else if option.Builder != nil {
option.Builder.GasLimit = reviewGasLimit(option.Builder.GasLimit)
}
o := &validatorServiceConfig.ProposerOption{
FeeRecipientConfig: &validatorServiceConfig.FeeRecipientConfig{
@@ -628,6 +626,49 @@ func proposerSettings(cliCtx *cli.Context, db iface.ValidatorDB) (*validatorServ
return vpSettings, nil
}
func handleNoProposerSettingsFlagsProvided(cliCtx *cli.Context,
db iface.ValidatorDB,
builderConfigFromFlag *validatorServiceConfig.BuilderConfig) (*validatorServiceConfig.ProposerSettings, error) {
log.Info("no proposer settings files have been provided, attempting to load from db.")
// check the db for existing proposer settings, since none were provided via flags.
settings, err := db.ProposerSettings(cliCtx.Context)
if err == nil {
// apply any builder overrides from the --enable-builder flag to the loaded settings
overrideBuilderSettings(settings, builderConfigFromFlag)
log.Info("successfully loaded proposer settings from db.")
return settings, nil
} else {
log.WithError(err).Warn("no proposer settings will be loaded from the db")
}
if cliCtx.Bool(flags.EnableBuilderFlag.Name) {
// If no proposer settings are provided, create a default without a fee recipient; validators that don't have a fee recipient set are skipped during validator registration.
// Skip saving to the DB when only builder settings are provided, until a trigger such as a keymanager API update supplies fee recipient values.
return &validatorServiceConfig.ProposerSettings{
DefaultConfig: &validatorServiceConfig.ProposerOption{
BuilderConfig: builderConfigFromFlag,
},
}, nil
}
return nil, nil
}
func overrideBuilderSettings(settings *validatorServiceConfig.ProposerSettings, builderConfigFromFlag *validatorServiceConfig.BuilderConfig) {
// Override the builder settings loaded from the db based on whether the --enable-builder flag is provided.
if builderConfigFromFlag == nil {
log.Infof("proposer settings loaded from db. validator registration to builder is not enabled, please use the --%s flag if you wish to use a builder.", flags.EnableBuilderFlag.Name)
}
if settings.ProposeConfig != nil {
for key := range settings.ProposeConfig {
settings.ProposeConfig[key].BuilderConfig = builderConfigFromFlag
}
}
if settings.DefaultConfig != nil {
settings.DefaultConfig.BuilderConfig = builderConfigFromFlag
}
}
func BuilderSettingsFromFlags(cliCtx *cli.Context) (*validatorServiceConfig.BuilderConfig, error) {
if cliCtx.Bool(flags.EnableBuilderFlag.Name) {
gasLimit := validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit)
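
To summarize the precedence applied above in one place, here is a hypothetical helper (not part of this change) showing how a builder config derived from --enable-builder interacts with a config coming from the settings file or db. The helper itself is an assumption for illustration; the types and the DefaultBuilderGasLimit comparison mirror the diff:

package example

import (
	"github.com/prysmaticlabs/prysm/v4/config/params"
	validatorServiceConfig "github.com/prysmaticlabs/prysm/v4/config/validator/service"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
)

// mergeBuilderConfig illustrates the flag-vs-file precedence: the flag-derived
// config wins, but a gas limit left at the chain default does not clobber an
// explicit gas limit from the file or db.
func mergeBuilderConfig(fromFlag, existing *validatorServiceConfig.BuilderConfig) *validatorServiceConfig.BuilderConfig {
	if fromFlag == nil {
		// --enable-builder not set: keep whatever the file or db provided.
		return existing
	}
	merged := fromFlag
	if merged.GasLimit == validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit) && existing != nil {
		merged.GasLimit = existing.GasLimit
	}
	return merged
}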


@@ -21,6 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/validator/accounts"
"github.com/prysmaticlabs/prysm/v4/validator/db/iface"
dbTest "github.com/prysmaticlabs/prysm/v4/validator/db/testing"
"github.com/prysmaticlabs/prysm/v4/validator/keymanager"
remoteweb3signer "github.com/prysmaticlabs/prysm/v4/validator/keymanager/remote-web3signer"
@@ -227,6 +228,7 @@ func TestProposerSettings(t *testing.T) {
urlResponse string
wantErr string
wantLog string
withdb func(db iface.ValidatorDB) error
validatorRegistrationEnabled bool
}{
{
@@ -552,7 +554,7 @@ func TestProposerSettings(t *testing.T) {
validatorRegistrationEnabled: true,
},
{
name: "Enable Builder flag does not override completed builder config",
name: "Enable Builder flag does override completed builder config",
args: args{
proposerSettingsFlagValues: &proposerSettingsFlag{
dir: "./testdata/good-prepare-beacon-proposer-config.yaml",
@@ -580,7 +582,7 @@ func TestProposerSettings(t *testing.T) {
FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
},
BuilderConfig: &validatorserviceconfig.BuilderConfig{
Enabled: false,
Enabled: true,
GasLimit: validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
},
},
@@ -588,6 +590,185 @@ func TestProposerSettings(t *testing.T) {
},
validatorRegistrationEnabled: true,
},
{
name: "Only Enable Builder flag",
args: args{
proposerSettingsFlagValues: &proposerSettingsFlag{
dir: "",
url: "",
defaultfee: "",
},
},
want: func() *validatorserviceconfig.ProposerSettings {
return &validatorserviceconfig.ProposerSettings{
DefaultConfig: &validatorserviceconfig.ProposerOption{
BuilderConfig: &validatorserviceconfig.BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
},
},
}
},
validatorRegistrationEnabled: true,
},
{
name: "No Flags but saved to DB with builder and override removed builder data",
args: args{
proposerSettingsFlagValues: &proposerSettingsFlag{
dir: "",
url: "",
defaultfee: "",
},
},
want: func() *validatorserviceconfig.ProposerSettings {
key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a")
require.NoError(t, err)
return &validatorserviceconfig.ProposerSettings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*validatorserviceconfig.ProposerOption{
bytesutil.ToBytes48(key1): {
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
},
},
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
},
},
}
},
withdb: func(db iface.ValidatorDB) error {
key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a")
require.NoError(t, err)
settings := &validatorserviceconfig.ProposerSettings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*validatorserviceconfig.ProposerOption{
bytesutil.ToBytes48(key1): {
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
BuilderConfig: &validatorserviceconfig.BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(40000000),
},
},
},
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
},
BuilderConfig: &validatorserviceconfig.BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
},
},
}
return db.SaveProposerSettings(context.Background(), settings)
},
},
{
name: "Enable builder flag but saved to DB without builder data now includes builder data",
args: args{
proposerSettingsFlagValues: &proposerSettingsFlag{
dir: "",
url: "",
defaultfee: "",
},
},
want: func() *validatorserviceconfig.ProposerSettings {
key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a")
require.NoError(t, err)
return &validatorserviceconfig.ProposerSettings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*validatorserviceconfig.ProposerOption{
bytesutil.ToBytes48(key1): {
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
BuilderConfig: &validatorserviceconfig.BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
},
},
},
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
},
BuilderConfig: &validatorserviceconfig.BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
},
},
}
},
withdb: func(db iface.ValidatorDB) error {
key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a")
require.NoError(t, err)
settings := &validatorserviceconfig.ProposerSettings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*validatorserviceconfig.ProposerOption{
bytesutil.ToBytes48(key1): {
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
},
},
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
},
},
}
return db.SaveProposerSettings(context.Background(), settings)
},
validatorRegistrationEnabled: true,
},
{
name: "No flags, but saved to database",
args: args{
proposerSettingsFlagValues: &proposerSettingsFlag{
dir: "",
url: "",
defaultfee: "",
},
},
want: func() *validatorserviceconfig.ProposerSettings {
key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a")
require.NoError(t, err)
return &validatorserviceconfig.ProposerSettings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*validatorserviceconfig.ProposerOption{
bytesutil.ToBytes48(key1): {
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
},
},
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
},
},
}
},
withdb: func(db iface.ValidatorDB) error {
key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a")
require.NoError(t, err)
settings := &validatorserviceconfig.ProposerSettings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*validatorserviceconfig.ProposerOption{
bytesutil.ToBytes48(key1): {
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
},
},
},
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
},
},
}
return db.SaveProposerSettings(context.Background(), settings)
},
},
{
name: "No flags set means empty config",
args: args{
@@ -680,6 +861,10 @@ func TestProposerSettings(t *testing.T) {
}
cliCtx := cli.NewContext(&app, set, nil)
validatorDB := dbTest.SetupDB(t, [][fieldparams.BLSPubkeyLength]byte{})
if tt.withdb != nil {
err := tt.withdb(validatorDB)
require.NoError(t, err)
}
got, err := proposerSettings(cliCtx, validatorDB)
if tt.wantErr != "" {
require.ErrorContains(t, tt.wantErr, err)
@@ -692,17 +877,29 @@ func TestProposerSettings(t *testing.T) {
}
w := tt.want()
require.DeepEqual(t, w, got)
})
}
}
// return an error if the user is using builder settings without any default fee recipient
func TestProposerSettings_EnableBuilder_noFeeRecipient(t *testing.T) {
validatorDB := dbTest.SetupDB(t, [][fieldparams.BLSPubkeyLength]byte{})
func Test_ProposerSettingsWithOnlyBuilder_DoesNotSaveInDB(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.Bool(flags.EnableBuilderFlag.Name, true, "")
cliCtx := cli.NewContext(&app, set, nil)
_, err := proposerSettings(cliCtx, validatorDB)
require.ErrorContains(t, "can only be used when a default fee recipient is present on the validator client", err)
validatorDB := dbTest.SetupDB(t, [][fieldparams.BLSPubkeyLength]byte{})
got, err := proposerSettings(cliCtx, validatorDB)
require.NoError(t, err)
_, err = validatorDB.ProposerSettings(cliCtx.Context)
require.ErrorContains(t, "no proposer settings found in bucket", err)
want := &validatorserviceconfig.ProposerSettings{
DefaultConfig: &validatorserviceconfig.ProposerOption{
BuilderConfig: &validatorserviceconfig.BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
Relays: nil,
},
},
}
require.DeepEqual(t, want, got)
}


@@ -102,6 +102,7 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//config/validator/service:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
@@ -138,6 +139,7 @@ go_test(
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
],
)


@@ -205,7 +205,6 @@ func TestServer_VoluntaryExit(t *testing.T) {
mockNodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Times(2).
Return(&ethpb.Genesis{GenesisTime: genesisTime}, nil)
mockValidatorClient.EXPECT().


@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"custom_hooks.go",
"endpoint_factory.go",
"structs.go",
],
@@ -16,11 +17,17 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["structs_test.go"],
srcs = [
"custom_hooks_test.go",
"structs_test.go",
],
embed = [":go_default_library"],
deps = [
"//api/gateway/apimiddleware:go_default_library",
"//config/fieldparams:go_default_library",
"//proto/eth/service:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)


@@ -0,0 +1,39 @@
package apimiddleware
import (
"bytes"
"encoding/json"
"io"
"net/http"
"strconv"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
)
// "/eth/v1/validator/{pubkey}/voluntary_exit" POST expects epoch as a query param.
// This hook adds the query param to the body so that it is a valid POST request as
// grpc-gateway does not handle query params in POST requests.
func setVoluntaryExitEpoch(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SetVoluntaryExitRequestJson); ok {
var epoch = req.URL.Query().Get("epoch")
// Default to epoch 0 when the query param is not provided
if epoch == "" {
epoch = "0"
}
_, err := strconv.ParseUint(epoch, 10, 64)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "invalid epoch")
}
j := &SetVoluntaryExitRequestJson{Epoch: epoch}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal epoch")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}


@@ -0,0 +1,49 @@
package apimiddleware
import (
"bytes"
"encoding/json"
"fmt"
"net/http/httptest"
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestSetVoluntaryExitEpoch(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SetVoluntaryExitRequestJson{},
}
epoch := "300"
var body bytes.Buffer
request := httptest.NewRequest("POST", fmt.Sprintf("http://foo.example?epoch=%s", epoch), &body)
runDefault, errJson := setVoluntaryExitEpoch(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
var b SetVoluntaryExitRequestJson
err := json.NewDecoder(request.Body).Decode(&b)
require.NoError(t, err)
require.Equal(t, epoch, b.Epoch)
})
t.Run("invalid query returns error", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SetVoluntaryExitRequestJson{},
}
epoch := "/12"
var body bytes.Buffer
request := httptest.NewRequest("POST", fmt.Sprintf("http://foo.example?epoch=%s", epoch), &body)
runDefault, errJson := setVoluntaryExitEpoch(endpoint, nil, request)
assert.NotNil(t, errJson)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
err := errors.New(errJson.Msg())
assert.ErrorContains(t, "invalid epoch", err)
})
}


@@ -20,6 +20,7 @@ func (*ValidatorEndpointFactory) Paths() []string {
"/eth/v1/remotekeys",
"/eth/v1/validator/{pubkey}/feerecipient",
"/eth/v1/validator/{pubkey}/gas_limit",
"/eth/v1/validator/{pubkey}/voluntary_exit",
}
}
@@ -47,6 +48,12 @@ func (*ValidatorEndpointFactory) Create(path string) (*apimiddleware.Endpoint, e
endpoint.GetResponse = &GetGasLimitResponseJson{}
endpoint.PostRequest = &SetGasLimitRequestJson{}
endpoint.DeleteRequest = &DeleteGasLimitRequestJson{}
case "/eth/v1/validator/{pubkey}/voluntary_exit":
endpoint.PostRequest = &SetVoluntaryExitRequestJson{}
endpoint.PostResponse = &SetVoluntaryExitResponseJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: setVoluntaryExitEpoch,
}
default:
return nil, errors.New("invalid path")
}


@@ -100,3 +100,22 @@ type SetGasLimitRequestJson struct {
type DeleteGasLimitRequestJson struct {
Pubkey string `json:"pubkey" hex:"true"`
}
type SetVoluntaryExitRequestJson struct {
Pubkey string `json:"pubkey" hex:"true"`
Epoch string `json:"epoch"`
}
type SetVoluntaryExitResponseJson struct {
SignedVoluntaryExit *SignedVoluntaryExitJson `json:"data"`
}
type SignedVoluntaryExitJson struct {
VoluntaryExit *VoluntaryExitJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type VoluntaryExitJson struct {
Epoch string `json:"epoch"`
ValidatorIndex string `json:"validator_index"`
}
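
For clarity, a small sketch of the JSON these containers produce; the function is illustrative only and the field values are made up:

package apimiddleware

import (
	"encoding/json"
	"fmt"
)

// exampleVoluntaryExitResponse marshals an illustrative response so the wire
// shape of the new containers is easy to see.
func exampleVoluntaryExitResponse() {
	resp := &SetVoluntaryExitResponseJson{
		SignedVoluntaryExit: &SignedVoluntaryExitJson{
			VoluntaryExit: &VoluntaryExitJson{Epoch: "300", ValidatorIndex: "2"},
			Signature:     "0xsig",
		},
	}
	b, _ := json.Marshal(resp)
	fmt.Println(string(b))
	// Output: {"data":{"message":{"epoch":"300","validator_index":"2"},"signature":"0xsig"}}
}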


@@ -16,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbservice "github.com/prysmaticlabs/prysm/v4/proto/eth/service"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/validator/client"
"github.com/prysmaticlabs/prysm/v4/validator/keymanager"
"github.com/prysmaticlabs/prysm/v4/validator/keymanager/derived"
slashingprotection "github.com/prysmaticlabs/prysm/v4/validator/slashing-protection-history"
@@ -24,6 +25,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
// ListKeystores implements the standard validator key management API.
@@ -598,7 +600,7 @@ func (s *Server) SetFeeRecipientByPubkey(ctx context.Context, req *ethpbservice.
}
case settings.ProposeConfig == nil:
var builderConfig *validatorServiceConfig.BuilderConfig
if settings.DefaultConfig != nil {
if settings.DefaultConfig != nil && settings.DefaultConfig.BuilderConfig != nil {
builderConfig = settings.DefaultConfig.BuilderConfig.Clone()
}
settings.ProposeConfig = map[[fieldparams.BLSPubkeyLength]byte]*validatorServiceConfig.ProposerOption{
@@ -617,7 +619,7 @@ func (s *Server) SetFeeRecipientByPubkey(ctx context.Context, req *ethpbservice.
}
} else {
var builderConfig = &validatorServiceConfig.BuilderConfig{}
if settings.DefaultConfig != nil {
if settings.DefaultConfig != nil && settings.DefaultConfig.BuilderConfig != nil {
builderConfig = settings.DefaultConfig.BuilderConfig.Clone()
}
settings.ProposeConfig[bytesutil.ToBytes48(validatorKey)] = &validatorServiceConfig.ProposerOption{
@@ -679,3 +681,51 @@ func validatePublicKey(pubkey []byte) error {
}
return nil
}
// SetVoluntaryExit creates a signed voluntary exit message and returns a VoluntaryExit object.
func (s *Server) SetVoluntaryExit(ctx context.Context, req *ethpbservice.SetVoluntaryExitRequest) (*ethpbservice.SetVoluntaryExitResponse, error) {
if s.validatorService == nil {
return nil, status.Error(codes.FailedPrecondition, "Validator service not ready")
}
if err := validatePublicKey(req.Pubkey); err != nil {
return nil, status.Error(codes.FailedPrecondition, err.Error())
}
if s.wallet == nil {
return nil, status.Error(codes.FailedPrecondition, "No wallet found")
}
km, err := s.validatorService.Keymanager()
if err != nil {
return nil, err
}
if req.Epoch == 0 {
genesisResponse, err := s.beaconNodeClient.GetGenesis(ctx, &emptypb.Empty{})
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not create voluntary exit: %v", err)
}
epoch, err := client.CurrentEpoch(genesisResponse.GenesisTime)
if err != nil {
return nil, status.Errorf(codes.Internal, "gRPC call to get genesis time failed: %v", err)
}
req.Epoch = epoch
}
sve, err := client.CreateSignedVoluntaryExit(
ctx,
s.beaconNodeValidatorClient,
km.Sign,
req.Pubkey,
req.Epoch,
)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not create voluntary exit: %v", err)
}
return &ethpbservice.SetVoluntaryExitResponse{
Data: &ethpbservice.SetVoluntaryExitResponse_SignedVoluntaryExit{
Message: &ethpbservice.SetVoluntaryExitResponse_SignedVoluntaryExit_VoluntaryExit{
Epoch: uint64(sve.Exit.Epoch),
ValidatorIndex: uint64(sve.Exit.ValidatorIndex),
},
Signature: sve.Signature,
},
}, nil
}
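
As a side note on the epoch default above, a hypothetical helper isolating the same logic; the CurrentEpoch call and import paths are the ones used in this change, while the NodeClient interface name is assumed from the mock used in the tests and the helper itself is only illustrative:

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/validator/client"
	"google.golang.org/protobuf/types/known/emptypb"
)

// defaultExitEpoch mirrors the branch in SetVoluntaryExit: a zero epoch in the
// request is replaced by the current epoch derived from the chain's genesis time.
func defaultExitEpoch(ctx context.Context, nodeClient eth.NodeClient, requested primitives.Epoch) (primitives.Epoch, error) {
	if requested != 0 {
		return requested, nil
	}
	genesis, err := nodeClient.GetGenesis(ctx, &emptypb.Empty{})
	if err != nil {
		return 0, err
	}
	return client.CurrentEpoch(genesis.GenesisTime)
}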


@@ -1,12 +1,14 @@
package rpc
import (
"bytes"
"context"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -17,6 +19,7 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
validatorserviceconfig "github.com/prysmaticlabs/prysm/v4/config/validator/service"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -41,6 +44,8 @@ import (
keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
)
func TestServer_ListKeystores(t *testing.T) {
@@ -763,7 +768,8 @@ func TestServer_ListFeeRecipientByPubkey(t *testing.T) {
mockValidatorClient := validatormock.NewMockValidatorClient(ctrl)
m := &mock.MockValidator{}
m.SetProposerSettings(tt.args)
err := m.SetProposerSettings(ctx, tt.args)
require.NoError(t, err)
if tt.args == nil {
mockValidatorClient.EXPECT().GetFeeRecipientByPubKey(gomock.Any(), gomock.Any()).Return(tt.cached, nil)
@@ -999,7 +1005,8 @@ func TestServer_FeeRecipientByPubkey(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := &mock.MockValidator{}
m.SetProposerSettings(tt.proposerSettings)
err := m.SetProposerSettings(ctx, tt.proposerSettings)
require.NoError(t, err)
validatorDB := dbtest.SetupDB(t, [][fieldparams.BLSPubkeyLength]byte{})
// save a default here
@@ -1101,7 +1108,8 @@ func TestServer_DeleteFeeRecipientByPubkey(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := &mock.MockValidator{}
m.SetProposerSettings(tt.proposerSettings)
err := m.SetProposerSettings(ctx, tt.proposerSettings)
require.NoError(t, err)
validatorDB := dbtest.SetupDB(t, [][fieldparams.BLSPubkeyLength]byte{})
vs, err := client.NewValidatorService(ctx, &client.Config{
Validator: m,
@@ -1197,7 +1205,8 @@ func TestServer_GetGasLimit(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := &mock.MockValidator{}
m.SetProposerSettings(tt.args)
err := m.SetProposerSettings(ctx, tt.args)
require.NoError(t, err)
vs, err := client.NewValidatorService(ctx, &client.Config{
Validator: m,
})
@@ -1346,7 +1355,8 @@ func TestServer_SetGasLimit(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := &mock.MockValidator{}
m.SetProposerSettings(tt.proposerSettings)
err := m.SetProposerSettings(ctx, tt.proposerSettings)
require.NoError(t, err)
validatorDB := dbtest.SetupDB(t, [][fieldparams.BLSPubkeyLength]byte{})
vs, err := client.NewValidatorService(ctx, &client.Config{
Validator: m,
@@ -1515,7 +1525,8 @@ func TestServer_DeleteGasLimit(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := &mock.MockValidator{}
m.SetProposerSettings(tt.proposerSettings)
err := m.SetProposerSettings(ctx, tt.proposerSettings)
require.NoError(t, err)
validatorDB := dbtest.SetupDB(t, [][fieldparams.BLSPubkeyLength]byte{})
vs, err := client.NewValidatorService(ctx, &client.Config{
Validator: m,
@@ -1540,3 +1551,116 @@ func TestServer_DeleteGasLimit(t *testing.T) {
})
}
}
func TestServer_SetVoluntaryExit(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
defaultWalletPath = setupWalletDir(t)
opts := []accounts.Option{
accounts.WithWalletDir(defaultWalletPath),
accounts.WithKeymanagerType(keymanager.Derived),
accounts.WithWalletPassword(strongPass),
accounts.WithSkipMnemonicConfirm(true),
}
acc, err := accounts.NewCLIManager(opts...)
require.NoError(t, err)
w, err := acc.WalletCreate(ctx)
require.NoError(t, err)
km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false})
require.NoError(t, err)
m := &mock.MockValidator{Km: km}
vs, err := client.NewValidatorService(ctx, &client.Config{
Validator: m,
})
require.NoError(t, err)
dr, ok := km.(*derived.Keymanager)
require.Equal(t, true, ok)
err = dr.RecoverAccountsFromMnemonic(ctx, mocks.TestMnemonic, derived.DefaultMnemonicLanguage, "", 1)
require.NoError(t, err)
pubKeys, err := dr.FetchValidatingPublicKeys(ctx)
require.NoError(t, err)
beaconClient := validatormock.NewMockValidatorClient(ctrl)
mockNodeClient := validatormock.NewMockNodeClient(ctrl)
// Any time in the past will suffice
genesisTime := &timestamppb.Timestamp{
Seconds: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
}
beaconClient.EXPECT().ValidatorIndex(gomock.Any(), &eth.ValidatorIndexRequest{PublicKey: pubKeys[0][:]}).
Times(3).
Return(&eth.ValidatorIndexResponse{Index: 2}, nil)
beaconClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Times(3).
Return(&eth.DomainResponse{SignatureDomain: make([]byte, 32)}, nil /*err*/)
mockNodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Times(3).
Return(&eth.Genesis{GenesisTime: genesisTime}, nil)
s := &Server{
validatorService: vs,
beaconNodeValidatorClient: beaconClient,
wallet: w,
beaconNodeClient: mockNodeClient,
}
type want struct {
epoch primitives.Epoch
validatorIndex uint64
signature []byte
}
tests := []struct {
name string
pubkey []byte
epoch primitives.Epoch
w want
}{
{
name: "Ok: with epoch",
epoch: 30000000,
w: want{
epoch: 30000000,
validatorIndex: 2,
signature: []uint8{175, 157, 5, 134, 253, 2, 193, 35, 176, 43, 217, 36, 39, 240, 24, 79, 207, 133, 150, 7, 237, 16, 54, 244, 64, 27, 244, 17, 8, 225, 140, 1, 172, 24, 35, 95, 178, 116, 172, 213, 113, 182, 193, 61, 192, 65, 162, 253, 19, 202, 111, 164, 195, 215, 0, 205, 95, 7, 30, 251, 244, 157, 210, 155, 238, 30, 35, 219, 177, 232, 174, 62, 218, 69, 23, 249, 180, 140, 60, 29, 190, 249, 229, 95, 235, 236, 81, 33, 60, 4, 201, 227, 70, 239, 167, 2},
},
},
{
name: "Ok: epoch not set",
w: want{
epoch: 0,
validatorIndex: 2,
signature: []uint8{},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
resp, err := s.SetVoluntaryExit(ctx, &ethpbservice.SetVoluntaryExitRequest{Pubkey: pubKeys[0][:], Epoch: tt.epoch})
require.NoError(t, err)
if tt.w.epoch == 0 {
genesisResponse, err := s.beaconNodeClient.GetGenesis(ctx, &emptypb.Empty{})
require.NoError(t, err)
tt.w.epoch, err = client.CurrentEpoch(genesisResponse.GenesisTime)
require.NoError(t, err)
resp2, err := s.SetVoluntaryExit(ctx, &ethpbservice.SetVoluntaryExitRequest{Pubkey: pubKeys[0][:], Epoch: tt.epoch})
require.NoError(t, err)
tt.w.signature = resp2.Data.Signature
}
require.Equal(t, uint64(tt.w.epoch), resp.Data.Message.Epoch)
require.Equal(t, tt.w.validatorIndex, resp.Data.Message.ValidatorIndex)
require.NotEmpty(t, resp.Data.Signature)
ok = bytes.Equal(tt.w.signature, resp.Data.Signature)
require.Equal(t, true, ok)
})
}
}