Compare commits

...

24 Commits

Author SHA1 Message Date
Potuz
3679406a35 Merge branch 'develop' into forkchoice_bench 2024-03-12 11:43:28 -03:00
terence
697bcd418c Save invalid block to temp save-invalid-block-temp (#13722) 2024-03-11 20:34:44 +00:00
terence
ec7949fa4b Use justified checkpoint from head state to build attestation (#13703) 2024-03-11 15:05:40 +00:00
Nishant Das
cb8eb4e955 fix context deadline rejections (#13716) 2024-03-11 04:51:02 +00:00
Chanh Le
800f3b572f chore(execution): Clean up unreachable code; use new(big.Int) instead of big.NewInt(0) (#13715)
* refactor with builtin min/max

* use new(big.Int) for more efficiency
2024-03-11 00:31:55 +00:00
terence
9d3af41acb Remove unused deneb code (#13712)
* Remove unused deneb code

* Gazelle
2024-03-09 00:12:26 +00:00
kasey
07a0a95ee7 Blob verification spectest (#13707)
* use real blob verifier in forkchoice spectest

* wip

* Use real blob sidecar for test

* Set file db correctly

* correctly handle blob cases where valid=false

* work-around spectest's weird Fork in genesis state

* gaz

* revert T-money's log level change

* rm whitespace

* unskip minimal test

* Preston's feedback

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2024-03-08 18:20:38 +00:00
Radosław Kapka
9e7352704c Optimize SubscribeCommitteeSubnets VC action (#13702)
* Optimize `SubscribeCommitteeSubnets` VC action

* test fixes

* remove newline

* review
2024-03-08 18:08:35 +00:00
Nishant Das
2616de1eb1 Check Unrealized Justification Balances In Spectests (#13710)
* add them

* Ensure activation epoch does not overflow

* add them all in

* better check for overflow

* fix tests

* fix tests

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2024-03-08 14:49:34 +00:00
Manu NALEPA
b2e3c29ab3 Improve logging. (#13708)
* Improve logging.

* Make deepsource happy.

* Fix comment.
2024-03-08 12:23:34 +00:00
Preston Van Loon
83538251aa Remove DOMAIN_BLOB_SIDECAR. See https://github.com/ethereum/consensus-specs/pull/3542 (#13706) 2024-03-08 03:55:46 +00:00
Stefan
2442280e37 Fix/race receive block (#13700)
* blob save: add better data checking for empty blob issues (#13647)

(cherry picked from commit daad29d0de)

* avoid part path collisions with mem addr entropy (#13648)

* avoid part path collisions with mem addr entropy

* Regression test

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
(cherry picked from commit 4c66e4d060)

* fix error race

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
2024-03-07 19:46:03 +00:00
Stefan
4608569495 fix race condition when pinging peers (#13701) 2024-03-07 19:21:55 +00:00
Preston Van Loon
20d013a30b Unskip merkle proof tests (#13704) 2024-03-07 19:18:25 +00:00
Potuz
b0a2115a26 Fix UJ (#13688)
* Fix UJ

* gate slashed

* don't filter slashed for active balance

* don't overflow

* fix tests

* fix tests
2024-03-06 20:46:16 +00:00
Potuz
102518e106 pass justified=finalized in Prater (#13695)
* pass justified=finalized in Prater

* fix gazelle mess
2024-03-06 18:45:41 +00:00
james-prysm
e49ed4d554 keymanager api: lowercase statuses (#13696)
* cleanup

* adding test

* address small comment

* gaz
2024-03-06 16:30:17 +00:00
Manu NALEPA
21775eed52 Fix VC DB conversion when no proposer settings is defined and add Experimental flag in the --enable-minimal-slashing-protection help. (#13691)
* VC: Allow DB conversion without proposer settings.

* `enable-minimal-slashing-protection` flag: Add `Experimental warning`.
2024-03-06 14:48:18 +00:00
Radosław Kapka
ee9274a9bc REST VC metrics (#13588)
* REST VC metrics

* validator status is not used

* refactor

* remove test

* server-side

* server-side per endpoint

* cleanup

* add config endpoints

* add proper HTTP methods to endpoints

* initialize missing fields
2024-03-05 18:15:46 +00:00
prylabs-bulldozer[bot]
6d96449fb5 Merge refs/heads/develop into forkchoice_bench 2023-10-18 17:34:20 +00:00
Potuz
89fae22993 convert to test 2023-10-18 13:28:56 -03:00
Potuz
93d2d38130 Merge branch 'develop' into forkchoice_bench 2023-10-17 16:20:51 -03:00
Potuz
ae61abd719 only measure last insertion 2023-10-17 16:19:12 -03:00
Potuz
0fee4a609e forkchoice benchmark quadratic behavior 2023-10-17 15:39:19 -03:00
96 changed files with 3555 additions and 2602 deletions

View File

@@ -6,6 +6,7 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"log.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
visibility = ["//visibility:public"],

View File

@@ -17,7 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/io/file"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
@@ -74,7 +74,12 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
}
log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
log.WithFields(logrus.Fields{
"name": vu.Config.ConfigName,
"fork": version.String(vu.Fork),
}).Info("Detected supported config in remote finalized state")
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")

View File

@@ -22,7 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
)
const (
@@ -309,7 +309,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
}
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
log.WithFields(logrus.Fields{
"validatorIndex": w.ValidatorIndex,
"withdrawalAddress": w.ToExecutionAddress,
}).Error(failure.Message)
@@ -341,9 +341,9 @@ type forkScheduleResponse struct {
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.Atoi(d.Epoch)
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
@@ -355,7 +355,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(uint64(epoch)),
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)
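
Note: the change above from strconv.Atoi to strconv.ParseUint parses the epoch string directly as an unsigned 64-bit value and wraps parse failures with the offending input. A minimal standalone sketch of that pattern (parseEpoch is an illustrative helper, not Prysm API):

package main

import (
	"fmt"
	"strconv"

	"github.com/pkg/errors"
)

// parseEpoch parses a decimal epoch string into a uint64. ParseUint rejects
// negative values and is not tied to the platform int size, unlike Atoi, and
// the wrapped error keeps the bad input for debugging.
func parseEpoch(s string) (uint64, error) {
	epoch, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		return 0, errors.Wrapf(err, "error parsing epoch %s", s)
	}
	return epoch, nil
}

func main() {
	e, err := parseEpoch("194048")
	fmt.Println(e, err) // 194048 <nil>
	_, err = parseEpoch("-1")
	fmt.Println(err) // error parsing epoch -1: ... invalid syntax
}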

api/client/beacon/log.go (new file, +5 lines)
View File

@@ -0,0 +1,5 @@
package beacon
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "beacon")
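
Note: the new log.go above gives the package a shared logger carrying a fixed prefix field, and the earlier hunks in this group replace printf-style calls with structured fields. A self-contained sketch of the pattern, assuming only logrus (field values are illustrative):

package main

import "github.com/sirupsen/logrus"

// Package-scoped logger: every call through it carries prefix=beacon,
// mirroring the new api/client/beacon/log.go above.
var log = logrus.WithField("prefix", "beacon")

func main() {
	// Structured fields replace fmt-style interpolation, so log processors
	// can filter on name/fork instead of parsing a formatted message.
	log.WithFields(logrus.Fields{
		"name": "mainnet",
		"fork": "deneb",
	}).Info("Detected supported config in remote finalized state")
}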

View File

@@ -97,6 +97,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
var err error
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
@@ -105,6 +106,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
})
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")

View File

@@ -290,10 +290,18 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
if params.BeaconConfig().ConfigName != params.PraterName {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
} else {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")

View File

@@ -1189,11 +1189,3 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
require.NoError(b, err)
}
}
func emptyEth1data() *ethpb.Eth1Data {
return &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
}
}

View File

@@ -15,8 +15,6 @@ import (
)
var (
// ErrEmptyExecutionBlock occurs when the execution block is nil.
ErrEmptyExecutionBlock = errors.New("empty execution block")
// ErrInvalidSnapshotRoot occurs when the snapshot root does not match the calculated root.
ErrInvalidSnapshotRoot = errors.New("snapshot root is invalid")
// ErrInvalidDepositCount occurs when the value for mix in length is 0.

View File

@@ -1,15 +1,9 @@
package cache
import (
"errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// ErrNotProposerIndices will be returned when a cache object is not a pointer to
// a ProposerIndices struct.
var ErrNotProposerIndices = errors.New("object is not a proposer indices struct")
// ProposerIndices defines the cached struct for proposer indices.
type ProposerIndices struct {
BlockRoot [32]byte

View File

@@ -40,7 +40,6 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",

View File

@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -114,16 +113,6 @@ func Test_commitmentsToCheck(t *testing.T) {
}
}
func daAlwaysSucceeds(_ [][]byte, _ []*ethpb.BlobSidecar) error {
return nil
}
type mockDA struct {
t *testing.T
scs []blocks.ROBlob
err error
}
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := context.Background()
store := filesystem.NewEphemeralBlobStorage(t)

View File

@@ -19,9 +19,6 @@ var ErrNotFoundState = kv.ErrNotFoundState
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
// as equivalent to db.ErrNotFound.
func IsNotFound(err error) bool {

View File

@@ -44,7 +44,6 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_spf13_afero//:go_default_library",
],

View File

@@ -6,9 +6,7 @@ import (
"path"
"sync"
"testing"
"time"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -129,29 +127,6 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
}
// pollUntil polls a condition function until it returns true or a timeout is reached.
func pollUntil(t *testing.T, fs afero.Fs, expected int) error {
var remainingFolders []os.FileInfo
var err error
// Define the condition function for polling
conditionFunc := func() bool {
remainingFolders, err = afero.ReadDir(fs, ".")
require.NoError(t, err)
return len(remainingFolders) == expected
}
startTime := time.Now()
for {
if conditionFunc() {
break // Condition met, exit the loop
}
if time.Since(startTime) > 30*time.Second {
return errors.New("timeout")
}
time.Sleep(1 * time.Second) // Adjust the sleep interval as needed
}
require.Equal(t, expected, len(remainingFolders))
return nil
}
func TestBlobIndicesBounds(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)

View File

@@ -137,7 +137,7 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
}
}
datafile := StoreDatafilePath(dirPath)
log.Infof("Opening Bolt DB at %s", datafile)
log.WithField("path", datafile).Info("Opening Bolt DB")
boltDB, err := bolt.Open(
datafile,
params.BeaconIoConfig().ReadWritePermissions,

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/sirupsen/logrus"
)
// SaveOrigin loads an ssz serialized Block & BeaconState from an io.Reader
@@ -27,7 +28,11 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return fmt.Errorf("config mismatch, beacon node configured to connect to %s, detected state is for %s", params.BeaconConfig().ConfigName, cf.Config.ConfigName)
}
log.Infof("detected supported config for state & block version, config name=%s, fork name=%s", cf.Config.ConfigName, version.String(cf.Fork))
log.WithFields(logrus.Fields{
"configName": cf.Config.ConfigName,
"forkName": version.String(cf.Fork),
}).Info("Detected supported config for state & block version")
state, err := cf.UnmarshalBeaconState(serState)
if err != nil {
return errors.Wrap(err, "failed to initialize origin state w/ bytes + config+fork")
@@ -57,13 +62,13 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
}
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint block to db")
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")
}
// save state
log.Infof("calling SaveState w/ blockRoot=%x", blockRoot)
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Info("Calling SaveState")
if err = s.SaveState(ctx, state, blockRoot); err != nil {
return errors.Wrap(err, "could not save state")
}

View File

@@ -110,7 +110,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return nil, errors.Wrap(errBlockTimeTooLate, fmt.Sprintf("(%d > %d)", time, latestBlkTime))
}
// Initialize a pointer to eth1 chain's history to start our search from.
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorNum := new(big.Int).SetUint64(latestBlkHeight)
cursorTime := latestBlkTime
var numOfBlocks uint64
@@ -156,15 +156,15 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
}
if cursorTime > time {
return s.findMaxTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMaxTargetEth1Block(ctx, new(big.Int).SetUint64(estimatedBlk), time)
}
return s.findMinTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMinTargetEth1Block(ctx, new(big.Int).SetUint64(estimatedBlk), time)
}
// Performs a search to find a target eth1 block which is earlier than or equal to the
// target time. This method is used when head.time > targetTime
func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := upperBoundBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
for bn := upperBoundBlk; ; bn = new(big.Int).Sub(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -181,7 +181,7 @@ func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big
// Performs a search to find a target eth1 block which is just earlier than or equal to the
// target time. This method is used when head.time < targetTime
func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := lowerBoundBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
for bn := lowerBoundBlk; ; bn = new(big.Int).Add(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -201,7 +201,7 @@ func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big
}
func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*types.HeaderInfo, error) {
bn := big.NewInt(0).SetUint64(bNum)
bn := new(big.Int).SetUint64(bNum)
exists, info, err := s.headerCache.HeaderInfoByHeight(bn)
if err != nil {
return nil, err
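
Note: a zero-valued big.Int already represents 0, so big.NewInt(0).SetUint64(x) initializes the value twice, while new(big.Int).SetUint64(x) allocates once and sets once. The cost difference is marginal; the new form mainly reads as intent: a fresh zero ready to be set. A tiny sketch showing the two forms are equivalent (the height value is arbitrary):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	const height uint64 = 19_500_000

	a := big.NewInt(0).SetUint64(height) // allocate, set to 0, then overwrite
	b := new(big.Int).SetUint64(height)  // allocate zero value, set once

	fmt.Println(a.Cmp(b) == 0) // true: both hold the same value
}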

View File

@@ -23,9 +23,6 @@ var (
ErrInvalidPayloadAttributes = errors.New("payload attributes are invalid / inconsistent")
// ErrUnknownPayloadStatus when the payload status is unknown.
ErrUnknownPayloadStatus = errors.New("unknown payload status")
// ErrConfigMismatch when the execution node's terminal total difficulty or
// terminal block hash received via the API mismatches Prysm's configuration value.
ErrConfigMismatch = errors.New("execution client configuration mismatch")
// ErrAcceptedSyncingPayloadStatus when the status of the payload is syncing or accepted.
ErrAcceptedSyncingPayloadStatus = errors.New("payload status is SYNCING or ACCEPTED")
// ErrInvalidPayloadStatus when the status of the payload is invalid.

View File

@@ -298,9 +298,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
// Start from the deployment block if our last requested block
// is behind it. This is as the deposit logs can only start from the
// block of the deployment of the deposit contract.
if deploymentBlock > currentBlockNum {
currentBlockNum = deploymentBlock
}
currentBlockNum = max(currentBlockNum, deploymentBlock)
// To store all blocks.
headersMap := make(map[uint64]*types.HeaderInfo)
rawLogCount, err := s.depositContractCaller.GetDepositCount(&bind.CallOpts{})
@@ -384,15 +382,13 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
end := currentBlockNum + batchSize
// Appropriately bound the request, as we do not
// want request blocks beyond the current follow distance.
if end > latestFollowHeight {
end = latestFollowHeight
}
end = min(end, latestFollowHeight)
query := ethereum.FilterQuery{
Addresses: []common.Address{
s.cfg.depositContractAddr,
},
FromBlock: big.NewInt(0).SetUint64(start),
ToBlock: big.NewInt(0).SetUint64(end),
FromBlock: new(big.Int).SetUint64(start),
ToBlock: new(big.Int).SetUint64(end),
}
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
@@ -400,7 +396,7 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
withinLimit := remainingLogs < depositLogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
query.ToBlock = new(big.Int).SetUint64(latestFollowHeight)
end = latestFollowHeight
}
logs, err := s.httpLogger.FilterLogs(ctx, query)
@@ -482,11 +478,11 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
}
for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
// Cache eth1 block header here.
_, err := s.BlockHashByHeight(ctx, big.NewInt(0).SetUint64(i))
_, err := s.BlockHashByHeight(ctx, new(big.Int).SetUint64(i))
if err != nil {
return err
}
err = s.ProcessETH1Block(ctx, big.NewInt(0).SetUint64(i))
err = s.ProcessETH1Block(ctx, new(big.Int).SetUint64(i))
if err != nil {
return err
}
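
Note: the clamps above now use Go's built-in min and max (available since Go 1.21), replacing three-line if blocks with single expressions. A minimal sketch of both directions (numbers are arbitrary):

package main

import "fmt"

func main() {
	var currentBlockNum, deploymentBlock uint64 = 100, 250
	var end, latestFollowHeight uint64 = 900, 750

	// Equivalent to: if deploymentBlock > currentBlockNum { currentBlockNum = deploymentBlock }
	currentBlockNum = max(currentBlockNum, deploymentBlock)

	// Equivalent to: if end > latestFollowHeight { end = latestFollowHeight }
	end = min(end, latestFollowHeight)

	fmt.Println(currentBlockNum, end) // 250 750
}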

View File

@@ -77,7 +77,7 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
if currClient != nil {
currClient.Close()
}
log.Infof("Connected to new endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
log.WithField("endpoint", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url)).Info("Connected to new endpoint")
return
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")

View File

@@ -415,14 +415,11 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*types.Hea
requestRange := (endBlock - startBlock) + 1
elems := make([]gethRPC.BatchElem, 0, requestRange)
headers := make([]*types.HeaderInfo, 0, requestRange)
if requestRange == 0 {
return headers, nil
}
for i := startBlock; i <= endBlock; i++ {
header := &types.HeaderInfo{}
elems = append(elems, gethRPC.BatchElem{
Method: "eth_getBlockByNumber",
Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(i)), false},
Args: []interface{}{hexutil.EncodeBig(new(big.Int).SetUint64(i)), false},
Result: header,
Error: error(nil),
})
@@ -675,9 +672,7 @@ func (s *Service) cacheBlockHeaders(start, end uint64) error {
// the allotted limit.
endReq -= 1
}
if endReq > end {
endReq = end
}
endReq = min(endReq, end)
// We call batchRequestHeaders for its header caching side-effect, so we don't need the return value.
_, err := s.batchRequestHeaders(startReq, endReq)
if err != nil {

View File

@@ -47,6 +47,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"benchmarks_test.go",
"ffg_update_test.go",
"forkchoice_test.go",
"last_root_test.go",

View File

@@ -0,0 +1,65 @@
package doublylinkedtree
import (
"context"
"crypto/rand"
"fmt"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
func Test_LinearBehavior(t *testing.T) {
// steps := []int{100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 2000, 3000, 5000, 10000, 20000, 40000}
steps := []int{100, 500, 1000, 5000, 10000, 20000, 40000}
durationsAccumulated := make([]time.Duration, len(steps))
durationsLastInsertion := make([]time.Duration, len(steps))
ctx := context.Background()
for i := range steps {
fmt.Println("Starting test: ", steps[i])
t.Run(fmt.Sprintf("non_finalizing_epochs_%d", steps[i]), func(tt *testing.T) {
start := time.Now()
f := setup(0, 0)
parentRoot := [32]byte{}
newRoot := [32]byte{}
var slot primitives.Slot
var err error
for epoch := primitives.Epoch(0); epoch < primitives.Epoch(steps[i]); epoch++ {
slot, err = slots.EpochStart(epoch)
require.NoError(tt, err)
_, err = rand.Read(newRoot[:])
require.NoError(tt, err)
st, root, err := prepareForkchoiceState(ctx, slot, newRoot, parentRoot, [32]byte{}, 0, 0)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.NoError(tt, f.SetOptimisticToValid(ctx, newRoot))
parentRoot = newRoot
}
durationsAccumulated[i] = time.Since(start)
root, err := f.Head(ctx)
require.NoError(tt, err)
require.Equal(tt, newRoot, root)
durationsLastInsertion[i] = time.Since(start) - durationsAccumulated[i]
// Check that the latencies are compatible with previous
// latencies. We expect the total insertion and head
// computation to be linear. Local tests show 7ms per
// insertion and 40ns per epoch for head computation.
// Numbers here are conservative and should fail on
// quadratic behavior
fmt.Println("Duration accumulated: ", durationsAccumulated[i], ". Duration last insertion: ", durationsLastInsertion[i])
if i == 0 {
return
}
for j := 0; j < i; j++ {
require.Equal(tt, true, durationsAccumulated[i] < time.Duration(steps[i]*steps[i])*time.Millisecond)
if steps[i] > 1000 {
require.Equal(tt, true, durationsLastInsertion[i] < time.Duration(steps[i]*steps[i]/1000)*time.Microsecond)
}
}
})
}
}
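
Note: this test measures accumulated insertion time per epoch and asserts loose bounds that would fail on quadratic behavior. If a conventional benchmark were wanted instead, the same loop could be expressed as a testing.B benchmark; the sketch below assumes it lives in the same package and reuses the setup and prepareForkchoiceState helpers used above (run with: go test -bench=BenchmarkInsertNode -run='^$'):

// Sketch only: one insertion per iteration, reusing the package helpers above.
func BenchmarkInsertNode(b *testing.B) {
	ctx := context.Background()
	f := setup(0, 0)
	parentRoot := [32]byte{}
	newRoot := [32]byte{}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		slot, err := slots.EpochStart(primitives.Epoch(i))
		require.NoError(b, err)
		_, err = rand.Read(newRoot[:])
		require.NoError(b, err)
		st, root, err := prepareForkchoiceState(ctx, slot, newRoot, parentRoot, [32]byte{}, 0, 0)
		require.NoError(b, err)
		require.NoError(b, f.InsertNode(ctx, st, root))
		require.NoError(b, f.SetOptimisticToValid(ctx, newRoot))
		parentRoot = newRoot
	}
}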

View File

@@ -56,6 +56,7 @@ type Service struct {
started bool
isPreGenesis bool
pingMethod func(ctx context.Context, id peer.ID) error
pingMethodLock sync.RWMutex
cancel context.CancelFunc
cfg *Config
peers *peers.Status
@@ -354,10 +355,14 @@ func (s *Service) MetadataSeq() uint64 {
// AddPingMethod adds the metadata ping rpc method to the p2p service, so that it can
// be used to refresh ENR.
func (s *Service) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error) {
s.pingMethodLock.Lock()
s.pingMethod = reqFunc
s.pingMethodLock.Unlock()
}
func (s *Service) pingPeers() {
s.pingMethodLock.RLock()
defer s.pingMethodLock.RUnlock()
if s.pingMethod == nil {
return
}
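
Note: the new pingMethodLock makes writes to the pingMethod callback in AddPingMethod safe against the readers in pingPeers, which is the race fixed by #13701. The pattern, reduced to a self-contained sketch:

package main

import (
	"fmt"
	"sync"
)

// Service holds a callback that may be installed after goroutines that read it
// have already started, so every access goes through an RWMutex.
type Service struct {
	mu   sync.RWMutex
	ping func(id int) error
}

// AddPingMethod installs the callback under the write lock.
func (s *Service) AddPingMethod(f func(id int) error) {
	s.mu.Lock()
	s.ping = f
	s.mu.Unlock()
}

// pingPeers reads the callback under the read lock; concurrent readers do not block each other.
func (s *Service) pingPeers(ids []int) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.ping == nil {
		return
	}
	for _, id := range ids {
		if err := s.ping(id); err != nil {
			fmt.Println("ping failed:", id, err)
		}
	}
}

func main() {
	s := &Service{}
	s.AddPingMethod(func(id int) error { fmt.Println("pinged", id); return nil })
	s.pingPeers([]int{1, 2, 3})
}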

View File

@@ -8,11 +8,11 @@ var (
ErrInvalidFinalizedRoot = errors.New("invalid finalized root")
ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
ErrGeneric = errors.New("internal service error")
ErrInvalidParent = errors.New("mismatched parent root")
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrResourceUnavailable = errors.New("resource requested unavailable")
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrResourceUnavailable = errors.New("resource requested unavailable")
)

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"endpoints.go",
"log.go",
"service.go",
],
@@ -58,6 +59,9 @@ go_library(
"@com_github_grpc_ecosystem_go_grpc_middleware//tracing/opentracing:go_default_library",
"@com_github_grpc_ecosystem_go_grpc_prometheus//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promhttp:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//plugin/ocgrpc:go_default_library",
"@org_golang_google_grpc//:go_default_library",
@@ -70,23 +74,14 @@ go_library(
go_test(
name = "go_default_test",
size = "medium",
srcs = ["service_test.go"],
srcs = [
"endpoints_test.go",
"service_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/blob:go_default_library",
"//beacon-chain/rpc/eth/builder:go_default_library",
"//beacon-chain/rpc/eth/debug:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/light-client:go_default_library",
"//beacon-chain/rpc/eth/node:go_default_library",
"//beacon-chain/rpc/eth/rewards:go_default_library",
"//beacon-chain/rpc/eth/validator:go_default_library",
"//beacon-chain/rpc/prysm/beacon:go_default_library",
"//beacon-chain/rpc/prysm/node:go_default_library",
"//beacon-chain/rpc/prysm/validator:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -401,7 +401,19 @@ func (s *Service) GetAttestationData(
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not get target root")}
}
justifiedCheckpoint := s.FinalizedFetcher.CurrentJustifiedCheckpt()
headState, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not get head state")}
}
if coreTime.CurrentEpoch(headState) < slots.ToEpoch(req.Slot) { // Ensure justified checkpoint safety by processing head state across the boundary.
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, req.Slot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not process slots up to %d: %v", req.Slot, err)}
}
}
justifiedCheckpoint := headState.CurrentJustifiedCheckpoint()
if err = s.AttestationCache.Put(&cache.AttestationConsensusData{
Slot: req.Slot,
HeadRoot: headRoot,

View File

@@ -0,0 +1,780 @@
package rpc
import (
"net/http"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/beacon"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/blob"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/config"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/debug"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/events"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/light-client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/node"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/rewards"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/lookup"
beaconprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/beacon"
nodeprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/node"
validatorv1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/validator"
validatorprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/validator"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
)
type endpoint struct {
template string
name string
handler http.HandlerFunc
methods []string
}
func (s *Service) endpoints(
enableDebug bool,
blocker lookup.Blocker,
stater lookup.Stater,
rewardFetcher rewards.BlockRewardsFetcher,
validatorServer *validatorv1alpha1.Server,
coreService *core.Service,
ch *stategen.CanonicalHistory,
) []endpoint {
endpoints := make([]endpoint, 0)
endpoints = append(endpoints, s.rewardsEndpoints(blocker, stater, rewardFetcher)...)
endpoints = append(endpoints, s.builderEndpoints(stater)...)
endpoints = append(endpoints, s.blobEndpoints(blocker)...)
endpoints = append(endpoints, s.validatorEndpoints(validatorServer, stater, coreService, rewardFetcher)...)
endpoints = append(endpoints, s.nodeEndpoints()...)
endpoints = append(endpoints, s.beaconEndpoints(ch, stater, blocker, validatorServer, coreService)...)
endpoints = append(endpoints, s.configEndpoints()...)
endpoints = append(endpoints, s.lightClientEndpoints(blocker, stater)...)
endpoints = append(endpoints, s.eventsEndpoints()...)
endpoints = append(endpoints, s.prysmBeaconEndpoints(ch, stater)...)
endpoints = append(endpoints, s.prysmNodeEndpoints()...)
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService, stater)...)
if enableDebug {
endpoints = append(endpoints, s.debugEndpoints(stater)...)
}
return endpoints
}
func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater, rewardFetcher rewards.BlockRewardsFetcher) []endpoint {
server := &rewards.Server{
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
BlockRewardFetcher: rewardFetcher,
}
const namespace = "rewards"
return []endpoint{
{
template: "/eth/v1/beacon/rewards/blocks/{block_id}",
name: namespace + ".BlockRewards",
handler: server.BlockRewards,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/rewards/attestations/{epoch}",
name: namespace + ".AttestationRewards",
handler: server.AttestationRewards,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/beacon/rewards/sync_committee/{block_id}",
name: namespace + ".SyncCommitteeRewards",
handler: server.SyncCommitteeRewards,
methods: []string{http.MethodPost},
},
}
}
func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
server := &builder.Server{
FinalizationFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
Stater: stater,
}
const namespace = "builder"
return []endpoint{
{
template: "/eth/v1/builder/states/{state_id}/expected_withdrawals",
name: namespace + ".ExpectedWithdrawals",
handler: server.ExpectedWithdrawals,
methods: []string{http.MethodGet},
},
}
}
func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
server := &blob.Server{
Blocker: blocker,
}
const namespace = "blob"
return []endpoint{
{
template: "/eth/v1/beacon/blob_sidecars/{block_id}",
name: namespace + ".Blobs",
handler: server.Blobs,
methods: []string{http.MethodGet},
},
}
}
func (s *Service) validatorEndpoints(
validatorServer *validatorv1alpha1.Server,
stater lookup.Stater,
coreService *core.Service,
rewardFetcher rewards.BlockRewardsFetcher,
) []endpoint {
server := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
AttestationsPool: s.cfg.AttestationsPool,
PeerManager: s.cfg.PeerManager,
Broadcaster: s.cfg.Broadcaster,
V1Alpha1Server: validatorServer,
Stater: stater,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
BeaconDB: s.cfg.BeaconDB,
BlockBuilder: s.cfg.BlockBuilder,
OperationNotifier: s.cfg.OperationNotifier,
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
PayloadIDCache: s.cfg.PayloadIDCache,
CoreService: coreService,
BlockRewardFetcher: rewardFetcher,
}
const namespace = "validator"
return []endpoint{
{
template: "/eth/v1/validator/aggregate_attestation",
name: namespace + ".GetAggregateAttestation",
handler: server.GetAggregateAttestation,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/validator/contribution_and_proofs",
name: namespace + ".SubmitContributionAndProofs",
handler: server.SubmitContributionAndProofs,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/validator/aggregate_and_proofs",
name: namespace + ".SubmitAggregateAndProofs",
handler: server.SubmitAggregateAndProofs,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/validator/sync_committee_contribution",
name: namespace + ".ProduceSyncCommitteeContribution",
handler: server.ProduceSyncCommitteeContribution,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/validator/sync_committee_subscriptions",
name: namespace + ".SubmitSyncCommitteeSubscription",
handler: server.SubmitSyncCommitteeSubscription,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/validator/beacon_committee_subscriptions",
name: namespace + ".SubmitBeaconCommitteeSubscription",
handler: server.SubmitBeaconCommitteeSubscription,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/validator/attestation_data",
name: namespace + ".GetAttestationData",
handler: server.GetAttestationData,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/validator/register_validator",
name: namespace + ".RegisterValidator",
handler: server.RegisterValidator,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/validator/duties/attester/{epoch}",
name: namespace + ".GetAttesterDuties",
handler: server.GetAttesterDuties,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/validator/duties/proposer/{epoch}",
name: namespace + ".GetProposerDuties",
handler: server.GetProposerDuties,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/validator/duties/sync/{epoch}",
name: namespace + ".GetSyncCommitteeDuties",
handler: server.GetSyncCommitteeDuties,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/validator/prepare_beacon_proposer",
name: namespace + ".PrepareBeaconProposer",
handler: server.PrepareBeaconProposer,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/validator/liveness/{epoch}",
name: namespace + ".GetLiveness",
handler: server.GetLiveness,
methods: []string{http.MethodPost},
},
{
template: "/eth/v2/validator/blocks/{slot}",
name: namespace + ".ProduceBlockV2",
handler: server.ProduceBlockV2,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/validator/blinded_blocks/{slot}",
name: namespace + ".ProduceBlindedBlock",
handler: server.ProduceBlindedBlock,
methods: []string{http.MethodGet},
},
{
template: "/eth/v3/validator/blocks/{slot}",
name: namespace + ".ProduceBlockV3",
handler: server.ProduceBlockV3,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/validator/beacon_committee_selections",
name: namespace + ".BeaconCommitteeSelections",
handler: server.BeaconCommitteeSelections,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/validator/sync_committee_selections",
name: namespace + ".SyncCommittee Selections",
handler: server.SyncCommitteeSelections,
methods: []string{http.MethodPost},
},
}
}
func (s *Service) nodeEndpoints() []endpoint {
server := &node.Server{
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
}
const namespace = "node"
return []endpoint{
{
template: "/eth/v1/node/syncing",
name: namespace + ".GetSyncStatus",
handler: server.GetSyncStatus,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/node/identity",
name: namespace + ".GetIdentity",
handler: server.GetIdentity,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/node/peers/{peer_id}",
name: namespace + ".GetPeer",
handler: server.GetPeer,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/node/peers",
name: namespace + ".GetPeers",
handler: server.GetPeers,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/node/peer_count",
name: namespace + ".GetPeerCount",
handler: server.GetPeerCount,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/node/version",
name: namespace + ".GetVersion",
handler: server.GetVersion,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/node/health",
name: namespace + ".GetHealth",
handler: server.GetHealth,
methods: []string{http.MethodGet},
},
}
}
func (s *Service) beaconEndpoints(
ch *stategen.CanonicalHistory,
stater lookup.Stater,
blocker lookup.Blocker,
validatorServer *validatorv1alpha1.Server,
coreService *core.Service,
) []endpoint {
server := &beacon.Server{
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
StateGenService: s.cfg.StateGen,
Stater: stater,
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
VoluntaryExitsPool: s.cfg.ExitPool,
V1Alpha1ValidatorServer: validatorServer,
SyncChecker: s.cfg.SyncService,
ExecutionPayloadReconstructor: s.cfg.ExecutionPayloadReconstructor,
BLSChangesPool: s.cfg.BLSChangesPool,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
CoreService: coreService,
}
const namespace = "beacon"
return []endpoint{
{
template: "/eth/v1/beacon/states/{state_id}/committees",
name: namespace + ".GetCommittees",
handler: server.GetCommittees,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/states/{state_id}/fork",
name: namespace + ".GetStateFork",
handler: server.GetStateFork,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/states/{state_id}/root",
name: namespace + ".GetStateRoot",
handler: server.GetStateRoot,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/states/{state_id}/sync_committees",
name: namespace + ".GetSyncCommittees",
handler: server.GetSyncCommittees,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/states/{state_id}/randao",
name: namespace + ".GetRandao",
handler: server.GetRandao,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/blocks",
name: namespace + ".PublishBlock",
handler: server.PublishBlock,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/beacon/blinded_blocks",
name: namespace + ".PublishBlindedBlock",
handler: server.PublishBlindedBlock,
methods: []string{http.MethodPost},
},
{
template: "/eth/v2/beacon/blocks",
name: namespace + ".PublishBlockV2",
handler: server.PublishBlockV2,
methods: []string{http.MethodPost},
},
{
template: "/eth/v2/beacon/blinded_blocks",
name: namespace + ".PublishBlindedBlockV2",
handler: server.PublishBlindedBlockV2,
methods: []string{http.MethodPost},
},
{
template: "/eth/v2/beacon/blocks/{block_id}",
name: namespace + ".GetBlockV2",
handler: server.GetBlockV2,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/blocks/{block_id}/attestations",
name: namespace + ".GetBlockAttestations",
handler: server.GetBlockAttestations,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/blinded_blocks/{block_id}",
name: namespace + ".GetBlindedBlock",
handler: server.GetBlindedBlock,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/blocks/{block_id}/root",
name: namespace + ".GetBlockRoot",
handler: server.GetBlockRoot,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/pool/attestations",
name: namespace + ".ListAttestations",
handler: server.ListAttestations,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/pool/attestations",
name: namespace + ".SubmitAttestations",
handler: server.SubmitAttestations,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/beacon/pool/voluntary_exits",
name: namespace + ".ListVoluntaryExits",
handler: server.ListVoluntaryExits,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/pool/voluntary_exits",
name: namespace + ".SubmitVoluntaryExit",
handler: server.SubmitVoluntaryExit,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/beacon/pool/sync_committees",
name: namespace + ".SubmitSyncCommitteeSignatures",
handler: server.SubmitSyncCommitteeSignatures,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/beacon/pool/bls_to_execution_changes",
name: namespace + ".ListBLSToExecutionChanges",
handler: server.ListBLSToExecutionChanges,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/pool/bls_to_execution_changes",
name: namespace + ".SubmitBLSToExecutionChanges",
handler: server.SubmitBLSToExecutionChanges,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/beacon/pool/attester_slashings",
name: namespace + ".GetAttesterSlashings",
handler: server.GetAttesterSlashings,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/pool/attester_slashings",
name: namespace + ".SubmitAttesterSlashing",
handler: server.SubmitAttesterSlashing,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/beacon/pool/proposer_slashings",
name: namespace + ".GetProposerSlashings",
handler: server.GetProposerSlashings,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/pool/proposer_slashings",
name: namespace + ".SubmitProposerSlashing",
handler: server.SubmitProposerSlashing,
methods: []string{http.MethodPost},
},
{
template: "/eth/v1/beacon/headers",
name: namespace + ".GetBlockHeaders",
handler: server.GetBlockHeaders,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/headers/{block_id}",
name: namespace + ".GetBlockHeader",
handler: server.GetBlockHeader,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/genesis",
name: namespace + ".GetGenesis",
handler: server.GetGenesis,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/states/{state_id}/finality_checkpoints",
name: namespace + ".GetFinalityCheckpoints",
handler: server.GetFinalityCheckpoints,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/states/{state_id}/validators",
name: namespace + ".GetValidators",
handler: server.GetValidators,
methods: []string{http.MethodGet, http.MethodPost},
},
{
template: "/eth/v1/beacon/states/{state_id}/validators/{validator_id}",
name: namespace + ".GetValidator",
handler: server.GetValidator,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/states/{state_id}/validator_balances",
name: namespace + ".GetValidatorBalances",
handler: server.GetValidatorBalances,
methods: []string{http.MethodGet, http.MethodPost},
},
}
}
func (s *Service) configEndpoints() []endpoint {
const namespace = "config"
return []endpoint{
{
template: "/eth/v1/config/deposit_contract",
name: namespace + ".GetDepositContract",
handler: config.GetDepositContract,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/config/fork_schedule",
name: namespace + ".GetForkSchedule",
handler: config.GetForkSchedule,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/config/spec",
name: namespace + ".GetSpec",
handler: config.GetSpec,
methods: []string{http.MethodGet},
},
}
}
func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Stater) []endpoint {
server := &lightclient.Server{
Blocker: blocker,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
}
const namespace = "lightclient"
return []endpoint{
{
template: "/eth/v1/beacon/light_client/bootstrap/{block_root}",
name: namespace + ".GetLightClientBootstrap",
handler: server.GetLightClientBootstrap,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/light_client/updates",
name: namespace + ".GetLightClientUpdatesByRange",
handler: server.GetLightClientUpdatesByRange,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/light_client/finality_update",
name: namespace + ".GetLightClientFinalityUpdate",
handler: server.GetLightClientFinalityUpdate,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/light_client/optimistic_update",
name: namespace + ".GetLightClientOptimisticUpdate",
handler: server.GetLightClientOptimisticUpdate,
methods: []string{http.MethodGet},
},
}
}
func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
server := &debug.Server{
BeaconDB: s.cfg.BeaconDB,
HeadFetcher: s.cfg.HeadFetcher,
Stater: stater,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
ForkFetcher: s.cfg.ForkFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
}
const namespace = "debug"
return []endpoint{
{
template: "/eth/v2/debug/beacon/states/{state_id}",
name: namespace + ".GetBeaconStateV2",
handler: server.GetBeaconStateV2,
methods: []string{http.MethodGet},
},
{
template: "/eth/v2/debug/beacon/heads",
name: namespace + ".GetForkChoiceHeadsV2",
handler: server.GetForkChoiceHeadsV2,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/debug/fork_choice",
name: namespace + ".GetForkChoice",
handler: server.GetForkChoice,
methods: []string{http.MethodGet},
},
}
}
func (s *Service) eventsEndpoints() []endpoint {
server := &events.Server{
StateNotifier: s.cfg.StateNotifier,
OperationNotifier: s.cfg.OperationNotifier,
HeadFetcher: s.cfg.HeadFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
}
const namespace = "events"
return []endpoint{
{
template: "/eth/v1/events",
name: namespace + ".StreamEvents",
handler: server.StreamEvents,
methods: []string{http.MethodGet},
},
}
}
// Prysm custom endpoints
func (s *Service) prysmBeaconEndpoints(ch *stategen.CanonicalHistory, stater lookup.Stater) []endpoint {
server := &beaconprysm.Server{
SyncChecker: s.cfg.SyncService,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
Stater: stater,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
}
const namespace = "prysm.beacon"
return []endpoint{
{
template: "/prysm/v1/beacon/weak_subjectivity",
name: namespace + ".GetWeakSubjectivity",
handler: server.GetWeakSubjectivity,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/states/{state_id}/validator_count",
name: namespace + ".GetValidatorCount",
handler: server.GetValidatorCount,
methods: []string{http.MethodGet},
},
{
template: "/prysm/v1/beacon/states/{state_id}/validator_count",
name: namespace + ".GetValidatorCount",
handler: server.GetValidatorCount,
methods: []string{http.MethodGet},
},
}
}
func (s *Service) prysmNodeEndpoints() []endpoint {
server := &nodeprysm.Server{
BeaconDB: s.cfg.BeaconDB,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
}
const namespace = "prysm.node"
return []endpoint{
{
template: "/prysm/node/trusted_peers",
name: namespace + ".ListTrustedPeer",
handler: server.ListTrustedPeer,
methods: []string{http.MethodGet},
},
{
template: "/prysm/v1/node/trusted_peers",
name: namespace + ".ListTrustedPeer",
handler: server.ListTrustedPeer,
methods: []string{http.MethodGet},
},
{
template: "/prysm/node/trusted_peers",
name: namespace + ".AddTrustedPeer",
handler: server.AddTrustedPeer,
methods: []string{http.MethodPost},
},
{
template: "/prysm/v1/node/trusted_peers",
name: namespace + ".AddTrustedPeer",
handler: server.AddTrustedPeer,
methods: []string{http.MethodPost},
},
{
template: "/prysm/node/trusted_peers/{peer_id}",
name: namespace + ".RemoveTrustedPeer",
handler: server.RemoveTrustedPeer,
methods: []string{http.MethodDelete},
},
{
template: "/prysm/v1/node/trusted_peers/{peer_id}",
name: namespace + ".RemoveTrustedPeer",
handler: server.RemoveTrustedPeer,
methods: []string{http.MethodDelete},
},
}
}
func (s *Service) prysmValidatorEndpoints(coreService *core.Service, stater lookup.Stater) []endpoint {
server := &validatorprysm.Server{
CoreService: coreService,
}
const namespace = "prysm.validator"
return []endpoint{
{
template: "/prysm/validators/performance",
name: namespace + ".GetValidatorPerformance",
handler: server.GetValidatorPerformance,
methods: []string{http.MethodPost},
},
{
template: "/prysm/v1/validators/performance",
name: namespace + ".GetValidatorPerformance",
handler: server.GetValidatorPerformance,
methods: []string{http.MethodPost},
},
}
}
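
Note: this new endpoints.go declares every HTTP route as data in a flat []endpoint slice; the router wiring itself is not part of this hunk. A hypothetical sketch of how such a slice could be mounted on a gorilla/mux router follows (the actual registration in service.go, and the per-endpoint metrics implied by the name field, are not shown in this diff; the endpoint struct is redefined only to keep the sketch self-contained):

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

type endpoint struct {
	template string
	name     string
	handler  http.HandlerFunc
	methods  []string
}

// registerEndpoints mounts each endpoint on the router. The name field is what
// per-endpoint metrics or tracing could key on.
func registerEndpoints(r *mux.Router, eps []endpoint) {
	for _, e := range eps {
		r.HandleFunc(e.template, e.handler).Methods(e.methods...).Name(e.name)
	}
}

func main() {
	r := mux.NewRouter()
	registerEndpoints(r, []endpoint{{
		template: "/eth/v1/node/version",
		name:     "node.GetVersion",
		handler:  func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, `{"data":{"version":"sketch"}}`) },
		methods:  []string{http.MethodGet},
	}})
	fmt.Println(http.ListenAndServe(":8080", r))
}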

View File

@@ -0,0 +1,150 @@
package rpc
import (
"net/http"
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
)
func Test_endpoints(t *testing.T) {
rewardsRoutes := map[string][]string{
"/eth/v1/beacon/rewards/blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/rewards/attestations/{epoch}": {http.MethodPost},
"/eth/v1/beacon/rewards/sync_committee/{block_id}": {http.MethodPost},
}
beaconRoutes := map[string][]string{
"/eth/v1/beacon/genesis": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/root": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/fork": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/finality_checkpoints": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/validators": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/states/{state_id}/validators/{validator_id}": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/validator_balances": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/states/{state_id}/committees": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/sync_committees": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/randao": {http.MethodGet},
"/eth/v1/beacon/headers": {http.MethodGet},
"/eth/v1/beacon/headers/{block_id}": {http.MethodGet},
"/eth/v1/beacon/blinded_blocks": {http.MethodPost},
"/eth/v2/beacon/blinded_blocks": {http.MethodPost},
"/eth/v1/beacon/blocks": {http.MethodPost},
"/eth/v2/beacon/blocks": {http.MethodPost},
"/eth/v1/beacon/blocks/{block_id}": {http.MethodGet},
"/eth/v2/beacon/blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/blocks/{block_id}/root": {http.MethodGet},
"/eth/v1/beacon/blocks/{block_id}/attestations": {http.MethodGet},
"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
"/eth/v1/beacon/deposit_snapshot": {http.MethodGet},
"/eth/v1/beacon/blinded_blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/pool/attestations": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/attester_slashings": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/proposer_slashings": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/sync_committees": {http.MethodPost},
"/eth/v1/beacon/pool/voluntary_exits": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/bls_to_execution_changes": {http.MethodGet, http.MethodPost},
}
lightClientRoutes := map[string][]string{
"/eth/v1/beacon/light_client/bootstrap/{block_root}": {http.MethodGet},
"/eth/v1/beacon/light_client/updates": {http.MethodGet},
"/eth/v1/beacon/light_client/finality_update": {http.MethodGet},
"/eth/v1/beacon/light_client/optimistic_update": {http.MethodGet},
}
builderRoutes := map[string][]string{
"/eth/v1/builder/states/{state_id}/expected_withdrawals": {http.MethodGet},
}
blobRoutes := map[string][]string{
"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
}
configRoutes := map[string][]string{
"/eth/v1/config/fork_schedule": {http.MethodGet},
"/eth/v1/config/spec": {http.MethodGet},
"/eth/v1/config/deposit_contract": {http.MethodGet},
}
debugRoutes := map[string][]string{
"/eth/v1/debug/beacon/states/{state_id}": {http.MethodGet},
"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
"/eth/v2/debug/beacon/heads": {http.MethodGet},
"/eth/v1/debug/fork_choice": {http.MethodGet},
}
eventsRoutes := map[string][]string{
"/eth/v1/events": {http.MethodGet},
}
nodeRoutes := map[string][]string{
"/eth/v1/node/identity": {http.MethodGet},
"/eth/v1/node/peers": {http.MethodGet},
"/eth/v1/node/peers/{peer_id}": {http.MethodGet},
"/eth/v1/node/peer_count": {http.MethodGet},
"/eth/v1/node/version": {http.MethodGet},
"/eth/v1/node/syncing": {http.MethodGet},
"/eth/v1/node/health": {http.MethodGet},
}
validatorRoutes := map[string][]string{
"/eth/v1/validator/duties/attester/{epoch}": {http.MethodPost},
"/eth/v1/validator/duties/proposer/{epoch}": {http.MethodGet},
"/eth/v1/validator/duties/sync/{epoch}": {http.MethodPost},
"/eth/v2/validator/blocks/{slot}": {http.MethodGet},
"/eth/v3/validator/blocks/{slot}": {http.MethodGet},
"/eth/v1/validator/blinded_blocks/{slot}": {http.MethodGet},
"/eth/v1/validator/attestation_data": {http.MethodGet},
"/eth/v1/validator/aggregate_attestation": {http.MethodGet},
"/eth/v1/validator/aggregate_and_proofs": {http.MethodPost},
"/eth/v1/validator/beacon_committee_subscriptions": {http.MethodPost},
"/eth/v1/validator/sync_committee_subscriptions": {http.MethodPost},
"/eth/v1/validator/beacon_committee_selections": {http.MethodPost},
"/eth/v1/validator/sync_committee_selections": {http.MethodPost},
"/eth/v1/validator/sync_committee_contribution": {http.MethodGet},
"/eth/v1/validator/contribution_and_proofs": {http.MethodPost},
"/eth/v1/validator/prepare_beacon_proposer": {http.MethodPost},
"/eth/v1/validator/register_validator": {http.MethodPost},
"/eth/v1/validator/liveness/{epoch}": {http.MethodPost},
}
prysmBeaconRoutes := map[string][]string{
"/prysm/v1/beacon/weak_subjectivity": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
"/prysm/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
}
prysmNodeRoutes := map[string][]string{
"/prysm/node/trusted_peers": {http.MethodGet, http.MethodPost},
"/prysm/v1/node/trusted_peers": {http.MethodGet, http.MethodPost},
"/prysm/node/trusted_peers/{peer_id}": {http.MethodDelete},
"/prysm/v1/node/trusted_peers/{peer_id}": {http.MethodDelete},
}
prysmValidatorRoutes := map[string][]string{
"/prysm/validators/performance": {http.MethodPost},
"/prysm/v1/validators/performance": {http.MethodPost},
}
s := &Service{cfg: &Config{}}
routesMap := combineMaps(beaconRoutes, builderRoutes, configRoutes, debugRoutes, eventsRoutes, nodeRoutes, validatorRoutes, rewardsRoutes, lightClientRoutes, blobRoutes, prysmValidatorRoutes, prysmNodeRoutes, prysmBeaconRoutes)
actual := s.endpoints(true, nil, nil, nil, nil, nil, nil)
for _, e := range actual {
methods, ok := routesMap[e.template]
assert.Equal(t, true, ok, "endpoint "+e.template+" not found")
if ok {
for _, em := range e.methods {
methodFound := false
for _, m := range methods {
if m == em {
methodFound = true
break
}
}
assert.Equal(t, true, methodFound, "method "+em+" for endpoint "+e.template+" not found")
}
}
}
}

View File

@@ -156,9 +156,6 @@ func TestGetSpec(t *testing.T) {
var dam [4]byte
copy(dam[:], []byte{'1', '0', '0', '0'})
config.DomainApplicationMask = dam
var dbs [4]byte
copy(dam[:], []byte{'2', '0', '0', '0'})
config.DomainBlobSidecar = dbs
params.OverrideBeaconConfig(config)
@@ -173,7 +170,7 @@ func TestGetSpec(t *testing.T) {
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
assert.Equal(t, 130, len(data))
assert.Equal(t, 129, len(data))
for k, v := range data {
switch k {
case "CONFIG_NAME":

View File

@@ -838,10 +838,14 @@ func TestGetAttestationData(t *testing.T) {
justifiedRoot, err := justifiedBlock.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root for justified block")
slot := 3*params.BeaconConfig().SlotsPerEpoch + 1
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slot))
justifiedCheckpoint := &ethpbalpha.Checkpoint{
Epoch: 2,
Root: justifiedRoot[:],
}
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpoint))
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
chain := &mockChain.ChainService{
Optimistic: false,
@@ -849,6 +853,7 @@ func TestGetAttestationData(t *testing.T) {
Root: blockRoot[:],
CurrentJustifiedCheckPoint: justifiedCheckpoint,
TargetRoot: blockRoot,
State: beaconState,
}
s := &Server{
@@ -1076,10 +1081,14 @@ func TestGetAttestationData(t *testing.T) {
justifiedRoot, err := justifiedBlock.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root for justified block")
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slot))
justifiedCheckpt := &ethpbalpha.Checkpoint{
Epoch: 0,
Root: justifiedRoot[:],
}
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpt))
require.NoError(t, err)
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
chain := &mockChain.ChainService{
@@ -1087,6 +1096,7 @@ func TestGetAttestationData(t *testing.T) {
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
CurrentJustifiedCheckPoint: justifiedCheckpt,
TargetRoot: blockRoot,
State: beaconState,
}
s := &Server{
@@ -1163,17 +1173,24 @@ func TestGetAttestationData(t *testing.T) {
require.NoError(t, err, "Could not hash beacon block")
justifiedBlockRoot, err := justifiedBlock.Block.HashTreeRoot()
require.NoError(t, err, "Could not hash justified block")
slot := primitives.Slot(10000)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slot))
justifiedCheckpt := &ethpbalpha.Checkpoint{
Epoch: slots.ToEpoch(1500),
Root: justifiedBlockRoot[:],
}
slot := primitives.Slot(10000)
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpt))
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
chain := &mockChain.ChainService{
Root: blockRoot[:],
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
CurrentJustifiedCheckPoint: justifiedCheckpt,
TargetRoot: blockRoot,
State: beaconState,
}
s := &Server{

View File

@@ -49,12 +49,16 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
justifiedBlockRoot, err := justifiedBlock.Block.HashTreeRoot()
require.NoError(t, err, "Could not hash justified block")
slot := primitives.Slot(10000)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slot))
justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: slots.ToEpoch(1500),
Root: justifiedBlockRoot[:],
}
require.NoError(t, err)
slot := primitives.Slot(10000)
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpoint))
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -62,7 +66,7 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:]},
HeadFetcher: &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:], State: beaconState},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},

View File

@@ -102,17 +102,21 @@ func TestGetAttestationData_OK(t *testing.T) {
justifiedRoot, err := justifiedBlock.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root for justified block")
slot := 3*params.BeaconConfig().SlotsPerEpoch + 1
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slot))
justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: 2,
Root: justifiedRoot[:],
}
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpoint))
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{TargetRoot: targetRoot, Root: blockRoot[:]},
HeadFetcher: &mock.ChainService{TargetRoot: targetRoot, Root: blockRoot[:], State: beaconState},
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
@@ -338,10 +342,15 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
targetRoot, err := targetBlock.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root for target block")
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slot))
justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: 0,
Root: justifiedRoot[:],
}
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpoint))
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -350,7 +359,7 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{
TargetRoot: targetRoot, Root: blockRoot[:],
TargetRoot: targetRoot, Root: blockRoot[:], State: beaconState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},

View File

@@ -6,7 +6,6 @@ import (
"context"
"fmt"
"net"
"net/http"
"sync"
"github.com/gorilla/mux"
@@ -15,6 +14,9 @@ import (
grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
@@ -32,24 +34,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/beacon"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/blob"
rpcBuilder "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/config"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/debug"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/events"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/light-client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/node"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/rewards"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/lookup"
beaconprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/beacon"
nodeprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/node"
beaconv1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/beacon"
debugv1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/debug"
nodev1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/node"
validatorv1alpha1 "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/validator"
validatorprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/validator"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
chainSync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
@@ -68,6 +58,24 @@ import (
const attestationBufferSize = 100
var (
httpRequestLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "http_request_latency_seconds",
Help: "Latency of HTTP requests in seconds",
Buckets: []float64{0.001, 0.01, 0.025, 0.1, 0.25, 1, 2.5, 10},
},
[]string{"endpoint", "code", "method"},
)
httpRequestCount = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "http_request_count",
Help: "Number of HTTP requests",
},
[]string{"endpoint", "code", "method"},
)
)
// Service defining an RPC server for a beacon node.
type Service struct {
cfg *Config
@@ -208,25 +216,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
BlobStorage: s.cfg.BlobStorage,
}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch}
s.initializeRewardServerRoutes(&rewards.Server{
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
BlockRewardFetcher: rewardFetcher,
})
s.initializeBuilderServerRoutes(&rpcBuilder.Server{
FinalizationFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
Stater: stater,
})
s.initializeBlobServerRoutes(&blob.Server{
Blocker: blocker,
})
coreService := &core.Service{
HeadFetcher: s.cfg.HeadFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
@@ -240,7 +229,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
FinalizedFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}
validatorServer := &validatorv1alpha1.Server{
Ctx: s.ctx,
AttPool: s.cfg.AttestationsPool,
@@ -280,26 +268,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
PayloadIDCache: s.cfg.PayloadIDCache,
}
s.validatorServer = validatorServer
s.initializeValidatorServerRoutes(&validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
AttestationsPool: s.cfg.AttestationsPool,
PeerManager: s.cfg.PeerManager,
Broadcaster: s.cfg.Broadcaster,
V1Alpha1Server: validatorServer,
Stater: stater,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
BeaconDB: s.cfg.BeaconDB,
BlockBuilder: s.cfg.BlockBuilder,
OperationNotifier: s.cfg.OperationNotifier,
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
PayloadIDCache: s.cfg.PayloadIDCache,
CoreService: coreService,
BlockRewardFetcher: rewardFetcher,
})
nodeServer := &nodev1alpha1.Server{
LogsStreamer: logs.NewStreamServer(),
StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
@@ -314,19 +282,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
}
s.initializeNodeServerRoutes(&node.Server{
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
})
beaconChainServer := &beaconv1alpha1.Server{
Ctx: s.ctx,
BeaconDB: s.cfg.BeaconDB,
@@ -352,52 +307,23 @@ func NewService(ctx context.Context, cfg *Config) *Service {
CoreService: coreService,
}
s.initializeBeaconServerRoutes(&beacon.Server{
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
StateGenService: s.cfg.StateGen,
Stater: stater,
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
VoluntaryExitsPool: s.cfg.ExitPool,
V1Alpha1ValidatorServer: validatorServer,
SyncChecker: s.cfg.SyncService,
ExecutionPayloadReconstructor: s.cfg.ExecutionPayloadReconstructor,
BLSChangesPool: s.cfg.BLSChangesPool,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
CoreService: coreService,
})
s.initializeConfigRoutes()
s.initializeEventsServerRoutes(&events.Server{
StateNotifier: s.cfg.StateNotifier,
OperationNotifier: s.cfg.OperationNotifier,
HeadFetcher: s.cfg.HeadFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
})
s.initializeLightClientServerRoutes(&lightclient.Server{
Blocker: blocker,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
})
endpoints := s.endpoints(s.cfg.EnableDebugRPCEndpoints, blocker, stater, rewardFetcher, validatorServer, coreService, ch)
for _, e := range endpoints {
s.cfg.Router.HandleFunc(
e.template,
promhttp.InstrumentHandlerDuration(
httpRequestLatency.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
promhttp.InstrumentHandlerCounter(
httpRequestCount.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
e.handler,
),
),
).Methods(e.methods...)
}
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
if s.cfg.EnableDebugRPCEndpoints {
log.Info("Enabled debug gRPC endpoints")
debugServer := &debugv1alpha1.Server{
@@ -409,47 +335,12 @@ func NewService(ctx context.Context, cfg *Config) *Service {
PeersFetcher: s.cfg.PeersFetcher,
ReplayerBuilder: ch,
}
s.initializeDebugServerRoutes(&debug.Server{
BeaconDB: s.cfg.BeaconDB,
HeadFetcher: s.cfg.HeadFetcher,
Stater: stater,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
ForkFetcher: s.cfg.ForkFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
})
ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
}
ethpbv1alpha1.RegisterBeaconNodeValidatorServer(s.grpcServer, validatorServer)
// Register reflection service on gRPC server.
reflection.Register(s.grpcServer)
s.initializePrysmBeaconServerRoutes(&beaconprysm.Server{
SyncChecker: s.cfg.SyncService,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
Stater: stater,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
})
s.initializePrysmNodeServerRoutes(&nodeprysm.Server{
BeaconDB: s.cfg.BeaconDB,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
})
s.initializePrysmValidatorServerRoutes(&validatorprysm.Server{CoreService: coreService})
return s
}
@@ -457,130 +348,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
var _ stategen.CanonicalChecker = blockchain.ChainInfoFetcher(nil)
var _ stategen.CurrentSlotter = blockchain.ChainInfoFetcher(nil)
func (s *Service) initializeRewardServerRoutes(rewardsServer *rewards.Server) {
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", rewardsServer.BlockRewards).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/attestations/{epoch}", rewardsServer.AttestationRewards).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/sync_committee/{block_id}", rewardsServer.SyncCommitteeRewards).Methods(http.MethodPost)
}
func (s *Service) initializeBuilderServerRoutes(builderServer *rpcBuilder.Server) {
s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals).Methods(http.MethodGet)
}
func (s *Service) initializeBlobServerRoutes(blobServer *blob.Server) {
s.cfg.Router.HandleFunc("/eth/v1/beacon/blob_sidecars/{block_id}", blobServer.Blobs).Methods(http.MethodGet)
}
func (s *Service) initializeValidatorServerRoutes(validatorServer *validator.Server) {
s.cfg.Router.HandleFunc("/eth/v1/validator/aggregate_attestation", validatorServer.GetAggregateAttestation).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/validator/contribution_and_proofs", validatorServer.SubmitContributionAndProofs).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/aggregate_and_proofs", validatorServer.SubmitAggregateAndProofs).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/sync_committee_contribution", validatorServer.ProduceSyncCommitteeContribution).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/validator/sync_committee_subscriptions", validatorServer.SubmitSyncCommitteeSubscription).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/beacon_committee_subscriptions", validatorServer.SubmitBeaconCommitteeSubscription).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/attestation_data", validatorServer.GetAttestationData).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/validator/register_validator", validatorServer.RegisterValidator).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/duties/attester/{epoch}", validatorServer.GetAttesterDuties).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/duties/proposer/{epoch}", validatorServer.GetProposerDuties).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/validator/duties/sync/{epoch}", validatorServer.GetSyncCommitteeDuties).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/prepare_beacon_proposer", validatorServer.PrepareBeaconProposer).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/liveness/{epoch}", validatorServer.GetLiveness).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v2/validator/blocks/{slot}", validatorServer.ProduceBlockV2).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/validator/blinded_blocks/{slot}", validatorServer.ProduceBlindedBlock).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v3/validator/blocks/{slot}", validatorServer.ProduceBlockV3).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/validator/beacon_committee_selections", validatorServer.BeaconCommitteeSelections).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/sync_committee_selections", validatorServer.SyncCommitteeSelections).Methods(http.MethodPost)
}
func (s *Service) initializeNodeServerRoutes(nodeServer *node.Server) {
s.cfg.Router.HandleFunc("/eth/v1/node/syncing", nodeServer.GetSyncStatus).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/node/identity", nodeServer.GetIdentity).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/node/peers/{peer_id}", nodeServer.GetPeer).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/node/peers", nodeServer.GetPeers).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/node/peer_count", nodeServer.GetPeerCount).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/node/version", nodeServer.GetVersion).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/node/health", nodeServer.GetHealth).Methods(http.MethodGet)
}
func (s *Service) initializeBeaconServerRoutes(beaconServer *beacon.Server) {
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/committees", beaconServer.GetCommittees).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/fork", beaconServer.GetStateFork).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/root", beaconServer.GetStateRoot).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/sync_committees", beaconServer.GetSyncCommittees).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/randao", beaconServer.GetRandao).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/blocks", beaconServer.PublishBlock).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/blinded_blocks", beaconServer.PublishBlindedBlock).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconServer.PublishBlockV2).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconServer.PublishBlindedBlockV2).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks/{block_id}", beaconServer.GetBlockV2).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/blocks/{block_id}/attestations", beaconServer.GetBlockAttestations).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/blinded_blocks/{block_id}", beaconServer.GetBlindedBlock).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/blocks/{block_id}/root", beaconServer.GetBlockRoot).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/deposit_snapshot", beaconServer.GetDepositSnapshot).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attestations", beaconServer.ListAttestations).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attestations", beaconServer.SubmitAttestations).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/voluntary_exits", beaconServer.ListVoluntaryExits).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/voluntary_exits", beaconServer.SubmitVoluntaryExit).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/sync_committees", beaconServer.SubmitSyncCommitteeSignatures).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/bls_to_execution_changes", beaconServer.ListBLSToExecutionChanges).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/bls_to_execution_changes", beaconServer.SubmitBLSToExecutionChanges).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attester_slashings", beaconServer.GetAttesterSlashings).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attester_slashings", beaconServer.SubmitAttesterSlashing).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/proposer_slashings", beaconServer.GetProposerSlashings).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/proposer_slashings", beaconServer.SubmitProposerSlashing).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/headers", beaconServer.GetBlockHeaders).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/headers/{block_id}", beaconServer.GetBlockHeader).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/genesis", beaconServer.GetGenesis).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/finality_checkpoints", beaconServer.GetFinalityCheckpoints).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validators", beaconServer.GetValidators).Methods(http.MethodGet, http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validators/{validator_id}", beaconServer.GetValidator).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validator_balances", beaconServer.GetValidatorBalances).Methods(http.MethodGet, http.MethodPost)
}
func (s *Service) initializeConfigRoutes() {
s.cfg.Router.HandleFunc("/eth/v1/config/deposit_contract", config.GetDepositContract).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/config/fork_schedule", config.GetForkSchedule).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/config/spec", config.GetSpec).Methods(http.MethodGet)
}
func (s *Service) initializeEventsServerRoutes(eventsServer *events.Server) {
s.cfg.Router.HandleFunc("/eth/v1/events", eventsServer.StreamEvents).Methods(http.MethodGet)
}
func (s *Service) initializeLightClientServerRoutes(lightClientServer *lightclient.Server) {
s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/bootstrap/{block_root}", lightClientServer.GetLightClientBootstrap).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/updates", lightClientServer.GetLightClientUpdatesByRange).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/finality_update", lightClientServer.GetLightClientFinalityUpdate).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/optimistic_update", lightClientServer.GetLightClientOptimisticUpdate).Methods(http.MethodGet)
}
func (s *Service) initializeDebugServerRoutes(debugServer *debug.Server) {
s.cfg.Router.HandleFunc("/eth/v2/debug/beacon/states/{state_id}", debugServer.GetBeaconStateV2).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v2/debug/beacon/heads", debugServer.GetForkChoiceHeadsV2).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/debug/fork_choice", debugServer.GetForkChoice).Methods(http.MethodGet)
}
// prysm internal routes
func (s *Service) initializePrysmBeaconServerRoutes(beaconServerPrysm *beaconprysm.Server) {
s.cfg.Router.HandleFunc("/prysm/v1/beacon/weak_subjectivity", beaconServerPrysm.GetWeakSubjectivity).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validator_count", beaconServerPrysm.GetValidatorCount).Methods(http.MethodGet) // TODO: deprecate in Swagger, remove in v6
s.cfg.Router.HandleFunc("/prysm/v1/beacon/states/{state_id}/validator_count", beaconServerPrysm.GetValidatorCount).Methods(http.MethodGet)
}
func (s *Service) initializePrysmNodeServerRoutes(nodeServerPrysm *nodeprysm.Server) {
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.ListTrustedPeer).Methods(http.MethodGet) // TODO: deprecate in Swagger, remove in v6
s.cfg.Router.HandleFunc("/prysm/v1/node/trusted_peers", nodeServerPrysm.ListTrustedPeer).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.AddTrustedPeer).Methods(http.MethodPost) // TODO: deprecate in Swagger, remove in v6
s.cfg.Router.HandleFunc("/prysm/v1/node/trusted_peers", nodeServerPrysm.AddTrustedPeer).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers/{peer_id}", nodeServerPrysm.RemoveTrustedPeer).Methods(http.MethodDelete) // TODO: deprecate in Swagger, remove in v6
s.cfg.Router.HandleFunc("/prysm/v1/node/trusted_peers/{peer_id}", nodeServerPrysm.RemoveTrustedPeer).Methods(http.MethodDelete)
}
func (s *Service) initializePrysmValidatorServerRoutes(validatorServerPrysm *validatorprysm.Server) {
s.cfg.Router.HandleFunc("/prysm/validator/performance", validatorServerPrysm.GetValidatorPerformance).Methods(http.MethodPost) // TODO: deprecate in Swagger, remove in v6
s.cfg.Router.HandleFunc("/prysm/v1/validator/performance", validatorServerPrysm.GetValidatorPerformance).Methods(http.MethodPost)
}
// Start the gRPC server.
func (s *Service) Start() {
grpcprometheus.EnableHandlingTimeHistogram()

View File

@@ -4,25 +4,12 @@ import (
"context"
"errors"
"io"
"net/http"
"testing"
"time"
"github.com/gorilla/mux"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/beacon"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/blob"
rpcBuilder "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/debug"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/events"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/light-client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/node"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/rewards"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator"
beaconprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/beacon"
nodeprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/node"
validatorprysm "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/validator"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
mockSync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -48,148 +35,6 @@ func combineMaps(maps ...map[string][]string) map[string][]string {
return combinedMap
}
func TestServer_InitializeRoutes(t *testing.T) {
s := Service{
cfg: &Config{
Router: mux.NewRouter(),
},
}
s.initializeRewardServerRoutes(&rewards.Server{})
s.initializeBuilderServerRoutes(&rpcBuilder.Server{})
s.initializeBlobServerRoutes(&blob.Server{})
s.initializeValidatorServerRoutes(&validator.Server{})
s.initializeNodeServerRoutes(&node.Server{})
s.initializeBeaconServerRoutes(&beacon.Server{})
s.initializeConfigRoutes()
s.initializeEventsServerRoutes(&events.Server{})
s.initializeLightClientServerRoutes(&lightclient.Server{})
s.initializeDebugServerRoutes(&debug.Server{})
//prysm internal
s.initializePrysmBeaconServerRoutes(&beaconprysm.Server{})
s.initializePrysmNodeServerRoutes(&nodeprysm.Server{})
s.initializePrysmValidatorServerRoutes(&validatorprysm.Server{})
beaconRoutes := map[string][]string{
"/eth/v1/beacon/genesis": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/root": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/fork": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/finality_checkpoints": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/validators": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/states/{state_id}/validators/{validator_id}": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/validator_balances": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/states/{state_id}/committees": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/sync_committees": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/randao": {http.MethodGet},
"/eth/v1/beacon/headers": {http.MethodGet},
"/eth/v1/beacon/headers/{block_id}": {http.MethodGet},
"/eth/v1/beacon/blinded_blocks": {http.MethodPost},
"/eth/v2/beacon/blinded_blocks": {http.MethodPost},
"/eth/v1/beacon/blocks": {http.MethodPost},
"/eth/v2/beacon/blocks": {http.MethodPost},
"/eth/v2/beacon/blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/blocks/{block_id}/root": {http.MethodGet},
"/eth/v1/beacon/blocks/{block_id}/attestations": {http.MethodGet},
"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
"/eth/v1/beacon/rewards/sync_committee/{block_id}": {http.MethodPost},
"/eth/v1/beacon/deposit_snapshot": {http.MethodGet},
"/eth/v1/beacon/rewards/blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/rewards/attestations/{epoch}": {http.MethodPost},
"/eth/v1/beacon/blinded_blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/light_client/bootstrap/{block_root}": {http.MethodGet},
"/eth/v1/beacon/light_client/updates": {http.MethodGet},
"/eth/v1/beacon/light_client/finality_update": {http.MethodGet},
"/eth/v1/beacon/light_client/optimistic_update": {http.MethodGet},
"/eth/v1/beacon/pool/attestations": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/attester_slashings": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/proposer_slashings": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/sync_committees": {http.MethodPost},
"/eth/v1/beacon/pool/voluntary_exits": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/bls_to_execution_changes": {http.MethodGet, http.MethodPost},
}
builderRoutes := map[string][]string{
"/eth/v1/builder/states/{state_id}/expected_withdrawals": {http.MethodGet},
}
configRoutes := map[string][]string{
"/eth/v1/config/fork_schedule": {http.MethodGet},
"/eth/v1/config/spec": {http.MethodGet},
"/eth/v1/config/deposit_contract": {http.MethodGet},
}
debugRoutes := map[string][]string{
"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
"/eth/v2/debug/beacon/heads": {http.MethodGet},
"/eth/v1/debug/fork_choice": {http.MethodGet},
}
eventsRoutes := map[string][]string{
"/eth/v1/events": {http.MethodGet},
}
nodeRoutes := map[string][]string{
"/eth/v1/node/identity": {http.MethodGet},
"/eth/v1/node/peers": {http.MethodGet},
"/eth/v1/node/peers/{peer_id}": {http.MethodGet},
"/eth/v1/node/peer_count": {http.MethodGet},
"/eth/v1/node/version": {http.MethodGet},
"/eth/v1/node/syncing": {http.MethodGet},
"/eth/v1/node/health": {http.MethodGet},
}
validatorRoutes := map[string][]string{
"/eth/v1/validator/duties/attester/{epoch}": {http.MethodPost},
"/eth/v1/validator/duties/proposer/{epoch}": {http.MethodGet},
"/eth/v1/validator/duties/sync/{epoch}": {http.MethodPost},
"/eth/v2/validator/blocks/{slot}": {http.MethodGet}, //deprecated
"/eth/v3/validator/blocks/{slot}": {http.MethodGet},
"/eth/v1/validator/blinded_blocks/{slot}": {http.MethodGet}, //deprecated
"/eth/v1/validator/attestation_data": {http.MethodGet},
"/eth/v1/validator/aggregate_attestation": {http.MethodGet},
"/eth/v1/validator/aggregate_and_proofs": {http.MethodPost},
"/eth/v1/validator/beacon_committee_subscriptions": {http.MethodPost},
"/eth/v1/validator/sync_committee_subscriptions": {http.MethodPost},
"/eth/v1/validator/beacon_committee_selections": {http.MethodPost},
"/eth/v1/validator/sync_committee_contribution": {http.MethodGet},
"/eth/v1/validator/sync_committee_selections": {http.MethodPost},
"/eth/v1/validator/contribution_and_proofs": {http.MethodPost},
"/eth/v1/validator/prepare_beacon_proposer": {http.MethodPost},
"/eth/v1/validator/register_validator": {http.MethodPost},
"/eth/v1/validator/liveness/{epoch}": {http.MethodPost},
}
prysmCustomRoutes := map[string][]string{
"/prysm/v1/beacon/weak_subjectivity": {http.MethodGet},
"/prysm/node/trusted_peers": {http.MethodGet, http.MethodPost},
"/prysm/v1/node/trusted_peers": {http.MethodGet, http.MethodPost},
"/prysm/node/trusted_peers/{peer_id}": {http.MethodDelete},
"/prysm/v1/node/trusted_peers/{peer_id}": {http.MethodDelete},
"/prysm/validator/performance": {http.MethodPost},
"/prysm/v1/validator/performance": {http.MethodPost},
"/eth/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
"/prysm/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
}
wantRouteList := combineMaps(beaconRoutes, builderRoutes, configRoutes, debugRoutes, eventsRoutes, nodeRoutes, validatorRoutes, prysmCustomRoutes)
gotRouteList := make(map[string][]string)
err := s.cfg.Router.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
tpl, err1 := route.GetPathTemplate()
require.NoError(t, err1)
met, err2 := route.GetMethods()
require.NoError(t, err2)
methods, ok := gotRouteList[tpl]
if !ok {
gotRouteList[tpl] = met
} else {
gotRouteList[tpl] = append(methods, met...)
}
return nil
})
require.NoError(t, err)
require.DeepEqual(t, wantRouteList, gotRouteList)
}
func TestLifecycle_OK(t *testing.T) {
hook := logTest.NewGlobal()
chainService := &mock.ChainService{

View File

@@ -19,7 +19,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
base := &ethpb.BeaconStateAltair{
Slot: 2,
Slot: 66,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
@@ -35,8 +35,8 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
active, previous, current, err := state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, uint64(0), current)
require.Equal(t, uint64(0), previous)
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, current)
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, previous)
// Add some votes in the last two epochs:
base.CurrentEpochParticipation[0] = 0xFF
@@ -57,8 +57,8 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive-params.BeaconConfig().MaxEffectiveBalance, active)
require.Equal(t, uint64(0), current)
require.Equal(t, allActive, active)
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, current)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, previous)
}
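A note on the slot change above (a worked bit of arithmetic, assuming the mainnet SlotsPerEpoch of 32): slot 66 falls in epoch 66 / 32 = 2, so the state has a distinct previous epoch (epoch 1) whose participation can be tallied, whereas the old value of slot 2 sits in epoch 0, where there is no prior epoch to attribute previous-epoch target votes to.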

View File

@@ -27,25 +27,44 @@ func UnrealizedCheckpointBalances(cp, pp []byte, validators ValReader, currentEp
if err != nil {
return 0, 0, 0, err
}
active := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
if active && !v.Slashed {
activeCurrent := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
if activeCurrent {
activeBalance, err = math.Add64(activeBalance, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
if ((cp[i] >> targetIdx) & 1) == 1 {
currentTarget, err = math.Add64(currentTarget, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
}
if v.Slashed {
continue
}
if activeCurrent && ((cp[i]>>targetIdx)&1) == 1 {
currentTarget, err = math.Add64(currentTarget, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
if ((pp[i] >> targetIdx) & 1) == 1 {
prevTarget, err = math.Add64(prevTarget, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
}
activePrevious := v.ActivationEpoch < currentEpoch && currentEpoch <= v.ExitEpoch
if activePrevious && ((pp[i]>>targetIdx)&1) == 1 {
prevTarget, err = math.Add64(prevTarget, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
}
}
activeBalance, prevTarget, currentTarget = ensureLowerBound(activeBalance, prevTarget, currentTarget)
return activeBalance, prevTarget, currentTarget, nil
}
func ensureLowerBound(activeCurrEpoch, prevTargetAttested, currTargetAttested uint64) (uint64, uint64, uint64) {
ebi := params.BeaconConfig().EffectiveBalanceIncrement
if ebi > activeCurrEpoch {
activeCurrEpoch = ebi
}
if ebi > prevTargetAttested {
prevTargetAttested = ebi
}
if ebi > currTargetAttested {
currTargetAttested = ebi
}
return activeCurrEpoch, prevTargetAttested, currTargetAttested
}
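To make the clamping rule concrete, here is a minimal, self-contained sketch; it restates ensureLowerBound locally rather than importing the state package, and assumes only the mainnet config exposed by the prysm params package:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/config/params"
)

// ensureLowerBound restates the clamping from the diff above: none of the
// three balances may drop below one effective balance increment.
func ensureLowerBound(active, prevTarget, currTarget uint64) (uint64, uint64, uint64) {
	ebi := params.BeaconConfig().EffectiveBalanceIncrement
	if ebi > active {
		active = ebi
	}
	if ebi > prevTarget {
		prevTarget = ebi
	}
	if ebi > currTarget {
		currTarget = ebi
	}
	return active, prevTarget, currTarget
}

func main() {
	// With no target votes at all, the returned balances are clamped to the
	// increment (1,000,000,000 Gwei on mainnet) rather than zero, which is
	// what the updated tests in this change assert.
	a, p, c := ensureLowerBound(0, 0, 0)
	fmt.Println(a, p, c)
}

This mirrors get_total_balance in the consensus spec, which floors sums at EFFECTIVE_BALANCE_INCREMENT so that downstream divisions can never be by zero.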

View File

@@ -29,8 +29,8 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 0)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
})
t.Run("bad votes in last two epochs", func(tt *testing.T) {
@@ -39,8 +39,8 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
})
t.Run("two votes in last epoch", func(tt *testing.T) {
@@ -50,7 +50,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(tt, uint64(0), previous)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
})
t.Run("two votes in previous epoch", func(tt *testing.T) {
@@ -59,7 +59,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
@@ -79,7 +79,6 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators[1].Slashed = true
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
@@ -119,8 +118,8 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 0)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
})
t.Run("bad votes in last two epochs", func(tt *testing.T) {
@@ -129,8 +128,8 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
})
t.Run("two votes in last epoch", func(tt *testing.T) {
@@ -140,7 +139,7 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(tt, uint64(0), previous)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
})
t.Run("two votes in previous epoch", func(tt *testing.T) {
@@ -149,7 +148,7 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
@@ -169,7 +168,6 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
validators[1].Slashed = true
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)

View File

@@ -108,6 +108,7 @@ go_library(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/equality:go_default_library",
"//io/file:go_default_library",
"//monitoring/tracing:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -243,13 +243,13 @@ func (s *Service) logBatchSyncStatus(genesis time.Time, firstBlk blocks.ROBlock,
firstRoot := firstBlk.Root()
timeRemaining := time.Duration(float64(slots.Since(genesis)-firstBlk.Block().Slot())/rate) * time.Second
log.WithFields(logrus.Fields{
"peers": len(s.cfg.P2P.Peers().Connected()),
"blocksPerSecond": fmt.Sprintf("%.1f", rate),
}).Infof(
"Processing block batch of size %d starting from %s %d/%d - estimated time remaining %s",
nBlocks, fmt.Sprintf("0x%s...", hex.EncodeToString(firstRoot[:])[:8]),
firstBlk.Block().Slot(), slots.Since(genesis), timeRemaining,
)
"peers": len(s.cfg.P2P.Peers().Connected()),
"blocksPerSecond": fmt.Sprintf("%.1f", rate),
"batchSize": nBlocks,
"startingFrom": fmt.Sprintf("0x%s...", hex.EncodeToString(firstRoot[:])[:8]),
"latestProcessedSlot/currentSlot": fmt.Sprintf("%d/%d", firstBlk.Block().Slot(), slots.Since(genesis)),
"estimatedTimeRemaining": timeRemaining,
}).Info("Processing blocks")
}
// processBlock performs basic checks on incoming block, and triggers receiver function.

View File

@@ -194,7 +194,7 @@ func (s *Service) Start() {
}
panic(err)
}
log.Infof("Synced up to slot %d", s.cfg.Chain.HeadSlot())
log.WithField("slot", s.cfg.Chain.HeadSlot()).Info("Synced up to")
s.markSynced()
}

View File

@@ -248,7 +248,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
}
go messageLoop()
log.WithField("topic", topic).Info("Subscribed to topic")
log.WithField("topic", topic).Info("Subscribed to")
return sub
}
@@ -286,6 +286,11 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
return pubsub.ValidationIgnore
}
b, err := v(ctx, pid, msg)
// We do not penalize peers if we are hitting pubsub timeouts
// trying to process those messages.
if b == pubsub.ValidationReject && ctx.Err() != nil {
b = pubsub.ValidationIgnore
}
if b == pubsub.ValidationReject {
fields := logrus.Fields{
"topic": topic,
@@ -687,7 +692,7 @@ func (s *Service) retrievePersistentSubs(currSlot primitives.Slot) []uint64 {
return slice.SetUint64(append(persistentSubs, wantedSubs...))
}
func (_ *Service) retrieveActiveSyncSubnets(currEpoch primitives.Epoch) []uint64 {
func (*Service) retrieveActiveSyncSubnets(currEpoch primitives.Epoch) []uint64 {
subs := cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
return slice.SetUint64(subs)
}
@@ -742,17 +747,17 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
}
// Add fork digest to topic.
func (_ *Service) addDigestToTopic(topic string, digest [4]byte) string {
func (*Service) addDigestToTopic(topic string, digest [4]byte) string {
if !strings.Contains(topic, "%x") {
log.Fatal("Topic does not have appropriate formatter for digest")
log.Error("Topic does not have appropriate formatter for digest")
}
return fmt.Sprintf(topic, digest)
}
// Add the digest and index to subnet topic.
func (_ *Service) addDigestAndIndexToTopic(topic string, digest [4]byte, idx uint64) string {
func (*Service) addDigestAndIndexToTopic(topic string, digest [4]byte, idx uint64) string {
if !strings.Contains(topic, "%x") {
log.Fatal("Topic does not have appropriate formatter for digest")
log.Error("Topic does not have appropriate formatter for digest")
}
return fmt.Sprintf(topic, digest, idx)
}

View File

@@ -2,10 +2,16 @@ package sync
import (
"context"
"fmt"
"os"
"path"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/io/file"
"google.golang.org/protobuf/proto"
)
@@ -33,7 +39,10 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
if r != [32]byte{} {
s.setBadBlock(ctx, r) // Setting head block as bad.
} else {
// TODO(13721): Remove this once we can deprecate the flag.
interop.WriteBlockToDisk(signed, true /*failed*/)
saveInvalidBlockToTemp(signed)
s.setBadBlock(ctx, root)
}
}
@@ -45,3 +54,21 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
}
return err
}
// saveInvalidBlockToTemp writes the invalid block to the temp directory as an SSZ-encoded file.
func saveInvalidBlockToTemp(block interfaces.ReadOnlySignedBeaconBlock) {
if !features.Get().SaveInvalidBlock {
return
}
filename := fmt.Sprintf("beacon_block_%d.ssz", block.Block().Slot())
fp := path.Join(os.TempDir(), filename)
log.Warnf("Writing invalid block to disk at %s", fp)
enc, err := block.MarshalSSZ()
if err != nil {
log.WithError(err).Error("Failed to ssz encode block")
return
}
if err := file.WriteFile(fp, enc); err != nil {
log.WithError(err).Error("Failed to write to disk")
}
}

View File

@@ -45,6 +45,7 @@ go_test(
"blob_test.go",
"cache_test.go",
"initializer_test.go",
"result_test.go",
],
embed = [":go_default_library"],
deps = [

View File

@@ -29,9 +29,7 @@ const (
RequireSidecarProposerExpected
)
// GossipSidecarRequirements defines the set of requirements that BlobSidecars received on gossip
// must satisfy in order to upgrade an ROBlob to a VerifiedROBlob.
var GossipSidecarRequirements = []Requirement{
var allSidecarRequirements = []Requirement{
RequireBlobIndexInBounds,
RequireNotFromFutureSlot,
RequireSlotAboveFinalized,
@@ -45,26 +43,32 @@ var GossipSidecarRequirements = []Requirement{
RequireSidecarProposerExpected,
}
// GossipSidecarRequirements defines the set of requirements that BlobSidecars received on gossip
// must satisfy in order to upgrade an ROBlob to a VerifiedROBlob.
var GossipSidecarRequirements = requirementList(allSidecarRequirements).excluding()
// SpectestSidecarRequirements is used by the forkchoice spectests when verifying blobs used in the on_block tests.
// The only requirements we exclude for these tests are the parent validity and seen tests, as these are specific to
// gossip processing and require the bad block cache that we only use there.
var SpectestSidecarRequirements = requirementList(GossipSidecarRequirements).excluding(
RequireSidecarParentSeen, RequireSidecarParentValid)
// InitsyncSidecarRequirements is the list of verification requirements to be used by the init-sync service
// for batch-mode syncing. Because we only perform batch verification as part of the IsDataAvailable method
// for blobs after the block has been verified, and the blobs to be verified are keyed in the cache by the
// block root, it is safe to skip the following verifications.
// RequireSidecarProposerExpected
// RequireNotFromFutureSlot,
// RequireSlotAboveFinalized,
// RequireSidecarParentSeen,
// RequireSidecarParentValid,
// RequireSidecarParentSlotLower,
// RequireSidecarDescendsFromFinalized,
var InitsyncSidecarRequirements = []Requirement{
RequireValidProposerSignature,
RequireSidecarKzgProofVerified,
RequireBlobIndexInBounds,
RequireSidecarInclusionProven,
}
// block root, the list of required verifications is much shorter than the gossip list.
var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).excluding(
RequireNotFromFutureSlot,
RequireSlotAboveFinalized,
RequireSidecarParentSeen,
RequireSidecarParentValid,
RequireSidecarParentSlotLower,
RequireSidecarDescendsFromFinalized,
RequireSidecarProposerExpected,
)
// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements
var BackfillSidecarRequirements = InitsyncSidecarRequirements
// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements.
var BackfillSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
var (
ErrBlobInvalid = errors.New("blob failed verification")
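Purely as an illustration of the derivation style introduced here (the excluding helper is defined later in this change, and localRequirements is a hypothetical name that would have to live in the verification package):

// Hypothetical: derive a relaxed list from the gossip set instead of
// re-enumerating every Requirement by hand.
var localRequirements = requirementList(GossipSidecarRequirements).excluding(
	RequireSidecarProposerExpected,
)

Deriving each exported list from allSidecarRequirements this way keeps the lists in sync when a new Requirement is added, which appears to be what the sanity-check test at the end of this change guards.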

View File

@@ -61,13 +61,17 @@ func (d SignatureData) logFields() log.Fields {
}
}
func newSigCache(vr []byte, size int) *sigCache {
return &sigCache{Cache: lruwrpr.New(size), valRoot: vr}
func newSigCache(vr []byte, size int, gf forkLookup) *sigCache {
if gf == nil {
gf = forks.Fork
}
return &sigCache{Cache: lruwrpr.New(size), valRoot: vr, getFork: gf}
}
type sigCache struct {
*lru.Cache
valRoot []byte
getFork forkLookup
}
// VerifySignature verifies the given signature data against the key obtained via ValidatorAtIndexer.
@@ -81,7 +85,7 @@ func (c *sigCache) VerifySignature(sig SignatureData, v ValidatorAtIndexer) (err
}
}()
e := slots.ToEpoch(sig.Slot)
fork, err := forks.Fork(e)
fork, err := c.getFork(e)
if err != nil {
return err
}

View File

@@ -27,7 +27,7 @@ func TestVerifySignature(t *testing.T) {
_, blobs, _, pk := testSignedBlockBlobKeys(t, valRoot[:], 0, 1)
b := blobs[0]
sc := newSigCache(valRoot[:], 1)
sc := newSigCache(valRoot[:], 1, nil)
cb := func(idx primitives.ValidatorIndex) (*eth.Validator, error) {
return &eth.Validator{PublicKey: pk.Marshal()}, nil
}
@@ -42,7 +42,7 @@ func TestSignatureCacheMissThenHit(t *testing.T) {
_, blobs, _, pk := testSignedBlockBlobKeys(t, valRoot[:], 0, 1)
b := blobs[0]
sc := newSigCache(valRoot[:], 1)
sc := newSigCache(valRoot[:], 1, nil)
cb := func(idx primitives.ValidatorIndex) (*eth.Validator, error) {
return &eth.Validator{PublicKey: pk.Marshal()}, nil
}

View File

@@ -13,11 +13,17 @@ type VerificationMultiError struct {
// Unwrap is used by errors.Is to unwrap errors.
func (ve VerificationMultiError) Unwrap() error {
if ve.err == nil {
return nil
}
return ve.err
}
// Error satisfies the standard error interface.
func (ve VerificationMultiError) Error() string {
if ve.err == nil {
return ""
}
return ve.err.Error()
}

View File

@@ -10,6 +10,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
// Forkchoicer represents the forkchoice methods that the verifiers need.
@@ -59,13 +61,25 @@ func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *RO
// via the WaitForInitializer method.
type InitializerWaiter struct {
sync.RWMutex
ready bool
cw startup.ClockWaiter
ini *Initializer
ready bool
cw startup.ClockWaiter
ini *Initializer
getFork forkLookup
}
type forkLookup func(targetEpoch primitives.Epoch) (*ethpb.Fork, error)
type InitializerOption func(waiter *InitializerWaiter)
// WithForkLookup allows tests to modify how Fork consensus type lookup works. Needed for spectests with weird Forks.
func WithForkLookup(fl forkLookup) InitializerOption {
return func(iw *InitializerWaiter) {
iw.getFork = fl
}
}
// NewInitializerWaiter creates an InitializerWaiter which can be used to obtain an Initializer once async dependencies are ready.
func NewInitializerWaiter(cw startup.ClockWaiter, fc Forkchoicer, sr StateByRooter) *InitializerWaiter {
func NewInitializerWaiter(cw startup.ClockWaiter, fc Forkchoicer, sr StateByRooter, opts ...InitializerOption) *InitializerWaiter {
pc := newPropCache()
// signature cache is initialized in WaitForInitializer, since we need the genesis validators root, which can be obtained from startup.Clock.
shared := &sharedResources{
@@ -73,7 +87,14 @@ func NewInitializerWaiter(cw startup.ClockWaiter, fc Forkchoicer, sr StateByRoot
pc: pc,
sr: sr,
}
return &InitializerWaiter{cw: cw, ini: &Initializer{shared: shared}}
iw := &InitializerWaiter{cw: cw, ini: &Initializer{shared: shared}}
for _, o := range opts {
o(iw)
}
if iw.getFork == nil {
iw.getFork = forks.Fork
}
return iw
}
// WaitForInitializer ensures that asynchronous initialization of the shared resources the initializer
@@ -84,7 +105,7 @@ func (w *InitializerWaiter) WaitForInitializer(ctx context.Context) (*Initialize
}
// We wait until this point to initialize the signature cache because here we have access to the genesis validator root.
vr := w.ini.shared.clock.GenesisValidatorsRoot()
sc := newSigCache(vr[:], DefaultSignatureCacheSize)
sc := newSigCache(vr[:], DefaultSignatureCacheSize, w.getFork)
w.ini.shared.sc = sc
return w.ini, nil
}

View File
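For orientation, here is a minimal sketch of how a caller can wire the new option (the forkchoice spectest Builder further below does essentially this). The names cw, fc, sg, st and ctx stand for an already-constructed startup.ClockSynchronizer, forkchoicer, state-by-rooter, beacon state and context; they are assumptions of the sketch, not identifiers taken from this change:

    // Pin fork lookup to the state's fork instead of the slot-based default (forks.Fork).
    getFork := func(_ primitives.Epoch) (*ethpb.Fork, error) {
        return st.Fork(), nil
    }
    iw := verification.NewInitializerWaiter(cw, fc, sg, verification.WithForkLookup(getFork))
    // WaitForInitializer blocks until the genesis clock is available; err must be checked before using ini.
    ini, err := iw.WaitForInitializer(ctx)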

@@ -3,6 +3,54 @@ package verification
// Requirement represents a validation check that needs to pass in order for a Verified form of a consensus type to be issued.
type Requirement int
var unknownRequirementName = "unknown"
func (r Requirement) String() string {
switch r {
case RequireBlobIndexInBounds:
return "RequireBlobIndexInBounds"
case RequireNotFromFutureSlot:
return "RequireNotFromFutureSlot"
case RequireSlotAboveFinalized:
return "RequireSlotAboveFinalized"
case RequireValidProposerSignature:
return "RequireValidProposerSignature"
case RequireSidecarParentSeen:
return "RequireSidecarParentSeen"
case RequireSidecarParentValid:
return "RequireSidecarParentValid"
case RequireSidecarParentSlotLower:
return "RequireSidecarParentSlotLower"
case RequireSidecarDescendsFromFinalized:
return "RequireSidecarDescendsFromFinalized"
case RequireSidecarInclusionProven:
return "RequireSidecarInclusionProven"
case RequireSidecarKzgProofVerified:
return "RequireSidecarKzgProofVerified"
case RequireSidecarProposerExpected:
return "RequireSidecarProposerExpected"
default:
return unknownRequirementName
}
}
type requirementList []Requirement
func (rl requirementList) excluding(minus ...Requirement) []Requirement {
rm := make(map[Requirement]struct{})
nl := make([]Requirement, 0, len(rl)-len(minus))
for i := range minus {
rm[minus[i]] = struct{}{}
}
for i := range rl {
if _, excluded := rm[rl[i]]; excluded {
continue
}
nl = append(nl, rl[i])
}
return nl
}
// results collects positive verification results.
// This bitmap can be used to test which verifications have been successfully completed in order to
// decide whether it is safe to issue a "Verified" type variant.

View File
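The concrete requirement lists exercised by the tests below (GossipSidecarRequirements, SpectestSidecarRequirements, InitsyncSidecarRequirements, BackfillSidecarRequirements, allSidecarRequirements) are defined outside this hunk. Purely as a hypothetical illustration of how excluding is intended to be used (not the actual definitions from this change), a derived list could be declared like so:

    // Hypothetical example: start from the full gossip list and drop the
    // checks that do not apply in the derived context.
    var exampleRequirements = requirementList(GossipSidecarRequirements).excluding(
        RequireNotFromFutureSlot,
        RequireSidecarParentSeen,
    )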

@@ -0,0 +1,63 @@
package verification
import (
"math"
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestResultList(t *testing.T) {
const (
a Requirement = iota
b
c
d
e
f
g
h
)
// leave out h to test excluding non-existent item
all := []Requirement{a, b, c, d, e, f, g}
alsoAll := requirementList(all).excluding()
require.DeepEqual(t, all, alsoAll)
missingFirst := requirementList(all).excluding(a)
require.Equal(t, len(all)-1, len(missingFirst))
require.DeepEqual(t, all[1:], missingFirst)
missingLast := requirementList(all).excluding(g)
require.Equal(t, len(all)-1, len(missingLast))
require.DeepEqual(t, all[0:len(all)-1], missingLast)
missingEnds := requirementList(missingLast).excluding(a)
require.Equal(t, len(missingLast)-1, len(missingEnds))
require.DeepEqual(t, all[1:len(all)-1], missingEnds)
excludeNonexist := requirementList(missingEnds).excluding(h)
require.Equal(t, len(missingEnds), len(excludeNonexist))
require.DeepEqual(t, missingEnds, excludeNonexist)
}
func TestExportedBlobSanityCheck(t *testing.T) {
// make sure all requirement lists contain the bare minimum checks
sanity := []Requirement{RequireValidProposerSignature, RequireSidecarKzgProofVerified, RequireBlobIndexInBounds, RequireSidecarInclusionProven}
reqs := [][]Requirement{GossipSidecarRequirements, SpectestSidecarRequirements, InitsyncSidecarRequirements, BackfillSidecarRequirements}
for i := range reqs {
r := reqs[i]
reqMap := make(map[Requirement]struct{})
for ii := range r {
reqMap[r[ii]] = struct{}{}
}
for ii := range sanity {
_, ok := reqMap[sanity[ii]]
require.Equal(t, true, ok)
}
}
require.DeepEqual(t, allSidecarRequirements, GossipSidecarRequirements)
}
func TestAllBlobRequirementsHaveStrings(t *testing.T) {
var derp Requirement = math.MaxInt
require.Equal(t, unknownRequirementName, derp.String())
for i := range allSidecarRequirements {
require.NotEqual(t, unknownRequirementName, allSidecarRequirements[i].String())
}
}

View File

@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["options.go"],
srcs = [
"log.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/execution",
visibility = [
"//beacon-chain:__subpackages__",

View File

@@ -0,0 +1,5 @@
package execution
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "execution")

View File

@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/io/file"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
@@ -53,7 +52,7 @@ func parseJWTSecretFromFile(c *cli.Context) ([]byte, error) {
return nil, err
}
strData := strings.TrimSpace(string(enc))
if len(strData) == 0 {
if strData == "" {
return nil, fmt.Errorf("provided JWT secret in file %s cannot be empty", jwtSecretFile)
}
secret, err := hex.DecodeString(strings.TrimPrefix(strData, "0x"))

View File
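For reference, the file read by parseJWTSecretFromFile is expected to contain a single hex-encoded secret, optionally prefixed with 0x; by engine API convention this is a 32-byte value. A purely hypothetical example of the file's entire contents:

    0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa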

@@ -72,6 +72,9 @@ type Flags struct {
PrepareAllPayloads bool // PrepareAllPayloads informs the engine to prepare a block on every slot.
// BlobSaveFsync requires blob saving to block on fsync to ensure blobs are durably persisted before passing DA.
BlobSaveFsync bool
SaveInvalidBlock bool // SaveInvalidBlock saves invalid blocks to a temp directory.
// KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have
// changed on disk. This feature is for advanced use cases only.
KeystoreImportDebounceInterval time.Duration
@@ -187,6 +190,11 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
cfg.WriteSSZStateTransitions = true
}
if ctx.Bool(saveInvalidBlockTempFlag.Name) {
logEnabled(saveInvalidBlockTempFlag)
cfg.SaveInvalidBlock = true
}
if ctx.IsSet(disableGRPCConnectionLogging.Name) {
logDisabled(disableGRPCConnectionLogging)
cfg.DisableGRPCConnectionLogs = true

View File

@@ -42,6 +42,10 @@ var (
Name: "interop-write-ssz-state-transitions",
Usage: "Writes SSZ states to disk after attempted state transition.",
}
saveInvalidBlockTempFlag = &cli.BoolFlag{
Name: "save-invalid-block-temp",
Usage: "Writes invalid blocks to temp directory.",
}
disableGRPCConnectionLogging = &cli.BoolFlag{
Name: "disable-grpc-connection-logging",
Usage: "Disables displaying logs for newly connected grpc clients.",
@@ -97,7 +101,7 @@ var (
}
EnableMinimalSlashingProtection = &cli.BoolFlag{
Name: "enable-minimal-slashing-protection",
Usage: "Enables the minimal slashing protection. See EIP-3076 for more details.",
Usage: "(Experimental): Enables the minimal slashing protection. See EIP-3076 for more details.",
}
enableDoppelGangerProtection = &cli.BoolFlag{
Name: "enable-doppelganger",
@@ -196,6 +200,7 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
devModeFlag,
enableExperimentalState,
writeSSZStateTransitionsFlag,
saveInvalidBlockTempFlag,
disableGRPCConnectionLogging,
HoleskyTestnet,
PraterTestnet,

View File

@@ -120,7 +120,6 @@ type BeaconChainConfig struct {
DomainApplicationMask [4]byte `yaml:"DOMAIN_APPLICATION_MASK" spec:"true"` // DomainApplicationMask defines the BLS signature domain for application mask.
DomainApplicationBuilder [4]byte `yaml:"DOMAIN_APPLICATION_BUILDER" spec:"true"` // DomainApplicationBuilder defines the BLS signature domain for application builder.
DomainBLSToExecutionChange [4]byte `yaml:"DOMAIN_BLS_TO_EXECUTION_CHANGE" spec:"true"` // DomainBLSToExecutionChange defines the BLS signature domain to change withdrawal addresses to ETH1 prefix
DomainBlobSidecar [4]byte `yaml:"DOMAIN_BLOB_SIDECAR" spec:"true"` // DomainBlobSidecar defines the BLS signature domain for blob sidecar.
// Prysm constants.
GweiPerEth uint64 // GweiPerEth is the amount of gwei corresponding to 1 eth.

View File

@@ -166,7 +166,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
DomainApplicationMask: bytesutil.Uint32ToBytes4(0x00000001),
DomainApplicationBuilder: bytesutil.Uint32ToBytes4(0x00000001),
DomainBLSToExecutionChange: bytesutil.Uint32ToBytes4(0x0A000000),
DomainBlobSidecar: bytesutil.Uint32ToBytes4(0x0B000000),
// Prysm constants.
GweiPerEth: 1000000000,

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -537,6 +537,9 @@ message DutiesResponse {
// Whether the validator belongs in the sync committee and has to perform sync committee duty.
bool is_sync_committee = 8;
// The number of committees in the duty's slot.
uint64 committees_at_slot = 9;
}
}

View File

@@ -8,6 +8,5 @@ import (
)
func TestMainnet_Deneb_Forkchoice(t *testing.T) {
t.Skip("This will fail until we re-integrate proof verification")
forkchoice.Run(t, "mainnet", version.Deneb)
}

View File

@@ -8,6 +8,5 @@ import (
)
func TestMinimal_Deneb_Forkchoice(t *testing.T) {
t.Skip("blocked by go-kzg-4844 minimal trusted setup")
forkchoice.Run(t, "minimal", version.Deneb)
}

View File

@@ -35,6 +35,11 @@ func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, st state
require.NoError(t, err)
_, bp, err = altair.ProcessEpochParticipation(ctx, st, bp, vp)
require.NoError(t, err)
activeBal, targetPrevious, targetCurrent, err := st.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)
st, err = precompute.ProcessJustificationAndFinalizationPreCompute(st, bp)
require.NoError(t, err, "Could not process justification")

View File

@@ -76,6 +76,13 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
require.NoError(t, err)
vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
require.NoError(t, err)
activeBal, targetPrevious, targetCurrent, err := preBeaconState.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)
deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
require.NoError(t, err)

View File

@@ -35,6 +35,11 @@ func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, st state
require.NoError(t, err)
_, bp, err = altair.ProcessEpochParticipation(ctx, st, bp, vp)
require.NoError(t, err)
activeBal, targetPrevious, targetCurrent, err := st.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)
st, err = precompute.ProcessJustificationAndFinalizationPreCompute(st, bp)
require.NoError(t, err, "Could not process justification")

View File

@@ -80,6 +80,13 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
require.NoError(t, err)
vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
require.NoError(t, err)
activeBal, targetPrevious, targetCurrent, err := preBeaconState.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)
deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
require.NoError(t, err)

View File

@@ -35,6 +35,11 @@ func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, st state
require.NoError(t, err)
_, bp, err = altair.ProcessEpochParticipation(ctx, st, bp, vp)
require.NoError(t, err)
activeBal, targetPrevious, targetCurrent, err := st.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)
st, err = precompute.ProcessJustificationAndFinalizationPreCompute(st, bp)
require.NoError(t, err, "Could not process justification")

View File

@@ -80,6 +80,13 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
require.NoError(t, err)
vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
require.NoError(t, err)
activeBal, targetPrevious, targetCurrent, err := preBeaconState.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)
deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
require.NoError(t, err)

View File

@@ -22,6 +22,7 @@ go_library(
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/startup:go_default_library",

View File

@@ -10,7 +10,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -23,16 +25,27 @@ type Builder struct {
service *blockchain.Service
lastTick int64
execMock *engineMock
vwait *verification.InitializerWaiter
}
func NewBuilder(t testing.TB, initialState state.BeaconState, initialBlock interfaces.ReadOnlySignedBeaconBlock) *Builder {
execMock := &engineMock{
powBlocks: make(map[[32]byte]*ethpb.PowBlock),
}
service := startChainService(t, initialState, initialBlock, execMock)
cw := startup.NewClockSynchronizer()
service, sg, fc := startChainService(t, initialState, initialBlock, execMock, cw)
// blob spectests use a weird Fork in the genesis beacon state that has different previous and current versions.
// This trips up the lite fork lookup code in the blob verifier that figures out the fork
// based on the slot of the block. So just for spectests we override that behavior and get the fork from the state,
// which matches the behavior of block verification.
getFork := func(targetEpoch primitives.Epoch) (*ethpb.Fork, error) {
return initialState.Fork(), nil
}
bvw := verification.NewInitializerWaiter(cw, fc, sg, verification.WithForkLookup(getFork))
return &Builder{
service: service,
execMock: execMock,
vwait: bvw,
}
}
@@ -88,7 +101,7 @@ func (bb *Builder) block(t testing.TB, b interfaces.ReadOnlySignedBeaconBlock) [
// InvalidBlock receives the invalid block and notifies forkchoice.
func (bb *Builder) InvalidBlock(t testing.TB, b interfaces.ReadOnlySignedBeaconBlock) {
r := bb.block(t, b)
ctx, cancel := context.WithTimeout(context.TODO(), time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
defer cancel()
require.Equal(t, true, bb.service.ReceiveBlock(ctx, b, r, nil) != nil)
}
@@ -96,7 +109,7 @@ func (bb *Builder) InvalidBlock(t testing.TB, b interfaces.ReadOnlySignedBeaconB
// ValidBlock receives the valid block and notifies forkchoice.
func (bb *Builder) ValidBlock(t testing.TB, b interfaces.ReadOnlySignedBeaconBlock) {
r := bb.block(t, b)
ctx, cancel := context.WithTimeout(context.TODO(), time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
defer cancel()
require.NoError(t, bb.service.ReceiveBlock(ctx, b, r, nil))
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"path"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -115,7 +116,7 @@ func runTest(t *testing.T, config string, fork int, basePath string) {
t.Fatalf("unknown fork version: %v", fork)
}
}
runBlobStep(t, step.Blobs, beaconBlock, fork, folder, testsFolderPath, step.Proofs, builder)
runBlobStep(t, step, beaconBlock, fork, folder, testsFolderPath, builder)
if beaconBlock != nil {
if step.Valid != nil && !*step.Valid {
builder.InvalidBlock(t, beaconBlock)
@@ -281,14 +282,15 @@ func unmarshalSignedDenebBlock(t *testing.T, raw []byte) interfaces.SignedBeacon
}
func runBlobStep(t *testing.T,
blobs *string,
step Step,
beaconBlock interfaces.ReadOnlySignedBeaconBlock,
fork int,
folder os.DirEntry,
testsFolderPath string,
proofs []*string,
builder *Builder,
) {
blobs := step.Blobs
proofs := step.Proofs
if blobs != nil && *blobs != "null" {
require.NotNil(t, beaconBlock)
require.Equal(t, true, fork >= version.Deneb)
@@ -305,44 +307,94 @@ func runBlobStep(t *testing.T,
require.NoError(t, err)
sh, err := beaconBlock.Header()
require.NoError(t, err)
for index := uint64(0); index*fieldparams.BlobLength < uint64(len(blobsSSZ)); index++ {
requireVerifyExpected := errAssertionForStep(step, verification.ErrBlobInvalid)
for index := 0; index*fieldparams.BlobLength < len(blobsSSZ); index++ {
var proof []byte
if index < uint64(len(proofs)) {
if index < len(proofs) {
proofPTR := proofs[index]
require.NotNil(t, proofPTR)
proof, err = hexutil.Decode(*proofPTR)
require.NoError(t, err)
}
var kzg []byte
if uint64(len(kzgs)) < index {
kzg = kzgs[index]
}
if len(kzg) == 0 {
kzg = make([]byte, 48)
}
blob := [fieldparams.BlobLength]byte{}
copy(blob[:], blobsSSZ[index*fieldparams.BlobLength:])
fakeProof := make([][]byte, fieldparams.KzgCommitmentInclusionProofDepth)
for i := range fakeProof {
fakeProof[i] = make([]byte, fieldparams.RootLength)
}
if len(proof) == 0 {
proof = make([]byte, 48)
}
inclusionProof, err := blocks.MerkleProofKZGCommitment(block.Body(), index)
require.NoError(t, err)
pb := &ethpb.BlobSidecar{
Index: index,
Index: uint64(index),
Blob: blob[:],
KzgCommitment: kzg,
KzgCommitment: kzgs[index],
KzgProof: proof,
SignedBlockHeader: sh,
CommitmentInclusionProof: fakeProof,
CommitmentInclusionProof: inclusionProof,
}
ro, err := blocks.NewROBlobWithRoot(pb, root)
require.NoError(t, err)
vsc, err := verification.BlobSidecarNoop(ro)
ini, err := builder.vwait.WaitForInitializer(context.Background())
require.NoError(t, err)
require.NoError(t, builder.service.ReceiveBlob(context.Background(), vsc))
bv := ini.NewBlobVerifier(ro, verification.SpectestSidecarRequirements)
ctx := context.Background()
if err := bv.BlobIndexInBounds(); err != nil {
t.Logf("BlobIndexInBounds error: %s", err.Error())
}
if err := bv.NotFromFutureSlot(); err != nil {
t.Logf("NotFromFutureSlot error: %s", err.Error())
}
if err := bv.SlotAboveFinalized(); err != nil {
t.Logf("SlotAboveFinalized error: %s", err.Error())
}
if err := bv.SidecarInclusionProven(); err != nil {
t.Logf("SidecarInclusionProven error: %s", err.Error())
}
if err := bv.SidecarKzgProofVerified(); err != nil {
t.Logf("SidecarKzgProofVerified error: %s", err.Error())
}
if err := bv.ValidProposerSignature(ctx); err != nil {
t.Logf("ValidProposerSignature error: %s", err.Error())
}
if err := bv.SidecarParentSlotLower(); err != nil {
t.Logf("SidecarParentSlotLower error: %s", err.Error())
}
if err := bv.SidecarDescendsFromFinalized(); err != nil {
t.Logf("SidecarDescendsFromFinalized error: %s", err.Error())
}
if err := bv.SidecarProposerExpected(ctx); err != nil {
t.Logf("SidecarProposerExpected error: %s", err.Error())
}
vsc, err := bv.VerifiedROBlob()
requireVerifyExpected(t, err)
if err == nil {
require.NoError(t, builder.service.ReceiveBlob(context.Background(), vsc))
}
}
}
}
func errAssertionForStep(step Step, expect error) func(t *testing.T, err error) {
if !*step.Valid {
return func(t *testing.T, err error) {
require.ErrorIs(t, err, expect)
}
}
return func(t *testing.T, err error) {
if err != nil {
require.ErrorIs(t, err, verification.ErrBlobInvalid)
me, ok := err.(verification.VerificationMultiError)
require.Equal(t, true, ok)
fails := me.Failures()
// we haven't performed any verification, so all the results should be this type
fmsg := make([]string, 0, len(fails))
for k, v := range fails {
fmsg = append(fmsg, fmt.Sprintf("%s - %s", v.Error(), k.String()))
}
t.Fatal(strings.Join(fmsg, ";"))
}
}
}

View File

@@ -16,6 +16,7 @@ import (
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
@@ -34,7 +35,8 @@ func startChainService(t testing.TB,
st state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
engineMock *engineMock,
) *blockchain.Service {
clockSync *startup.ClockSynchronizer,
) (*blockchain.Service, *stategen.State, forkchoice.ForkChoicer) {
ctx := context.Background()
db := testDB.SetupDB(t)
require.NoError(t, db.SaveBlock(ctx, block))
@@ -58,28 +60,30 @@ func startChainService(t testing.TB,
require.NoError(t, err)
fc := doublylinkedtree.New()
sg := stategen.New(db, fc)
opts := append([]blockchain.Option{},
blockchain.WithExecutionEngineCaller(engineMock),
blockchain.WithFinalizedStateAtStartUp(st),
blockchain.WithDatabase(db),
blockchain.WithAttestationService(attPool),
blockchain.WithForkChoiceStore(fc),
blockchain.WithStateGen(stategen.New(db, fc)),
blockchain.WithStateGen(sg),
blockchain.WithStateNotifier(&mock.MockStateNotifier{}),
blockchain.WithAttestationPool(attestations.NewPool()),
blockchain.WithDepositCache(depositCache),
blockchain.WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
blockchain.WithPayloadIDCache(cache.NewPayloadIDCache()),
blockchain.WithClockSynchronizer(startup.NewClockSynchronizer()),
blockchain.WithClockSynchronizer(clockSync),
blockchain.WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
blockchain.WithSyncChecker(mock.MockChecker{}),
blockchain.WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
)
service, err := blockchain.NewService(context.Background(), opts...)
require.NoError(t, err)
// force start kzg context here until Deneb fork epoch is decided
require.NoError(t, kzg.Start())
require.NoError(t, service.StartFromSavedState(st))
return service
return service, sg, fc
}
type engineMock struct {

View File

@@ -26,7 +26,6 @@ type SingleMerkleProof struct {
}
func RunMerkleProofTests(t *testing.T, config, forkOrPhase string, unmarshaller ssz_static.Unmarshaller) {
t.Skip("testvectors are not available yet")
runSingleMerkleProofTests(t, config, forkOrPhase, unmarshaller)
}

View File

@@ -32,7 +32,11 @@ func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, st state
require.NoError(t, err)
_, bp, err = altair.ProcessEpochParticipation(ctx, st, bp, vp)
require.NoError(t, err)
activeBal, targetPrevious, targetCurrent, err := st.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)
st, err = precompute.ProcessJustificationAndFinalizationPreCompute(st, bp)
require.NoError(t, err, "Could not process justification")

View File

@@ -70,8 +70,16 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
vp, bp, err := altair.InitializePrecomputeValidators(ctx, preBeaconState)
require.NoError(t, err)
vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
require.NoError(t, err)
activeBal, targetPrevious, targetCurrent, err := preBeaconState.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)
deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
require.NoError(t, err)

View File

@@ -13,7 +13,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v5/testing/validator-mock",
visibility = ["//visibility:public"],
deps = [
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//validator/client/iface:go_default_library",

View File

@@ -13,7 +13,6 @@ import (
context "context"
reflect "reflect"
primitives "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
iface "github.com/prysmaticlabs/prysm/v5/validator/client/iface"
gomock "go.uber.org/mock/gomock"
@@ -402,7 +401,7 @@ func (mr *MockValidatorClientMockRecorder) SubmitValidatorRegistrations(arg0, ar
}
// SubscribeCommitteeSubnets mocks base method.
func (m *MockValidatorClient) SubscribeCommitteeSubnets(arg0 context.Context, arg1 *eth.CommitteeSubnetsSubscribeRequest, arg2 []primitives.ValidatorIndex) (*emptypb.Empty, error) {
func (m *MockValidatorClient) SubscribeCommitteeSubnets(arg0 context.Context, arg1 *eth.CommitteeSubnetsSubscribeRequest, arg2 []*eth.DutiesResponse_Duty) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SubscribeCommitteeSubnets", arg0, arg1, arg2)
ret0, _ := ret[0].(*emptypb.Empty)

View File

@@ -22,6 +22,7 @@ go_library(
"index.go",
"json_rest_handler.go",
"log.go",
"metrics.go",
"prepare_beacon_proposer.go",
"propose_attestation.go",
"propose_beacon_block.go",
@@ -60,6 +61,8 @@ go_library(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//ptypes/empty",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",

View File

@@ -7,7 +7,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
@@ -50,32 +49,38 @@ func NewBeaconApiValidatorClient(jsonRestHandler JsonRestHandler, opts ...Valida
}
func (c *beaconApiValidatorClient) GetDuties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.DutiesResponse, error) {
return c.getDuties(ctx, in)
return wrapInMetrics[*ethpb.DutiesResponse]("GetDuties", func() (*ethpb.DutiesResponse, error) {
return c.getDuties(ctx, in)
})
}
func (c *beaconApiValidatorClient) CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error) {
return c.checkDoppelGanger(ctx, in)
return wrapInMetrics[*ethpb.DoppelGangerResponse]("CheckDoppelGanger", func() (*ethpb.DoppelGangerResponse, error) {
return c.checkDoppelGanger(ctx, in)
})
}
func (c *beaconApiValidatorClient) DomainData(ctx context.Context, in *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
if len(in.Domain) != 4 {
return nil, errors.Errorf("invalid domain type: %s", hexutil.Encode(in.Domain))
}
domainType := bytesutil.ToBytes4(in.Domain)
return c.getDomainData(ctx, in.Epoch, domainType)
return wrapInMetrics[*ethpb.DomainResponse]("DomainData", func() (*ethpb.DomainResponse, error) {
return c.getDomainData(ctx, in.Epoch, domainType)
})
}
func (c *beaconApiValidatorClient) GetAttestationData(ctx context.Context, in *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
if in == nil {
return nil, errors.New("GetAttestationData received nil argument `in`")
}
return c.getAttestationData(ctx, in.Slot, in.CommitteeIndex)
return wrapInMetrics[*ethpb.AttestationData]("GetAttestationData", func() (*ethpb.AttestationData, error) {
return c.getAttestationData(ctx, in.Slot, in.CommitteeIndex)
})
}
func (c *beaconApiValidatorClient) GetBeaconBlock(ctx context.Context, in *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
return c.getBeaconBlock(ctx, in.Slot, in.RandaoReveal, in.Graffiti)
return wrapInMetrics[*ethpb.GenericBeaconBlock]("GetBeaconBlock", func() (*ethpb.GenericBeaconBlock, error) {
return c.getBeaconBlock(ctx, in.Slot, in.RandaoReveal, in.Graffiti)
})
}
func (c *beaconApiValidatorClient) GetFeeRecipientByPubKey(_ context.Context, _ *ethpb.FeeRecipientByPubKeyRequest) (*ethpb.FeeRecipientByPubKeyResponse, error) {
@@ -83,35 +88,51 @@ func (c *beaconApiValidatorClient) GetFeeRecipientByPubKey(_ context.Context, _
}
func (c *beaconApiValidatorClient) GetSyncCommitteeContribution(ctx context.Context, in *ethpb.SyncCommitteeContributionRequest) (*ethpb.SyncCommitteeContribution, error) {
return c.getSyncCommitteeContribution(ctx, in)
return wrapInMetrics[*ethpb.SyncCommitteeContribution]("GetSyncCommitteeContribution", func() (*ethpb.SyncCommitteeContribution, error) {
return c.getSyncCommitteeContribution(ctx, in)
})
}
func (c *beaconApiValidatorClient) GetSyncMessageBlockRoot(ctx context.Context, _ *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error) {
return c.getSyncMessageBlockRoot(ctx)
return wrapInMetrics[*ethpb.SyncMessageBlockRootResponse]("GetSyncMessageBlockRoot", func() (*ethpb.SyncMessageBlockRootResponse, error) {
return c.getSyncMessageBlockRoot(ctx)
})
}
func (c *beaconApiValidatorClient) GetSyncSubcommitteeIndex(ctx context.Context, in *ethpb.SyncSubcommitteeIndexRequest) (*ethpb.SyncSubcommitteeIndexResponse, error) {
return c.getSyncSubcommitteeIndex(ctx, in)
return wrapInMetrics[*ethpb.SyncSubcommitteeIndexResponse]("GetSyncSubcommitteeIndex", func() (*ethpb.SyncSubcommitteeIndexResponse, error) {
return c.getSyncSubcommitteeIndex(ctx, in)
})
}
func (c *beaconApiValidatorClient) MultipleValidatorStatus(ctx context.Context, in *ethpb.MultipleValidatorStatusRequest) (*ethpb.MultipleValidatorStatusResponse, error) {
return c.multipleValidatorStatus(ctx, in)
return wrapInMetrics[*ethpb.MultipleValidatorStatusResponse]("MultipleValidatorStatus", func() (*ethpb.MultipleValidatorStatusResponse, error) {
return c.multipleValidatorStatus(ctx, in)
})
}
func (c *beaconApiValidatorClient) PrepareBeaconProposer(ctx context.Context, in *ethpb.PrepareBeaconProposerRequest) (*empty.Empty, error) {
return new(empty.Empty), c.prepareBeaconProposer(ctx, in.Recipients)
return wrapInMetrics[*empty.Empty]("PrepareBeaconProposer", func() (*empty.Empty, error) {
return new(empty.Empty), c.prepareBeaconProposer(ctx, in.Recipients)
})
}
func (c *beaconApiValidatorClient) ProposeAttestation(ctx context.Context, in *ethpb.Attestation) (*ethpb.AttestResponse, error) {
return c.proposeAttestation(ctx, in)
return wrapInMetrics[*ethpb.AttestResponse]("ProposeAttestation", func() (*ethpb.AttestResponse, error) {
return c.proposeAttestation(ctx, in)
})
}
func (c *beaconApiValidatorClient) ProposeBeaconBlock(ctx context.Context, in *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
return c.proposeBeaconBlock(ctx, in)
return wrapInMetrics[*ethpb.ProposeResponse]("ProposeBeaconBlock", func() (*ethpb.ProposeResponse, error) {
return c.proposeBeaconBlock(ctx, in)
})
}
func (c *beaconApiValidatorClient) ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error) {
return c.proposeExit(ctx, in)
return wrapInMetrics[*ethpb.ProposeExitResponse]("ProposeExit", func() (*ethpb.ProposeExitResponse, error) {
return c.proposeExit(ctx, in)
})
}
func (c *beaconApiValidatorClient) StreamSlots(ctx context.Context, in *ethpb.StreamSlotsRequest) (ethpb.BeaconNodeValidator_StreamSlotsClient, error) {
@@ -123,31 +144,45 @@ func (c *beaconApiValidatorClient) StreamBlocksAltair(ctx context.Context, in *e
}
func (c *beaconApiValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error) {
return c.submitAggregateSelectionProof(ctx, in)
return wrapInMetrics[*ethpb.AggregateSelectionResponse]("SubmitAggregateSelectionProof", func() (*ethpb.AggregateSelectionResponse, error) {
return c.submitAggregateSelectionProof(ctx, in)
})
}
func (c *beaconApiValidatorClient) SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
return c.submitSignedAggregateSelectionProof(ctx, in)
return wrapInMetrics[*ethpb.SignedAggregateSubmitResponse]("SubmitSignedAggregateSelectionProof", func() (*ethpb.SignedAggregateSubmitResponse, error) {
return c.submitSignedAggregateSelectionProof(ctx, in)
})
}
func (c *beaconApiValidatorClient) SubmitSignedContributionAndProof(ctx context.Context, in *ethpb.SignedContributionAndProof) (*empty.Empty, error) {
return new(empty.Empty), c.submitSignedContributionAndProof(ctx, in)
return wrapInMetrics[*empty.Empty]("SubmitSignedContributionAndProof", func() (*empty.Empty, error) {
return new(empty.Empty), c.submitSignedContributionAndProof(ctx, in)
})
}
func (c *beaconApiValidatorClient) SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error) {
return new(empty.Empty), c.submitSyncMessage(ctx, in)
return wrapInMetrics[*empty.Empty]("SubmitSyncMessage", func() (*empty.Empty, error) {
return new(empty.Empty), c.submitSyncMessage(ctx, in)
})
}
func (c *beaconApiValidatorClient) SubmitValidatorRegistrations(ctx context.Context, in *ethpb.SignedValidatorRegistrationsV1) (*empty.Empty, error) {
return new(empty.Empty), c.submitValidatorRegistrations(ctx, in.Messages)
return wrapInMetrics[*empty.Empty]("SubmitValidatorRegistrations", func() (*empty.Empty, error) {
return new(empty.Empty), c.submitValidatorRegistrations(ctx, in.Messages)
})
}
func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex) (*empty.Empty, error) {
return new(empty.Empty), c.subscribeCommitteeSubnets(ctx, in, validatorIndices)
func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) (*empty.Empty, error) {
return wrapInMetrics[*empty.Empty]("SubscribeCommitteeSubnets", func() (*empty.Empty, error) {
return new(empty.Empty), c.subscribeCommitteeSubnets(ctx, in, duties)
})
}
func (c *beaconApiValidatorClient) ValidatorIndex(ctx context.Context, in *ethpb.ValidatorIndexRequest) (*ethpb.ValidatorIndexResponse, error) {
return c.validatorIndex(ctx, in)
return wrapInMetrics[*ethpb.ValidatorIndexResponse]("ValidatorIndex", func() (*ethpb.ValidatorIndexResponse, error) {
return c.validatorIndex(ctx, in)
})
}
func (c *beaconApiValidatorClient) ValidatorStatus(ctx context.Context, in *ethpb.ValidatorStatusRequest) (*ethpb.ValidatorStatusResponse, error) {
@@ -183,3 +218,15 @@ func (c *beaconApiValidatorClient) GetAggregatedSelections(ctx context.Context,
func (c *beaconApiValidatorClient) GetAggregatedSyncSelections(ctx context.Context, selections []iface.SyncCommitteeSelection) ([]iface.SyncCommitteeSelection, error) {
return c.getAggregatedSyncSelections(ctx, selections)
}
func wrapInMetrics[Resp any](action string, f func() (Resp, error)) (Resp, error) {
now := time.Now()
resp, err := f()
httpActionCount.WithLabelValues(action).Inc()
if err == nil {
httpActionLatency.WithLabelValues(action).Observe(time.Since(now).Seconds())
} else {
failedHTTPActionCount.WithLabelValues(action).Inc()
}
return resp, err
}

View File

@@ -54,12 +54,6 @@ func TestBeaconApiValidatorClient_GetAttestationDataValid(t *testing.T) {
assert.DeepEqual(t, expectedResp, resp)
}
func TestBeaconApiValidatorClient_GetAttestationDataNilInput(t *testing.T) {
validatorClient := beaconApiValidatorClient{}
_, err := validatorClient.GetAttestationData(context.Background(), nil)
assert.ErrorContains(t, "GetAttestationData received nil argument `in`", err)
}
func TestBeaconApiValidatorClient_GetAttestationDataError(t *testing.T) {
const slot = primitives.Slot(1)
const committeeIndex = primitives.CommitteeIndex(2)

View File

@@ -95,6 +95,14 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
if err != nil {
return nil, errors.Wrapf(err, "failed to get committees for epoch `%d`", epoch)
}
slotCommittees := make(map[string]uint64)
for _, c := range committees {
n, ok := slotCommittees[c.Slot]
if !ok {
n = 0
}
slotCommittees[c.Slot] = n + 1
}
// Mapping from a validator index to its attesting committee's index and slot
attesterDutiesMapping := make(map[primitives.ValidatorIndex]committeeIndexSlotPair)
@@ -195,14 +203,15 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
}
duties[index] = &ethpb.DutiesResponse_Duty{
Committee: committeeValidatorIndices,
CommitteeIndex: committeeIndex,
AttesterSlot: attesterSlot,
ProposerSlots: proposerDutySlots[validatorIndex],
PublicKey: pubkey,
Status: validatorStatus.Status,
ValidatorIndex: validatorIndex,
IsSyncCommittee: syncDutiesMapping[validatorIndex],
Committee: committeeValidatorIndices,
CommitteeIndex: committeeIndex,
AttesterSlot: attesterSlot,
ProposerSlots: proposerDutySlots[validatorIndex],
PublicKey: pubkey,
Status: validatorStatus.Status,
ValidatorIndex: validatorIndex,
IsSyncCommittee: syncDutiesMapping[validatorIndex],
CommitteesAtSlot: slotCommittees[strconv.FormatUint(uint64(attesterSlot), 10)],
}
}

View File

@@ -880,69 +880,75 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
validatorIndices[0],
validatorIndices[1],
},
CommitteeIndex: committeeIndices[0],
AttesterSlot: committeeSlots[0],
PublicKey: pubkeys[0],
Status: statuses[0],
ValidatorIndex: validatorIndices[0],
CommitteeIndex: committeeIndices[0],
AttesterSlot: committeeSlots[0],
PublicKey: pubkeys[0],
Status: statuses[0],
ValidatorIndex: validatorIndices[0],
CommitteesAtSlot: 1,
},
{
Committee: []primitives.ValidatorIndex{
validatorIndices[0],
validatorIndices[1],
},
CommitteeIndex: committeeIndices[0],
AttesterSlot: committeeSlots[0],
PublicKey: pubkeys[1],
Status: statuses[1],
ValidatorIndex: validatorIndices[1],
CommitteeIndex: committeeIndices[0],
AttesterSlot: committeeSlots[0],
PublicKey: pubkeys[1],
Status: statuses[1],
ValidatorIndex: validatorIndices[1],
CommitteesAtSlot: 1,
},
{
Committee: []primitives.ValidatorIndex{
validatorIndices[2],
validatorIndices[3],
},
CommitteeIndex: committeeIndices[1],
AttesterSlot: committeeSlots[1],
PublicKey: pubkeys[2],
Status: statuses[2],
ValidatorIndex: validatorIndices[2],
CommitteeIndex: committeeIndices[1],
AttesterSlot: committeeSlots[1],
PublicKey: pubkeys[2],
Status: statuses[2],
ValidatorIndex: validatorIndices[2],
CommitteesAtSlot: 1,
},
{
Committee: []primitives.ValidatorIndex{
validatorIndices[2],
validatorIndices[3],
},
CommitteeIndex: committeeIndices[1],
AttesterSlot: committeeSlots[1],
PublicKey: pubkeys[3],
Status: statuses[3],
ValidatorIndex: validatorIndices[3],
CommitteeIndex: committeeIndices[1],
AttesterSlot: committeeSlots[1],
PublicKey: pubkeys[3],
Status: statuses[3],
ValidatorIndex: validatorIndices[3],
CommitteesAtSlot: 1,
},
{
Committee: []primitives.ValidatorIndex{
validatorIndices[4],
validatorIndices[5],
},
CommitteeIndex: committeeIndices[2],
AttesterSlot: committeeSlots[2],
PublicKey: pubkeys[4],
Status: statuses[4],
ValidatorIndex: validatorIndices[4],
ProposerSlots: expectedProposerSlots1,
CommitteeIndex: committeeIndices[2],
AttesterSlot: committeeSlots[2],
PublicKey: pubkeys[4],
Status: statuses[4],
ValidatorIndex: validatorIndices[4],
ProposerSlots: expectedProposerSlots1,
CommitteesAtSlot: 1,
},
{
Committee: []primitives.ValidatorIndex{
validatorIndices[4],
validatorIndices[5],
},
CommitteeIndex: committeeIndices[2],
AttesterSlot: committeeSlots[2],
PublicKey: pubkeys[5],
Status: statuses[5],
ValidatorIndex: validatorIndices[5],
ProposerSlots: expectedProposerSlots2,
IsSyncCommittee: testCase.fetchSyncDuties,
CommitteeIndex: committeeIndices[2],
AttesterSlot: committeeSlots[2],
PublicKey: pubkeys[5],
Status: statuses[5],
ValidatorIndex: validatorIndices[5],
ProposerSlots: expectedProposerSlots2,
IsSyncCommittee: testCase.fetchSyncDuties,
CommitteesAtSlot: 1,
},
{
PublicKey: pubkeys[6],

View File

@@ -0,0 +1,34 @@
package beacon_api
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
httpActionLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "validator",
Name: "http_action_latency_seconds",
Help: "Latency of HTTP actions performed against the beacon node in seconds. This metric captures only actions that didn't result in an error.",
Buckets: []float64{0.001, 0.01, 0.025, 0.1, 0.25, 1, 2.5, 10},
},
[]string{"action"},
)
httpActionCount = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "http_action_count",
Help: "Number of all HTTP actions performed against the beacon node",
},
[]string{"action"},
)
failedHTTPActionCount = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "failed_http_action_count",
Help: "Number of failed HTTP actions performed against the beacon node",
},
[]string{"action"},
)
)

View File

@@ -8,63 +8,26 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func (c beaconApiValidatorClient) subscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex) error {
func (c beaconApiValidatorClient) subscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) error {
if in == nil {
return errors.New("committee subnets subscribe request is nil")
}
if len(in.CommitteeIds) != len(in.Slots) || len(in.CommitteeIds) != len(in.IsAggregator) || len(in.CommitteeIds) != len(validatorIndices) {
return errors.New("arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `validatorIndices` don't have the same length")
if len(in.CommitteeIds) != len(in.Slots) || len(in.CommitteeIds) != len(in.IsAggregator) || len(in.CommitteeIds) != len(duties) {
return errors.New("arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `duties` don't have the same length")
}
slotToCommitteesAtSlotMap := make(map[primitives.Slot]uint64)
jsonCommitteeSubscriptions := make([]*structs.BeaconCommitteeSubscription, len(in.CommitteeIds))
for index := range in.CommitteeIds {
subscribeSlot := in.Slots[index]
subscribeCommitteeId := in.CommitteeIds[index]
subscribeIsAggregator := in.IsAggregator[index]
subscribeValidatorIndex := validatorIndices[index]
committeesAtSlot, foundSlot := slotToCommitteesAtSlotMap[subscribeSlot]
if !foundSlot {
// Lazily fetch the committeesAtSlot from the beacon node if they are not already in the map
epoch := slots.ToEpoch(subscribeSlot)
duties, err := c.dutiesProvider.GetAttesterDuties(ctx, epoch, validatorIndices)
if err != nil {
return errors.Wrapf(err, "failed to get duties for epoch `%d`", epoch)
}
for _, duty := range duties {
dutySlot, err := strconv.ParseUint(duty.Slot, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse slot `%s`", duty.Slot)
}
committees, err := strconv.ParseUint(duty.CommitteesAtSlot, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse CommitteesAtSlot `%s`", duty.CommitteesAtSlot)
}
slotToCommitteesAtSlotMap[primitives.Slot(dutySlot)] = committees
}
// If the slot still isn't in the map, we either received bad data from the beacon node or the caller of this function gave us bad data
if committeesAtSlot, foundSlot = slotToCommitteesAtSlotMap[subscribeSlot]; !foundSlot {
return errors.Errorf("failed to get committees for slot `%d`", subscribeSlot)
}
}
jsonCommitteeSubscriptions[index] = &structs.BeaconCommitteeSubscription{
CommitteeIndex: strconv.FormatUint(uint64(subscribeCommitteeId), 10),
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot, 10),
Slot: strconv.FormatUint(uint64(subscribeSlot), 10),
IsAggregator: subscribeIsAggregator,
ValidatorIndex: strconv.FormatUint(uint64(subscribeValidatorIndex), 10),
CommitteeIndex: strconv.FormatUint(uint64(in.CommitteeIds[index]), 10),
CommitteesAtSlot: strconv.FormatUint(duties[index].CommitteesAtSlot, 10),
Slot: strconv.FormatUint(uint64(in.Slots[index]), 10),
IsAggregator: in.IsAggregator[index],
ValidatorIndex: strconv.FormatUint(uint64(duties[index].ValidatorIndex), 10),
}
}

View File

@@ -13,7 +13,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/prysmaticlabs/prysm/v5/validator/client/beacon-api/mock"
"go.uber.org/mock/gomock"
)
@@ -67,43 +66,8 @@ func TestSubscribeCommitteeSubnets_Valid(t *testing.T) {
}
}
// Even though we have 3 distinct slots, the first 2 ones are in the same epoch so we should only send 2 requests to the beacon node
dutiesProvider := mock.NewMockdutiesProvider(ctrl)
dutiesProvider.EXPECT().GetAttesterDuties(
ctx,
slots.ToEpoch(subscribeSlots[0]),
validatorIndices,
).Return(
[]*structs.AttesterDuty{
{
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[0], 10),
Slot: strconv.FormatUint(uint64(subscribeSlots[0]), 10),
},
{
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[1], 10),
Slot: strconv.FormatUint(uint64(subscribeSlots[1]), 10),
},
},
nil,
).Times(1)
dutiesProvider.EXPECT().GetAttesterDuties(
ctx,
slots.ToEpoch(subscribeSlots[2]),
validatorIndices,
).Return(
[]*structs.AttesterDuty{
{
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[2], 10),
Slot: strconv.FormatUint(uint64(subscribeSlots[2]), 10),
},
},
nil,
).Times(1)
validatorClient := &beaconApiValidatorClient{
jsonRestHandler: jsonRestHandler,
dutiesProvider: dutiesProvider,
}
err = validatorClient.subscribeCommitteeSubnets(
ctx,
@@ -112,21 +76,31 @@ func TestSubscribeCommitteeSubnets_Valid(t *testing.T) {
CommitteeIds: committeeIndices,
IsAggregator: isAggregator,
},
validatorIndices,
[]*ethpb.DutiesResponse_Duty{
{
ValidatorIndex: validatorIndices[0],
CommitteesAtSlot: committeesAtSlot[0],
},
{
ValidatorIndex: validatorIndices[1],
CommitteesAtSlot: committeesAtSlot[1],
},
{
ValidatorIndex: validatorIndices[2],
CommitteesAtSlot: committeesAtSlot[2],
},
},
)
require.NoError(t, err)
}
func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
const arraySizeMismatchErrorMessage = "arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `validatorIndices` don't have the same length"
const arraySizeMismatchErrorMessage = "arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `duties` don't have the same length"
testCases := []struct {
name string
subscribeRequest *ethpb.CommitteeSubnetsSubscribeRequest
validatorIndices []primitives.ValidatorIndex
attesterDuty *structs.AttesterDuty
dutiesError error
expectGetDutiesQuery bool
duties []*ethpb.DutiesResponse_Duty
expectSubscribeRestCall bool
expectedErrorMessage string
}{
@@ -142,7 +116,16 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
Slots: []primitives.Slot{1, 2},
IsAggregator: []bool{false, true},
},
validatorIndices: []primitives.ValidatorIndex{1, 2},
duties: []*ethpb.DutiesResponse_Duty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
{
ValidatorIndex: 2,
CommitteesAtSlot: 2,
},
},
expectedErrorMessage: arraySizeMismatchErrorMessage,
},
{
@@ -152,7 +135,16 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
Slots: []primitives.Slot{1},
IsAggregator: []bool{false, true},
},
validatorIndices: []primitives.ValidatorIndex{1, 2},
duties: []*ethpb.DutiesResponse_Duty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
{
ValidatorIndex: 2,
CommitteesAtSlot: 2,
},
},
expectedErrorMessage: arraySizeMismatchErrorMessage,
},
{
@@ -162,76 +154,33 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
Slots: []primitives.Slot{1, 2},
IsAggregator: []bool{false},
},
validatorIndices: []primitives.ValidatorIndex{1, 2},
duties: []*ethpb.DutiesResponse_Duty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
{
ValidatorIndex: 2,
CommitteesAtSlot: 2,
},
},
expectedErrorMessage: arraySizeMismatchErrorMessage,
},
{
name: "ValidatorIndices size mismatch",
name: "duties size mismatch",
subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
CommitteeIds: []primitives.CommitteeIndex{1, 2},
Slots: []primitives.Slot{1, 2},
IsAggregator: []bool{false, true},
},
validatorIndices: []primitives.ValidatorIndex{1},
duties: []*ethpb.DutiesResponse_Duty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
},
expectedErrorMessage: arraySizeMismatchErrorMessage,
},
{
name: "bad duties query",
subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
Slots: []primitives.Slot{1},
CommitteeIds: []primitives.CommitteeIndex{2},
IsAggregator: []bool{false},
},
validatorIndices: []primitives.ValidatorIndex{3},
dutiesError: errors.New("foo error"),
expectGetDutiesQuery: true,
expectedErrorMessage: "failed to get duties for epoch `0`: foo error",
},
{
name: "bad duty slot",
subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
Slots: []primitives.Slot{1},
CommitteeIds: []primitives.CommitteeIndex{2},
IsAggregator: []bool{false},
},
validatorIndices: []primitives.ValidatorIndex{3},
attesterDuty: &structs.AttesterDuty{
Slot: "foo",
CommitteesAtSlot: "1",
},
expectGetDutiesQuery: true,
expectedErrorMessage: "failed to parse slot `foo`",
},
{
name: "bad duty committees at slot",
subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
Slots: []primitives.Slot{1},
CommitteeIds: []primitives.CommitteeIndex{2},
IsAggregator: []bool{false},
},
validatorIndices: []primitives.ValidatorIndex{3},
attesterDuty: &structs.AttesterDuty{
Slot: "1",
CommitteesAtSlot: "foo",
},
expectGetDutiesQuery: true,
expectedErrorMessage: "failed to parse CommitteesAtSlot `foo`",
},
{
name: "missing slot in duties",
subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
Slots: []primitives.Slot{1},
CommitteeIds: []primitives.CommitteeIndex{2},
IsAggregator: []bool{false},
},
validatorIndices: []primitives.ValidatorIndex{3},
attesterDuty: &structs.AttesterDuty{
Slot: "2",
CommitteesAtSlot: "3",
},
expectGetDutiesQuery: true,
expectedErrorMessage: "failed to get committees for slot `1`",
},
{
name: "bad POST request",
subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
@@ -239,12 +188,12 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
CommitteeIds: []primitives.CommitteeIndex{2},
IsAggregator: []bool{false},
},
validatorIndices: []primitives.ValidatorIndex{3},
attesterDuty: &structs.AttesterDuty{
Slot: "1",
CommitteesAtSlot: "2",
duties: []*ethpb.DutiesResponse_Duty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
},
expectGetDutiesQuery: true,
expectSubscribeRestCall: true,
expectedErrorMessage: "foo error",
},
@@ -257,18 +206,6 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
ctx := context.Background()
dutiesProvider := mock.NewMockdutiesProvider(ctrl)
if testCase.expectGetDutiesQuery {
dutiesProvider.EXPECT().GetAttesterDuties(
ctx,
gomock.Any(),
gomock.Any(),
).Return(
[]*structs.AttesterDuty{testCase.attesterDuty},
testCase.dutiesError,
).Times(1)
}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
if testCase.expectSubscribeRestCall {
jsonRestHandler.EXPECT().Post(
@@ -284,9 +221,8 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
validatorClient := &beaconApiValidatorClient{
jsonRestHandler: jsonRestHandler,
dutiesProvider: dutiesProvider,
}
err := validatorClient.subscribeCommitteeSubnets(ctx, testCase.subscribeRequest, testCase.validatorIndices)
err := validatorClient.subscribeCommitteeSubnets(ctx, testCase.subscribeRequest, testCase.duties)
assert.ErrorContains(t, testCase.expectedErrorMessage, err)
})
}

View File

@@ -5,7 +5,6 @@ import (
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
"google.golang.org/grpc"
@@ -99,7 +98,7 @@ func (c *grpcValidatorClient) SubmitValidatorRegistrations(ctx context.Context,
return c.beaconNodeValidatorClient.SubmitValidatorRegistrations(ctx, in)
}
func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex) (*empty.Empty, error) {
func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.DutiesResponse_Duty) (*empty.Empty, error) {
return c.beaconNodeValidatorClient.SubscribeCommitteeSubnets(ctx, in)
}

View File

@@ -137,7 +137,7 @@ type ValidatorClient interface {
SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error)
SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error)
ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error)
SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex) (*empty.Empty, error)
SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) (*empty.Empty, error)
CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error)
GetSyncMessageBlockRoot(ctx context.Context, in *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error)
SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error)

View File

@@ -629,7 +629,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
subscribeSlots := make([]primitives.Slot, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
subscribeCommitteeIndices := make([]primitives.CommitteeIndex, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
subscribeIsAggregator := make([]bool, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
subscribeValidatorIndices := make([]primitives.ValidatorIndex, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
activeDuties := make([]*ethpb.DutiesResponse_Duty, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
alreadySubscribed := make(map[[64]byte]bool)
if v.distributed {
@@ -662,7 +662,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
subscribeSlots = append(subscribeSlots, attesterSlot)
subscribeCommitteeIndices = append(subscribeCommitteeIndices, committeeIndex)
subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
subscribeValidatorIndices = append(subscribeValidatorIndices, validatorIndex)
activeDuties = append(activeDuties, duty)
}
}
@@ -688,7 +688,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
subscribeSlots = append(subscribeSlots, attesterSlot)
subscribeCommitteeIndices = append(subscribeCommitteeIndices, committeeIndex)
subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
subscribeValidatorIndices = append(subscribeValidatorIndices, validatorIndex)
activeDuties = append(activeDuties, duty)
}
}
@@ -698,7 +698,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
CommitteeIds: subscribeCommitteeIndices,
IsAggregator: subscribeIsAggregator,
},
subscribeValidatorIndices,
activeDuties,
)
return err

View File

@@ -520,7 +520,7 @@ func TestUpdateDuties_OK(t *testing.T) {
gomock.Any(),
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex) (*emptypb.Empty, error) {
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.DutiesResponse_Duty) (*emptypb.Empty, error) {
wg.Done()
return nil, nil
})
@@ -568,7 +568,7 @@ func TestUpdateDuties_OK_FilterBlacklistedPublicKeys(t *testing.T) {
gomock.Any(),
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex) (*emptypb.Empty, error) {
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.DutiesResponse_Duty) (*emptypb.Empty, error) {
wg.Done()
return nil, nil
})
@@ -700,7 +700,7 @@ func TestUpdateDuties_Distributed(t *testing.T) {
gomock.Any(),
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex) (*emptypb.Empty, error) {
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.DutiesResponse_Duty) (*emptypb.Empty, error) {
wg.Done()
return nil, nil
})
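
The test updates only touch the callback's parameter types. A sketch of the adjusted gomock expectation; the client variable name is a placeholder for whatever mock the tests use:

	// Sketch: only the third parameter's type changes in the expectation.
	client.EXPECT().SubscribeCommitteeSubnets(
		gomock.Any(),
		gomock.Any(),
		gomock.Any(),
	).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) (*emptypb.Empty, error) {
		defer wg.Done()
		_ = duties // a stricter variant could assert on the forwarded duties here
		return nil, nil
	})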

View File

@@ -124,13 +124,18 @@ func ConvertDatabase(ctx context.Context, sourceDataDir string, targetDataDir st
// -----------------
// Get the proposer settings.
proposerSettings, err := sourceDatabase.ProposerSettings(ctx)
if err != nil {
return errors.Wrap(err, "could not get proposer settings from source database")
}
// Save the proposer settings.
if err := targetDatabase.SaveProposerSettings(ctx, proposerSettings); err != nil {
return errors.Wrap(err, "could not save proposer settings")
switch err {
case nil:
// Save the proposer settings.
if err := targetDatabase.SaveProposerSettings(ctx, proposerSettings); err != nil {
return errors.Wrap(err, "could not save proposer settings")
}
case kv.ErrNoProposerSettingsFound, filesystem.ErrNoProposerSettingsFound:
// Nothing to do.
default:
return errors.Wrap(err, "could not get proposer settings from source database")
}
// Attestations
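
The pattern here is worth noting: rather than treating every non-nil error as fatal, the conversion switches on the error value so that the two backend-specific "no proposer settings found" sentinels mean "nothing to migrate". A hedged alternative formulation of the same idea using errors.Is, which would also tolerate wrapped sentinels (this is a sketch, not the code in the PR):

	proposerSettings, err := sourceDatabase.ProposerSettings(ctx)
	switch {
	case err == nil:
		if err := targetDatabase.SaveProposerSettings(ctx, proposerSettings); err != nil {
			return errors.Wrap(err, "could not save proposer settings")
		}
	case errors.Is(err, kv.ErrNoProposerSettingsFound), errors.Is(err, filesystem.ErrNoProposerSettingsFound):
		// No proposer settings in the source database: nothing to migrate.
	default:
		return errors.Wrap(err, "could not get proposer settings from source database")
	}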

View File

@@ -49,217 +49,224 @@ func TestDB_ConvertDatabase(t *testing.T) {
defaultFeeRecipient := getFeeRecipientFromString(t, defaultFeeRecipientString)
customFeeRecipient := getFeeRecipientFromString(t, customFeeRecipientString)
for _, minimalToComplete := range []bool{false, true} {
t.Run(fmt.Sprintf("minimalToComplete=%v", minimalToComplete), func(t *testing.T) {
// Create signing root
signingRoot := [fieldparams.RootLength]byte{}
var signingRootBytes []byte
if minimalToComplete {
signingRootBytes = signingRoot[:]
}
for _, minimalToComplete := range [...]bool{false, true} {
for _, withProposerSettings := range [...]bool{false, true} {
t.Run(fmt.Sprintf("minimalToComplete=%v", minimalToComplete), func(t *testing.T) {
// Create signing root
signingRoot := [fieldparams.RootLength]byte{}
var signingRootBytes []byte
if minimalToComplete {
signingRootBytes = signingRoot[:]
}
// Create database directory path.
datadir := t.TempDir()
// Create database directory path.
datadir := t.TempDir()
// Run source DB preparation.
// --------------------------
// Create the source database.
var (
sourceDatabase, targetDatabase iface.ValidatorDB
err error
)
// Run source DB preparation.
// --------------------------
// Create the source database.
var (
sourceDatabase, targetDatabase iface.ValidatorDB
err error
)
if minimalToComplete {
sourceDatabase, err = filesystem.NewStore(datadir, &filesystem.Config{
PubKeys: [][fieldparams.BLSPubkeyLength]byte{pubkey1, pubkey2},
})
} else {
sourceDatabase, err = kv.NewKVStore(ctx, datadir, &kv.Config{
PubKeys: [][fieldparams.BLSPubkeyLength]byte{pubkey1, pubkey2},
})
}
if minimalToComplete {
sourceDatabase, err = filesystem.NewStore(datadir, &filesystem.Config{
PubKeys: [][fieldparams.BLSPubkeyLength]byte{pubkey1, pubkey2},
})
} else {
sourceDatabase, err = kv.NewKVStore(ctx, datadir, &kv.Config{
PubKeys: [][fieldparams.BLSPubkeyLength]byte{pubkey1, pubkey2},
})
}
require.NoError(t, err, "could not create source database")
require.NoError(t, err, "could not create source database")
// Save the genesis validator root.
expectedGenesisValidatorRoot := []byte("genesis-validator-root")
err = sourceDatabase.SaveGenesisValidatorsRoot(ctx, expectedGenesisValidatorRoot)
require.NoError(t, err, "could not save genesis validator root")
// Save the genesis validator root.
expectedGenesisValidatorRoot := []byte("genesis-validator-root")
err = sourceDatabase.SaveGenesisValidatorsRoot(ctx, expectedGenesisValidatorRoot)
require.NoError(t, err, "could not save genesis validator root")
// Save the graffiti file hash.
// (Getting the graffiti ordered index will set the graffiti file hash)
expectedGraffitiFileHash := [32]byte{1}
_, err = sourceDatabase.GraffitiOrderedIndex(ctx, expectedGraffitiFileHash)
require.NoError(t, err, "could not get graffiti ordered index")
// Save the graffiti file hash.
// (Getting the graffiti ordered index will set the graffiti file hash)
expectedGraffitiFileHash := [32]byte{1}
_, err = sourceDatabase.GraffitiOrderedIndex(ctx, expectedGraffitiFileHash)
require.NoError(t, err, "could not get graffiti ordered index")
// Save the graffiti ordered index.
expectedGraffitiOrderedIndex := uint64(1)
err = sourceDatabase.SaveGraffitiOrderedIndex(ctx, expectedGraffitiOrderedIndex)
require.NoError(t, err, "could not save graffiti ordered index")
// Save the graffiti ordered index.
expectedGraffitiOrderedIndex := uint64(1)
err = sourceDatabase.SaveGraffitiOrderedIndex(ctx, expectedGraffitiOrderedIndex)
require.NoError(t, err, "could not save graffiti ordered index")
// Save the proposer settings.
var relays []string = nil
// Save the proposer settings.
var relays []string = nil
expectedProposerSettings := &proposer.Settings{}
expectedProposerSettings := &proposer.Settings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*proposer.Option{
pubkey1: {
FeeRecipientConfig: &proposer.FeeRecipientConfig{
FeeRecipient: customFeeRecipient,
if withProposerSettings {
expectedProposerSettings = &proposer.Settings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*proposer.Option{
pubkey1: {
FeeRecipientConfig: &proposer.FeeRecipientConfig{
FeeRecipient: customFeeRecipient,
},
BuilderConfig: &proposer.BuilderConfig{
Enabled: true,
GasLimit: 42,
Relays: relays,
},
},
},
BuilderConfig: &proposer.BuilderConfig{
Enabled: true,
GasLimit: 42,
Relays: relays,
DefaultConfig: &proposer.Option{
FeeRecipientConfig: &proposer.FeeRecipientConfig{
FeeRecipient: defaultFeeRecipient,
},
BuilderConfig: &proposer.BuilderConfig{
Enabled: false,
GasLimit: 43,
Relays: relays,
},
},
}
err = sourceDatabase.SaveProposerSettings(ctx, expectedProposerSettings)
require.NoError(t, err, "could not save proposer settings")
}
// Save some attestations.
completeAttestations := []*ethpb.IndexedAttestation{
{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{
Epoch: 1,
},
Target: &ethpb.Checkpoint{
Epoch: 2,
},
},
},
},
DefaultConfig: &proposer.Option{
FeeRecipientConfig: &proposer.FeeRecipientConfig{
FeeRecipient: defaultFeeRecipient,
},
BuilderConfig: &proposer.BuilderConfig{
Enabled: false,
GasLimit: 43,
Relays: relays,
},
},
}
err = sourceDatabase.SaveProposerSettings(ctx, expectedProposerSettings)
require.NoError(t, err, "could not save proposer settings")
// Save some attestations.
completeAttestations := []*ethpb.IndexedAttestation{
{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{
Epoch: 1,
},
Target: &ethpb.Checkpoint{
Epoch: 2,
{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{
Epoch: 2,
},
Target: &ethpb.Checkpoint{
Epoch: 3,
},
},
},
},
{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{
Epoch: 2,
},
Target: &ethpb.Checkpoint{
Epoch: 3,
},
}
expectedAttestationRecords1 := []*common.AttestationRecord{
{
PubKey: pubkey1,
Source: primitives.Epoch(2),
Target: primitives.Epoch(3),
SigningRoot: signingRootBytes,
},
},
}
}
expectedAttestationRecords1 := []*common.AttestationRecord{
{
PubKey: pubkey1,
Source: primitives.Epoch(2),
Target: primitives.Epoch(3),
SigningRoot: signingRootBytes,
},
}
expectedAttestationRecords2 := []*common.AttestationRecord{
{
PubKey: pubkey2,
Source: primitives.Epoch(2),
Target: primitives.Epoch(3),
SigningRoot: signingRootBytes,
},
}
expectedAttestationRecords2 := []*common.AttestationRecord{
{
PubKey: pubkey2,
Source: primitives.Epoch(2),
Target: primitives.Epoch(3),
SigningRoot: signingRootBytes,
},
}
err = sourceDatabase.SaveAttestationsForPubKey(ctx, pubkey1, [][]byte{{1}, {2}}, completeAttestations)
require.NoError(t, err, "could not save attestations")
err = sourceDatabase.SaveAttestationsForPubKey(ctx, pubkey1, [][]byte{{1}, {2}}, completeAttestations)
require.NoError(t, err, "could not save attestations")
err = sourceDatabase.SaveAttestationsForPubKey(ctx, pubkey2, [][]byte{{1}, {2}}, completeAttestations)
require.NoError(t, err, "could not save attestations")
err = sourceDatabase.SaveAttestationsForPubKey(ctx, pubkey2, [][]byte{{1}, {2}}, completeAttestations)
require.NoError(t, err, "could not save attestations")
// Save some block proposals.
err = sourceDatabase.SaveProposalHistoryForSlot(ctx, pubkey1, 42, []byte{})
require.NoError(t, err, "could not save block proposal")
// Save some block proposals.
err = sourceDatabase.SaveProposalHistoryForSlot(ctx, pubkey1, 42, []byte{})
require.NoError(t, err, "could not save block proposal")
err = sourceDatabase.SaveProposalHistoryForSlot(ctx, pubkey1, 43, []byte{})
require.NoError(t, err, "could not save block proposal")
err = sourceDatabase.SaveProposalHistoryForSlot(ctx, pubkey1, 43, []byte{})
require.NoError(t, err, "could not save block proposal")
expectedProposals := []*common.Proposal{
{
Slot: 43,
SigningRoot: signingRootBytes,
},
}
expectedProposals := []*common.Proposal{
{
Slot: 43,
SigningRoot: signingRootBytes,
},
}
// Close the source database.
err = sourceDatabase.Close()
require.NoError(t, err, "could not close source database")
// Close the source database.
err = sourceDatabase.Close()
require.NoError(t, err, "could not close source database")
// Source to target DB conversion.
// -------------------------------
err = ConvertDatabase(ctx, datadir, datadir, minimalToComplete)
require.NoError(t, err, "could not convert source to target database")
// Source to target DB conversion.
// ----------------------------------------
err = ConvertDatabase(ctx, datadir, datadir, minimalToComplete)
require.NoError(t, err, "could not convert source to target database")
// Check the target database.
// --------------------------
if minimalToComplete {
targetDatabase, err = kv.NewKVStore(ctx, datadir, nil)
} else {
targetDatabase, err = filesystem.NewStore(datadir, nil)
}
require.NoError(t, err, "could not get minimal database")
// Check the target database.
// --------------------------
if minimalToComplete {
targetDatabase, err = kv.NewKVStore(ctx, datadir, nil)
} else {
targetDatabase, err = filesystem.NewStore(datadir, nil)
}
require.NoError(t, err, "could not get minimal database")
// Check the genesis validator root.
actualGenesisValidatoRoot, err := targetDatabase.GenesisValidatorsRoot(ctx)
require.NoError(t, err, "could not get genesis validator root from target database")
require.DeepSSZEqual(t, expectedGenesisValidatorRoot, actualGenesisValidatoRoot, "genesis validator root should match")
// Check the genesis validator root.
actualGenesisValidatoRoot, err := targetDatabase.GenesisValidatorsRoot(ctx)
require.NoError(t, err, "could not get genesis validator root from target database")
require.DeepSSZEqual(t, expectedGenesisValidatorRoot, actualGenesisValidatoRoot, "genesis validator root should match")
// Check the graffiti file hash.
actualGraffitiFileHash, exists, err := targetDatabase.GraffitiFileHash()
require.NoError(t, err, "could not get graffiti file hash from target database")
require.Equal(t, true, exists, "graffiti file hash should exist")
require.Equal(t, expectedGraffitiFileHash, actualGraffitiFileHash, "graffiti file hash should match")
// Check the graffiti file hash.
actualGraffitiFileHash, exists, err := targetDatabase.GraffitiFileHash()
require.NoError(t, err, "could not get graffiti file hash from target database")
require.Equal(t, true, exists, "graffiti file hash should exist")
require.Equal(t, expectedGraffitiFileHash, actualGraffitiFileHash, "graffiti file hash should match")
// Check the graffiti ordered index.
actualGraffitiOrderedIndex, err := targetDatabase.GraffitiOrderedIndex(ctx, expectedGraffitiFileHash)
require.NoError(t, err, "could not get graffiti ordered index from target database")
require.Equal(t, expectedGraffitiOrderedIndex, actualGraffitiOrderedIndex, "graffiti ordered index should match")
// Check the graffiti ordered index.
actualGraffitiOrderedIndex, err := targetDatabase.GraffitiOrderedIndex(ctx, expectedGraffitiFileHash)
require.NoError(t, err, "could not get graffiti ordered index from target database")
require.Equal(t, expectedGraffitiOrderedIndex, actualGraffitiOrderedIndex, "graffiti ordered index should match")
if withProposerSettings {
// Check the proposer settings.
actualProposerSettings, err := targetDatabase.ProposerSettings(ctx)
require.NoError(t, err, "could not get proposer settings from target database")
require.DeepEqual(t, expectedProposerSettings, actualProposerSettings, "proposer settings should match")
}
// Check the proposer settings.
actualProposerSettings, err := targetDatabase.ProposerSettings(ctx)
require.NoError(t, err, "could not get proposer settings from target database")
require.DeepEqual(t, expectedProposerSettings, actualProposerSettings, "proposer settings should match")
// Check the attestations.
actualAttestationRecords, err := targetDatabase.AttestationHistoryForPubKey(ctx, pubkey1)
require.NoError(t, err, "could not get attestations from target database")
require.DeepEqual(t, expectedAttestationRecords1, actualAttestationRecords, "attestations should match")
// Check the attestations.
actualAttestationRecords, err := targetDatabase.AttestationHistoryForPubKey(ctx, pubkey1)
require.NoError(t, err, "could not get attestations from target database")
require.DeepEqual(t, expectedAttestationRecords1, actualAttestationRecords, "attestations should match")
actualAttestationRecords, err = targetDatabase.AttestationHistoryForPubKey(ctx, pubkey2)
require.NoError(t, err, "could not get attestations from target database")
require.DeepEqual(t, expectedAttestationRecords2, actualAttestationRecords, "attestations should match")
actualAttestationRecords, err = targetDatabase.AttestationHistoryForPubKey(ctx, pubkey2)
require.NoError(t, err, "could not get attestations from target database")
require.DeepEqual(t, expectedAttestationRecords2, actualAttestationRecords, "attestations should match")
// Check the block proposals.
actualProposals, err := targetDatabase.ProposalHistoryForPubKey(ctx, pubkey1)
require.NoError(t, err, "could not get block proposals from target database")
require.DeepEqual(t, expectedProposals, actualProposals, "block proposals should match")
// Check the block proposals.
actualProposals, err := targetDatabase.ProposalHistoryForPubKey(ctx, pubkey1)
require.NoError(t, err, "could not get block proposals from target database")
require.DeepEqual(t, expectedProposals, actualProposals, "block proposals should match")
// Close the target database.
err = targetDatabase.Close()
require.NoError(t, err, "could not close target database")
// Close the target database.
err = targetDatabase.Close()
require.NoError(t, err, "could not close target database")
// Check the source database does not exist anymore.
var existing bool
// Check the source database does not exist anymore.
var existing bool
if minimalToComplete {
databasePath := filepath.Join(datadir, filesystem.DatabaseDirName)
existing, err = file.Exists(databasePath, file.Directory)
} else {
databasePath := filepath.Join(datadir, kv.ProtectionDbFileName)
existing, err = file.Exists(databasePath, file.Regular)
}
if minimalToComplete {
databasePath := filepath.Join(datadir, filesystem.DatabaseDirName)
existing, err = file.Exists(databasePath, file.Directory)
} else {
databasePath := filepath.Join(datadir, kv.ProtectionDbFileName)
existing, err = file.Exists(databasePath, file.Regular)
}
require.NoError(t, err, "could not check if source database exists")
require.Equal(t, false, existing, "source database should not exist")
})
require.NoError(t, err, "could not check if source database exists")
require.Equal(t, false, existing, "source database should not exist")
})
}
}
}
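
Most of this hunk is re-indentation: the existing subtest body is wrapped in a second loop so every scenario runs both with and without proposer settings, and the proposer-settings assertions only run when they were actually written. A compressed sketch of the structure, using the names from the test; including both dimensions in the subtest name (as below) is a possible refinement, not what the diff does:

	for _, minimalToComplete := range [...]bool{false, true} {
		for _, withProposerSettings := range [...]bool{false, true} {
			name := fmt.Sprintf("minimalToComplete=%v/withProposerSettings=%v", minimalToComplete, withProposerSettings)
			t.Run(name, func(t *testing.T) {
				// ... build the source database ...
				if withProposerSettings {
					require.NoError(t, sourceDatabase.SaveProposerSettings(ctx, expectedProposerSettings))
				}
				// ... convert, then assert on proposer settings only when they were written ...
			})
		}
	}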

View File

@@ -74,13 +74,13 @@ type KeyStatus struct {
type KeyStatusType string
const (
StatusImported KeyStatusType = "IMPORTED"
StatusError KeyStatusType = "ERROR"
StatusDuplicate KeyStatusType = "DUPLICATE"
StatusUnknown KeyStatusType = "UNKNOWN"
StatusNotFound KeyStatusType = "NOT_FOUND"
StatusDeleted KeyStatusType = "DELETED"
StatusNotActive KeyStatusType = "NOT_ACTIVE"
StatusImported KeyStatusType = "imported"
StatusError KeyStatusType = "error"
StatusDuplicate KeyStatusType = "duplicate"
StatusUnknown KeyStatusType = "unknown"
StatusNotFound KeyStatusType = "not_found"
StatusDeleted KeyStatusType = "deleted"
StatusNotActive KeyStatusType = "not_active"
)
// PublicKeyDeleter allows deleting public keys set in keymanager.
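
Because KeyStatusType values are serialized into keymanager API responses, lowercasing the constants changes the wire format directly. A small self-contained sketch of the effect; the KeyStatus field names and tags here are illustrative, not copied from the package:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type KeyStatusType string

	const StatusImported KeyStatusType = "imported"

	// Illustrative struct; the real keymanager.KeyStatus may differ.
	type KeyStatus struct {
		Status  KeyStatusType `json:"status"`
		Message string        `json:"message"`
	}

	func main() {
		b, _ := json.Marshal(KeyStatus{Status: StatusImported, Message: "ok"})
		fmt.Println(string(b)) // {"status":"imported","message":"ok"}
	}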

View File

@@ -30,6 +30,7 @@ go_library(
"//async/event:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//cmd:go_default_library",
"//cmd/validator/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

View File

@@ -13,6 +13,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v5/cmd/validator/flags"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/config/proposer"
@@ -479,7 +480,7 @@ func (s *Server) ImportRemoteKeys(w http.ResponseWriter, r *http.Request) {
}
}
if isUrlUsed {
log.Warnf("Setting web3signer base url for IMPORTED keys is not supported. Prysm only uses the url from --validators-external-signer-url flag for web3signerKeymanagerKind.")
log.Warnf("Setting the remote signer base url within the request is not supported. The remote signer url can only be set from the --%s flag.", flags.Web3SignerURLFlag.Name)
}
httputil.WriteJson(w, &RemoteKeysResponse{Data: adder.AddPublicKeys(remoteKeys)})
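
Interpolating flags.Web3SignerURLFlag.Name instead of hard-coding the flag string keeps the warning in sync if the flag is ever renamed. A hedged sketch of the pattern with a urfave/cli-style flag; the flag definition and logger below are illustrative placeholders, the real flag lives in cmd/validator/flags:

	// Illustrative flag definition (assumes github.com/urfave/cli/v2).
	var Web3SignerURLFlag = &cli.StringFlag{
		Name:  "validators-external-signer-url",
		Usage: "URL for the remote signer",
	}

	// Referencing the flag's Name field avoids drift between the CLI and log output.
	log.Warnf("Setting the remote signer base url within the request is not supported. "+
		"The remote signer url can only be set from the --%s flag.", Web3SignerURLFlag.Name)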

View File

@@ -7,6 +7,7 @@ import (
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
@@ -204,7 +205,7 @@ func TestServer_ImportKeystores(t *testing.T) {
resp := &ImportKeystoresResponse{}
require.NoError(t, json.Unmarshal(wr.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
require.Equal(t, keymanager.StatusError, resp.Data[0].Status)
require.Equal(t, fmt.Sprintf("%v", keymanager.StatusError), strings.ToLower(string(resp.Data[0].Status))) // make sure it's lower case
})
t.Run("200 response even if number of passwords does not match number of keystores", func(t *testing.T) {
request := &ImportKeystoresRequest{
@@ -1406,6 +1407,7 @@ func TestServer_ImportRemoteKeys(t *testing.T) {
require.NoError(t, json.Unmarshal(w.Body.Bytes(), resp))
for i := 0; i < len(resp.Data); i++ {
require.DeepEqual(t, expectedStatuses[i], resp.Data[i])
require.Equal(t, fmt.Sprintf("%v", expectedStatuses[i].Status), strings.ToLower(string(resp.Data[i].Status)))
}
})
}
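
These assertions lowercase the response status before comparing, so the tests keep passing regardless of the constants' casing. If the pattern spreads, a small helper could keep it readable; a sketch under the assumption that such a helper would be welcome (name and placement are hypothetical):

	// requireStatusEqual is a hypothetical helper for case-insensitive status checks.
	func requireStatusEqual(t *testing.T, want, got keymanager.KeyStatusType) {
		t.Helper()
		require.Equal(t, strings.ToLower(string(want)), strings.ToLower(string(got)))
	}

	// Usage in the tests above:
	// requireStatusEqual(t, keymanager.StatusError, resp.Data[0].Status)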