Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: 23 commits
| Author | SHA1 | Date |
|---|---|---|
| | 039b86bd89 | |
| | 9619170df1 | |
| | 4da0abace7 | |
| | d919f800e3 | |
| | 38f095d556 | |
| | 04010d45c8 | |
| | 1a048a2f2a | |
| | 3df2dedbb2 | |
| | 2fee906d25 | |
| | 9f44d6e452 | |
| | 55f311eb73 | |
| | 0f90bacac9 | |
| | 30974039f3 | |
| | 3392ecb3e1 | |
| | fa2b64f702 | |
| | 6f5e35f08a | |
| | 79d6ce45ad | |
| | 73cd7df679 | |
| | d084d5a979 | |
| | db6b1c15c4 | |
| | 7c9bff489e | |
| | 1fca73d761 | |
| | fbafbdd62c | |

@@ -1,4 +1,4 @@
# Dependency Managagement in Prysm
# Dependency Management in Prysm

Prysm is go project with many complicated dependencies, including some c++ based libraries. There
are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
@@ -28,7 +28,7 @@ including complicated c++ dependencies.
One key advantage of Bazel over vanilla `go build` is that Bazel automatically (re)builds generated
pb.go files at build time when file changes are present in any protobuf definition file or after
any updates to the protobuf compiler or other relevant dependencies. Vanilla go users should run
the following scripts often to ensure their generated files are up to date. Further more, Prysm
the following scripts often to ensure their generated files are up to date. Furthermore, Prysm
generates SSZ marshal related code based on defined data structures. These generated files must
also be updated and checked in as frequently.

@@ -28,13 +28,14 @@ import (
)

const (
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
)

// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
@@ -146,7 +147,6 @@ func withSSZEncoding() reqOption {
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
log.Printf("requesting %s", u.String())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
@@ -343,6 +343,60 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
}, nil
}

// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewBuffer(body))
if err != nil {
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.hc.Do(req)
if err != nil {
return err
}
defer func() {
err = resp.Body.Close()
}()
if resp.StatusCode != http.StatusOK {
decoder := json.NewDecoder(resp.Body)
decoder.DisallowUnknownFields()
errorJson := &apimiddleware.IndexedVerificationFailureErrorJson{}
if err := decoder.Decode(errorJson); err != nil {
return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
}
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
"validator_index": w.ValidatorIndex,
"withdrawal_address": w.ToExecutionAddress,
}).Error(failure.Message)
}
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
}
return nil
}

// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
body, err := c.get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
err = json.Unmarshal(body, poolResponse)
if err != nil {
return nil, err
}
return poolResponse, nil
}

func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string

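For context, a minimal usage sketch of the two new client methods above. Only the method signatures come from this diff; the import paths and the way the *Client is obtained are assumptions and may differ in the real package layout.

```go
package example

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/v3/api/client/beacon"              // assumed path of this client package
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware" // assumed path of the JSON types
)

// submitAndList pushes already-signed BLS-to-execution changes to a beacon node
// and then reads back the node's operation pool.
func submitAndList(ctx context.Context, c *beacon.Client, changes []*apimiddleware.SignedBLSToExecutionChangeJson) {
	if err := c.SubmitChangeBLStoExecution(ctx, changes); err != nil {
		// Per-message verification failures are logged by the method itself; the
		// returned error carries the endpoint's error code and message.
		log.Printf("submit failed: %v", err)
		return
	}
	pool, err := c.GetBLStoExecutionChanges(ctx)
	if err != nil {
		log.Printf("could not read pool: %v", err)
		return
	}
	log.Printf("pool response: %+v", pool)
}
```
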
@@ -49,9 +49,13 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
if payload.IsNil() {
return errors.New("nil execution payload")
}
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
ok, err := canUseValidatedTerminalBlockHash(b.Block().Slot(), payload)
if err != nil {
return errors.Wrap(err, "could not validate terminal block hash")
}
if ok {
return nil
}
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash())
if err != nil {
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
@@ -105,7 +109,7 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
return blk.ParentHash[:], blkTDUint256, nil
}

// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// canUseValidatedTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// spec code:
// if TERMINAL_BLOCK_HASH != Hash32():
//
@@ -113,17 +117,17 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) error {
func canUseValidatedTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) (bool, error) {
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
return nil
return false, nil
}
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
return errors.New("terminal block hash activation epoch not reached")
return false, errors.New("terminal block hash activation epoch not reached")
}
if !bytes.Equal(payload.ParentHash(), params.BeaconConfig().TerminalBlockHash.Bytes()) {
return errors.New("parent hash does not match terminal block hash")
return false, errors.New("parent hash does not match terminal block hash")
}
return nil
return true, nil
}

// validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty.

@@ -18,6 +18,7 @@ import (
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)

func Test_validTerminalPowBlock(t *testing.T) {
@@ -213,20 +214,42 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
func Test_validateTerminalBlockHash(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{})
require.NoError(t, err)
require.NoError(t, validateTerminalBlockHash(1, wrapped))
ok, err := canUseValidatedTerminalBlockHash(1, wrapped)
require.NoError(t, err)
require.Equal(t, false, ok)

cfg := params.BeaconConfig()
cfg.TerminalBlockHash = [32]byte{0x01}
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, wrapped))
ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
require.ErrorContains(t, "terminal block hash activation epoch not reached", err)
require.Equal(t, false, ok)

cfg.TerminalBlockHashActivationEpoch = 0
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, wrapped))
ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
require.ErrorContains(t, "parent hash does not match terminal block hash", err)
require.Equal(t, false, ok)

wrapped, err = blocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{
ParentHash: cfg.TerminalBlockHash.Bytes(),
})
require.NoError(t, err)
require.NoError(t, validateTerminalBlockHash(1, wrapped))
ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
require.NoError(t, err)
require.Equal(t, true, ok)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
blk.Block().SetSlot(1)
require.NoError(t, blk.Block().Body().SetExecution(wrapped))
require.NoError(t, service.validateMergeBlock(ctx, blk))
}

@@ -557,22 +557,6 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Be
return nil
}

func (s *Service) handleBlockBLSToExecChanges(blk interfaces.BeaconBlock) error {
if blk.Version() < version.Capella {
return nil
}
changes, err := blk.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
for _, change := range changes {
if err := s.cfg.BLSToExecPool.MarkIncluded(change); err != nil {
return errors.Wrap(err, "could not mark BLSToExecutionChange as included")
}
}
return nil
}

// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {

@@ -26,7 +26,6 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
@@ -2305,65 +2304,6 @@ func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
require.NoError(t, service.fillMissingBlockPayloadId(ctx, time.Unix(int64(params.BeaconConfig().SecondsPerSlot/2), 0)))
}

func TestHandleBBlockBLSToExecutionChanges(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
pool := blstoexec.NewPool()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithBLSToExecPool(pool),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

t.Run("pre Capella block", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyBellatrix{}
pbb := &ethpb.BeaconBlockBellatrix{
Body: body,
}
blk, err := consensusblocks.NewBeaconBlock(pbb)
require.NoError(t, err)
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
})

t.Run("Post Capella no changes", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyCapella{}
pbb := &ethpb.BeaconBlockCapella{
Body: body,
}
blk, err := consensusblocks.NewBeaconBlock(pbb)
require.NoError(t, err)
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
})

t.Run("Post Capella some changes", func(t *testing.T) {
idx := types.ValidatorIndex(123)
change := &ethpb.BLSToExecutionChange{
ValidatorIndex: idx,
}
signedChange := &ethpb.SignedBLSToExecutionChange{
Message: change,
}
body := &ethpb.BeaconBlockBodyCapella{
BlsToExecutionChanges: []*ethpb.SignedBLSToExecutionChange{signedChange},
}
pbb := &ethpb.BeaconBlockCapella{
Body: body,
}
blk, err := consensusblocks.NewBeaconBlock(pbb)
require.NoError(t, err)

pool.InsertBLSToExecChange(signedChange)
require.Equal(t, true, pool.ValidatorExists(idx))
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
require.Equal(t, false, pool.ValidatorExists(idx))
})
}

// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(s *Service, slot int64, delay int64) {

@@ -10,6 +10,7 @@ import (
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
@@ -150,6 +151,11 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
s.cfg.ExitPool.MarkIncluded(e)
}

// Mark block BLS changes as seen so we don't include same ones in future blocks.
if err := s.handleBlockBLSToExecChanges(b); err != nil {
return errors.Wrap(err, "could not process BLSToExecutionChanges")
}

// Mark attester slashings as seen so we don't include same ones in future blocks.
for _, as := range b.Body().AttesterSlashings() {
s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
@@ -157,6 +163,20 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
return nil
}

func (s *Service) handleBlockBLSToExecChanges(blk interfaces.BeaconBlock) error {
if blk.Version() < version.Capella {
return nil
}
changes, err := blk.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
for _, change := range changes {
s.cfg.BLSToExecPool.MarkIncluded(change)
}
return nil
}

// This checks whether it's time to start saving hot state to DB.
// It's time when there's `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {

@@ -10,6 +10,7 @@ import (
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -332,3 +333,62 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}

func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
pool := blstoexec.NewPool()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&blockchainTesting.MockStateNotifier{}),
WithBLSToExecPool(pool),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

t.Run("pre Capella block", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyBellatrix{}
pbb := &ethpb.BeaconBlockBellatrix{
Body: body,
}
blk, err := blocks.NewBeaconBlock(pbb)
require.NoError(t, err)
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
})

t.Run("Post Capella no changes", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyCapella{}
pbb := &ethpb.BeaconBlockCapella{
Body: body,
}
blk, err := blocks.NewBeaconBlock(pbb)
require.NoError(t, err)
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
})

t.Run("Post Capella some changes", func(t *testing.T) {
idx := types.ValidatorIndex(123)
change := &ethpb.BLSToExecutionChange{
ValidatorIndex: idx,
}
signedChange := &ethpb.SignedBLSToExecutionChange{
Message: change,
}
body := &ethpb.BeaconBlockBodyCapella{
BlsToExecutionChanges: []*ethpb.SignedBLSToExecutionChange{signedChange},
}
pbb := &ethpb.BeaconBlockCapella{
Body: body,
}
blk, err := blocks.NewBeaconBlock(pbb)
require.NoError(t, err)

pool.InsertBLSToExecChange(signedChange)
require.Equal(t, true, pool.ValidatorExists(idx))
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
require.Equal(t, false, pool.ValidatorExists(idx))
})
}

@@ -131,3 +131,118 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
helpers.ActivationExitEpoch(types.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)), newRegistry[0].ExitEpoch)
}
}

func TestVerifyExitAndSignature(t *testing.T) {
type args struct {
currentSlot types.Slot
}

tests := []struct {
name string
args args
setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error)
wantErr string
}{
{
name: "Empty Exit",
args: args{
currentSlot: 0,
},
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: 0,
}
genesisRoot := [32]byte{'a'}
return &ethpb.Validator{}, &ethpb.SignedVoluntaryExit{}, fork, genesisRoot[:], nil
},
wantErr: "nil exit",
},
{
name: "Happy Path",
args: args{
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
},
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: 0,
}
signedExit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 2,
ValidatorIndex: 0,
},
}
bs, keys := util.DeterministicGenesisState(t, 1)
validator := bs.Validators()[0]
validator.ActivationEpoch = 1
err := bs.UpdateValidatorAtIndex(0, validator)
require.NoError(t, err)
sb, err := signing.ComputeDomainAndSign(bs, signedExit.Exit.Epoch, signedExit.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
signedExit.Signature = sig.Marshal()
return validator, signedExit, fork, bs.GenesisValidatorsRoot(), nil
},
},
{
name: "bad signature",
args: args{
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
},
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: 0,
}
signedExit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 2,
ValidatorIndex: 0,
},
}
bs, keys := util.DeterministicGenesisState(t, 1)
validator := bs.Validators()[0]
validator.ActivationEpoch = 1

sb, err := signing.ComputeDomainAndSign(bs, signedExit.Exit.Epoch, signedExit.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
signedExit.Signature = sig.Marshal()
genesisRoot := [32]byte{'a'}
// use wrong genesis root and don't update validator
return validator, signedExit, fork, genesisRoot[:], nil
},
wantErr: "signature did not verify",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := params.BeaconConfig().ShardCommitteePeriod
params.BeaconConfig().ShardCommitteePeriod = 0
validator, signedExit, fork, genesisRoot, err := tt.setup()
require.NoError(t, err)
rvalidator, err := state_native.NewValidator(validator)
require.NoError(t, err)
err = blocks.VerifyExitAndSignature(
rvalidator,
tt.args.currentSlot,
fork,
signedExit,
genesisRoot,
)
if tt.wantErr == "" {
require.NoError(t, err)
} else {
require.ErrorContains(t, tt.wantErr, err)
}
params.BeaconConfig().ShardCommitteePeriod = c // prevent contamination
})
}
}

@@ -54,6 +54,7 @@ func (RPCClientBad) CallContext(context.Context, interface{}, string, ...interfa
}

func TestClient_IPC(t *testing.T) {
t.Skip("Skipping IPC test to support Capella devnet-3")
server := newTestIPCServer(t)
defer server.Stop()
rpcClient := rpc.DialInProc(server)
@@ -154,6 +155,8 @@ func TestClient_IPC(t *testing.T) {
}

func TestClient_HTTP(t *testing.T) {
t.Skip("Skipping HTTP test to support Capella devnet-3")

ctx := context.Background()
fix := fixtures()

@@ -120,16 +120,16 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 3
// |
// 4 <- head
// 4
// /
// 5 <- justified epoch = 2
// 5 <- head, justified epoch = 2
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 1")
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")

// Insert block 6 with justified epoch 3: verify it's head
// 0
@@ -138,15 +138,15 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 3
// |
// 4 <- head
// 4
// / \
// 5 6 <- justified epoch = 3
// 5 6 <- head, justified epoch = 3
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 3, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 3")

// Moved 2 votes to block 5:
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
@@ -174,10 +174,10 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 3")

// Insert block 9 with justified epoch 3, it becomes head
// Verify 9 is the head:
// Insert block 10 with justified epoch 3, it becomes head
// Verify 10 is the head:
// 0
// / \
// 2 1
@@ -289,7 +289,7 @@ func TestVotes_CanFindHead(t *testing.T) {

r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")

// Insert new block 11 and verify head is at 11.
// 5 6

@@ -448,6 +448,11 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}
baseDir := cliCtx.String(cmd.DataDirFlag.Name)

if cliCtx.IsSet(flags.SlasherDirFlag.Name) {
baseDir = cliCtx.String(flags.SlasherDirFlag.Name)
}

dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

@@ -18,7 +18,6 @@ go_library(
"//container/doubly-linked-list:go_default_library",
"//crypto/bls/blst:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

@@ -17,7 +17,7 @@ func (m *PoolMock) PendingBLSToExecChanges() ([]*eth.SignedBLSToExecutionChange,
}

// BLSToExecChangesForInclusion --
func (m *PoolMock) BLSToExecChangesForInclusion(_ state.BeaconState) ([]*eth.SignedBLSToExecutionChange, error) {
func (m *PoolMock) BLSToExecChangesForInclusion(_ state.ReadOnlyBeaconState) ([]*eth.SignedBLSToExecutionChange, error) {
return m.Changes, nil
}

@@ -27,7 +27,7 @@ func (m *PoolMock) InsertBLSToExecChange(change *eth.SignedBLSToExecutionChange)
}

// MarkIncluded --
func (*PoolMock) MarkIncluded(_ *eth.SignedBLSToExecutionChange) error {
func (*PoolMock) MarkIncluded(_ *eth.SignedBLSToExecutionChange) {
panic("implement me")
}

@@ -4,7 +4,6 @@ import (
"math"
"sync"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -19,9 +18,9 @@ import (
// This pool is used by proposers to insert BLS-to-execution-change objects into new blocks.
type PoolManager interface {
PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, error)
BLSToExecChangesForInclusion(state.BeaconState) ([]*ethpb.SignedBLSToExecutionChange, error)
BLSToExecChangesForInclusion(beaconState state.ReadOnlyBeaconState) ([]*ethpb.SignedBLSToExecutionChange, error)
InsertBLSToExecChange(change *ethpb.SignedBLSToExecutionChange)
MarkIncluded(change *ethpb.SignedBLSToExecutionChange) error
MarkIncluded(change *ethpb.SignedBLSToExecutionChange)
ValidatorExists(idx types.ValidatorIndex) bool
}

@@ -61,13 +60,13 @@ func (p *Pool) PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, e
return result, nil
}

// BLSToExecChangesForInclusion returns objects that are ready for inclusion at the given slot.
// BLSToExecChangesForInclusion returns objects that are ready for inclusion.
// This method will not return more than the block enforced MaxBlsToExecutionChanges.
func (p *Pool) BLSToExecChangesForInclusion(st state.BeaconState) ([]*ethpb.SignedBLSToExecutionChange, error) {
func (p *Pool) BLSToExecChangesForInclusion(st state.ReadOnlyBeaconState) ([]*ethpb.SignedBLSToExecutionChange, error) {
p.lock.RLock()
length := int(math.Min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
result := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
node := p.pending.First()
node := p.pending.Last()
for node != nil && len(result) < length {
change, err := node.Value()
if err != nil {
@@ -79,14 +78,12 @@ func (p *Pool) BLSToExecChangesForInclusion(st state.BeaconState) ([]*ethpb.Sign
logrus.WithError(err).Warning("removing invalid BLSToExecutionChange from pool")
// MarkIncluded removes the invalid change from the pool
p.lock.RUnlock()
if err := p.MarkIncluded(change); err != nil {
return nil, errors.Wrap(err, "could not mark BLSToExecutionChange as included")
}
p.MarkIncluded(change)
p.lock.RLock()
} else {
result = append(result, change)
}
node, err = node.Next()
node, err = node.Prev()
if err != nil {
p.lock.RUnlock()
return nil, err
@@ -118,9 +115,7 @@ func (p *Pool) BLSToExecChangesForInclusion(st state.BeaconState) ([]*ethpb.Sign
}
if !signature.Verify(cSet.PublicKeys[i], cSet.Messages[i][:]) {
logrus.Warning("removing BLSToExecutionChange with invalid signature from pool")
if err := p.MarkIncluded(result[i]); err != nil {
return nil, errors.Wrap(err, "could not mark BLSToExecutionChange as included")
}
p.MarkIncluded(result[i])
} else {
verified = append(verified, result[i])
}
@@ -143,19 +138,18 @@ func (p *Pool) InsertBLSToExecChange(change *ethpb.SignedBLSToExecutionChange) {
}

// MarkIncluded is used when an object has been included in a beacon block. Every block seen by this
// listNode should call this method to include the object. This will remove the object from the pool.
func (p *Pool) MarkIncluded(change *ethpb.SignedBLSToExecutionChange) error {
// node should call this method to include the object. This will remove the object from the pool.
func (p *Pool) MarkIncluded(change *ethpb.SignedBLSToExecutionChange) {
p.lock.Lock()
defer p.lock.Unlock()

node := p.m[change.Message.ValidatorIndex]
if node == nil {
return nil
return
}

delete(p.m, change.Message.ValidatorIndex)
p.pending.Remove(node)
return nil
}

// ValidatorExists checks if the bls to execution change object exists

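A minimal sketch of how a caller might use the pool's updated interface after this change: MarkIncluded no longer returns an error, and the inclusion query takes a read-only state. The helper name and surrounding setup are illustrative; only the PoolManager methods and import paths come from the diff.

```go
package example

import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

// packChanges inserts incoming signed changes, asks the pool for objects that
// are ready to go into a block, and then marks the returned objects as included.
func packChanges(pool blstoexec.PoolManager, st state.ReadOnlyBeaconState, incoming []*ethpb.SignedBLSToExecutionChange) ([]*ethpb.SignedBLSToExecutionChange, error) {
	for _, c := range incoming {
		pool.InsertBLSToExecChange(c)
	}
	// Returns at most MaxBlsToExecutionChanges signature-verified objects.
	changes, err := pool.BLSToExecChangesForInclusion(st)
	if err != nil {
		return nil, err
	}
	for _, c := range changes {
		pool.MarkIncluded(c) // removes the object from the pool; no error to handle anymore
	}
	return changes, nil
}
```
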
@@ -130,7 +130,7 @@ func TestBLSToExecChangesForInclusion(t *testing.T) {
// We want FIFO semantics, which means validator with index 16 shouldn't be returned
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
for _, ch := range changes {
assert.NotEqual(t, types.ValidatorIndex(16), ch.Message.ValidatorIndex)
assert.NotEqual(t, types.ValidatorIndex(15), ch.Message.ValidatorIndex)
}
})
t.Run("One Bad change", func(t *testing.T) {
@@ -143,19 +143,19 @@ func TestBLSToExecChangesForInclusion(t *testing.T) {
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
assert.Equal(t, types.ValidatorIndex(2), changes[1].Message.ValidatorIndex)
assert.Equal(t, types.ValidatorIndex(30), changes[1].Message.ValidatorIndex)
signedChanges[1].Message.FromBlsPubkey[5] = saveByte
})
t.Run("One Bad Signature", func(t *testing.T) {
pool := NewPool()
copy(signedChanges[1].Signature, signedChanges[2].Signature)
copy(signedChanges[30].Signature, signedChanges[31].Signature)
for i := uint64(0); i < numValidators; i++ {
pool.InsertBLSToExecChange(signedChanges[i])
}
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges)-1, len(changes))
assert.Equal(t, types.ValidatorIndex(2), changes[1].Message.ValidatorIndex)
assert.Equal(t, types.ValidatorIndex(29), changes[1].Message.ValidatorIndex)
})
}

@@ -237,7 +237,7 @@ func TestMarkIncluded(t *testing.T) {
ValidatorIndex: types.ValidatorIndex(0),
}}
pool.InsertBLSToExecChange(change)
require.NoError(t, pool.MarkIncluded(change))
pool.MarkIncluded(change)
assert.Equal(t, 0, pool.pending.Len())
_, ok := pool.m[0]
assert.Equal(t, false, ok)
@@ -259,7 +259,7 @@ func TestMarkIncluded(t *testing.T) {
pool.InsertBLSToExecChange(first)
pool.InsertBLSToExecChange(second)
pool.InsertBLSToExecChange(third)
require.NoError(t, pool.MarkIncluded(first))
pool.MarkIncluded(first)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[0]
assert.Equal(t, false, ok)
@@ -281,7 +281,7 @@ func TestMarkIncluded(t *testing.T) {
pool.InsertBLSToExecChange(first)
pool.InsertBLSToExecChange(second)
pool.InsertBLSToExecChange(third)
require.NoError(t, pool.MarkIncluded(third))
pool.MarkIncluded(third)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[2]
assert.Equal(t, false, ok)
@@ -303,7 +303,7 @@ func TestMarkIncluded(t *testing.T) {
pool.InsertBLSToExecChange(first)
pool.InsertBLSToExecChange(second)
pool.InsertBLSToExecChange(third)
require.NoError(t, pool.MarkIncluded(second))
pool.MarkIncluded(second)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[1]
assert.Equal(t, false, ok)
@@ -324,7 +324,7 @@ func TestMarkIncluded(t *testing.T) {
}}
pool.InsertBLSToExecChange(first)
pool.InsertBLSToExecChange(second)
require.NoError(t, pool.MarkIncluded(change))
pool.MarkIncluded(change)
require.Equal(t, 2, pool.pending.Len())
_, ok := pool.m[0]
require.Equal(t, true, ok)
@@ -378,7 +378,7 @@ func TestValidatorExists(t *testing.T) {
ValidatorIndex: types.ValidatorIndex(0),
}}
pool.InsertBLSToExecChange(change)
require.NoError(t, pool.MarkIncluded(change))
pool.MarkIncluded(change)
assert.Equal(t, false, pool.ValidatorExists(0))
})
t.Run("multiple validators added to pool and removed", func(t *testing.T) {
@@ -399,8 +399,8 @@ func TestValidatorExists(t *testing.T) {
}}
pool.InsertBLSToExecChange(thirdChange)

assert.NoError(t, pool.MarkIncluded(firstChange))
assert.NoError(t, pool.MarkIncluded(thirdChange))
pool.MarkIncluded(firstChange)
pool.MarkIncluded(thirdChange)

assert.Equal(t, false, pool.ValidatorExists(0))
assert.Equal(t, true, pool.ValidatorExists(10))

@@ -166,6 +166,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
case "/eth/v1/beacon/pool/bls_to_execution_changes":
endpoint.PostRequest = &SubmitBLSToExecutionChangesRequest{}
endpoint.GetResponse = &BLSToExecutionChangesPoolResponseJson{}
endpoint.Err = &IndexedVerificationFailureErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapBLSChangesArray,
}

@@ -142,32 +142,25 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlock")
defer span.End()

capellaBlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock)
if ok {
if err := bs.submitBlindedCapellaBlock(ctx, capellaBlkContainer.CapellaBlock, req.Signature); err != nil {
switch blkContainer := req.Message.(type) {
case *ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock:
if err := bs.submitBlindedCapellaBlock(ctx, blkContainer.CapellaBlock, req.Signature); err != nil {
return nil, err
}
}

bellatrixBlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
if ok {
if err := bs.submitBlindedBellatrixBlock(ctx, bellatrixBlkContainer.BellatrixBlock, req.Signature); err != nil {
case *ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock:
if err := bs.submitBlindedBellatrixBlock(ctx, blkContainer.BellatrixBlock, req.Signature); err != nil {
return nil, err
}
}

// At the end we check forks that don't have blinded blocks.
phase0BlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
if ok {
if err := bs.submitPhase0Block(ctx, phase0BlkContainer.Phase0Block, req.Signature); err != nil {
case *ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block:
if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block, req.Signature); err != nil {
return nil, err
}
}
altairBlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
if ok {
if err := bs.submitAltairBlock(ctx, altairBlkContainer.AltairBlock, req.Signature); err != nil {
case *ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock:
if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock, req.Signature); err != nil {
return nil, err
}
default:
return nil, status.Errorf(codes.InvalidArgument, "Unsupported block container type %T", blkContainer)
}

return &emptypb.Empty{}, nil

@@ -220,29 +220,25 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlock")
defer span.End()

phase0BlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainer_Phase0Block)
if ok {
if err := bs.submitPhase0Block(ctx, phase0BlkContainer.Phase0Block, req.Signature); err != nil {
switch blkContainer := req.Message.(type) {
case *ethpbv2.SignedBeaconBlockContainer_Phase0Block:
if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block, req.Signature); err != nil {
return nil, err
}
}
altairBlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainer_AltairBlock)
if ok {
if err := bs.submitAltairBlock(ctx, altairBlkContainer.AltairBlock, req.Signature); err != nil {
case *ethpbv2.SignedBeaconBlockContainer_AltairBlock:
if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock, req.Signature); err != nil {
return nil, err
}
}
bellatrixBlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainer_BellatrixBlock)
if ok {
if err := bs.submitBellatrixBlock(ctx, bellatrixBlkContainer.BellatrixBlock, req.Signature); err != nil {
case *ethpbv2.SignedBeaconBlockContainer_BellatrixBlock:
if err := bs.submitBellatrixBlock(ctx, blkContainer.BellatrixBlock, req.Signature); err != nil {
return nil, err
}
}
capellaBlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainer_CapellaBlock)
if ok {
if err := bs.submitCapellaBlock(ctx, capellaBlkContainer.CapellaBlock, req.Signature); err != nil {
case *ethpbv2.SignedBeaconBlockContainer_CapellaBlock:
if err := bs.submitCapellaBlock(ctx, blkContainer.CapellaBlock, req.Signature); err != nil {
return nil, err
}
default:
return nil, status.Errorf(codes.InvalidArgument, "Unsupported block container type %T", blkContainer)
}

return &emptypb.Empty{}, nil

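The SubmitBlindedBlock and SubmitBlock rewrites above replace a chain of independent type assertions with one type switch over the request's oneof wrapper, so an unsupported container type now returns InvalidArgument instead of silently falling through. A generic sketch of that shape, with purely illustrative names rather than the Prysm API:

```go
package example

import "fmt"

// block stands in for the oneof wrapper carried by the request message.
type block interface{ isBlock() }

type phase0Block struct{}
type capellaBlock struct{}

func (phase0Block) isBlock()  {}
func (capellaBlock) isBlock() {}

// submit dispatches on the concrete container type and rejects anything it does
// not recognize, mirroring the switch-with-default pattern adopted above.
func submit(b block) error {
	switch blk := b.(type) {
	case phase0Block:
		_ = blk // handle phase0 submission here
	case capellaBlock:
		_ = blk // handle capella submission here
	default:
		return fmt.Errorf("unsupported block container type %T", blk)
	}
	return nil
}
```
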
@@ -6,6 +6,7 @@ import (
"reflect"
"testing"

"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
@@ -43,13 +44,6 @@ func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (
b := util.NewBeaconBlock()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
blks[i], err = blocks.NewSignedBeaconBlock(b)
@@ -86,13 +80,12 @@ func fillDBTestBlocksAltair(ctx context.Context, t *testing.T, beaconDB db.Datab
b := util.NewBeaconBlockAltair()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
@@ -128,13 +121,28 @@ func fillDBTestBlocksBellatrix(ctx context.Context, t *testing.T, beaconDB db.Da
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
b.Block.Body.ExecutionPayload = &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte("parent_hash"), 32),
FeeRecipient: bytesutil.PadTo([]byte("fee_recipient"), 20),
StateRoot: bytesutil.PadTo([]byte("state_root"), 32),
ReceiptsRoot: bytesutil.PadTo([]byte("receipts_root"), 32),
LogsBloom: bytesutil.PadTo([]byte("logs_bloom"), 256),
PrevRandao: bytesutil.PadTo([]byte("prev_randao"), 32),
BlockNumber: 123,
GasLimit: 123,
GasUsed: 123,
Timestamp: 123,
ExtraData: bytesutil.PadTo([]byte("extra_data"), 32),
BaseFeePerGas: bytesutil.PadTo([]byte("base_fee_per_gas"), 32),
BlockHash: bytesutil.PadTo([]byte("block_hash"), 32),
Transactions: [][]byte{[]byte("transaction1"), []byte("transaction2")},
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
@@ -170,13 +178,42 @@ func fillDBTestBlocksCapella(ctx context.Context, t *testing.T, beaconDB db.Data
b := util.NewBeaconBlockCapella()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
b.Block.Body.ExecutionPayload = &enginev1.ExecutionPayloadCapella{
ParentHash: bytesutil.PadTo([]byte("parent_hash"), 32),
FeeRecipient: bytesutil.PadTo([]byte("fee_recipient"), 20),
StateRoot: bytesutil.PadTo([]byte("state_root"), 32),
ReceiptsRoot: bytesutil.PadTo([]byte("receipts_root"), 32),
LogsBloom: bytesutil.PadTo([]byte("logs_bloom"), 256),
PrevRandao: bytesutil.PadTo([]byte("prev_randao"), 32),
BlockNumber: 123,
GasLimit: 123,
GasUsed: 123,
Timestamp: 123,
ExtraData: bytesutil.PadTo([]byte("extra_data"), 32),
BaseFeePerGas: bytesutil.PadTo([]byte("base_fee_per_gas"), 32),
BlockHash: bytesutil.PadTo([]byte("block_hash"), 32),
Transactions: [][]byte{[]byte("transaction1"), []byte("transaction2")},
Withdrawals: []*enginev1.Withdrawal{
{
Index: 1,
ValidatorIndex: 1,
Address: bytesutil.PadTo([]byte("address1"), 20),
Amount: 1,
},
{
Index: 2,
ValidatorIndex: 2,
Address: bytesutil.PadTo([]byte("address2"), 20),
Amount: 2,
},
},
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
@@ -212,13 +249,28 @@ func fillDBTestBlocksBellatrixBlinded(ctx context.Context, t *testing.T, beaconD
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
b.Block.Body.ExecutionPayloadHeader = &enginev1.ExecutionPayloadHeader{
ParentHash: bytesutil.PadTo([]byte("parent_hash"), 32),
FeeRecipient: bytesutil.PadTo([]byte("fee_recipient"), 20),
StateRoot: bytesutil.PadTo([]byte("state_root"), 32),
ReceiptsRoot: bytesutil.PadTo([]byte("receipts_root"), 32),
LogsBloom: bytesutil.PadTo([]byte("logs_bloom"), 256),
PrevRandao: bytesutil.PadTo([]byte("prev_randao"), 32),
BlockNumber: 123,
GasLimit: 123,
GasUsed: 123,
Timestamp: 123,
ExtraData: bytesutil.PadTo([]byte("extra_data"), 32),
BaseFeePerGas: bytesutil.PadTo([]byte("base_fee_per_gas"), 32),
BlockHash: bytesutil.PadTo([]byte("block_hash"), 32),
TransactionsRoot: bytesutil.PadTo([]byte("transactions_root"), 32),
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
@@ -254,13 +306,29 @@ func fillDBTestBlocksCapellaBlinded(ctx context.Context, t *testing.T, beaconDB
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
b.Block.Body.ExecutionPayloadHeader = &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: bytesutil.PadTo([]byte("parent_hash"), 32),
FeeRecipient: bytesutil.PadTo([]byte("fee_recipient"), 20),
StateRoot: bytesutil.PadTo([]byte("state_root"), 32),
ReceiptsRoot: bytesutil.PadTo([]byte("receipts_root"), 32),
LogsBloom: bytesutil.PadTo([]byte("logs_bloom"), 256),
PrevRandao: bytesutil.PadTo([]byte("prev_randao"), 32),
BlockNumber: 123,
GasLimit: 123,
GasUsed: 123,
Timestamp: 123,
ExtraData: bytesutil.PadTo([]byte("extra_data"), 32),
BaseFeePerGas: bytesutil.PadTo([]byte("base_fee_per_gas"), 32),
BlockHash: bytesutil.PadTo([]byte("block_hash"), 32),
TransactionsRoot: bytesutil.PadTo([]byte("transactions_root"), 32),
WithdrawalsRoot: bytesutil.PadTo([]byte("withdrawals_root"), 32),
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)

@@ -89,6 +89,17 @@ func sendVerifiedBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltairServe
return nil
}
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: phBlk}
case version.Capella:
pb, err := data.SignedBlock.Proto()
if err != nil {
return errors.Wrap(err, "could not get protobuf block")
}
phBlk, ok := pb.(*ethpb.SignedBeaconBlockCapella)
if !ok {
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockCapella")
return nil
}
b.Block = &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: phBlk}
}

if err := stream.Send(b); err != nil {
@@ -136,6 +147,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: p}
case *ethpb.SignedBeaconBlockBellatrix:
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: p}
case *ethpb.SignedBeaconBlockCapella:
b.Block = &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: p}
default:
log.Errorf("Unknown block type %T", p)
}

@@ -112,6 +112,48 @@ func TestServer_StreamAltairBlocks_OnHeadUpdated(t *testing.T) {
<-exitRoutine
}

func TestServer_StreamCapellaBlocks_OnHeadUpdated(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.BeaconConfig())
ctx := context.Background()
beaconState, privs := util.DeterministicGenesisStateCapella(t, 64)
c, err := altair.NextSyncCommittee(ctx, beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))

b, err := util.GenerateFullBlockCapella(beaconState, privs, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
chainService := &chainMock.ChainService{State: beaconState}
server := &Server{
Ctx: ctx,
BlockNotifier: chainService.BlockNotifier(),
HeadFetcher: chainService,
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)

mockStream.EXPECT().Send(&ethpb.StreamBlocksResponse{Block: &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: b}}).Do(func(arg0 interface{}) {
exitRoutine <- true
})
mockStream.EXPECT().Context().Return(ctx).AnyTimes()

go func(tt *testing.T) {
assert.NoError(tt, server.StreamBlocksAltair(&ethpb.StreamBlocksRequest{}, mockStream), "Could not call RPC method")
}(t)
wrappedBlk, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = server.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: wrappedBlk},
})
}
<-exitRoutine
}

func TestServer_StreamAltairBlocksVerified_OnHeadUpdated(t *testing.T) {
db := dbTest.SetupDB(t)
ctx := context.Background()
@@ -154,3 +196,46 @@ func TestServer_StreamAltairBlocksVerified_OnHeadUpdated(t *testing.T) {
}
<-exitRoutine
}

func TestServer_StreamCapellaBlocksVerified_OnHeadUpdated(t *testing.T) {
db := dbTest.SetupDB(t)
ctx := context.Background()
beaconState, privs := util.DeterministicGenesisStateCapella(t, 32)
c, err := altair.NextSyncCommittee(ctx, beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))

b, err := util.GenerateFullBlockCapella(beaconState, privs, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wrappedBlk := util.SaveBlock(t, ctx, db, b)
chainService := &chainMock.ChainService{State: beaconState}
server := &Server{
Ctx: ctx,
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
mockStream.EXPECT().Send(&ethpb.StreamBlocksResponse{Block: &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: b}}).Do(func(arg0 interface{}) {
exitRoutine <- true
})
mockStream.EXPECT().Context().Return(ctx).AnyTimes()

go func(tt *testing.T) {
assert.NoError(tt, server.StreamBlocksAltair(&ethpb.StreamBlocksRequest{
VerifiedOnly: true,
}, mockStream), "Could not call RPC method")
}(t)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = server.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{Slot: b.Block.Slot, BlockRoot: r, SignedBlock: wrappedBlk},
})
}
<-exitRoutine
}

@@ -3,6 +3,7 @@ package stategen
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -41,7 +42,8 @@ func (_ *State) replayBlocks(
|
||||
"endSlot": targetSlot,
|
||||
"diff": targetSlot - state.Slot(),
|
||||
})
|
||||
log.Debug("Replaying state")
|
||||
log.Debugf("Replaying state at %s", debug.Stack())
|
||||
|
||||
// The input block list is sorted in decreasing slots order.
|
||||
if len(signed) > 0 {
|
||||
for i := len(signed) - 1; i >= 0; i-- {
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -46,11 +45,6 @@ func (s *Service) validateBlsToExecutionChange(ctx context.Context, pid peer.ID,
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
// Ignore messages if our current head state doesn't support
|
||||
// capella.
|
||||
if st.Version() < version.Capella {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
// Validate that the execution change object is valid.
|
||||
_, err = blocks.ValidateBLSToExecutionChange(st, blsChange)
|
||||
if err != nil {
|
||||
|
||||
@@ -148,7 +148,7 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) {
|
||||
want: pubsub.ValidationIgnore,
|
||||
},
|
||||
{
|
||||
name: "Non-capella Head state",
|
||||
name: "Non-Capella HeadState Valid Execution Change Message",
|
||||
svc: NewService(context.Background(),
|
||||
WithP2P(mockp2p.NewTestP2P(t)),
|
||||
WithInitialSync(&mockSync.Sync{IsSyncing: false}),
|
||||
@@ -161,13 +161,23 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) {
|
||||
s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New())
|
||||
s.cfg.beaconDB = beaconDB
|
||||
s.initCaches()
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 128)
|
||||
st, keys := util.DeterministicGenesisStateBellatrix(t, 128)
|
||||
s.cfg.chain = &mockChain.ChainService{
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)),
|
||||
State: st,
|
||||
}
|
||||
|
||||
msg.Message.ValidatorIndex = 50
|
||||
// Provide invalid withdrawal key for validator
|
||||
msg.Message.FromBlsPubkey = keys[51].PublicKey().Marshal()
|
||||
msg.Message.ToExecutionAddress = wantedExecAddress
|
||||
epoch := slots.ToEpoch(st.Slot())
|
||||
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
|
||||
assert.NoError(t, err)
|
||||
htr, err := signing.SigningData(msg.Message.HashTreeRoot, domain)
|
||||
assert.NoError(t, err)
|
||||
msg.Signature = keys[51].Sign(htr[:]).Marshal()
|
||||
return s, topic
|
||||
},
|
||||
args: args{
|
||||
@@ -182,7 +192,7 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) {
|
||||
},
|
||||
Signature: emptySig[:],
|
||||
}},
|
||||
want: pubsub.ValidationIgnore,
|
||||
want: pubsub.ValidationAccept,
|
||||
},
|
||||
{
|
||||
name: "Non-existent Validator Index",
|
||||
|
||||
@@ -5,6 +5,7 @@ package flags
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
@@ -246,4 +247,10 @@ var (
|
||||
"WARNING: This flag should be used only if you have a clear understanding that community has decided to override the terminal block hash activation epoch. " +
|
||||
"Incorrect usage will result in your node experience consensus failure.",
|
||||
}
|
||||
// SlasherDirFlag defines a path on disk where the slasher database is stored.
|
||||
SlasherDirFlag = &cli.StringFlag{
|
||||
Name: "slasher-datadir",
|
||||
Usage: "Directory for the slasher database",
|
||||
Value: cmd.DefaultDataDir(),
|
||||
}
|
||||
)
|
||||
|
||||
@@ -130,6 +130,7 @@ var appFlags = []cli.Flag{
|
||||
checkpoint.RemoteURL,
|
||||
genesis.StatePath,
|
||||
genesis.BeaconAPIURL,
|
||||
flags.SlasherDirFlag,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -126,6 +126,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
flags.MaxBuilderEpochMissedSlots,
|
||||
flags.MaxBuilderConsecutiveMissedSlots,
|
||||
flags.EngineEndpointTimeoutSeconds,
|
||||
flags.SlasherDirFlag,
|
||||
checkpoint.BlockPath,
|
||||
checkpoint.StatePath,
|
||||
checkpoint.RemoteURL,
|
||||
|
||||
@@ -15,8 +15,8 @@ go_library(
|
||||
"//cmd/prysmctl/db:go_default_library",
|
||||
"//cmd/prysmctl/deprecated:go_default_library",
|
||||
"//cmd/prysmctl/p2p:go_default_library",
|
||||
"//cmd/prysmctl/signing:go_default_library",
|
||||
"//cmd/prysmctl/testnet:go_default_library",
|
||||
"//cmd/prysmctl/validator:go_default_library",
|
||||
"//cmd/prysmctl/weaksubjectivity:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/db"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/deprecated"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/signing"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/testnet"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/validator"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/weaksubjectivity"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
@@ -36,5 +36,5 @@ func init() {
|
||||
prysmctlCommands = append(prysmctlCommands, p2p.Commands...)
|
||||
prysmctlCommands = append(prysmctlCommands, testnet.Commands...)
|
||||
prysmctlCommands = append(prysmctlCommands, weaksubjectivity.Commands...)
|
||||
prysmctlCommands = append(prysmctlCommands, signing.Commands...)
|
||||
prysmctlCommands = append(prysmctlCommands, validator.Commands...)
|
||||
}
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["cmd.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/signing",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//cmd:go_default_library",
|
||||
"//cmd/validator/accounts:go_default_library",
|
||||
"//cmd/validator/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//runtime/tos:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,64 +0,0 @@
|
||||
package signing
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/validator/accounts"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/validator/flags"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/tos"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var Commands = []*cli.Command{
|
||||
{
|
||||
Name: "sign",
|
||||
Usage: "signs a message and broadcasts it to the network through the beacon node",
|
||||
Subcommands: []*cli.Command{
|
||||
{
|
||||
Name: "voluntary-exit",
|
||||
Description: "Performs a voluntary exit on selected accounts",
|
||||
Flags: cmd.WrapFlags([]cli.Flag{
|
||||
flags.WalletDirFlag,
|
||||
flags.WalletPasswordFileFlag,
|
||||
flags.AccountPasswordFileFlag,
|
||||
flags.VoluntaryExitPublicKeysFlag,
|
||||
flags.BeaconRPCProviderFlag,
|
||||
flags.Web3SignerURLFlag,
|
||||
flags.Web3SignerPublicValidatorKeysFlag,
|
||||
flags.InteropNumValidators,
|
||||
flags.InteropStartIndex,
|
||||
cmd.GrpcMaxCallRecvMsgSizeFlag,
|
||||
flags.CertFlag,
|
||||
flags.GrpcHeadersFlag,
|
||||
flags.GrpcRetriesFlag,
|
||||
flags.GrpcRetryDelayFlag,
|
||||
flags.ExitAllFlag,
|
||||
flags.ForceExitFlag,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.RopstenTestnet,
|
||||
features.SepoliaTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
Before: func(cliCtx *cli.Context) error {
|
||||
if err := cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
return features.ConfigureValidator(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := accounts.AccountsExit(cliCtx, os.Stdin); err != nil {
|
||||
log.WithError(err).Fatal("Could not perform voluntary exit")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
cmd/prysmctl/validator/BUILD.bazel (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cmd.go",
|
||||
"withdraw.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/validator",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//cmd:go_default_library",
|
||||
"//cmd/validator/accounts:go_default_library",
|
||||
"//cmd/validator/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//runtime/tos:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_logrusorgru_aurora//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["withdraw_test.go"],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
cmd/prysmctl/validator/cmd.go (new file, 137 lines)
@@ -0,0 +1,137 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/logrusorgru/aurora"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/validator/accounts"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd/validator/flags"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/tos"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
BeaconHostFlag = &cli.StringFlag{
|
||||
Name: "beacon-node-host",
|
||||
Usage: "host:port for beacon node to query",
|
||||
Value: "127.0.0.1:3500",
|
||||
}
|
||||
PathFlag = &cli.StringFlag{
|
||||
Name: "path",
|
||||
Aliases: []string{"p"},
|
||||
Usage: "path to the signed withdrawal messages JSON",
|
||||
}
|
||||
ConfirmFlag = &cli.BoolFlag{
|
||||
Name: "confirm",
|
||||
Aliases: []string{"c"},
|
||||
Usage: "WARNING: User confirms and accepts responsibility of all input data provided and actions for setting their withdrawal address for their validator key. " +
|
||||
"This action is not reversible and withdrawal addresses can not be changed once set.",
|
||||
}
|
||||
VerifyOnlyFlag = &cli.BoolFlag{
|
||||
Name: "verify-only",
|
||||
Aliases: []string{"vo"},
|
||||
Usage: "overrides withdrawal command to only verify whether requests are in the pool and does not submit withdrawal requests",
|
||||
}
|
||||
)
|
||||
|
||||
var Commands = []*cli.Command{
|
||||
{
|
||||
Name: "validator",
|
||||
Aliases: []string{"v", "sign"}, // remove sign command should be depreciated but having as backwards compatability.
|
||||
Usage: "commands that affect the state of validators such as exiting or withdrawing",
|
||||
Subcommands: []*cli.Command{
|
||||
{
|
||||
Name: "withdraw",
|
||||
Aliases: []string{"w"},
|
||||
Usage: "Assign Ethereum withdrawal addresses to validator keys. WARNING: once set values are included they can no longer be updated.",
|
||||
Flags: []cli.Flag{
|
||||
BeaconHostFlag,
|
||||
PathFlag,
|
||||
ConfirmFlag,
|
||||
VerifyOnlyFlag,
|
||||
cmd.ConfigFileFlag,
|
||||
cmd.AcceptTosFlag,
|
||||
},
|
||||
Before: func(cliCtx *cli.Context) error {
|
||||
if err := cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags); err != nil {
|
||||
return err
|
||||
}
|
||||
au := aurora.NewAurora(true)
|
||||
if !cliCtx.Bool(cmd.AcceptTosFlag.Name) || !cliCtx.Bool(ConfirmFlag.Name) {
|
||||
fmt.Println(au.Red("===============IMPORTANT==============="))
|
||||
fmt.Println(au.Red("Please read the following carefully"))
|
||||
fmt.Print("This action will allow the partial withdrawal of amounts over the 32 staked ETH in your active validator balance. \n" +
|
||||
"You will also be entitled to the full withdrawal of the entire validator balance if your validator has exited. \n" +
|
||||
"Please navigate to our website (https://docs.prylabs.network/) and make sure you understand the full implications of setting your withdrawal address. \n")
|
||||
fmt.Println(au.Red("THIS ACTION WILL NOT BE REVERSIBLE ONCE INCLUDED. "))
|
||||
fmt.Println(au.Red("You will NOT be able to change the address again once changed. "))
|
||||
return fmt.Errorf("both the `--%s` and `--%s` flags are required to run this command. \n"+
|
||||
"By providing these flags the user has read and accepts the TERMS AND CONDITIONS: https://github.com/prysmaticlabs/prysm/blob/master/TERMS_OF_SERVICE.md "+
|
||||
"and confirms the action of setting withdrawals addresses", cmd.AcceptTosFlag.Name, ConfirmFlag.Name)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if cliCtx.Bool(VerifyOnlyFlag.Name) {
|
||||
if err := verifyWithdrawalsInPool(cliCtx); err != nil {
|
||||
log.WithError(err).Fatal("Could not verify withdrawal addresses")
|
||||
}
|
||||
} else {
|
||||
if err := setWithdrawalAddresses(cliCtx); err != nil {
|
||||
log.WithError(err).Fatal("Could not set withdrawal addresses")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "exit",
|
||||
Aliases: []string{"e", "voluntary-exit"},
|
||||
Usage: "Performs a voluntary exit on selected accounts",
|
||||
Flags: cmd.WrapFlags([]cli.Flag{
|
||||
flags.WalletDirFlag,
|
||||
flags.WalletPasswordFileFlag,
|
||||
flags.AccountPasswordFileFlag,
|
||||
flags.VoluntaryExitPublicKeysFlag,
|
||||
flags.BeaconRPCProviderFlag,
|
||||
flags.Web3SignerURLFlag,
|
||||
flags.Web3SignerPublicValidatorKeysFlag,
|
||||
flags.InteropNumValidators,
|
||||
flags.InteropStartIndex,
|
||||
cmd.GrpcMaxCallRecvMsgSizeFlag,
|
||||
flags.CertFlag,
|
||||
flags.GrpcHeadersFlag,
|
||||
flags.GrpcRetriesFlag,
|
||||
flags.GrpcRetryDelayFlag,
|
||||
flags.ExitAllFlag,
|
||||
flags.ForceExitFlag,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.RopstenTestnet,
|
||||
features.SepoliaTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
Before: func(cliCtx *cli.Context) error {
|
||||
if err := cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
return features.ConfigureValidator(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := accounts.AccountsExit(cliCtx, os.Stdin); err != nil {
|
||||
log.WithError(err).Fatal("Could not perform voluntary exit")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
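For orientation, the following is a minimal, hypothetical sketch (not part of this diff) of how a `Commands` slice like the one defined above is typically mounted into a urfave/cli v2 application. The app name, the flag subset, and the action body are illustrative assumptions only; they are not taken from the prysmctl sources.

```go
package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	// Wire a "validator withdraw" style subcommand into an app, mirroring the
	// shape of the Commands variable shown above (illustrative names only).
	app := &cli.App{
		Name: "prysmctl-example",
		Commands: []*cli.Command{
			{
				Name: "validator",
				Subcommands: []*cli.Command{
					{
						Name: "withdraw",
						Flags: []cli.Flag{
							&cli.StringFlag{Name: "path", Aliases: []string{"p"}},
							&cli.BoolFlag{Name: "confirm", Aliases: []string{"c"}},
						},
						Action: func(c *cli.Context) error {
							log.Printf("would submit withdrawal messages from %s", c.String("path"))
							return nil
						},
					},
				},
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```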
cmd/prysmctl/validator/testdata/change-operations-multiple.json (new vendored file, 1 line)
@@ -0,0 +1 @@
|
||||
[{"message":{"validator_index":"0","from_bls_pubkey":"0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c","to_execution_address":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"0xb6e640f0fc58e9f22585dbf434b6a0e8fc36b98e2f2a963e158716cfc84034141289f7898027de1ec56754937f1a837e01c7b066a6a56af7a379f8aec823d050788a5ecc799e9bc39f73d45b7c389c961cbaace61823e4c7bf2f93bd06c03127"},{"message":{"validator_index":"1","from_bls_pubkey":"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b","to_execution_address":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"0xa97103e15d3dbdaa75fb15cea782e4a11329eea77d155864ec682d7907b3b70c7771960bef7be1b1c4e08fe735888b950c1a22053f6049b35736f48e6dd018392efa3896c9e427ea4e100e86e9131b5ea2673388a4bf188407a630ba405b7dc5"}]
|
||||
cmd/prysmctl/validator/testdata/change-operations-multiple_notfound.json (new vendored file, 1 line)
@@ -0,0 +1 @@
|
||||
[{"message":{"validator_index":"3","from_bls_pubkey":"0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c","to_execution_address":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"0xb6e640f0fc58e9f22585dbf434b6a0e8fc36b98e2f2a963e158716cfc84034141289f7898027de1ec56754937f1a837e01c7b066a6a56af7a379f8aec823d050788a5ecc799e9bc39f73d45b7c389c961cbaace61823e4c7bf2f93bd06c03127"},{"message":{"validator_index":"5","from_bls_pubkey":"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b","to_execution_address":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"0xa97103e15d3dbdaa75fb15cea782e4a11329eea77d155864ec682d7907b3b70c7771960bef7be1b1c4e08fe735888b950c1a22053f6049b35736f48e6dd018392efa3896c9e427ea4e100e86e9131b5ea2673388a4bf188407a630ba405b7dc5"}]
|
||||
cmd/prysmctl/validator/testdata/change-operations.json (new vendored executable file, 1 line)
@@ -0,0 +1 @@
|
||||
[{"message":{"validator_index":"1","from_bls_pubkey":"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b","to_execution_address":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"0xa97103e15d3dbdaa75fb15cea782e4a11329eea77d155864ec682d7907b3b70c7771960bef7be1b1c4e08fe735888b950c1a22053f6049b35736f48e6dd018392efa3896c9e427ea4e100e86e9131b5ea2673388a4bf188407a630ba405b7dc5"}]
|
||||
cmd/prysmctl/validator/testdata/staking-cli-change-operations-multiple.json (new vendored file, 1 line)
@@ -0,0 +1 @@
|
||||
[{"message":{"validator_index":"0","from_bls_pubkey":"a99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c","to_execution_address":"a94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"b6e640f0fc58e9f22585dbf434b6a0e8fc36b98e2f2a963e158716cfc84034141289f7898027de1ec56754937f1a837e01c7b066a6a56af7a379f8aec823d050788a5ecc799e9bc39f73d45b7c389c961cbaace61823e4c7bf2f93bd06c03127", "metadata":{ "network_name": "mainnet", "genesis_validators_root": "4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95", "deposit_cli_version": "2.4.0"}},{"message":{"validator_index":"1","from_bls_pubkey":"b89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b","to_execution_address":"a94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"a97103e15d3dbdaa75fb15cea782e4a11329eea77d155864ec682d7907b3b70c7771960bef7be1b1c4e08fe735888b950c1a22053f6049b35736f48e6dd018392efa3896c9e427ea4e100e86e9131b5ea2673388a4bf188407a630ba405b7dc5", "metadata":{ "network_name": "mainnet", "genesis_validators_root": "4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95", "deposit_cli_version": "2.4.0"}}]
|
||||
cmd/prysmctl/validator/withdraw.go (new file, 171 lines)
@@ -0,0 +1,171 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/logrusorgru/aurora"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/api/client/beacon"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
func setWithdrawalAddresses(c *cli.Context) error {
|
||||
ctx, span := trace.StartSpan(c.Context, "withdrawal.setWithdrawalAddresses")
|
||||
defer span.End()
|
||||
au := aurora.NewAurora(true)
|
||||
beaconNodeHost := c.String(BeaconHostFlag.Name)
|
||||
if !c.IsSet(PathFlag.Name) {
|
||||
return fmt.Errorf("no --%s flag value was provided", PathFlag.Name)
|
||||
}
|
||||
setWithdrawalAddressJsons, err := getWithdrawalMessagesFromPathFlag(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, request := range setWithdrawalAddressJsons {
|
||||
fmt.Println("SETTING VALIDATOR INDEX " + au.Red(request.Message.ValidatorIndex).String() + " TO WITHDRAWAL ADDRESS " + au.Red(request.Message.ToExecutionAddress).String())
|
||||
}
|
||||
return callWithdrawalEndpoints(ctx, beaconNodeHost, setWithdrawalAddressJsons)
|
||||
}
|
||||
|
||||
func getWithdrawalMessagesFromPathFlag(c *cli.Context) ([]*apimiddleware.SignedBLSToExecutionChangeJson, error) {
|
||||
setWithdrawalAddressJsons := make([]*apimiddleware.SignedBLSToExecutionChangeJson, 0)
|
||||
foundFilePaths, err := findWithdrawalFiles(c.String(PathFlag.Name))
|
||||
if err != nil {
|
||||
return setWithdrawalAddressJsons, errors.Wrap(err, "failed to find withdrawal files")
|
||||
}
|
||||
for _, foundFilePath := range foundFilePaths {
|
||||
b, err := os.ReadFile(filepath.Clean(foundFilePath))
|
||||
if err != nil {
|
||||
return setWithdrawalAddressJsons, errors.Wrap(err, "failed to open file")
|
||||
}
|
||||
var to []*apimiddleware.SignedBLSToExecutionChangeJson
|
||||
if err := json.Unmarshal(b, &to); err != nil {
|
||||
log.Warnf("provided file: %s, is not a list of signed withdrawal messages", foundFilePath)
|
||||
continue
|
||||
}
|
||||
// verify 0x from file and add if needed
|
||||
for i, obj := range to {
|
||||
if len(obj.Message.FromBLSPubkey) == fieldparams.BLSPubkeyLength*2 {
|
||||
to[i].Message.FromBLSPubkey = fmt.Sprintf("0x%s", obj.Message.FromBLSPubkey)
|
||||
}
|
||||
if len(obj.Message.ToExecutionAddress) == common.AddressLength*2 {
|
||||
to[i].Message.ToExecutionAddress = fmt.Sprintf("0x%s", obj.Message.ToExecutionAddress)
|
||||
}
|
||||
if len(obj.Signature) == fieldparams.BLSSignatureLength*2 {
|
||||
to[i].Signature = fmt.Sprintf("0x%s", obj.Signature)
|
||||
}
|
||||
setWithdrawalAddressJsons = append(setWithdrawalAddressJsons, &apimiddleware.SignedBLSToExecutionChangeJson{
|
||||
Message: &apimiddleware.BLSToExecutionChangeJson{
|
||||
ValidatorIndex: to[i].Message.ValidatorIndex,
|
||||
FromBLSPubkey: to[i].Message.FromBLSPubkey,
|
||||
ToExecutionAddress: to[i].Message.ToExecutionAddress,
|
||||
},
|
||||
Signature: to[i].Signature,
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
if len(setWithdrawalAddressJsons) == 0 {
|
||||
return setWithdrawalAddressJsons, errors.New("the list of signed requests is empty")
|
||||
}
|
||||
return setWithdrawalAddressJsons, nil
|
||||
}
|
||||
|
||||
func callWithdrawalEndpoints(ctx context.Context, host string, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
|
||||
client, err := beacon.NewClient(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := client.SubmitChangeBLStoExecution(ctx, request); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("Successfully published messages to update %d withdrawal addresses.", len(request))
|
||||
return checkIfWithdrawsAreInPool(ctx, client, request)
|
||||
}
|
||||
|
||||
func checkIfWithdrawsAreInPool(ctx context.Context, client *beacon.Client, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
|
||||
log.Info("Verifying requested withdrawal messages known to node...")
|
||||
poolResponse, err := client.GetBLStoExecutionChanges(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
requestMap := make(map[string]string)
|
||||
for _, w := range request {
|
||||
requestMap[w.Message.ValidatorIndex] = w.Message.ToExecutionAddress
|
||||
}
|
||||
totalMessages := len(requestMap)
|
||||
for _, resp := range poolResponse.Data {
|
||||
value, found := requestMap[resp.Message.ValidatorIndex]
|
||||
if found && value == resp.Message.ToExecutionAddress {
|
||||
delete(requestMap, resp.Message.ValidatorIndex)
|
||||
}
|
||||
}
|
||||
if len(requestMap) != 0 {
|
||||
for key, address := range requestMap {
|
||||
log.WithFields(log.Fields{
|
||||
"validator_index": key,
|
||||
"execution_address:": address,
|
||||
}).Warn("Set withdrawal address message not found in the node's operations pool.")
|
||||
}
|
||||
log.Warn("Please check before resubmitting. Set withdrawal address messages that were not found in the pool may have been already included into a block.")
|
||||
} else {
|
||||
log.Infof("All (total:%d) signed withdrawal messages were found in the pool.", totalMessages)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func findWithdrawalFiles(path string) ([]string, error) {
|
||||
var foundpaths []string
|
||||
maxdepth := 3
|
||||
cleanpath := filepath.Clean(path)
|
||||
if err := filepath.WalkDir(cleanpath, func(s string, d fs.DirEntry, e error) error {
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
if d.IsDir() && strings.Count(cleanpath, string(os.PathSeparator)) > maxdepth {
|
||||
return fs.SkipDir
|
||||
}
|
||||
|
||||
if filepath.Ext(d.Name()) == ".json" {
|
||||
foundpaths = append(foundpaths, s)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "unable to find compatible files")
|
||||
}
|
||||
if len(foundpaths) == 0 {
|
||||
return nil, errors.New("no compatible files were found")
|
||||
}
|
||||
log.Infof("found JSON files for setting withdrawals: %v", foundpaths)
|
||||
return foundpaths, nil
|
||||
}
|
||||
|
||||
func verifyWithdrawalsInPool(c *cli.Context) error {
|
||||
ctx, span := trace.StartSpan(c.Context, "withdrawal.verifyWithdrawalsInPool")
|
||||
defer span.End()
|
||||
beaconNodeHost := c.String(BeaconHostFlag.Name)
|
||||
if !c.IsSet(PathFlag.Name) {
|
||||
return fmt.Errorf("no --%s flag value was provided", PathFlag.Name)
|
||||
}
|
||||
client, err := beacon.NewClient(beaconNodeHost)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
request, err := getWithdrawalMessagesFromPathFlag(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return checkIfWithdrawsAreInPool(ctx, client, request)
|
||||
}
|
||||
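To make the JSON wiring above concrete, here is a small self-contained sketch (not part of this diff) that decodes a change-operations.json style file, such as the testdata shown earlier, into a locally defined struct mirroring the fields that `getWithdrawalMessagesFromPathFlag` reads. The struct and the file path are illustrative stand-ins, not the `apimiddleware` type used by the real code.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// signedChange mirrors the signed BLS-to-execution-change JSON shown in the
// testdata; the real code uses apimiddleware.SignedBLSToExecutionChangeJson.
type signedChange struct {
	Message struct {
		ValidatorIndex     string `json:"validator_index"`
		FromBLSPubkey      string `json:"from_bls_pubkey"`
		ToExecutionAddress string `json:"to_execution_address"`
	} `json:"message"`
	Signature string `json:"signature"`
}

func main() {
	b, err := os.ReadFile("change-operations.json") // illustrative path
	if err != nil {
		panic(err)
	}
	var changes []signedChange
	if err := json.Unmarshal(b, &changes); err != nil {
		panic(err)
	}
	for _, c := range changes {
		fmt.Printf("validator %s -> %s\n", c.Message.ValidatorIndex, c.Message.ToExecutionAddress)
	}
}
```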
cmd/prysmctl/validator/withdraw_test.go (new file, 305 lines)
@@ -0,0 +1,305 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
logtest "github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func TestCallWithdrawalEndpoint(t *testing.T) {
|
||||
file := "./testdata/change-operations.json"
|
||||
baseurl := "127.0.0.1:3500"
|
||||
l, err := net.Listen("tcp", baseurl)
|
||||
require.NoError(t, err)
|
||||
srv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if r.Method == http.MethodGet {
|
||||
b, err := os.ReadFile(filepath.Clean(file))
|
||||
require.NoError(t, err)
|
||||
var to []*apimiddleware.SignedBLSToExecutionChangeJson
|
||||
err = json.Unmarshal(b, &to)
|
||||
require.NoError(t, err)
|
||||
err = json.NewEncoder(w).Encode(&apimiddleware.BLSToExecutionChangesPoolResponseJson{
|
||||
Data: to,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}))
|
||||
err = srv.Listener.Close()
|
||||
require.NoError(t, err)
|
||||
srv.Listener = l
|
||||
srv.Start()
|
||||
defer srv.Close()
|
||||
hook := logtest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String("beacon-node-host", baseurl, "")
|
||||
set.String("path", file, "")
|
||||
set.Bool("confirm", true, "")
|
||||
set.Bool("accept-terms-of-use", true, "")
|
||||
assert.NoError(t, set.Set("beacon-node-host", baseurl))
|
||||
assert.NoError(t, set.Set("path", file))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
err = setWithdrawalAddresses(cliCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.LogsContain(t, hook, "Successfully published")
|
||||
}
|
||||
|
||||
func TestCallWithdrawalEndpoint_Mutiple(t *testing.T) {
|
||||
file := "./testdata/change-operations-multiple.json"
|
||||
baseurl := "127.0.0.1:3500"
|
||||
l, err := net.Listen("tcp", baseurl)
|
||||
require.NoError(t, err)
|
||||
srv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if r.Method == http.MethodGet {
|
||||
b, err := os.ReadFile(filepath.Clean(file))
|
||||
require.NoError(t, err)
|
||||
var to []*apimiddleware.SignedBLSToExecutionChangeJson
|
||||
err = json.Unmarshal(b, &to)
|
||||
require.NoError(t, err)
|
||||
err = json.NewEncoder(w).Encode(&apimiddleware.BLSToExecutionChangesPoolResponseJson{
|
||||
Data: to,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}))
|
||||
err = srv.Listener.Close()
|
||||
require.NoError(t, err)
|
||||
srv.Listener = l
|
||||
srv.Start()
|
||||
defer srv.Close()
|
||||
hook := logtest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String("beacon-node-host", baseurl, "")
|
||||
set.String("path", file, "")
|
||||
set.Bool("confirm", true, "")
|
||||
set.Bool("accept-terms-of-use", true, "")
|
||||
assert.NoError(t, set.Set("beacon-node-host", baseurl))
|
||||
assert.NoError(t, set.Set("path", file))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
err = setWithdrawalAddresses(cliCtx)
|
||||
require.NoError(t, err)
|
||||
assert.LogsContain(t, hook, "Successfully published")
|
||||
assert.LogsContain(t, hook, "to update 2 withdrawal")
|
||||
assert.LogsContain(t, hook, "All (total:2) signed withdrawal messages were found in the pool.")
|
||||
assert.LogsDoNotContain(t, hook, "Set withdrawal address message not found in the node's operations pool.")
|
||||
}
|
||||
|
||||
func TestCallWithdrawalEndpoint_Mutiple_stakingcli(t *testing.T) {
|
||||
stakingcliFile := "./testdata/staking-cli-change-operations-multiple.json"
|
||||
file := "./testdata/change-operations-multiple.json"
|
||||
baseurl := "127.0.0.1:3500"
|
||||
l, err := net.Listen("tcp", baseurl)
|
||||
require.NoError(t, err)
|
||||
srv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if r.Method == http.MethodGet {
|
||||
b, err := os.ReadFile(filepath.Clean(file))
|
||||
require.NoError(t, err)
|
||||
var to []*apimiddleware.SignedBLSToExecutionChangeJson
|
||||
err = json.Unmarshal(b, &to)
|
||||
require.NoError(t, err)
|
||||
err = json.NewEncoder(w).Encode(&apimiddleware.BLSToExecutionChangesPoolResponseJson{
|
||||
Data: to,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}))
|
||||
err = srv.Listener.Close()
|
||||
require.NoError(t, err)
|
||||
srv.Listener = l
|
||||
srv.Start()
|
||||
defer srv.Close()
|
||||
hook := logtest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String("beacon-node-host", baseurl, "")
|
||||
set.String("path", stakingcliFile, "")
|
||||
set.Bool("confirm", true, "")
|
||||
set.Bool("accept-terms-of-use", true, "")
|
||||
assert.NoError(t, set.Set("beacon-node-host", baseurl))
|
||||
assert.NoError(t, set.Set("path", stakingcliFile))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
err = setWithdrawalAddresses(cliCtx)
|
||||
require.NoError(t, err)
|
||||
assert.LogsContain(t, hook, "Successfully published")
|
||||
assert.LogsContain(t, hook, "to update 2 withdrawal")
|
||||
assert.LogsContain(t, hook, "All (total:2) signed withdrawal messages were found in the pool.")
|
||||
assert.LogsDoNotContain(t, hook, "Set withdrawal address message not found in the node's operations pool.")
|
||||
}
|
||||
|
||||
func TestCallWithdrawalEndpoint_Mutiple_notfound(t *testing.T) {
|
||||
respFile := "./testdata/change-operations-multiple_notfound.json"
|
||||
file := "./testdata/change-operations-multiple.json"
|
||||
baseurl := "127.0.0.1:3500"
|
||||
l, err := net.Listen("tcp", baseurl)
|
||||
require.NoError(t, err)
|
||||
srv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if r.Method == http.MethodGet {
|
||||
b, err := os.ReadFile(filepath.Clean(respFile))
|
||||
require.NoError(t, err)
|
||||
var to []*apimiddleware.SignedBLSToExecutionChangeJson
|
||||
err = json.Unmarshal(b, &to)
|
||||
require.NoError(t, err)
|
||||
err = json.NewEncoder(w).Encode(&apimiddleware.BLSToExecutionChangesPoolResponseJson{
|
||||
Data: to,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}))
|
||||
err = srv.Listener.Close()
|
||||
require.NoError(t, err)
|
||||
srv.Listener = l
|
||||
srv.Start()
|
||||
defer srv.Close()
|
||||
hook := logtest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String("beacon-node-host", baseurl, "")
|
||||
set.String("path", file, "")
|
||||
set.Bool("confirm", true, "")
|
||||
set.Bool("accept-terms-of-use", true, "")
|
||||
assert.NoError(t, set.Set("beacon-node-host", baseurl))
|
||||
assert.NoError(t, set.Set("path", file))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
err = setWithdrawalAddresses(cliCtx)
|
||||
require.NoError(t, err)
|
||||
assert.LogsContain(t, hook, "Successfully published")
|
||||
assert.LogsContain(t, hook, "to update 2 withdrawal")
|
||||
assert.LogsContain(t, hook, "Set withdrawal address message not found in the node's operations pool.")
|
||||
assert.LogsContain(t, hook, "Please check before resubmitting. Set withdrawal address messages that were not found in the pool may have been already included into a block.")
|
||||
assert.LogsDoNotContain(t, hook, "Set withdrawal address message found in the node's operations pool.")
|
||||
}
|
||||
|
||||
func TestCallWithdrawalEndpoint_Empty(t *testing.T) {
|
||||
baseurl := "127.0.0.1:3500"
|
||||
content := []byte("[]")
|
||||
tmpfile, err := os.CreateTemp("./testdata", "*.json")
|
||||
require.NoError(t, err)
|
||||
_, err = tmpfile.Write(content)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.Remove(tmpfile.Name())
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String("beacon-node-host", baseurl, "")
|
||||
set.String("path", tmpfile.Name(), "")
|
||||
set.Bool("confirm", true, "")
|
||||
set.Bool("accept-terms-of-use", true, "")
|
||||
assert.NoError(t, set.Set("beacon-node-host", baseurl))
|
||||
assert.NoError(t, set.Set("path", tmpfile.Name()))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
err = setWithdrawalAddresses(cliCtx)
|
||||
assert.ErrorContains(t, "the list of signed requests is empty", err)
|
||||
}
|
||||
|
||||
func TestCallWithdrawalEndpoint_Errors(t *testing.T) {
|
||||
file := "./testdata/change-operations.json"
|
||||
baseurl := "127.0.0.1:3500"
|
||||
l, err := net.Listen("tcp", baseurl)
|
||||
require.NoError(t, err)
|
||||
srv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(400)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
err = json.NewEncoder(w).Encode(&apimiddleware.IndexedVerificationFailureErrorJson{
|
||||
Failures: []*apimiddleware.SingleIndexedVerificationFailureJson{
|
||||
{Index: 0, Message: "Could not validate SignedBLSToExecutionChange"},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
err = srv.Listener.Close()
|
||||
require.NoError(t, err)
|
||||
srv.Listener = l
|
||||
srv.Start()
|
||||
defer srv.Close()
|
||||
hook := logtest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String("beacon-node-host", baseurl, "")
|
||||
set.String("path", file, "")
|
||||
set.Bool("confirm", true, "")
|
||||
set.Bool("accept-terms-of-use", true, "")
|
||||
assert.NoError(t, set.Set("beacon-node-host", baseurl))
|
||||
assert.NoError(t, set.Set("path", file))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
err = setWithdrawalAddresses(cliCtx)
|
||||
assert.ErrorContains(t, "POST error", err)
|
||||
|
||||
assert.LogsContain(t, hook, "Could not validate SignedBLSToExecutionChange")
|
||||
}
|
||||
|
||||
func TestVerifyWithdrawal_Mutiple(t *testing.T) {
|
||||
file := "./testdata/change-operations-multiple.json"
|
||||
baseurl := "127.0.0.1:3500"
|
||||
l, err := net.Listen("tcp", baseurl)
|
||||
require.NoError(t, err)
|
||||
srv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if r.Method == http.MethodGet {
|
||||
b, err := os.ReadFile(filepath.Clean(file))
|
||||
require.NoError(t, err)
|
||||
var to []*apimiddleware.SignedBLSToExecutionChangeJson
|
||||
err = json.Unmarshal(b, &to)
|
||||
require.NoError(t, err)
|
||||
err = json.NewEncoder(w).Encode(&apimiddleware.BLSToExecutionChangesPoolResponseJson{
|
||||
Data: to,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}))
|
||||
err = srv.Listener.Close()
|
||||
require.NoError(t, err)
|
||||
srv.Listener = l
|
||||
srv.Start()
|
||||
defer srv.Close()
|
||||
hook := logtest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String("beacon-node-host", baseurl, "")
|
||||
set.String("path", file, "")
|
||||
set.Bool("confirm", true, "")
|
||||
set.Bool("accept-terms-of-use", true, "")
|
||||
set.Bool("verify-only", true, "")
|
||||
assert.NoError(t, set.Set("beacon-node-host", baseurl))
|
||||
assert.NoError(t, set.Set("path", file))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
err = verifyWithdrawalsInPool(cliCtx)
|
||||
require.NoError(t, err)
|
||||
assert.LogsContain(t, hook, "All (total:2) signed withdrawal messages were found in the pool.")
|
||||
assert.LogsDoNotContain(t, hook, "set withdrawal address message not found in the node's operations pool.")
|
||||
}
|
||||
@@ -186,7 +186,7 @@ var Commands = &cli.Command{
|
||||
return features.ConfigureValidator(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
log.Info("This command will be deprecated in the future in favor of `prysmctl sign validator-exit`")
|
||||
log.Info("This command will be deprecated in the future in favor of `prysmctl validator exit`")
|
||||
if err := AccountsExit(cliCtx, os.Stdin); err != nil {
|
||||
log.WithError(err).Fatal("Could not perform voluntary exit")
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ your flag since you're going to invert the flag in a later step. i.e you will us
|
||||
later. For example, `--enable-my-feature`. Additionally, [create a feature flag tracking issue](https://github.com/prysmaticlabs/prysm/issues/new?template=feature_flag.md)
|
||||
for your feature using the appropriate issue template.
|
||||
2. Use the feature throughout the application to enable your new functionality and be sure to write
|
||||
tests carefully and thoughtfully to ensure you have tested all of your new funcitonality without losing
|
||||
tests carefully and thoughtfully to ensure you have tested all of your new functionality without losing
|
||||
coverage on the existing functionality. This is considered an opt-in feature flag. Example usage:
|
||||
```go
|
||||
func someExistingMethod(ctx context.Context) error {
|
||||
@@ -58,4 +58,4 @@ the config value in shared/featureconfig/config.go.
|
||||
deprecate the opt-out feature flag, delete the config field from shared/featureconfig/config.go,
|
||||
delete any deprecated / obsolete code paths.
|
||||
|
||||
Deprecated flags are deleted upon each major semver point release. Ex: v1, v2, v3.
|
||||
Deprecated flags are deleted upon each major semver point release. Ex: v1, v2, v3.
|
||||
|
||||
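The feature-flag guidance above already points at an in-repo example that the diff truncates; as a hedged, hypothetical sketch of the opt-in pattern it describes (the struct, field, and function names here are invented for illustration and are not the actual featureconfig API):

```go
package main

import (
	"context"
	"fmt"
)

// flags is a stand-in for the feature config struct the document describes
// (shared/featureconfig/config.go); the field name is illustrative only.
type flags struct {
	EnableMyFeature bool
}

var featureCfg = &flags{EnableMyFeature: true}

// someExistingMethod gates the new behavior behind the opt-in flag while
// leaving the existing code path as the default.
func someExistingMethod(_ context.Context) error {
	if featureCfg.EnableMyFeature {
		fmt.Println("running the new, opt-in code path")
		return nil
	}
	fmt.Println("running the existing code path")
	return nil
}

func main() {
	_ = someExistingMethod(context.Background())
}
```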
@@ -174,6 +174,7 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
|
||||
disableBroadcastSlashingFlag,
|
||||
enableSlasherFlag,
|
||||
enableHistoricalSpaceRepresentation,
|
||||
disableStakinContractCheck,
|
||||
disablePullTips,
|
||||
disableVecHTR,
|
||||
disableForkChoiceDoublyLinkedTree,
|
||||
|
||||
@@ -33,6 +33,7 @@ func SepoliaConfig() *BeaconChainConfig {
|
||||
cfg.AltairForkVersion = []byte{0x90, 0x00, 0x00, 0x70}
|
||||
cfg.BellatrixForkEpoch = 100
|
||||
cfg.BellatrixForkVersion = []byte{0x90, 0x00, 0x00, 0x71}
|
||||
cfg.CapellaForkVersion = []byte{0x90, 0x00, 0x00, 0x72}
|
||||
cfg.TerminalTotalDifficulty = "17000000000000000"
|
||||
cfg.DepositContractAddress = "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D"
|
||||
cfg.InitializeForkSchedule()
|
||||
|
||||
@@ -16,25 +16,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
)
|
||||
|
||||
func Test_SignedBeaconBlock_SetBlock(t *testing.T) {
|
||||
b := ð.BeaconBlockCapella{Slot: 1, Body: ð.BeaconBlockBodyCapella{ExecutionPayload: &pb.ExecutionPayloadCapella{}}}
|
||||
wb, err := NewBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
wsb, err := NewSignedBeaconBlock(ð.SignedBeaconBlockCapella{
|
||||
Block: ð.BeaconBlockCapella{StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
|
||||
ParentRoot: bytesutil.PadTo([]byte("parent"), 32),
|
||||
Body: ð.BeaconBlockBodyCapella{
|
||||
RandaoReveal: make([]byte, fieldparams.BLSSignatureLength),
|
||||
Graffiti: make([]byte, fieldparams.RootLength),
|
||||
ExecutionPayload: &pb.ExecutionPayloadCapella{},
|
||||
}},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, wsb.SetBlock(wb))
|
||||
require.DeepEqual(t, wsb.Block(), wb)
|
||||
}
|
||||
|
||||
func Test_BeaconBlockIsNil(t *testing.T) {
|
||||
t.Run("not nil", func(t *testing.T) {
|
||||
assert.NoError(t, BeaconBlockIsNil(&SignedBeaconBlock{block: &BeaconBlock{body: &BeaconBlockBody{}}}))
|
||||
|
||||
@@ -13,53 +13,6 @@ func (b *SignedBeaconBlock) SetSignature(sig []byte) {
|
||||
copy(b.signature[:], sig)
|
||||
}
|
||||
|
||||
// SetBlock sets the underlying beacon block object.
|
||||
// This function is not thread safe, it is only used during block creation.
|
||||
func (b *SignedBeaconBlock) SetBlock(blk interfaces.BeaconBlock) error {
|
||||
copied, err := blk.Copy()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.block.slot = copied.Slot()
|
||||
b.block.parentRoot = copied.ParentRoot()
|
||||
b.block.stateRoot = copied.StateRoot()
|
||||
b.block.proposerIndex = copied.ProposerIndex()
|
||||
b.block.body.randaoReveal = copied.Body().RandaoReveal()
|
||||
b.block.body.eth1Data = copied.Body().Eth1Data()
|
||||
b.block.body.graffiti = copied.Body().Graffiti()
|
||||
b.block.body.proposerSlashings = copied.Body().ProposerSlashings()
|
||||
b.block.body.attesterSlashings = copied.Body().AttesterSlashings()
|
||||
b.block.body.attestations = copied.Body().Attestations()
|
||||
b.block.body.deposits = copied.Body().Deposits()
|
||||
b.block.body.voluntaryExits = copied.Body().VoluntaryExits()
|
||||
if b.version >= version.Altair {
|
||||
syncAggregate, err := copied.Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.block.body.syncAggregate = syncAggregate
|
||||
}
|
||||
if b.version >= version.Bellatrix {
|
||||
executionData, err := copied.Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b.block.body.isBlinded {
|
||||
b.block.body.executionPayloadHeader = executionData
|
||||
} else {
|
||||
b.block.body.executionPayload = executionData
|
||||
}
|
||||
}
|
||||
if b.version >= version.Capella {
|
||||
changes, err := copied.Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.block.body.blsToExecutionChanges = changes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetSlot sets the respective slot of the block.
|
||||
// This function is not thread safe, it is only used during block creation.
|
||||
func (b *BeaconBlock) SetSlot(slot types.Slot) {
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
// a signed beacon block.
|
||||
type SignedBeaconBlock interface {
|
||||
Block() BeaconBlock
|
||||
SetBlock(BeaconBlock) error
|
||||
Signature() [field_params.BLSSignatureLength]byte
|
||||
SetSignature(sig []byte)
|
||||
IsNil() bool
|
||||
|
||||
@@ -22,10 +22,6 @@ func (m SignedBeaconBlock) Block() interfaces.BeaconBlock {
|
||||
return m.BeaconBlock
|
||||
}
|
||||
|
||||
func (SignedBeaconBlock) SetBlock(interfaces.BeaconBlock) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (SignedBeaconBlock) Signature() [field_params.BLSSignatureLength]byte {
|
||||
panic("implement me")
|
||||
}
|
||||
@@ -113,22 +109,6 @@ func (BeaconBlock) AsSignRequestObject() (validatorpb.SignRequestObject, error)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlock) Copy() (interfaces.BeaconBlock, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlock) SetSlot(types.Slot) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlock) SetProposerIndex(types.ValidatorIndex) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlock) SetParentRoot([]byte) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (m BeaconBlock) HashTreeRoot() ([field_params.RootLength]byte, error) {
|
||||
return m.Htr, m.HtrErr
|
||||
}
|
||||
@@ -161,10 +141,6 @@ func (BeaconBlock) IsBlinded() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (BeaconBlock) SetBlinded(bool) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlock) Proto() (proto.Message, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
@@ -193,13 +169,33 @@ func (BeaconBlock) Version() int {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
type BeaconBlockBody struct{}
|
||||
|
||||
func (BeaconBlockBody) RandaoReveal() [field_params.BLSSignatureLength]byte {
|
||||
func (BeaconBlock) ToBlinded() (interfaces.BeaconBlock, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetRandaoReveal([]byte) {
|
||||
func (BeaconBlock) SetSlot(_ types.Slot) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlock) SetProposerIndex(_ types.ValidatorIndex) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlock) SetParentRoot(_ []byte) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlock) SetBlinded(_ bool) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlock) Copy() (interfaces.BeaconBlock, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
type BeaconBlockBody struct{}
|
||||
|
||||
func (BeaconBlockBody) RandaoReveal() [field_params.BLSSignatureLength]byte {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
@@ -207,66 +203,34 @@ func (BeaconBlockBody) Eth1Data() *eth.Eth1Data {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetEth1Data(*eth.Eth1Data) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) Graffiti() [field_params.RootLength]byte {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetGraffiti([]byte) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) ProposerSlashings() []*eth.ProposerSlashing {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetProposerSlashings([]*eth.ProposerSlashing) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) AttesterSlashings() []*eth.AttesterSlashing {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetAttesterSlashings([]*eth.AttesterSlashing) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) Attestations() []*eth.Attestation {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetAttestations([]*eth.Attestation) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) Deposits() []*eth.Deposit {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetDeposits([]*eth.Deposit) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) VoluntaryExits() []*eth.SignedVoluntaryExit {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetVoluntaryExits([]*eth.SignedVoluntaryExit) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SyncAggregate() (*eth.SyncAggregate, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetSyncAggregate(*eth.SyncAggregate) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) IsNil() bool {
|
||||
return false
|
||||
}
|
||||
@@ -283,22 +247,58 @@ func (BeaconBlockBody) Execution() (interfaces.ExecutionData, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetExecution(interfaces.ExecutionData) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) BLSToExecutionChanges() ([]*eth.SignedBLSToExecutionChange, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (BeaconBlockBody) SetBLSToExecutionChanges([]*eth.SignedBLSToExecutionChange) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlock) SetStateRoot(root []byte) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetRandaoReveal([]byte) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetEth1Data(*eth.Eth1Data) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetGraffiti([]byte) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetProposerSlashings([]*eth.ProposerSlashing) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetAttesterSlashings([]*eth.AttesterSlashing) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetAttestations([]*eth.Attestation) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetDeposits([]*eth.Deposit) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetVoluntaryExits([]*eth.SignedVoluntaryExit) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetSyncAggregate(*eth.SyncAggregate) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetExecution(interfaces.ExecutionData) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (b *BeaconBlockBody) SetBLSToExecutionChanges([]*eth.SignedBLSToExecutionChange) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
var _ interfaces.SignedBeaconBlock = &SignedBeaconBlock{}
|
||||
var _ interfaces.BeaconBlock = &BeaconBlock{}
|
||||
var _ interfaces.BeaconBlockBody = &BeaconBlockBody{}
|
||||
|
||||
@@ -213,3 +213,38 @@ func BenchmarkToBytes32(b *testing.B) {
|
||||
bytesutil.ToBytes32(x)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromBytes48Array(t *testing.T) {
|
||||
tests := []struct {
|
||||
a [][]byte
|
||||
b [][48]byte
|
||||
}{
|
||||
{[][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
[][48]byte{{0}}},
|
||||
{[][]byte{{253, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
[][48]byte{{253}}},
|
||||
{[][]byte{{254, 255, 255, 255, 255, 255, 255, 127, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
[][48]byte{{254, 255, 255, 255, 255, 255, 255, 127}}},
|
||||
{[][]byte{{255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255}},
|
||||
[][48]byte{{255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255}},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
a := bytesutil.FromBytes48Array(tt.b)
|
||||
assert.DeepEqual(t, tt.a, a)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,6 +143,30 @@ func TestBytes8(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytes32(t *testing.T) {
|
||||
tests := []struct {
|
||||
a uint64
|
||||
b []byte
|
||||
}{
|
||||
{0,
|
||||
[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{16777216,
|
||||
[]byte{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{4294967296,
|
||||
[]byte{0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{4294967297,
|
||||
[]byte{1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{9223372036854775806,
|
||||
[]byte{254, 255, 255, 255, 255, 255, 255, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{9223372036854775807,
|
||||
[]byte{255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.Bytes32(tt.a)
|
||||
assert.DeepEqual(t, tt.b, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromBool(t *testing.T) {
|
||||
tests := []byte{
|
||||
0,
|
||||
@@ -253,3 +277,26 @@ func TestBigIntToLittleEndianBytes(t *testing.T) {
|
||||
converted := bytesutil.BigIntToLittleEndianBytes(bigInt)
|
||||
assert.DeepEqual(t, expected, converted)
|
||||
}
|
||||
|
||||
func TestUint64ToBytesLittleEndian(t *testing.T) {
|
||||
tests := []struct {
|
||||
value uint64
|
||||
want [8]byte
|
||||
}{
|
||||
{
|
||||
value: 0x01000000,
|
||||
want: [8]byte{0, 0, 0, 1, 0, 0, 0, 0},
|
||||
},
|
||||
{
|
||||
value: 0x00000001,
|
||||
want: [8]byte{1, 0, 0, 0, 0, 0, 0, 0},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(fmt.Sprintf("0x%08x", tt.value), func(t *testing.T) {
|
||||
if got := bytesutil.Uint64ToBytesLittleEndian(tt.value); !bytes.Equal(got, tt.want[:]) {
|
||||
t.Errorf("Uint64ToBytesLittleEndian() = got %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
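
The new Bytes32 and Uint64ToBytesLittleEndian cases above all exercise little-endian encodings of uint64 values. For orientation, the same conversions can be reproduced with the standard library alone; a minimal sketch (bytesutil itself may handle widths and edge cases differently):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		// 8-byte little-endian encoding, as in Uint64ToBytesLittleEndian.
		var b8 [8]byte
		binary.LittleEndian.PutUint64(b8[:], 0x01000000)
		fmt.Println(b8) // [0 0 0 1 0 0 0 0]

		// 32-byte variant, as in Bytes32: the value occupies the first 8 bytes.
		var b32 [32]byte
		binary.LittleEndian.PutUint64(b32[:8], 4294967297)
		fmt.Println(b32[:5]) // [1 0 0 0 1]
	}
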
@@ -73,6 +73,7 @@ gofmt -s -w "$mock_path/."
beacon_api_mock_path="validator/client/beacon-api/mock"
beacon_api_mocks=(
    "$beacon_api_mock_path/genesis_mock.go genesis.go"
    "$beacon_api_mock_path/duties_mock.go duties.go"
    "$beacon_api_mock_path/json_rest_handler_mock.go json_rest_handler.go"
    "$beacon_api_mock_path/state_validators_mock.go state_validators.go"
)

@@ -1,24 +0,0 @@
FROM gcr.io/prysmaticlabs/build-agent AS builder

WORKDIR /workspace

COPY . /workspace/.

# Build binaries for minimal configuration.
RUN bazel build --define ssz=minimal --jobs=auto --remote_cache= \
  //beacon-chain \
  //validator \
  //tools/interop/convert-keys


FROM gcr.io/whiteblock/base:ubuntu1804

COPY --from=builder /workspace/bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain .
COPY --from=builder /workspace/bazel-bin/validator/linux_amd64_pure_stripped/validator .
COPY --from=builder /workspace/bazel-bin/tools/interop/convert-keys/linux_amd64_stripped/convert-keys .

RUN mkdir /launch

COPY hack/interop_start.sh /launch/start.sh

ENTRYPOINT ["start.sh"]

@@ -214,6 +214,29 @@ type executionPayloadJSON struct {
	Transactions  []hexutil.Bytes `json:"transactions"`
}

type GetPayloadV2ResponseJson struct {
	ExecutionPayload *ExecutionPayloadCapellaJSON `json:"executionPayload"`
	BlockValue       string                       `json:"blockValue"`
}

type ExecutionPayloadCapellaJSON struct {
	ParentHash    *common.Hash    `json:"parentHash"`
	FeeRecipient  *common.Address `json:"feeRecipient"`
	StateRoot     *common.Hash    `json:"stateRoot"`
	ReceiptsRoot  *common.Hash    `json:"receiptsRoot"`
	LogsBloom     *hexutil.Bytes  `json:"logsBloom"`
	PrevRandao    *common.Hash    `json:"prevRandao"`
	BlockNumber   *hexutil.Uint64 `json:"blockNumber"`
	GasLimit      *hexutil.Uint64 `json:"gasLimit"`
	GasUsed       *hexutil.Uint64 `json:"gasUsed"`
	Timestamp     *hexutil.Uint64 `json:"timestamp"`
	ExtraData     hexutil.Bytes   `json:"extraData"`
	BaseFeePerGas string          `json:"baseFeePerGas"`
	BlockHash     *common.Hash    `json:"blockHash"`
	Transactions  []hexutil.Bytes `json:"transactions"`
	Withdrawals   []*Withdrawal   `json:"withdrawals"`
}

// MarshalJSON --
|
||||
func (e *ExecutionPayload) MarshalJSON() ([]byte, error) {
|
||||
transactions := make([]hexutil.Bytes, len(e.Transactions))
|
||||
@@ -251,6 +274,47 @@ func (e *ExecutionPayload) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON --
|
||||
func (e *ExecutionPayloadCapella) MarshalJSON() ([]byte, error) {
|
||||
transactions := make([]hexutil.Bytes, len(e.Transactions))
|
||||
for i, tx := range e.Transactions {
|
||||
transactions[i] = tx
|
||||
}
|
||||
baseFee := new(big.Int).SetBytes(bytesutil.ReverseByteOrder(e.BaseFeePerGas))
|
||||
baseFeeHex := hexutil.EncodeBig(baseFee)
|
||||
pHash := common.BytesToHash(e.ParentHash)
|
||||
sRoot := common.BytesToHash(e.StateRoot)
|
||||
recRoot := common.BytesToHash(e.ReceiptsRoot)
|
||||
prevRan := common.BytesToHash(e.PrevRandao)
|
||||
bHash := common.BytesToHash(e.BlockHash)
|
||||
blockNum := hexutil.Uint64(e.BlockNumber)
|
||||
gasLimit := hexutil.Uint64(e.GasLimit)
|
||||
gasUsed := hexutil.Uint64(e.GasUsed)
|
||||
timeStamp := hexutil.Uint64(e.Timestamp)
|
||||
recipient := common.BytesToAddress(e.FeeRecipient)
|
||||
logsBloom := hexutil.Bytes(e.LogsBloom)
|
||||
if e.Withdrawals == nil {
|
||||
e.Withdrawals = make([]*Withdrawal, 0)
|
||||
}
|
||||
return json.Marshal(ExecutionPayloadCapellaJSON{
|
||||
ParentHash: &pHash,
|
||||
FeeRecipient: &recipient,
|
||||
StateRoot: &sRoot,
|
||||
ReceiptsRoot: &recRoot,
|
||||
LogsBloom: &logsBloom,
|
||||
PrevRandao: &prevRan,
|
||||
BlockNumber: &blockNum,
|
||||
GasLimit: &gasLimit,
|
||||
GasUsed: &gasUsed,
|
||||
Timestamp: &timeStamp,
|
||||
ExtraData: e.ExtraData,
|
||||
BaseFeePerGas: baseFeeHex,
|
||||
BlockHash: &bHash,
|
||||
Transactions: transactions,
|
||||
Withdrawals: e.Withdrawals,
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON --
|
||||
func (e *ExecutionPayload) UnmarshalJSON(enc []byte) error {
|
||||
dec := executionPayloadJSON{}
|
||||
@@ -324,12 +388,96 @@ func (e *ExecutionPayload) UnmarshalJSON(enc []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON --
|
||||
func (e *ExecutionPayloadCapella) UnmarshalJSON(enc []byte) error {
|
||||
dec := GetPayloadV2ResponseJson{}
|
||||
if err := json.Unmarshal(enc, &dec); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if dec.ExecutionPayload.ParentHash == nil {
|
||||
return errors.New("missing required field 'parentHash' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.FeeRecipient == nil {
|
||||
return errors.New("missing required field 'feeRecipient' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.StateRoot == nil {
|
||||
return errors.New("missing required field 'stateRoot' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.ReceiptsRoot == nil {
|
||||
return errors.New("missing required field 'receiptsRoot' for ExecutableDataV1")
|
||||
}
|
||||
if dec.ExecutionPayload.LogsBloom == nil {
|
||||
return errors.New("missing required field 'logsBloom' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.PrevRandao == nil {
|
||||
return errors.New("missing required field 'prevRandao' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.ExtraData == nil {
|
||||
return errors.New("missing required field 'extraData' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.BlockHash == nil {
|
||||
return errors.New("missing required field 'blockHash' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.Transactions == nil {
|
||||
return errors.New("missing required field 'transactions' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.BlockNumber == nil {
|
||||
return errors.New("missing required field 'blockNumber' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.Timestamp == nil {
|
||||
return errors.New("missing required field 'timestamp' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.GasUsed == nil {
|
||||
return errors.New("missing required field 'gasUsed' for ExecutionPayload")
|
||||
}
|
||||
if dec.ExecutionPayload.GasLimit == nil {
|
||||
return errors.New("missing required field 'gasLimit' for ExecutionPayload")
|
||||
}
|
||||
|
||||
*e = ExecutionPayloadCapella{}
|
||||
e.ParentHash = dec.ExecutionPayload.ParentHash.Bytes()
|
||||
e.FeeRecipient = dec.ExecutionPayload.FeeRecipient.Bytes()
|
||||
e.StateRoot = dec.ExecutionPayload.StateRoot.Bytes()
|
||||
e.ReceiptsRoot = dec.ExecutionPayload.ReceiptsRoot.Bytes()
|
||||
e.LogsBloom = *dec.ExecutionPayload.LogsBloom
|
||||
e.PrevRandao = dec.ExecutionPayload.PrevRandao.Bytes()
|
||||
e.BlockNumber = uint64(*dec.ExecutionPayload.BlockNumber)
|
||||
e.GasLimit = uint64(*dec.ExecutionPayload.GasLimit)
|
||||
e.GasUsed = uint64(*dec.ExecutionPayload.GasUsed)
|
||||
e.Timestamp = uint64(*dec.ExecutionPayload.Timestamp)
|
||||
e.ExtraData = dec.ExecutionPayload.ExtraData
|
||||
baseFee, err := hexutil.DecodeBig(dec.ExecutionPayload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e.BaseFeePerGas = bytesutil.PadTo(bytesutil.ReverseByteOrder(baseFee.Bytes()), fieldparams.RootLength)
|
||||
e.BlockHash = dec.ExecutionPayload.BlockHash.Bytes()
|
||||
transactions := make([][]byte, len(dec.ExecutionPayload.Transactions))
|
||||
for i, tx := range dec.ExecutionPayload.Transactions {
|
||||
transactions[i] = tx
|
||||
}
|
||||
e.Transactions = transactions
|
||||
if dec.ExecutionPayload.Withdrawals == nil {
|
||||
dec.ExecutionPayload.Withdrawals = make([]*Withdrawal, 0)
|
||||
}
|
||||
e.Withdrawals = dec.ExecutionPayload.Withdrawals
|
||||
return nil
|
||||
}
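
A note on the baseFeePerGas handling above: the JSON side carries a big-endian hex quantity (for example "0x123"), while the protobuf side stores a 32-byte little-endian slice, which is why the unmarshal path decodes the hex, reverses the bytes and pads to 32 (via hexutil.DecodeBig, bytesutil.ReverseByteOrder and bytesutil.PadTo). A minimal standalone sketch of that round-trip using only the standard library:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// JSON form: big-endian hex quantity "0x123".
		baseFee, _ := new(big.Int).SetString("123", 16)

		// Protobuf form: 32 bytes, little-endian, zero-padded on the right.
		be := baseFee.Bytes()
		le := make([]byte, 32)
		for i, b := range be {
			le[len(be)-1-i] = b
		}
		fmt.Printf("%x\n", le) // 2301 followed by 30 zero bytes

		// Inverse: reverse back to big-endian before interpreting.
		rev := make([]byte, len(le))
		for i, b := range le {
			rev[len(le)-1-i] = b
		}
		fmt.Println(new(big.Int).SetBytes(rev).Text(16)) // 123
	}
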
type payloadAttributesJSON struct {
|
||||
Timestamp hexutil.Uint64 `json:"timestamp"`
|
||||
PrevRandao hexutil.Bytes `json:"prevRandao"`
|
||||
SuggestedFeeRecipient hexutil.Bytes `json:"suggestedFeeRecipient"`
|
||||
}
|
||||
|
||||
type payloadAttributesV2JSON struct {
|
||||
Timestamp hexutil.Uint64 `json:"timestamp"`
|
||||
PrevRandao hexutil.Bytes `json:"prevRandao"`
|
||||
SuggestedFeeRecipient hexutil.Bytes `json:"suggestedFeeRecipient"`
|
||||
Withdrawals []*Withdrawal `json:"withdrawals"`
|
||||
}
|
||||
|
||||
// MarshalJSON --
|
||||
func (p *PayloadAttributes) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(payloadAttributesJSON{
|
||||
@@ -339,6 +487,16 @@ func (p *PayloadAttributes) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON --
|
||||
func (p *PayloadAttributesV2) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(payloadAttributesV2JSON{
|
||||
Timestamp: hexutil.Uint64(p.Timestamp),
|
||||
PrevRandao: p.PrevRandao,
|
||||
SuggestedFeeRecipient: p.SuggestedFeeRecipient,
|
||||
Withdrawals: p.Withdrawals,
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON --
|
||||
func (p *PayloadAttributes) UnmarshalJSON(enc []byte) error {
|
||||
dec := payloadAttributesJSON{}
|
||||
@@ -352,6 +510,19 @@ func (p *PayloadAttributes) UnmarshalJSON(enc []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PayloadAttributesV2) UnmarshalJSON(enc []byte) error {
|
||||
dec := payloadAttributesV2JSON{}
|
||||
if err := json.Unmarshal(enc, &dec); err != nil {
|
||||
return err
|
||||
}
|
||||
*p = PayloadAttributesV2{}
|
||||
p.Timestamp = uint64(dec.Timestamp)
|
||||
p.PrevRandao = dec.PrevRandao
|
||||
p.SuggestedFeeRecipient = dec.SuggestedFeeRecipient
|
||||
p.Withdrawals = dec.Withdrawals
|
||||
return nil
|
||||
}
|
||||
|
||||
type payloadStatusJSON struct {
|
||||
LatestValidHash *common.Hash `json:"latestValidHash"`
|
||||
Status string `json:"status"`
|
||||
|
||||
@@ -2,6 +2,7 @@ package enginev1_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
@@ -139,6 +140,71 @@ func TestJsonMarshalUnmarshal(t *testing.T) {
|
||||
require.DeepEqual(t, hash, payloadPb.BlockHash)
|
||||
require.DeepEqual(t, [][]byte{[]byte("hi")}, payloadPb.Transactions)
|
||||
})
|
||||
t.Run("execution payload Capella", func(t *testing.T) {
|
||||
parentHash := common.BytesToHash([]byte("parent"))
|
||||
feeRecipient := common.BytesToAddress([]byte("feeRecipient"))
|
||||
stateRoot := common.BytesToHash([]byte("stateRoot"))
|
||||
receiptsRoot := common.BytesToHash([]byte("receiptsRoot"))
|
||||
logsBloom := hexutil.Bytes(bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength))
|
||||
random := common.BytesToHash([]byte("random"))
|
||||
extra := common.BytesToHash([]byte("extra"))
|
||||
hash := common.BytesToHash([]byte("hash"))
|
||||
bn := hexutil.Uint64(1)
|
||||
gl := hexutil.Uint64(2)
|
||||
gu := hexutil.Uint64(3)
|
||||
ts := hexutil.Uint64(4)
|
||||
|
||||
resp := &enginev1.GetPayloadV2ResponseJson{
|
||||
BlockValue: fmt.Sprint(123),
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadCapellaJSON{
|
||||
ParentHash: &parentHash,
|
||||
FeeRecipient: &feeRecipient,
|
||||
StateRoot: &stateRoot,
|
||||
ReceiptsRoot: &receiptsRoot,
|
||||
LogsBloom: &logsBloom,
|
||||
PrevRandao: &random,
|
||||
BlockNumber: &bn,
|
||||
GasLimit: &gl,
|
||||
GasUsed: &gu,
|
||||
Timestamp: &ts,
|
||||
ExtraData: hexutil.Bytes(extra[:]),
|
||||
BaseFeePerGas: "0x123",
|
||||
BlockHash: &hash,
|
||||
Transactions: []hexutil.Bytes{{}},
|
||||
Withdrawals: []*enginev1.Withdrawal{{
|
||||
Index: 1,
|
||||
ValidatorIndex: 1,
|
||||
Address: bytesutil.PadTo([]byte("address"), 20),
|
||||
Amount: 1,
|
||||
}},
|
||||
},
|
||||
}
|
||||
enc, err := json.Marshal(resp)
|
||||
require.NoError(t, err)
|
||||
payloadPb := &enginev1.ExecutionPayloadCapella{}
|
||||
require.NoError(t, json.Unmarshal(enc, payloadPb))
|
||||
require.DeepEqual(t, parentHash.Bytes(), payloadPb.ParentHash)
|
||||
require.DeepEqual(t, feeRecipient.Bytes(), payloadPb.FeeRecipient)
|
||||
require.DeepEqual(t, stateRoot.Bytes(), payloadPb.StateRoot)
|
||||
require.DeepEqual(t, receiptsRoot.Bytes(), payloadPb.ReceiptsRoot)
|
||||
require.DeepEqual(t, logsBloom, hexutil.Bytes(payloadPb.LogsBloom))
|
||||
require.DeepEqual(t, random.Bytes(), payloadPb.PrevRandao)
|
||||
require.DeepEqual(t, uint64(1), payloadPb.BlockNumber)
|
||||
require.DeepEqual(t, uint64(2), payloadPb.GasLimit)
|
||||
require.DeepEqual(t, uint64(3), payloadPb.GasUsed)
|
||||
require.DeepEqual(t, uint64(4), payloadPb.Timestamp)
|
||||
require.DeepEqual(t, extra.Bytes(), payloadPb.ExtraData)
|
||||
feePerGas := new(big.Int).SetBytes(payloadPb.BaseFeePerGas)
|
||||
require.Equal(t, "15832716547479101977395928904157292820330083199902421483727713169783165812736", feePerGas.String())
|
||||
require.DeepEqual(t, hash.Bytes(), payloadPb.BlockHash)
|
||||
require.DeepEqual(t, [][]byte{{}}, payloadPb.Transactions)
|
||||
require.Equal(t, 1, len(payloadPb.Withdrawals))
|
||||
withdrawal := payloadPb.Withdrawals[0]
|
||||
require.Equal(t, uint64(1), withdrawal.Index)
|
||||
require.Equal(t, types.ValidatorIndex(1), withdrawal.ValidatorIndex)
|
||||
require.DeepEqual(t, bytesutil.PadTo([]byte("address"), 20), withdrawal.Address)
|
||||
require.Equal(t, uint64(1), withdrawal.Amount)
|
||||
})
|
||||
t.Run("execution block", func(t *testing.T) {
|
||||
baseFeePerGas := big.NewInt(1770307273)
|
||||
want := &gethtypes.Header{
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Code generated by fastssz. DO NOT EDIT.
|
||||
// Hash: ea151ac65a951845c7bff5e78aea6951ca6a0674af711b1b14fea69df1829e84
|
||||
// Hash: fb4dd6cca9018196eaa56d51f14f2e924edf2ea0e1e19e262b24651d34d0fbfd
|
||||
package v1
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Code generated by fastssz. DO NOT EDIT.
|
||||
// Hash: b0e2b862b471992f748a849f7e9246f72d77985e5d7e83fc704efd1aa1eeb9fa
|
||||
// Hash: aee43aed536b874e25f44d6a1c015d4064b7928ff1dd3bd0634687c4082215ef
|
||||
package eth
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Code generated by fastssz. DO NOT EDIT.
|
||||
// Hash: 3c0d8421c1f6481f3d4ead7196377f9bc43d0813e6826e42f07e18d617494e1e
|
||||
// Hash: cd84f61a2f07139241137aeaa7b8bca4ff86118ce0fd0bd523c77c30518db21f
|
||||
package eth
|
||||
|
||||
import (
|
||||
|
||||
proto/prysm/v1alpha1/validator.pb.go (generated, 1672 lines): diff suppressed because it is too large

@@ -349,6 +349,9 @@ message StreamBlocksResponse {

    // Representing a bellatrix block.
    SignedBeaconBlockBellatrix bellatrix_block = 3;

    // Representing a capella block.
    SignedBeaconBlockCapella capella_block = 4;
  }
}

@@ -22,6 +22,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/testing/mock",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
testing/mock/validator_client_mock.go (generated, 9 changed lines)

@@ -9,6 +9,7 @@ import (
|
||||
reflect "reflect"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
emptypb "google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
@@ -352,18 +353,18 @@ func (mr *MockValidatorClientMockRecorder) SubmitValidatorRegistrations(arg0, ar
|
||||
}
|
||||
|
||||
// SubscribeCommitteeSubnets mocks base method.
|
||||
func (m *MockValidatorClient) SubscribeCommitteeSubnets(arg0 context.Context, arg1 *eth.CommitteeSubnetsSubscribeRequest) (*emptypb.Empty, error) {
|
||||
func (m *MockValidatorClient) SubscribeCommitteeSubnets(arg0 context.Context, arg1 *eth.CommitteeSubnetsSubscribeRequest, arg2 []types.ValidatorIndex) (*emptypb.Empty, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "SubscribeCommitteeSubnets", arg0, arg1)
|
||||
ret := m.ctrl.Call(m, "SubscribeCommitteeSubnets", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(*emptypb.Empty)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// SubscribeCommitteeSubnets indicates an expected call of SubscribeCommitteeSubnets.
|
||||
func (mr *MockValidatorClientMockRecorder) SubscribeCommitteeSubnets(arg0, arg1 interface{}) *gomock.Call {
|
||||
func (mr *MockValidatorClientMockRecorder) SubscribeCommitteeSubnets(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeCommitteeSubnets", reflect.TypeOf((*MockValidatorClient)(nil).SubscribeCommitteeSubnets), arg0, arg1)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeCommitteeSubnets", reflect.TypeOf((*MockValidatorClient)(nil).SubscribeCommitteeSubnets), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// ValidatorIndex mocks base method.
|
||||
|
||||
@@ -10,10 +10,13 @@ go_library(
|
||||
"beacon_block_json_helpers.go",
|
||||
"beacon_block_proto_helpers.go",
|
||||
"domain_data.go",
|
||||
"doppelganger.go",
|
||||
"duties.go",
|
||||
"genesis.go",
|
||||
"get_beacon_block.go",
|
||||
"index.go",
|
||||
"json_rest_handler.go",
|
||||
"log.go",
|
||||
"prepare_beacon_proposer.go",
|
||||
"propose_attestation.go",
|
||||
"propose_beacon_block.go",
|
||||
@@ -23,8 +26,8 @@ go_library(
|
||||
"status.go",
|
||||
"submit_signed_aggregate_proof.go",
|
||||
"submit_signed_contribution_and_proof.go",
|
||||
"subscribe_committee_subnets.go",
|
||||
"sync_committee.go",
|
||||
"sync_message_block_root.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/validator/client/beacon-api",
|
||||
visibility = ["//validator:__subpackages__"],
|
||||
@@ -38,9 +41,12 @@ go_library(
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"//validator/client/iface:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
],
|
||||
@@ -57,6 +63,8 @@ go_test(
|
||||
"beacon_block_json_helpers_test.go",
|
||||
"beacon_block_proto_helpers_test.go",
|
||||
"domain_data_test.go",
|
||||
"doppelganger_test.go",
|
||||
"duties_test.go",
|
||||
"genesis_test.go",
|
||||
"get_beacon_block_altair_test.go",
|
||||
"get_beacon_block_bellatrix_test.go",
|
||||
@@ -80,14 +88,15 @@ go_test(
|
||||
"status_test.go",
|
||||
"submit_signed_aggregate_proof_test.go",
|
||||
"submit_signed_contribution_and_proof_test.go",
|
||||
"subscribe_committee_subnets_test.go",
|
||||
"sync_committee_test.go",
|
||||
"sync_message_block_root_test.go",
|
||||
"wait_for_chain_start_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
@@ -95,6 +104,7 @@ go_test(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"//validator/client/beacon-api/mock:go_default_library",
|
||||
"//validator/client/beacon-api/test-helpers:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
|
||||
@@ -1,11 +1,16 @@
|
||||
package beacon_api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
neturl "net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
@@ -42,3 +47,81 @@ func buildURL(path string, queryParams ...neturl.Values) string {
|
||||
|
||||
return fmt.Sprintf("%s?%s", path, queryParams[0].Encode())
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) getFork(ctx context.Context) (*apimiddleware.StateForkResponseJson, error) {
|
||||
const endpoint = "/eth/v1/beacon/states/head/fork"
|
||||
|
||||
stateForkResponseJson := &apimiddleware.StateForkResponseJson{}
|
||||
|
||||
_, err := c.jsonRestHandler.GetRestJsonResponse(
|
||||
ctx,
|
||||
endpoint,
|
||||
stateForkResponseJson,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get json response from `%s` REST endpoint", endpoint)
|
||||
}
|
||||
|
||||
return stateForkResponseJson, nil
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) getHeaders(ctx context.Context) (*apimiddleware.BlockHeadersResponseJson, error) {
|
||||
const endpoint = "/eth/v1/beacon/headers"
|
||||
|
||||
blockHeadersResponseJson := &apimiddleware.BlockHeadersResponseJson{}
|
||||
|
||||
_, err := c.jsonRestHandler.GetRestJsonResponse(
|
||||
ctx,
|
||||
endpoint,
|
||||
blockHeadersResponseJson,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get json response from `%s` REST endpoint", endpoint)
|
||||
}
|
||||
|
||||
return blockHeadersResponseJson, nil
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) getLiveness(ctx context.Context, epoch types.Epoch, validatorIndexes []string) (*apimiddleware.LivenessResponseJson, error) {
	const endpoint = "/eth/v1/validator/liveness/"
	url := endpoint + strconv.FormatUint(uint64(epoch), 10)

	livenessResponseJson := &apimiddleware.LivenessResponseJson{}

	marshalledJsonValidatorIndexes, err := json.Marshal(validatorIndexes)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to marshal validator indexes")
	}

	if _, err := c.jsonRestHandler.PostRestJson(ctx, url, nil, bytes.NewBuffer(marshalledJsonValidatorIndexes), livenessResponseJson); err != nil {
		return nil, errors.Wrapf(err, "failed to send POST data to `%s` REST URL", url)
	}

	return livenessResponseJson, nil
}
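
The liveness endpoint used by getLiveness takes the epoch in the URL and a JSON array of validator index strings as the POST body, and returns per-index liveness flags. A minimal, standard-library-only sketch of the two payload shapes (the type names below are illustrative, not taken from the package):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type livenessItem struct {
		Index  string `json:"index"`
		IsLive bool   `json:"is_live"`
	}

	type livenessResponse struct {
		Data []livenessItem `json:"data"`
	}

	func main() {
		// Request body POSTed to /eth/v1/validator/liveness/<epoch>.
		body, err := json.Marshal([]string{"1", "2"})
		if err != nil {
			panic(err)
		}
		fmt.Println(string(body)) // ["1","2"]

		// Response shape decoded into apimiddleware.LivenessResponseJson above.
		raw := `{"data":[{"index":"1","is_live":true},{"index":"2","is_live":false}]}`
		var resp livenessResponse
		if err := json.Unmarshal([]byte(raw), &resp); err != nil {
			panic(err)
		}
		fmt.Println(resp.Data[0].IsLive, resp.Data[1].IsLive) // true false
	}
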
func (c *beaconApiValidatorClient) getSyncing(ctx context.Context) (*apimiddleware.SyncingResponseJson, error) {
|
||||
const endpoint = "/eth/v1/node/syncing"
|
||||
|
||||
syncingResponseJson := &apimiddleware.SyncingResponseJson{}
|
||||
|
||||
_, err := c.jsonRestHandler.GetRestJsonResponse(
|
||||
ctx,
|
||||
endpoint,
|
||||
syncingResponseJson,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get json response from `%s` REST endpoint", endpoint)
|
||||
}
|
||||
|
||||
return syncingResponseJson, nil
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) isSyncing(ctx context.Context) (bool, error) {
|
||||
response, err := c.getSyncing(ctx)
|
||||
if err != nil || response == nil || response.Data == nil {
|
||||
return true, errors.Wrapf(err, "failed to get syncing status")
|
||||
}
|
||||
|
||||
return response.Data.IsSyncing, err
|
||||
}
|
||||
|
||||
@@ -1,11 +1,20 @@
|
||||
package beacon_api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/eth/helpers"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/client/beacon-api/mock"
|
||||
)
|
||||
|
||||
func TestBeaconApiHelpers(t *testing.T) {
|
||||
@@ -80,3 +89,296 @@ func TestBuildURL_WithParams(t *testing.T) {
|
||||
actual := buildURL("/aaa/bbb/ccc", params)
|
||||
assert.Equal(t, wanted, actual)
|
||||
}
|
||||
|
||||
const forkEndpoint = "/eth/v1/beacon/states/head/fork"
|
||||
|
||||
func TestGetFork_Nominal(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
stateForkResponseJson := apimiddleware.StateForkResponseJson{}
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
|
||||
expected := apimiddleware.StateForkResponseJson{
|
||||
Data: &apimiddleware.ForkJson{
|
||||
PreviousVersion: "0x1",
|
||||
CurrentVersion: "0x2",
|
||||
Epoch: "3",
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
forkEndpoint,
|
||||
&stateForkResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
expected,
|
||||
).Times(1)
|
||||
|
||||
validatorClient := beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
}
|
||||
|
||||
fork, err := validatorClient.getFork(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, &expected, fork)
|
||||
}
|
||||
|
||||
func TestGetFork_Invalid(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
forkEndpoint,
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
nil,
|
||||
errors.New("custom error"),
|
||||
).Times(1)
|
||||
|
||||
validatorClient := beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
}
|
||||
|
||||
_, err := validatorClient.getFork(ctx)
|
||||
require.ErrorContains(t, "failed to get json response from `/eth/v1/beacon/states/head/fork` REST endpoint", err)
|
||||
}
|
||||
|
||||
const headersEndpoint = "/eth/v1/beacon/headers"
|
||||
|
||||
func TestGetHeaders_Nominal(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
blockHeadersResponseJson := apimiddleware.BlockHeadersResponseJson{}
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
|
||||
expected := apimiddleware.BlockHeadersResponseJson{
|
||||
Data: []*apimiddleware.BlockHeaderContainerJson{
|
||||
{
|
||||
Header: &apimiddleware.BeaconBlockHeaderContainerJson{
|
||||
Message: &apimiddleware.BeaconBlockHeaderJson{
|
||||
Slot: "42",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
headersEndpoint,
|
||||
&blockHeadersResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
expected,
|
||||
).Times(1)
|
||||
|
||||
validatorClient := beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
}
|
||||
|
||||
headers, err := validatorClient.getHeaders(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, &expected, headers)
|
||||
}
|
||||
|
||||
func TestGetHeaders_Invalid(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
headersEndpoint,
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
nil,
|
||||
errors.New("custom error"),
|
||||
).Times(1)
|
||||
|
||||
validatorClient := beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
}
|
||||
|
||||
_, err := validatorClient.getHeaders(ctx)
|
||||
require.ErrorContains(t, "failed to get json response from `/eth/v1/beacon/headers` REST endpoint", err)
|
||||
}
|
||||
|
||||
const livenessEndpoint = "/eth/v1/validator/liveness/42"
|
||||
|
||||
func TestGetLiveness_Nominal(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
livenessResponseJson := apimiddleware.LivenessResponseJson{}
|
||||
|
||||
indexes := []string{"1", "2"}
|
||||
marshalledIndexes, err := json.Marshal(indexes)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := apimiddleware.LivenessResponseJson{
|
||||
Data: []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
}{
|
||||
{
|
||||
Index: "1",
|
||||
IsLive: true,
|
||||
},
|
||||
{
|
||||
Index: "2",
|
||||
IsLive: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().PostRestJson(
|
||||
ctx,
|
||||
livenessEndpoint,
|
||||
nil,
|
||||
bytes.NewBuffer(marshalledIndexes),
|
||||
&livenessResponseJson,
|
||||
).SetArg(
|
||||
4,
|
||||
expected,
|
||||
).Return(
|
||||
nil,
|
||||
nil,
|
||||
).Times(1)
|
||||
|
||||
validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler}
|
||||
liveness, err := validatorClient.getLiveness(ctx, 42, indexes)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, &expected, liveness)
|
||||
}
|
||||
|
||||
func TestGetLiveness_Invalid(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().PostRestJson(
|
||||
ctx,
|
||||
livenessEndpoint,
|
||||
nil,
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
nil,
|
||||
errors.New("custom error"),
|
||||
).Times(1)
|
||||
|
||||
validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler}
|
||||
_, err := validatorClient.getLiveness(ctx, 42, nil)
|
||||
|
||||
require.ErrorContains(t, "failed to send POST data to `/eth/v1/validator/liveness/42` REST URL", err)
|
||||
}
|
||||
|
||||
const syncingEnpoint = "/eth/v1/node/syncing"
|
||||
|
||||
func TestGetIsSyncing_Nominal(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
isSyncing bool
|
||||
}{
|
||||
{
|
||||
name: "Syncing",
|
||||
isSyncing: true,
|
||||
},
|
||||
{
|
||||
name: "Not syncing",
|
||||
isSyncing: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
syncingResponseJson := apimiddleware.SyncingResponseJson{}
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
|
||||
expected := apimiddleware.SyncingResponseJson{
|
||||
Data: &helpers.SyncDetailsJson{
|
||||
IsSyncing: testCase.isSyncing,
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
syncingEnpoint,
|
||||
&syncingResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
expected,
|
||||
).Times(1)
|
||||
|
||||
validatorClient := beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
}
|
||||
|
||||
isSyncing, err := validatorClient.isSyncing(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, testCase.isSyncing, isSyncing)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetIsSyncing_Invalid(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
syncingResponseJson := apimiddleware.SyncingResponseJson{}
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
syncingEnpoint,
|
||||
&syncingResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
errors.New("custom error"),
|
||||
).Times(1)
|
||||
|
||||
validatorClient := beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
}
|
||||
|
||||
isSyncing, err := validatorClient.isSyncing(ctx)
|
||||
assert.Equal(t, true, isSyncing)
|
||||
assert.ErrorContains(t, "failed to get syncing status", err)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/client/iface"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
|
||||
type beaconApiValidatorClient struct {
|
||||
genesisProvider genesisProvider
|
||||
dutiesProvider dutiesProvider
|
||||
stateValidatorsProvider stateValidatorsProvider
|
||||
jsonRestHandler jsonRestHandler
|
||||
fallbackClient iface.ValidatorClient
|
||||
@@ -32,6 +34,7 @@ func NewBeaconApiValidatorClientWithFallback(host string, timeout time.Duration,
|
||||
|
||||
return &beaconApiValidatorClient{
|
||||
genesisProvider: beaconApiGenesisProvider{jsonRestHandler: jsonRestHandler},
|
||||
dutiesProvider: beaconApiDutiesProvider{jsonRestHandler: jsonRestHandler},
|
||||
stateValidatorsProvider: beaconApiStateValidatorsProvider{jsonRestHandler: jsonRestHandler},
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
fallbackClient: fallbackClient,
|
||||
@@ -39,21 +42,11 @@ func NewBeaconApiValidatorClientWithFallback(host string, timeout time.Duration,
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) GetDuties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.DutiesResponse, error) {
|
||||
if c.fallbackClient != nil {
|
||||
return c.fallbackClient.GetDuties(ctx, in)
|
||||
}
|
||||
|
||||
// TODO: Implement me
|
||||
panic("beaconApiValidatorClient.GetDuties is not implemented. To use a fallback client, create this validator with NewBeaconApiValidatorClientWithFallback instead.")
|
||||
return c.getDuties(ctx, in)
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error) {
|
||||
if c.fallbackClient != nil {
|
||||
return c.fallbackClient.CheckDoppelGanger(ctx, in)
|
||||
}
|
||||
|
||||
// TODO: Implement me
|
||||
panic("beaconApiValidatorClient.CheckDoppelGanger is not implemented. To use a fallback client, create this validator with NewBeaconApiValidatorClientWithFallback instead.")
|
||||
return c.checkDoppelGanger(ctx, in)
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) DomainData(ctx context.Context, in *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
|
||||
@@ -171,13 +164,8 @@ func (c *beaconApiValidatorClient) SubmitValidatorRegistrations(ctx context.Cont
|
||||
return new(empty.Empty), c.submitValidatorRegistrations(ctx, in.Messages)
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest) (*empty.Empty, error) {
|
||||
if c.fallbackClient != nil {
|
||||
return c.fallbackClient.SubscribeCommitteeSubnets(ctx, in)
|
||||
}
|
||||
|
||||
// TODO: Implement me
|
||||
panic("beaconApiValidatorClient.SubscribeCommitteeSubnets is not implemented. To use a fallback client, create this validator with NewBeaconApiValidatorClientWithFallback instead.")
|
||||
func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []types.ValidatorIndex) (*empty.Empty, error) {
|
||||
return new(empty.Empty), c.subscribeCommitteeSubnets(ctx, in, validatorIndices)
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) ValidatorIndex(ctx context.Context, in *ethpb.ValidatorIndexRequest) (*ethpb.ValidatorIndexResponse, error) {
|
||||
|
||||
validator/client/beacon-api/doppelganger.go (new file, 238 lines)

@@ -0,0 +1,238 @@
|
||||
package beacon_api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
)
|
||||
|
||||
type DoppelGangerInfo struct {
|
||||
validatorEpoch types.Epoch
|
||||
response *ethpb.DoppelGangerResponse_ValidatorResponse
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) checkDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error) {
// Check whether any of the requested validator keys may have a doppelganger active over the last 2 epochs:
// - Check that the beacon node is synced.
// - If we are in Phase0, we consider there is no doppelganger.
// - If all validators whose doppelganger status we want to check were live in the local
//   anti-slashing database for the last 2 epochs, we consider there is no doppelganger.
//   This is typically the case when the validator client is rebooted.
// - If some of these validators were NOT live in the local anti-slashing database for the
//   last two epochs, we check on chain whether there is any liveness recorded for them.
//   If so, we consider a doppelganger exists.

// Check inputs are correct.
|
||||
if in == nil || in.ValidatorRequests == nil || len(in.ValidatorRequests) == 0 {
|
||||
return ðpb.DoppelGangerResponse{
|
||||
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
validatorRequests := in.ValidatorRequests
|
||||
|
||||
// Prepare response.
|
||||
stringPubKeys := make([]string, len(validatorRequests))
|
||||
stringPubKeyToDoppelGangerInfo := make(map[string]DoppelGangerInfo, len(validatorRequests))
|
||||
|
||||
for i, vr := range validatorRequests {
|
||||
if vr == nil {
|
||||
return nil, errors.New("validator request is nil")
|
||||
}
|
||||
|
||||
pubKey := vr.PublicKey
|
||||
stringPubKey := hexutil.Encode(pubKey)
|
||||
stringPubKeys[i] = stringPubKey
|
||||
|
||||
stringPubKeyToDoppelGangerInfo[stringPubKey] = DoppelGangerInfo{
|
||||
validatorEpoch: vr.Epoch,
|
||||
response: ðpb.DoppelGangerResponse_ValidatorResponse{
|
||||
PublicKey: pubKey,
|
||||
DuplicateExists: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the beacon node is synced.
|
||||
isSyncing, err := c.isSyncing(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get beacon node sync status")
|
||||
}
|
||||
|
||||
if isSyncing {
|
||||
return nil, errors.New("beacon node not synced")
|
||||
}
|
||||
|
||||
// Retrieve fork version -- Return early if we are in phase0.
|
||||
forkResponse, err := c.getFork(ctx)
|
||||
if err != nil || forkResponse == nil || forkResponse.Data == nil {
|
||||
return nil, errors.Wrapf(err, "failed to get fork")
|
||||
}
|
||||
|
||||
forkVersionBytes, err := hexutil.Decode(forkResponse.Data.CurrentVersion)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to decode fork version")
|
||||
}
|
||||
|
||||
forkVersion := binary.LittleEndian.Uint32(forkVersionBytes)
|
||||
|
||||
if forkVersion == version.Phase0 {
|
||||
log.Info("Skipping doppelganger check for Phase 0")
|
||||
return buildResponse(stringPubKeys, stringPubKeyToDoppelGangerInfo), nil
|
||||
}
|
||||
|
||||
// Retrieve current epoch.
|
||||
headers, err := c.getHeaders(ctx)
|
||||
if err != nil || headers == nil || headers.Data == nil || len(headers.Data) == 0 ||
|
||||
headers.Data[0].Header == nil || headers.Data[0].Header.Message == nil {
|
||||
return nil, errors.Wrapf(err, "failed to get headers")
|
||||
}
|
||||
|
||||
headSlotUint64, err := strconv.ParseUint(headers.Data[0].Header.Message.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse head slot")
|
||||
}
|
||||
|
||||
headSlot := types.Slot(headSlotUint64)
|
||||
currentEpoch := slots.ToEpoch(headSlot)
|
||||
|
||||
// Extract the input pubkeys we did not validate during the last 2 epochs.
// If we detect on-chain liveness for these keys during the last 2 epochs, a doppelganger may exist somewhere.
|
||||
var notRecentStringPubKeys []string
|
||||
|
||||
for _, spk := range stringPubKeys {
|
||||
dph, ok := stringPubKeyToDoppelGangerInfo[spk]
|
||||
if !ok {
|
||||
return nil, errors.New("failed to retrieve doppelganger info from string public key")
|
||||
}
|
||||
|
||||
if dph.validatorEpoch+2 < currentEpoch {
|
||||
notRecentStringPubKeys = append(notRecentStringPubKeys, spk)
|
||||
}
|
||||
}
|
||||
|
||||
// If all provided keys are recent (aka `notRecentPubKeys` is empty) we return early
|
||||
// as we are unable to effectively determine if a doppelganger is active.
|
||||
if len(notRecentStringPubKeys) == 0 {
|
||||
return buildResponse(stringPubKeys, stringPubKeyToDoppelGangerInfo), nil
|
||||
}
|
||||
|
||||
// Retrieve correspondence between validator pubkey and index.
|
||||
stateValidators, err := c.stateValidatorsProvider.GetStateValidators(ctx, notRecentStringPubKeys, nil, nil)
|
||||
if err != nil || stateValidators == nil || stateValidators.Data == nil {
|
||||
return nil, errors.Wrapf(err, "failed to get state validators")
|
||||
}
|
||||
|
||||
validators := stateValidators.Data
|
||||
stringPubKeyToIndex := make(map[string]string, len(validators))
|
||||
indexes := make([]string, len(validators))
|
||||
|
||||
for i, v := range validators {
|
||||
if v == nil {
|
||||
return nil, errors.New("validator container is nil")
|
||||
}
|
||||
|
||||
index := v.Index
|
||||
|
||||
if v.Validator == nil {
|
||||
return nil, errors.New("validator is nil")
|
||||
}
|
||||
|
||||
stringPubKeyToIndex[v.Validator.PublicKey] = index
|
||||
indexes[i] = index
|
||||
}
|
||||
|
||||
// Get validators' liveness for the last epoch.
// We query liveness one epoch back. We are guaranteed to have currentEpoch > 2
// since we assume that we are not in phase0.
|
||||
previousEpoch := currentEpoch - 1
|
||||
|
||||
indexToPreviousLiveness, err := c.getIndexToLiveness(ctx, previousEpoch, indexes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get map from validator index to liveness for previous epoch %d", previousEpoch)
|
||||
}
|
||||
|
||||
// Get validators liveness for the current epoch.
|
||||
indexToCurrentLiveness, err := c.getIndexToLiveness(ctx, currentEpoch, indexes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get map from validator index to liveness for current epoch %d", currentEpoch)
|
||||
}
|
||||
|
||||
// Set `DuplicateExists` to `true` if needed.
|
||||
for _, spk := range notRecentStringPubKeys {
|
||||
index, ok := stringPubKeyToIndex[spk]
|
||||
if !ok {
|
||||
// if !ok, the validator corresponding to `stringPubKey` does not exist onchain.
|
||||
continue
|
||||
}
|
||||
|
||||
previousLiveness, ok := indexToPreviousLiveness[index]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to retrieve liveness for previous epoch `%d` for validator index `%s`", previousEpoch, index)
|
||||
}
|
||||
|
||||
if previousLiveness {
|
||||
log.WithField("pubkey", spk).WithField("epoch", previousEpoch).Warn("Doppelganger found")
|
||||
}
|
||||
|
||||
currentLiveness, ok := indexToCurrentLiveness[index]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to retrieve liveness for current epoch `%d` for validator index `%s`", currentEpoch, index)
|
||||
}
|
||||
|
||||
if currentLiveness {
|
||||
log.WithField("pubkey", spk).WithField("epoch", currentEpoch).Warn("Doppelganger found")
|
||||
}
|
||||
|
||||
globalLiveness := previousLiveness || currentLiveness
|
||||
|
||||
if globalLiveness {
|
||||
stringPubKeyToDoppelGangerInfo[spk].response.DuplicateExists = true
|
||||
}
|
||||
}
|
||||
|
||||
return buildResponse(stringPubKeys, stringPubKeyToDoppelGangerInfo), nil
|
||||
}
|
||||
|
||||
func buildResponse(
|
||||
stringPubKeys []string,
|
||||
stringPubKeyToDoppelGangerHelper map[string]DoppelGangerInfo,
|
||||
) *ethpb.DoppelGangerResponse {
|
||||
responses := make([]*ethpb.DoppelGangerResponse_ValidatorResponse, len(stringPubKeys))
|
||||
|
||||
for i, spk := range stringPubKeys {
|
||||
responses[i] = stringPubKeyToDoppelGangerHelper[spk].response
|
||||
}
|
||||
|
||||
return ðpb.DoppelGangerResponse{
|
||||
Responses: responses,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) getIndexToLiveness(ctx context.Context, epoch types.Epoch, indexes []string) (map[string]bool, error) {
|
||||
livenessResponse, err := c.getLiveness(ctx, epoch, indexes)
|
||||
if err != nil || livenessResponse.Data == nil {
|
||||
return nil, errors.Wrapf(err, fmt.Sprintf("failed to get liveness for epoch %d", epoch))
|
||||
}
|
||||
|
||||
indexToLiveness := make(map[string]bool, len(livenessResponse.Data))
|
||||
|
||||
for _, liveness := range livenessResponse.Data {
|
||||
if liveness == nil {
|
||||
return nil, errors.New("liveness is nil")
|
||||
}
|
||||
|
||||
indexToLiveness[liveness.Index] = liveness.IsLive
|
||||
}
|
||||
|
||||
return indexToLiveness, nil
|
||||
}
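
Before the new test file, a worked example of the decision rule implemented above, using the numbers the test below relies on (head slot 3201, i.e. epoch 100 at 32 slots per epoch, and keys last validated locally at epoch 80). This is an illustrative sketch only, not part of the change:

	package main

	import "fmt"

	func main() {
		const slotsPerEpoch = 32
		headSlot := uint64(3201)
		currentEpoch := headSlot / slotsPerEpoch // 100

		validatorEpoch := uint64(80)                 // last epoch recorded locally for the key
		notRecent := validatorEpoch+2 < currentEpoch // true: must be checked on chain

		// Liveness reported by the beacon node for epochs 99 and 100.
		livePrevious, liveCurrent := true, false
		duplicateExists := notRecent && (livePrevious || liveCurrent)

		fmt.Println(notRecent, duplicateExists) // true true
	}
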
validator/client/beacon-api/doppelganger_test.go (new file, 859 lines)

@@ -0,0 +1,859 @@
|
||||
package beacon_api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/eth/helpers"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/client/beacon-api/mock"
|
||||
)
|
||||
|
||||
func TestCheckDoppelGanger_Nominal(t *testing.T) {
|
||||
const stringPubKey1 = "0x80000e851c0f53c3246ff726d7ff7766661ca5e12a07c45c114d208d54f0f8233d4380b2e9aff759d69795d1df905526"
|
||||
const stringPubKey2 = "0x80002662ecb857da7a37ed468291cb248979eca5131db56c20843262f7909220c296e18f59af1726ef86ec15c08b8317"
|
||||
const stringPubKey3 = "0x80003a1c67216514e4ab257738e59ef38063edf43bc4a2ef9d38633bdde117384401684c6cf81aa04cf18890e75ab52c"
|
||||
const stringPubKey4 = "0x80007e05ba643a3e5be65d1595154023dc2cf009626f32ab1054c5225a6beb28b8be3d52a463ab45f698df884614c87d"
|
||||
const stringPubKey5 = "0x80006ab8cd402459b445b2f5f955c9bae550bc269717837a8cd68176ce42a21fd372b844d508711d6e0bb0efe65abfe5"
|
||||
const stringPubKey6 = "0x800077c436fc0c57bec2b91509519deadeed235f35f6377e7865e17ee86271120381a49c643829be12d232a4ba8360d2"
|
||||
|
||||
pubKey1, err := hexutil.Decode(stringPubKey1)
|
||||
require.NoError(t, err)
|
||||
|
||||
pubKey2, err := hexutil.Decode(stringPubKey2)
|
||||
require.NoError(t, err)
|
||||
|
||||
pubKey3, err := hexutil.Decode(stringPubKey3)
|
||||
require.NoError(t, err)
|
||||
|
||||
pubKey4, err := hexutil.Decode(stringPubKey4)
|
||||
require.NoError(t, err)
|
||||
|
||||
pubKey5, err := hexutil.Decode(stringPubKey5)
|
||||
require.NoError(t, err)
|
||||
|
||||
pubKey6, err := hexutil.Decode(stringPubKey6)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
doppelGangerInput *ethpb.DoppelGangerRequest
|
||||
doppelGangerExpectedOutput *ethpb.DoppelGangerResponse
|
||||
getSyncingOutput *apimiddleware.SyncingResponseJson
|
||||
getForkOutput *apimiddleware.StateForkResponseJson
|
||||
getHeadersOutput *apimiddleware.BlockHeadersResponseJson
|
||||
getStateValidatorsInterface *struct {
|
||||
input []string
|
||||
output *apimiddleware.StateValidatorsResponseJson
|
||||
}
|
||||
getLivelinessInterfaces []struct {
|
||||
inputUrl string
|
||||
inputStringIndexes []string
|
||||
output *apimiddleware.LivenessResponseJson
|
||||
}
|
||||
}{
|
||||
{
|
||||
name: "nil input",
|
||||
doppelGangerInput: nil,
|
||||
doppelGangerExpectedOutput: ðpb.DoppelGangerResponse{
|
||||
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil validator requests",
|
||||
doppelGangerInput: ðpb.DoppelGangerRequest{
|
||||
ValidatorRequests: nil,
|
||||
},
|
||||
doppelGangerExpectedOutput: ðpb.DoppelGangerResponse{
|
||||
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty validator requests",
|
||||
doppelGangerInput: ðpb.DoppelGangerRequest{
|
||||
ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{},
|
||||
},
|
||||
doppelGangerExpectedOutput: ðpb.DoppelGangerResponse{
|
||||
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "phase0",
|
||||
doppelGangerInput: ðpb.DoppelGangerRequest{
|
||||
ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{
|
||||
{PublicKey: pubKey1},
|
||||
{PublicKey: pubKey2},
|
||||
{PublicKey: pubKey3},
|
||||
{PublicKey: pubKey4},
|
||||
{PublicKey: pubKey5},
|
||||
{PublicKey: pubKey6},
|
||||
},
|
||||
},
|
||||
doppelGangerExpectedOutput: ðpb.DoppelGangerResponse{
|
||||
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{
|
||||
{PublicKey: pubKey1, DuplicateExists: false},
|
||||
{PublicKey: pubKey2, DuplicateExists: false},
|
||||
{PublicKey: pubKey3, DuplicateExists: false},
|
||||
{PublicKey: pubKey4, DuplicateExists: false},
|
||||
{PublicKey: pubKey5, DuplicateExists: false},
|
||||
{PublicKey: pubKey6, DuplicateExists: false},
|
||||
},
|
||||
},
|
||||
getSyncingOutput: &apimiddleware.SyncingResponseJson{
|
||||
Data: &helpers.SyncDetailsJson{
|
||||
IsSyncing: false,
|
||||
},
|
||||
},
|
||||
getForkOutput: &apimiddleware.StateForkResponseJson{
|
||||
Data: &apimiddleware.ForkJson{
|
||||
PreviousVersion: "0x00000000",
|
||||
CurrentVersion: "0x00000000",
|
||||
Epoch: "42",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all validators are recent",
|
||||
doppelGangerInput: ðpb.DoppelGangerRequest{
|
||||
ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{
|
||||
{PublicKey: pubKey1, Epoch: 2},
|
||||
{PublicKey: pubKey2, Epoch: 2},
|
||||
{PublicKey: pubKey3, Epoch: 2},
|
||||
{PublicKey: pubKey4, Epoch: 2},
|
||||
{PublicKey: pubKey5, Epoch: 2},
|
||||
{PublicKey: pubKey6, Epoch: 2},
|
||||
},
|
||||
},
|
||||
doppelGangerExpectedOutput: ðpb.DoppelGangerResponse{
|
||||
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{
|
||||
{PublicKey: pubKey1, DuplicateExists: false},
|
||||
{PublicKey: pubKey2, DuplicateExists: false},
|
||||
{PublicKey: pubKey3, DuplicateExists: false},
|
||||
{PublicKey: pubKey4, DuplicateExists: false},
|
||||
{PublicKey: pubKey5, DuplicateExists: false},
|
||||
{PublicKey: pubKey6, DuplicateExists: false},
|
||||
},
|
||||
},
|
||||
getSyncingOutput: &apimiddleware.SyncingResponseJson{
|
||||
Data: &helpers.SyncDetailsJson{
|
||||
IsSyncing: false,
|
||||
},
|
||||
},
|
||||
getForkOutput: &apimiddleware.StateForkResponseJson{
|
||||
Data: &apimiddleware.ForkJson{
|
||||
PreviousVersion: "0x01000000",
|
||||
CurrentVersion: "0x02000000",
|
||||
Epoch: "2",
|
||||
},
|
||||
},
|
||||
getHeadersOutput: &apimiddleware.BlockHeadersResponseJson{
|
||||
Data: []*apimiddleware.BlockHeaderContainerJson{
|
||||
{
|
||||
Header: &apimiddleware.BeaconBlockHeaderContainerJson{
|
||||
Message: &apimiddleware.BeaconBlockHeaderJson{
|
||||
Slot: "99",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "some validators are recent, some not, some duplicates",
|
||||
doppelGangerInput: ðpb.DoppelGangerRequest{
|
||||
ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{
|
||||
{PublicKey: pubKey1, Epoch: 99}, // recent
|
||||
{PublicKey: pubKey2, Epoch: 80}, // not recent - duplicate on previous epoch
|
||||
{PublicKey: pubKey3, Epoch: 80}, // not recent - duplicate on current epoch
|
||||
{PublicKey: pubKey4, Epoch: 80}, // not recent - duplicate on both previous and current epoch
|
||||
{PublicKey: pubKey5, Epoch: 80}, // non existing validator
|
||||
{PublicKey: pubKey6, Epoch: 80}, // not recent - not duplicate
|
||||
},
|
||||
},
|
||||
doppelGangerExpectedOutput: ðpb.DoppelGangerResponse{
|
||||
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{
|
||||
{PublicKey: pubKey1, DuplicateExists: false}, // recent
|
||||
{PublicKey: pubKey2, DuplicateExists: true}, // not recent - duplicate on previous epoch
|
||||
{PublicKey: pubKey3, DuplicateExists: true}, // not recent - duplicate on current epoch
|
||||
{PublicKey: pubKey4, DuplicateExists: true}, // not recent - duplicate on both previous and current epoch
|
||||
{PublicKey: pubKey5, DuplicateExists: false}, // non existing validator
|
||||
{PublicKey: pubKey6, DuplicateExists: false}, // not recent - not duplicate
|
||||
},
|
||||
},
|
||||
getSyncingOutput: &apimiddleware.SyncingResponseJson{
|
||||
Data: &helpers.SyncDetailsJson{
|
||||
IsSyncing: false,
|
||||
},
|
||||
},
|
||||
getForkOutput: &apimiddleware.StateForkResponseJson{
|
||||
Data: &apimiddleware.ForkJson{
|
||||
PreviousVersion: "0x01000000",
|
||||
CurrentVersion: "0x02000000",
|
||||
Epoch: "2",
|
||||
},
|
||||
},
|
||||
getHeadersOutput: &apimiddleware.BlockHeadersResponseJson{
|
||||
Data: []*apimiddleware.BlockHeaderContainerJson{
|
||||
{
|
||||
Header: &apimiddleware.BeaconBlockHeaderContainerJson{
|
||||
Message: &apimiddleware.BeaconBlockHeaderJson{
|
||||
Slot: "3201",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
getStateValidatorsInterface: &struct {
|
||||
input []string
|
||||
output *apimiddleware.StateValidatorsResponseJson
|
||||
}{
|
||||
input: []string{
|
||||
// no stringPubKey1 since recent
|
||||
stringPubKey2, // not recent - duplicate on previous epoch
|
||||
stringPubKey3, // not recent - duplicate on current epoch
|
||||
stringPubKey4, // not recent - duplicate on both previous and current epoch
|
||||
stringPubKey5, // non existing validator
|
||||
stringPubKey6, // not recent - not duplicate
|
||||
},
|
||||
output: &apimiddleware.StateValidatorsResponseJson{
|
||||
Data: []*apimiddleware.ValidatorContainerJson{
|
||||
// No "11111" since corresponding validator is recent
|
||||
{Index: "22222", Validator: &apimiddleware.ValidatorJson{PublicKey: stringPubKey2}}, // not recent - duplicate on previous epoch
|
||||
{Index: "33333", Validator: &apimiddleware.ValidatorJson{PublicKey: stringPubKey3}}, // not recent - duplicate on current epoch
|
||||
{Index: "44444", Validator: &apimiddleware.ValidatorJson{PublicKey: stringPubKey4}}, // not recent - duplicate on both previous and current epoch
|
||||
// No "55555" sicee corresponding validator does not exist
|
||||
{Index: "66666", Validator: &apimiddleware.ValidatorJson{PublicKey: stringPubKey6}}, // not recent - not duplicate
|
||||
},
|
||||
},
|
||||
},
|
||||
getLivelinessInterfaces: []struct {
|
||||
inputUrl string
|
||||
inputStringIndexes []string
|
||||
output *apimiddleware.LivenessResponseJson
|
||||
}{
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/99", // previous epoch
|
||||
inputStringIndexes: []string{
|
||||
// No "11111" since corresponding validator is recent
|
||||
"22222", // not recent - duplicate on previous epoch
|
||||
"33333", // not recent - duplicate on current epoch
|
||||
"44444", // not recent - duplicate on both previous and current epoch
|
||||
// No "55555" since corresponding validator it does not exist
|
||||
"66666", // not recent - not duplicate
|
||||
},
|
||||
output: &apimiddleware.LivenessResponseJson{
|
||||
Data: []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
}{
|
||||
// No "11111" since corresponding validator is recent
|
||||
{Index: "22222", IsLive: true}, // not recent - duplicate on previous epoch
|
||||
{Index: "33333", IsLive: false}, // not recent - duplicate on current epoch
|
||||
{Index: "44444", IsLive: true}, // not recent - duplicate on both previous and current epoch
|
||||
// No "55555" since corresponding validator it does not exist
|
||||
{Index: "66666", IsLive: false}, // not recent - not duplicate
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/100", // current epoch
|
||||
inputStringIndexes: []string{
|
||||
// No "11111" since corresponding validator is recent
|
||||
"22222", // not recent - duplicate on previous epoch
|
||||
"33333", // not recent - duplicate on current epoch
|
||||
"44444", // not recent - duplicate on both previous and current epoch
|
||||
// No "55555" since corresponding validator it does not exist
|
||||
"66666", // not recent - not duplicate
|
||||
},
|
||||
output: &apimiddleware.LivenessResponseJson{
|
||||
Data: []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
}{
|
||||
// No "11111" since corresponding validator is recent
|
||||
{Index: "22222", IsLive: false}, // not recent - duplicate on previous epoch
|
||||
{Index: "33333", IsLive: true}, // not recent - duplicate on current epoch
|
||||
{Index: "44444", IsLive: true}, // not recent - duplicate on both previous and current epoch
|
||||
// No "55555" since corresponding validator it does not exist
|
||||
{Index: "66666", IsLive: false}, // not recent - not duplicate
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if testCase.getSyncingOutput != nil {
|
||||
syncingResponseJson := apimiddleware.SyncingResponseJson{}
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
syncingEnpoint,
|
||||
&syncingResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
*testCase.getSyncingOutput,
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
if testCase.getForkOutput != nil {
|
||||
stateForkResponseJson := apimiddleware.StateForkResponseJson{}
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
forkEndpoint,
|
||||
&stateForkResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
*testCase.getForkOutput,
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
if testCase.getHeadersOutput != nil {
|
||||
blockHeadersResponseJson := apimiddleware.BlockHeadersResponseJson{}
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
headersEndpoint,
|
||||
&blockHeadersResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
*testCase.getHeadersOutput,
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
if testCase.getLivelinessInterfaces != nil {
|
||||
for _, iface := range testCase.getLivelinessInterfaces {
|
||||
livenessResponseJson := apimiddleware.LivenessResponseJson{}
|
||||
|
||||
marshalledIndexes, err := json.Marshal(iface.inputStringIndexes)
|
||||
require.NoError(t, err)
|
||||
|
||||
jsonRestHandler.EXPECT().PostRestJson(
|
||||
ctx,
|
||||
iface.inputUrl,
|
||||
nil,
|
||||
bytes.NewBuffer(marshalledIndexes),
|
||||
&livenessResponseJson,
|
||||
).SetArg(
|
||||
4,
|
||||
*iface.output,
|
||||
).Return(
|
||||
nil,
|
||||
nil,
|
||||
).Times(1)
|
||||
}
|
||||
}
|
||||
|
||||
stateValidatorsProvider := mock.NewMockstateValidatorsProvider(ctrl)
|
||||
|
||||
if testCase.getStateValidatorsInterface != nil {
|
||||
stateValidatorsProvider.EXPECT().GetStateValidators(
|
||||
ctx,
|
||||
testCase.getStateValidatorsInterface.input,
|
||||
nil,
|
||||
nil,
|
||||
).Return(
|
||||
testCase.getStateValidatorsInterface.output,
|
||||
nil,
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
validatorClient := beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
stateValidatorsProvider: stateValidatorsProvider,
|
||||
}
|
||||
|
||||
doppelGangerActualOutput, err := validatorClient.CheckDoppelGanger(
|
||||
context.Background(),
|
||||
testCase.doppelGangerInput,
|
||||
)
|
||||
|
||||
require.DeepEqual(t, testCase.doppelGangerExpectedOutput, doppelGangerActualOutput)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckDoppelGanger_Errors(t *testing.T) {
|
||||
const stringPubKey = "0x80000e851c0f53c3246ff726d7ff7766661ca5e12a07c45c114d208d54f0f8233d4380b2e9aff759d69795d1df905526"
|
||||
pubKey, err := hexutil.Decode(stringPubKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
standardInputValidatorRequests := []*ethpb.DoppelGangerRequest_ValidatorRequest{
|
||||
{
|
||||
PublicKey: pubKey,
|
||||
Epoch: 1,
|
||||
},
|
||||
}
|
||||
|
||||
standardGetSyncingOutput := &apimiddleware.SyncingResponseJson{
|
||||
Data: &helpers.SyncDetailsJson{
|
||||
IsSyncing: false,
|
||||
},
|
||||
}
|
||||
|
||||
standardGetForkOutput := &apimiddleware.StateForkResponseJson{
|
||||
Data: &apimiddleware.ForkJson{
|
||||
CurrentVersion: "0x02000000",
|
||||
},
|
||||
}
|
||||
|
||||
standardGetHeadersOutput := &apimiddleware.BlockHeadersResponseJson{
|
||||
Data: []*apimiddleware.BlockHeaderContainerJson{
|
||||
{
|
||||
Header: &apimiddleware.BeaconBlockHeaderContainerJson{
|
||||
Message: &apimiddleware.BeaconBlockHeaderJson{
|
||||
Slot: "1000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
standardGetStateValidatorsInterface := &struct {
|
||||
input []string
|
||||
output *apimiddleware.StateValidatorsResponseJson
|
||||
err error
|
||||
}{
|
||||
input: []string{stringPubKey},
|
||||
output: &apimiddleware.StateValidatorsResponseJson{
|
||||
Data: []*apimiddleware.ValidatorContainerJson{
|
||||
{
|
||||
Index: "42",
|
||||
Validator: &apimiddleware.ValidatorJson{
|
||||
PublicKey: stringPubKey,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
expectedErrorMessage string
|
||||
inputValidatorRequests []*ethpb.DoppelGangerRequest_ValidatorRequest
|
||||
getSyncingOutput *apimiddleware.SyncingResponseJson
|
||||
getSyncingError error
|
||||
getForkOutput *apimiddleware.StateForkResponseJson
|
||||
getForkError error
|
||||
getHeadersOutput *apimiddleware.BlockHeadersResponseJson
|
||||
getHeadersError error
|
||||
getStateValidatorsInterface *struct {
|
||||
input []string
|
||||
output *apimiddleware.StateValidatorsResponseJson
|
||||
err error
|
||||
}
|
||||
getLivenessInterfaces []struct {
|
||||
inputUrl string
|
||||
inputStringIndexes []string
|
||||
output *apimiddleware.LivenessResponseJson
|
||||
err error
|
||||
}
|
||||
}{
|
||||
{
|
||||
name: "nil validatorRequest",
|
||||
expectedErrorMessage: "validator request is nil",
|
||||
inputValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{nil},
|
||||
},
|
||||
{
|
||||
name: "isSyncing on error",
|
||||
expectedErrorMessage: "failed to get beacon node sync status",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getSyncingError: errors.New("custom error"),
|
||||
},
|
||||
{
|
||||
name: "beacon node not synced",
|
||||
expectedErrorMessage: "beacon node not synced",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: &apimiddleware.SyncingResponseJson{
|
||||
Data: &helpers.SyncDetailsJson{
|
||||
IsSyncing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "getFork on error",
|
||||
expectedErrorMessage: "failed to get fork",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: &apimiddleware.StateForkResponseJson{},
|
||||
getForkError: errors.New("custom error"),
|
||||
},
|
||||
{
|
||||
name: "cannot decode fork version",
|
||||
expectedErrorMessage: "failed to decode fork version",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: &apimiddleware.StateForkResponseJson{Data: &apimiddleware.ForkJson{CurrentVersion: "not a version"}},
|
||||
},
|
||||
{
|
||||
name: "get headers on error",
|
||||
expectedErrorMessage: "failed to get headers",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: &apimiddleware.BlockHeadersResponseJson{},
|
||||
getHeadersError: errors.New("custom error"),
|
||||
},
|
||||
{
|
||||
name: "cannot parse head slot",
|
||||
expectedErrorMessage: "failed to parse head slot",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: &apimiddleware.BlockHeadersResponseJson{
|
||||
Data: []*apimiddleware.BlockHeaderContainerJson{
|
||||
{
|
||||
Header: &apimiddleware.BeaconBlockHeaderContainerJson{
|
||||
Message: &apimiddleware.BeaconBlockHeaderJson{
|
||||
Slot: "not a slot",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "state validators error",
|
||||
expectedErrorMessage: "failed to get state validators",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: standardGetHeadersOutput,
|
||||
getStateValidatorsInterface: &struct {
|
||||
input []string
|
||||
output *apimiddleware.StateValidatorsResponseJson
|
||||
err error
|
||||
}{
|
||||
input: []string{stringPubKey},
|
||||
err: errors.New("custom error"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator container is nil",
|
||||
expectedErrorMessage: "validator container is nil",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: standardGetHeadersOutput,
|
||||
getStateValidatorsInterface: &struct {
|
||||
input []string
|
||||
output *apimiddleware.StateValidatorsResponseJson
|
||||
err error
|
||||
}{
|
||||
input: []string{stringPubKey},
|
||||
output: &apimiddleware.StateValidatorsResponseJson{Data: []*apimiddleware.ValidatorContainerJson{nil}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator is nil",
|
||||
expectedErrorMessage: "validator is nil",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: standardGetHeadersOutput,
|
||||
getStateValidatorsInterface: &struct {
|
||||
input []string
|
||||
output *apimiddleware.StateValidatorsResponseJson
|
||||
err error
|
||||
}{
|
||||
input: []string{stringPubKey},
|
||||
output: &apimiddleware.StateValidatorsResponseJson{Data: []*apimiddleware.ValidatorContainerJson{{Validator: nil}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "previous epoch liveness error",
|
||||
expectedErrorMessage: "failed to get map from validator index to liveness for previous epoch 30",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: standardGetHeadersOutput,
|
||||
getStateValidatorsInterface: standardGetStateValidatorsInterface,
|
||||
getLivenessInterfaces: []struct {
|
||||
inputUrl string
|
||||
inputStringIndexes []string
|
||||
output *apimiddleware.LivenessResponseJson
|
||||
err error
|
||||
}{
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/30",
|
||||
inputStringIndexes: []string{"42"},
|
||||
output: &apimiddleware.LivenessResponseJson{},
|
||||
err: errors.New("custom error"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "liveness is nil",
|
||||
expectedErrorMessage: "liveness is nil",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: standardGetHeadersOutput,
|
||||
getStateValidatorsInterface: standardGetStateValidatorsInterface,
|
||||
getLivenessInterfaces: []struct {
|
||||
inputUrl string
|
||||
inputStringIndexes []string
|
||||
output *apimiddleware.LivenessResponseJson
|
||||
err error
|
||||
}{
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/30",
|
||||
inputStringIndexes: []string{"42"},
|
||||
output: &apimiddleware.LivenessResponseJson{
|
||||
Data: []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
}{nil},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "current epoch liveness error",
|
||||
expectedErrorMessage: "failed to get map from validator index to liveness for current epoch 31",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: standardGetHeadersOutput,
|
||||
getStateValidatorsInterface: standardGetStateValidatorsInterface,
|
||||
getLivenessInterfaces: []struct {
|
||||
inputUrl string
|
||||
inputStringIndexes []string
|
||||
output *apimiddleware.LivenessResponseJson
|
||||
err error
|
||||
}{
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/30",
|
||||
inputStringIndexes: []string{"42"},
|
||||
output: &apimiddleware.LivenessResponseJson{
|
||||
Data: []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
}{},
|
||||
},
|
||||
},
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/31",
|
||||
inputStringIndexes: []string{"42"},
|
||||
output: &apimiddleware.LivenessResponseJson{},
|
||||
err: errors.New("custom error"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "wrong validator index for previous epoch",
|
||||
expectedErrorMessage: "failed to retrieve liveness for previous epoch `30` for validator index `42`",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: standardGetHeadersOutput,
|
||||
getStateValidatorsInterface: standardGetStateValidatorsInterface,
|
||||
getLivenessInterfaces: []struct {
|
||||
inputUrl string
|
||||
inputStringIndexes []string
|
||||
output *apimiddleware.LivenessResponseJson
|
||||
err error
|
||||
}{
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/30",
|
||||
inputStringIndexes: []string{"42"},
|
||||
output: &apimiddleware.LivenessResponseJson{
|
||||
Data: []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
}{},
|
||||
},
|
||||
},
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/31",
|
||||
inputStringIndexes: []string{"42"},
|
||||
output: &apimiddleware.LivenessResponseJson{
|
||||
Data: []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "wrong validator index for current epoch",
|
||||
expectedErrorMessage: "failed to retrieve liveness for current epoch `31` for validator index `42`",
|
||||
inputValidatorRequests: standardInputValidatorRequests,
|
||||
getSyncingOutput: standardGetSyncingOutput,
|
||||
getForkOutput: standardGetForkOutput,
|
||||
getHeadersOutput: standardGetHeadersOutput,
|
||||
getStateValidatorsInterface: standardGetStateValidatorsInterface,
|
||||
getLivenessInterfaces: []struct {
|
||||
inputUrl string
|
||||
inputStringIndexes []string
|
||||
output *apimiddleware.LivenessResponseJson
|
||||
err error
|
||||
}{
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/30",
|
||||
inputStringIndexes: []string{"42"},
|
||||
output: &apimiddleware.LivenessResponseJson{
|
||||
Data: []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
}{
|
||||
{
|
||||
Index: "42",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
inputUrl: "/eth/v1/validator/liveness/31",
|
||||
inputStringIndexes: []string{"42"},
|
||||
output: &apimiddleware.LivenessResponseJson{
|
||||
Data: []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if testCase.getSyncingOutput != nil {
|
||||
syncingResponseJson := apimiddleware.SyncingResponseJson{}
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
syncingEnpoint,
|
||||
&syncingResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
testCase.getSyncingError,
|
||||
).SetArg(
|
||||
2,
|
||||
*testCase.getSyncingOutput,
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
if testCase.getForkOutput != nil {
|
||||
stateForkResponseJson := apimiddleware.StateForkResponseJson{}
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
forkEndpoint,
|
||||
&stateForkResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
testCase.getForkError,
|
||||
).SetArg(
|
||||
2,
|
||||
*testCase.getForkOutput,
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
if testCase.getHeadersOutput != nil {
|
||||
blockHeadersResponseJson := apimiddleware.BlockHeadersResponseJson{}
|
||||
|
||||
jsonRestHandler.EXPECT().GetRestJsonResponse(
|
||||
ctx,
|
||||
headersEndpoint,
|
||||
&blockHeadersResponseJson,
|
||||
).Return(
|
||||
nil,
|
||||
testCase.getHeadersError,
|
||||
).SetArg(
|
||||
2,
|
||||
*testCase.getHeadersOutput,
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
stateValidatorsProvider := mock.NewMockstateValidatorsProvider(ctrl)
|
||||
|
||||
if testCase.getStateValidatorsInterface != nil {
|
||||
stateValidatorsProvider.EXPECT().GetStateValidators(
|
||||
ctx,
|
||||
testCase.getStateValidatorsInterface.input,
|
||||
nil,
|
||||
nil,
|
||||
).Return(
|
||||
testCase.getStateValidatorsInterface.output,
|
||||
testCase.getStateValidatorsInterface.err,
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
if testCase.getLivenessInterfaces != nil {
|
||||
for _, iface := range testCase.getLivenessInterfaces {
|
||||
livenessResponseJson := apimiddleware.LivenessResponseJson{}
|
||||
|
||||
marshalledIndexes, err := json.Marshal(iface.inputStringIndexes)
|
||||
require.NoError(t, err)
|
||||
|
||||
jsonRestHandler.EXPECT().PostRestJson(
|
||||
ctx,
|
||||
iface.inputUrl,
|
||||
nil,
|
||||
bytes.NewBuffer(marshalledIndexes),
|
||||
&livenessResponseJson,
|
||||
).SetArg(
|
||||
4,
|
||||
*iface.output,
|
||||
).Return(
|
||||
nil,
|
||||
iface.err,
|
||||
).Times(1)
|
||||
}
|
||||
}
|
||||
|
||||
validatorClient := beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
stateValidatorsProvider: stateValidatorsProvider,
|
||||
}
|
||||
|
||||
_, err := validatorClient.CheckDoppelGanger(
|
||||
context.Background(),
|
||||
ðpb.DoppelGangerRequest{
|
||||
ValidatorRequests: testCase.inputValidatorRequests,
|
||||
},
|
||||
)
|
||||
|
||||
require.ErrorContains(t, testCase.expectedErrorMessage, err)
|
||||
})
|
||||
}
|
||||
}
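For reference, a minimal caller-side sketch of the CheckDoppelGanger flow exercised by the tests above; it is not part of this diff, and `ctx`, `validatorClient`, `pubKey` and `lastSignedEpoch` are assumed to exist in the surrounding validator code.

	// Hypothetical usage sketch: build a request for the keys to check, call
	// CheckDoppelGanger, and refuse to start if any duplicate instance is reported.
	req := &ethpb.DoppelGangerRequest{
		ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{
			{PublicKey: pubKey, Epoch: lastSignedEpoch},
		},
	}

	resp, err := validatorClient.CheckDoppelGanger(ctx, req)
	if err != nil {
		return err
	}

	for _, r := range resp.Responses {
		if r.DuplicateExists {
			return errors.Errorf("duplicate validator instance detected for key %#x", r.PublicKey)
		}
	}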
300
validator/client/beacon-api/duties.go
Normal file
@@ -0,0 +1,300 @@
package beacon_api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
type dutiesProvider interface {
|
||||
GetAttesterDuties(ctx context.Context, epoch types.Epoch, validatorIndices []types.ValidatorIndex) ([]*apimiddleware.AttesterDutyJson, error)
|
||||
GetProposerDuties(ctx context.Context, epoch types.Epoch) ([]*apimiddleware.ProposerDutyJson, error)
|
||||
GetSyncDuties(ctx context.Context, epoch types.Epoch, validatorIndices []types.ValidatorIndex) ([]*apimiddleware.SyncCommitteeDuty, error)
|
||||
GetCommittees(ctx context.Context, epoch types.Epoch) ([]*apimiddleware.CommitteeJson, error)
|
||||
}
|
||||
|
||||
type beaconApiDutiesProvider struct {
|
||||
jsonRestHandler jsonRestHandler
|
||||
}
|
||||
|
||||
type committeeIndexSlotPair struct {
|
||||
committeeIndex types.CommitteeIndex
|
||||
slot types.Slot
|
||||
}
|
||||
|
||||
func (c beaconApiValidatorClient) getDuties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.DutiesResponse, error) {
|
||||
multipleValidatorStatus, err := c.multipleValidatorStatus(ctx, ðpb.MultipleValidatorStatusRequest{PublicKeys: in.PublicKeys})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get validator status")
|
||||
}
|
||||
|
||||
// Sync committees are an Altair feature
|
||||
fetchSyncDuties := in.Epoch >= params.BeaconConfig().AltairForkEpoch
|
||||
|
||||
currentEpochDuties, err := c.getDutiesForEpoch(ctx, in.Epoch, multipleValidatorStatus, fetchSyncDuties)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get duties for current epoch `%d`", in.Epoch)
|
||||
}
|
||||
|
||||
nextEpochDuties, err := c.getDutiesForEpoch(ctx, in.Epoch+1, multipleValidatorStatus, fetchSyncDuties)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get duties for next epoch `%d`", in.Epoch+1)
|
||||
}
|
||||
|
||||
return ðpb.DutiesResponse{
|
||||
Duties: currentEpochDuties,
|
||||
CurrentEpochDuties: currentEpochDuties,
|
||||
NextEpochDuties: nextEpochDuties,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c beaconApiValidatorClient) getDutiesForEpoch(
|
||||
ctx context.Context,
|
||||
epoch types.Epoch,
|
||||
multipleValidatorStatus *ethpb.MultipleValidatorStatusResponse,
|
||||
fetchSyncDuties bool,
|
||||
) ([]*ethpb.DutiesResponse_Duty, error) {
|
||||
attesterDuties, err := c.dutiesProvider.GetAttesterDuties(ctx, epoch, multipleValidatorStatus.Indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get attester duties for epoch `%d`", epoch)
|
||||
}
|
||||
|
||||
var syncDuties []*apimiddleware.SyncCommitteeDuty
|
||||
if fetchSyncDuties {
|
||||
if syncDuties, err = c.dutiesProvider.GetSyncDuties(ctx, epoch, multipleValidatorStatus.Indices); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get sync duties for epoch `%d`", epoch)
|
||||
}
|
||||
}
|
||||
|
||||
var proposerDuties []*apimiddleware.ProposerDutyJson
|
||||
if proposerDuties, err = c.dutiesProvider.GetProposerDuties(ctx, epoch); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get proposer duties for epoch `%d`", epoch)
|
||||
}
|
||||
|
||||
committees, err := c.dutiesProvider.GetCommittees(ctx, epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get committees for epoch `%d`", epoch)
|
||||
}
|
||||
|
||||
// Mapping from a validator index to its attesting committee's index and slot
|
||||
attesterDutiesMapping := make(map[types.ValidatorIndex]committeeIndexSlotPair)
|
||||
for _, attesterDuty := range attesterDuties {
|
||||
validatorIndex, err := strconv.ParseUint(attesterDuty.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse attester validator index `%s`", attesterDuty.ValidatorIndex)
|
||||
}
|
||||
|
||||
slot, err := strconv.ParseUint(attesterDuty.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse attester slot `%s`", attesterDuty.Slot)
|
||||
}
|
||||
|
||||
committeeIndex, err := strconv.ParseUint(attesterDuty.CommitteeIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse attester committee index `%s`", attesterDuty.CommitteeIndex)
|
||||
}
|
||||
|
||||
attesterDutiesMapping[types.ValidatorIndex(validatorIndex)] = committeeIndexSlotPair{
|
||||
slot: types.Slot(slot),
|
||||
committeeIndex: types.CommitteeIndex(committeeIndex),
|
||||
}
|
||||
}
|
||||
|
||||
// Mapping from a validator index to its proposal slot
|
||||
proposerDutySlots := make(map[types.ValidatorIndex][]types.Slot)
|
||||
for _, proposerDuty := range proposerDuties {
|
||||
validatorIndex, err := strconv.ParseUint(proposerDuty.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse proposer validator index `%s`", proposerDuty.ValidatorIndex)
|
||||
}
|
||||
|
||||
slot, err := strconv.ParseUint(proposerDuty.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse proposer slot `%s`", proposerDuty.Slot)
|
||||
}
|
||||
|
||||
proposerDutySlots[types.ValidatorIndex(validatorIndex)] = append(proposerDutySlots[types.ValidatorIndex(validatorIndex)], types.Slot(slot))
|
||||
}
|
||||
|
||||
// Set containing all validator indices that are part of a sync committee for this epoch
|
||||
syncDutiesMapping := make(map[types.ValidatorIndex]bool)
|
||||
for _, syncDuty := range syncDuties {
|
||||
validatorIndex, err := strconv.ParseUint(syncDuty.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse sync validator index `%s`", syncDuty.ValidatorIndex)
|
||||
}
|
||||
|
||||
syncDutiesMapping[types.ValidatorIndex(validatorIndex)] = true
|
||||
}
|
||||
|
||||
// Mapping from the {committeeIndex, slot} to each of the committee's validator indices
|
||||
committeeMapping := make(map[committeeIndexSlotPair][]types.ValidatorIndex)
|
||||
for _, committee := range committees {
|
||||
committeeIndex, err := strconv.ParseUint(committee.Index, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse committee index `%s`", committee.Index)
|
||||
}
|
||||
|
||||
slot, err := strconv.ParseUint(committee.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse slot `%s`", committee.Slot)
|
||||
}
|
||||
|
||||
validatorIndices := make([]types.ValidatorIndex, len(committee.Validators))
|
||||
for index, validatorIndexString := range committee.Validators {
|
||||
validatorIndex, err := strconv.ParseUint(validatorIndexString, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse committee validator index `%s`", validatorIndexString)
|
||||
}
|
||||
validatorIndices[index] = types.ValidatorIndex(validatorIndex)
|
||||
}
|
||||
|
||||
key := committeeIndexSlotPair{
|
||||
committeeIndex: types.CommitteeIndex(committeeIndex),
|
||||
slot: types.Slot(slot),
|
||||
}
|
||||
committeeMapping[key] = validatorIndices
|
||||
}
|
||||
|
||||
duties := make([]*ethpb.DutiesResponse_Duty, len(multipleValidatorStatus.Statuses))
|
||||
for index, validatorStatus := range multipleValidatorStatus.Statuses {
|
||||
validatorIndex := multipleValidatorStatus.Indices[index]
|
||||
pubkey := multipleValidatorStatus.PublicKeys[index]
|
||||
|
||||
var attesterSlot types.Slot
|
||||
var committeeIndex types.CommitteeIndex
|
||||
var committeeValidatorIndices []types.ValidatorIndex
|
||||
|
||||
if committeeMappingKey, ok := attesterDutiesMapping[validatorIndex]; ok {
|
||||
committeeIndex = committeeMappingKey.committeeIndex
|
||||
attesterSlot = committeeMappingKey.slot
|
||||
|
||||
if committeeValidatorIndices, ok = committeeMapping[committeeMappingKey]; !ok {
|
||||
return nil, errors.Errorf("failed to find validators for committee index `%d` and slot `%d`", committeeIndex, attesterSlot)
|
||||
}
|
||||
}
|
||||
|
||||
duties[index] = ðpb.DutiesResponse_Duty{
|
||||
Committee: committeeValidatorIndices,
|
||||
CommitteeIndex: committeeIndex,
|
||||
AttesterSlot: attesterSlot,
|
||||
ProposerSlots: proposerDutySlots[types.ValidatorIndex(validatorIndex)],
|
||||
PublicKey: pubkey,
|
||||
Status: validatorStatus.Status,
|
||||
ValidatorIndex: types.ValidatorIndex(validatorIndex),
|
||||
IsSyncCommittee: syncDutiesMapping[types.ValidatorIndex(validatorIndex)],
|
||||
}
|
||||
}
|
||||
|
||||
return duties, nil
|
||||
}
|
||||
|
||||
// GetCommittees retrieves the committees for the given epoch
|
||||
func (c beaconApiDutiesProvider) GetCommittees(ctx context.Context, epoch types.Epoch) ([]*apimiddleware.CommitteeJson, error) {
|
||||
committeeParams := url.Values{}
|
||||
committeeParams.Add("epoch", strconv.FormatUint(uint64(epoch), 10))
|
||||
committeesRequest := buildURL("/eth/v1/beacon/states/head/committees", committeeParams)
|
||||
|
||||
var stateCommittees apimiddleware.StateCommitteesResponseJson
|
||||
if _, err := c.jsonRestHandler.GetRestJsonResponse(ctx, committeesRequest, &stateCommittees); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to query committees for epoch `%d`", epoch)
|
||||
}
|
||||
|
||||
if stateCommittees.Data == nil {
|
||||
return nil, errors.New("state committees data is nil")
|
||||
}
|
||||
|
||||
for index, committee := range stateCommittees.Data {
|
||||
if committee == nil {
|
||||
return nil, errors.Errorf("committee at index `%d` is nil", index)
|
||||
}
|
||||
}
|
||||
|
||||
return stateCommittees.Data, nil
|
||||
}
|
||||
|
||||
// GetAttesterDuties retrieves the attester duties for the given epoch and validatorIndices
|
||||
func (c beaconApiDutiesProvider) GetAttesterDuties(ctx context.Context, epoch types.Epoch, validatorIndices []types.ValidatorIndex) ([]*apimiddleware.AttesterDutyJson, error) {
|
||||
|
||||
jsonValidatorIndices := make([]string, len(validatorIndices))
|
||||
for index, validatorIndex := range validatorIndices {
|
||||
jsonValidatorIndices[index] = strconv.FormatUint(uint64(validatorIndex), 10)
|
||||
}
|
||||
|
||||
validatorIndicesBytes, err := json.Marshal(jsonValidatorIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to marshal validator indices")
|
||||
}
|
||||
|
||||
attesterDuties := &apimiddleware.AttesterDutiesResponseJson{}
|
||||
if _, err := c.jsonRestHandler.PostRestJson(ctx, fmt.Sprintf("/eth/v1/validator/duties/attester/%d", epoch), nil, bytes.NewBuffer(validatorIndicesBytes), attesterDuties); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to send POST data to REST endpoint")
|
||||
}
|
||||
|
||||
for index, attesterDuty := range attesterDuties.Data {
|
||||
if attesterDuty == nil {
|
||||
return nil, errors.Errorf("attester duty at index `%d` is nil", index)
|
||||
}
|
||||
}
|
||||
|
||||
return attesterDuties.Data, nil
|
||||
}
|
||||
|
||||
// GetProposerDuties retrieves the proposer duties for the given epoch
|
||||
func (c beaconApiDutiesProvider) GetProposerDuties(ctx context.Context, epoch types.Epoch) ([]*apimiddleware.ProposerDutyJson, error) {
|
||||
proposerDuties := apimiddleware.ProposerDutiesResponseJson{}
|
||||
if _, err := c.jsonRestHandler.GetRestJsonResponse(ctx, fmt.Sprintf("/eth/v1/validator/duties/proposer/%d", epoch), &proposerDuties); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to query proposer duties for epoch `%d`", epoch)
|
||||
}
|
||||
|
||||
if proposerDuties.Data == nil {
|
||||
return nil, errors.New("proposer duties data is nil")
|
||||
}
|
||||
|
||||
for index, proposerDuty := range proposerDuties.Data {
|
||||
if proposerDuty == nil {
|
||||
return nil, errors.Errorf("proposer duty at index `%d` is nil", index)
|
||||
}
|
||||
}
|
||||
|
||||
return proposerDuties.Data, nil
|
||||
}
|
||||
|
||||
// GetSyncDuties retrieves the sync committee duties for the given epoch and validatorIndices
|
||||
func (c beaconApiDutiesProvider) GetSyncDuties(ctx context.Context, epoch types.Epoch, validatorIndices []types.ValidatorIndex) ([]*apimiddleware.SyncCommitteeDuty, error) {
|
||||
jsonValidatorIndices := make([]string, len(validatorIndices))
|
||||
for index, validatorIndex := range validatorIndices {
|
||||
jsonValidatorIndices[index] = strconv.FormatUint(uint64(validatorIndex), 10)
|
||||
}
|
||||
|
||||
validatorIndicesBytes, err := json.Marshal(jsonValidatorIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to marshal validator indices")
|
||||
}
|
||||
|
||||
syncDuties := apimiddleware.SyncCommitteeDutiesResponseJson{}
|
||||
if _, err := c.jsonRestHandler.PostRestJson(ctx, fmt.Sprintf("/eth/v1/validator/duties/sync/%d", epoch), nil, bytes.NewBuffer(validatorIndicesBytes), &syncDuties); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to send POST data to REST endpoint")
|
||||
}
|
||||
|
||||
if syncDuties.Data == nil {
|
||||
return nil, errors.New("sync duties data is nil")
|
||||
}
|
||||
|
||||
for index, syncDuty := range syncDuties.Data {
|
||||
if syncDuty == nil {
|
||||
return nil, errors.Errorf("sync duty at index `%d` is nil", index)
|
||||
}
|
||||
}
|
||||
|
||||
return syncDuties.Data, nil
|
||||
}
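As a rough illustration of how the new duties plumbing is meant to be consumed, here is a hedged sketch of a getDuties call using the request/response shapes defined above; `ctx`, `validatorClient`, `pubKeys` and `epoch` are assumed placeholders, not part of this diff.

	// Hypothetical usage sketch: request duties for a set of validator public keys
	// at a given epoch and read back the assignments resolved by getDutiesForEpoch.
	resp, err := validatorClient.getDuties(ctx, &ethpb.DutiesRequest{
		PublicKeys: pubKeys, // [][]byte, assumed to be loaded elsewhere
		Epoch:      epoch,   // types.Epoch, assumed
	})
	if err != nil {
		return errors.Wrap(err, "failed to get duties")
	}

	for _, duty := range resp.CurrentEpochDuties {
		log.WithField("validatorIndex", duty.ValidatorIndex).
			WithField("attesterSlot", duty.AttesterSlot).
			WithField("proposerSlots", duty.ProposerSlots).
			Info("resolved duty")
	}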
1518
validator/client/beacon-api/duties_test.go
Normal file
File diff suppressed because it is too large
5
validator/client/beacon-api/log.go
Normal file
@@ -0,0 +1,5 @@
package beacon_api

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "beacon-api")
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"duties_mock.go",
|
||||
"genesis_mock.go",
|
||||
"json_rest_handler_mock.go",
|
||||
"state_validators_mock.go",
|
||||
@@ -12,6 +13,7 @@ go_library(
|
||||
deps = [
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"@com_github_golang_mock//gomock:go_default_library",
|
||||
],
|
||||
)
97
validator/client/beacon-api/mock/duties_mock.go
generated
Normal file
@@ -0,0 +1,97 @@
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: validator/client/beacon-api/duties.go
|
||||
|
||||
// Package mock is a generated GoMock package.
|
||||
package mock
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
apimiddleware "github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// MockdutiesProvider is a mock of dutiesProvider interface.
|
||||
type MockdutiesProvider struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockdutiesProviderMockRecorder
|
||||
}
|
||||
|
||||
// MockdutiesProviderMockRecorder is the mock recorder for MockdutiesProvider.
|
||||
type MockdutiesProviderMockRecorder struct {
|
||||
mock *MockdutiesProvider
|
||||
}
|
||||
|
||||
// NewMockdutiesProvider creates a new mock instance.
|
||||
func NewMockdutiesProvider(ctrl *gomock.Controller) *MockdutiesProvider {
|
||||
mock := &MockdutiesProvider{ctrl: ctrl}
|
||||
mock.recorder = &MockdutiesProviderMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockdutiesProvider) EXPECT() *MockdutiesProviderMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// GetAttesterDuties mocks base method.
|
||||
func (m *MockdutiesProvider) GetAttesterDuties(ctx context.Context, epoch types.Epoch, validatorIndices []types.ValidatorIndex) ([]*apimiddleware.AttesterDutyJson, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetAttesterDuties", ctx, epoch, validatorIndices)
|
||||
ret0, _ := ret[0].([]*apimiddleware.AttesterDutyJson)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetAttesterDuties indicates an expected call of GetAttesterDuties.
|
||||
func (mr *MockdutiesProviderMockRecorder) GetAttesterDuties(ctx, epoch, validatorIndices interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttesterDuties", reflect.TypeOf((*MockdutiesProvider)(nil).GetAttesterDuties), ctx, epoch, validatorIndices)
|
||||
}
|
||||
|
||||
// GetCommittees mocks base method.
|
||||
func (m *MockdutiesProvider) GetCommittees(ctx context.Context, epoch types.Epoch) ([]*apimiddleware.CommitteeJson, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetCommittees", ctx, epoch)
|
||||
ret0, _ := ret[0].([]*apimiddleware.CommitteeJson)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetCommittees indicates an expected call of GetCommittees.
|
||||
func (mr *MockdutiesProviderMockRecorder) GetCommittees(ctx, epoch interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommittees", reflect.TypeOf((*MockdutiesProvider)(nil).GetCommittees), ctx, epoch)
|
||||
}
|
||||
|
||||
// GetProposerDuties mocks base method.
|
||||
func (m *MockdutiesProvider) GetProposerDuties(ctx context.Context, epoch types.Epoch) ([]*apimiddleware.ProposerDutyJson, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetProposerDuties", ctx, epoch)
|
||||
ret0, _ := ret[0].([]*apimiddleware.ProposerDutyJson)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetProposerDuties indicates an expected call of GetProposerDuties.
|
||||
func (mr *MockdutiesProviderMockRecorder) GetProposerDuties(ctx, epoch interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposerDuties", reflect.TypeOf((*MockdutiesProvider)(nil).GetProposerDuties), ctx, epoch)
|
||||
}
|
||||
|
||||
// GetSyncDuties mocks base method.
|
||||
func (m *MockdutiesProvider) GetSyncDuties(ctx context.Context, epoch types.Epoch, validatorIndices []types.ValidatorIndex) ([]*apimiddleware.SyncCommitteeDuty, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetSyncDuties", ctx, epoch, validatorIndices)
|
||||
ret0, _ := ret[0].([]*apimiddleware.SyncCommitteeDuty)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetSyncDuties indicates an expected call of GetSyncDuties.
|
||||
func (mr *MockdutiesProviderMockRecorder) GetSyncDuties(ctx, epoch, validatorIndices interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSyncDuties", reflect.TypeOf((*MockdutiesProvider)(nil).GetSyncDuties), ctx, epoch, validatorIndices)
|
||||
}
81
validator/client/beacon-api/subscribe_committee_subnets.go
Normal file
@@ -0,0 +1,81 @@
package beacon_api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
)
|
||||
|
||||
func (c beaconApiValidatorClient) subscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []types.ValidatorIndex) error {
|
||||
if in == nil {
|
||||
return errors.New("committee subnets subscribe request is nil")
|
||||
}
|
||||
|
||||
if len(in.CommitteeIds) != len(in.Slots) || len(in.CommitteeIds) != len(in.IsAggregator) || len(in.CommitteeIds) != len(validatorIndices) {
|
||||
return errors.New("arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `validatorIndices` don't have the same length")
|
||||
}
|
||||
|
||||
slotToCommitteesAtSlotMap := make(map[types.Slot]uint64)
|
||||
jsonCommitteeSubscriptions := make([]*apimiddleware.BeaconCommitteeSubscribeJson, len(in.CommitteeIds))
|
||||
for index := range in.CommitteeIds {
|
||||
subscribeSlot := in.Slots[index]
|
||||
subscribeCommitteeId := in.CommitteeIds[index]
|
||||
subscribeIsAggregator := in.IsAggregator[index]
|
||||
subscribeValidatorIndex := validatorIndices[index]
|
||||
|
||||
committeesAtSlot, foundSlot := slotToCommitteesAtSlotMap[subscribeSlot]
|
||||
if !foundSlot {
|
||||
// Lazily fetch the committeesAtSlot from the beacon node if they are not already in the map
|
||||
epoch := slots.ToEpoch(subscribeSlot)
|
||||
duties, err := c.dutiesProvider.GetAttesterDuties(ctx, epoch, validatorIndices)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get duties for epoch `%d`", epoch)
|
||||
}
|
||||
|
||||
for _, duty := range duties {
|
||||
dutySlot, err := strconv.ParseUint(duty.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse slot `%s`", duty.Slot)
|
||||
}
|
||||
|
||||
committees, err := strconv.ParseUint(duty.CommitteesAtSlot, 10, 64)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse CommitteesAtSlot `%s`", duty.CommitteesAtSlot)
|
||||
}
|
||||
|
||||
slotToCommitteesAtSlotMap[types.Slot(dutySlot)] = committees
|
||||
}
|
||||
|
||||
// If the slot still isn't in the map, we either received bad data from the beacon node or the caller of this function gave us bad data
|
||||
if committeesAtSlot, foundSlot = slotToCommitteesAtSlotMap[subscribeSlot]; !foundSlot {
|
||||
return errors.Errorf("failed to get committees for slot `%d`", subscribeSlot)
|
||||
}
|
||||
}
|
||||
|
||||
jsonCommitteeSubscriptions[index] = &apimiddleware.BeaconCommitteeSubscribeJson{
|
||||
CommitteeIndex: strconv.FormatUint(uint64(subscribeCommitteeId), 10),
|
||||
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot, 10),
|
||||
Slot: strconv.FormatUint(uint64(subscribeSlot), 10),
|
||||
IsAggregator: subscribeIsAggregator,
|
||||
ValidatorIndex: strconv.FormatUint(uint64(subscribeValidatorIndex), 10),
|
||||
}
|
||||
}
|
||||
|
||||
committeeSubscriptionsBytes, err := json.Marshal(jsonCommitteeSubscriptions)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal committees subscriptions")
|
||||
}
|
||||
|
||||
if _, err := c.jsonRestHandler.PostRestJson(ctx, "/eth/v1/validator/beacon_committee_subscriptions", nil, bytes.NewBuffer(committeeSubscriptionsBytes), nil); err != nil {
|
||||
return errors.Wrap(err, "failed to send POST data to REST endpoint")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
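A small sketch of the parallel-slice contract that subscribeCommitteeSubnets enforces: the i-th entries of Slots, CommitteeIds, IsAggregator and validatorIndices all describe the same subscription. The concrete values below are placeholders and `ctx`/`validatorClient` are assumed to exist.

	// Hypothetical usage sketch: all four slices must have the same length, otherwise
	// the function returns the size-mismatch error tested further below.
	req := &ethpb.CommitteeSubnetsSubscribeRequest{
		Slots:        []types.Slot{12, 13},
		CommitteeIds: []types.CommitteeIndex{4, 7},
		IsAggregator: []bool{false, true},
	}
	validatorIndices := []types.ValidatorIndex{123, 456}

	if err := validatorClient.subscribeCommitteeSubnets(ctx, req, validatorIndices); err != nil {
		return errors.Wrap(err, "failed to subscribe to committee subnets")
	}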
295
validator/client/beacon-api/subscribe_committee_subnets_test.go
Normal file
@@ -0,0 +1,295 @@
package beacon_api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
"github.com/prysmaticlabs/prysm/v3/validator/client/beacon-api/mock"
|
||||
)
|
||||
|
||||
const subscribeCommitteeSubnetsTestEndpoint = "/eth/v1/validator/beacon_committee_subscriptions"
|
||||
|
||||
func TestSubscribeCommitteeSubnets_Valid(t *testing.T) {
|
||||
subscribeSlots := []types.Slot{0, 1, 100}
|
||||
validatorIndices := []types.ValidatorIndex{2, 3, 4}
|
||||
committeesAtSlot := []uint64{5, 6, 7}
|
||||
isAggregator := []bool{false, true, false}
|
||||
committeeIndices := []types.CommitteeIndex{8, 9, 10}
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
jsonCommitteeSubscriptions := make([]*apimiddleware.BeaconCommitteeSubscribeJson, len(subscribeSlots))
|
||||
for index := range jsonCommitteeSubscriptions {
|
||||
jsonCommitteeSubscriptions[index] = &apimiddleware.BeaconCommitteeSubscribeJson{
|
||||
ValidatorIndex: strconv.FormatUint(uint64(validatorIndices[index]), 10),
|
||||
CommitteeIndex: strconv.FormatUint(uint64(committeeIndices[index]), 10),
|
||||
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[index], 10),
|
||||
Slot: strconv.FormatUint(uint64(subscribeSlots[index]), 10),
|
||||
IsAggregator: isAggregator[index],
|
||||
}
|
||||
}
|
||||
|
||||
committeeSubscriptionsBytes, err := json.Marshal(jsonCommitteeSubscriptions)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().PostRestJson(
|
||||
ctx,
|
||||
subscribeCommitteeSubnetsTestEndpoint,
|
||||
nil,
|
||||
bytes.NewBuffer(committeeSubscriptionsBytes),
|
||||
nil,
|
||||
).Return(
|
||||
nil,
|
||||
nil,
|
||||
).Times(1)
|
||||
|
||||
duties := make([]*apimiddleware.AttesterDutyJson, len(subscribeSlots))
|
||||
for index := range duties {
|
||||
duties[index] = &apimiddleware.AttesterDutyJson{
|
||||
ValidatorIndex: strconv.FormatUint(uint64(validatorIndices[index]), 10),
|
||||
CommitteeIndex: strconv.FormatUint(uint64(committeeIndices[index]), 10),
|
||||
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[index], 10),
|
||||
Slot: strconv.FormatUint(uint64(subscribeSlots[index]), 10),
|
||||
}
|
||||
}
|
||||
|
||||
// Even though we have 3 distinct slots, the first 2 ones are in the same epoch so we should only send 2 requests to the beacon node
|
||||
dutiesProvider := mock.NewMockdutiesProvider(ctrl)
|
||||
dutiesProvider.EXPECT().GetAttesterDuties(
|
||||
ctx,
|
||||
slots.ToEpoch(subscribeSlots[0]),
|
||||
validatorIndices,
|
||||
).Return(
|
||||
[]*apimiddleware.AttesterDutyJson{
|
||||
{
|
||||
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[0], 10),
|
||||
Slot: strconv.FormatUint(uint64(subscribeSlots[0]), 10),
|
||||
},
|
||||
{
|
||||
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[1], 10),
|
||||
Slot: strconv.FormatUint(uint64(subscribeSlots[1]), 10),
|
||||
},
|
||||
},
|
||||
nil,
|
||||
).Times(1)
|
||||
|
||||
dutiesProvider.EXPECT().GetAttesterDuties(
|
||||
ctx,
|
||||
slots.ToEpoch(subscribeSlots[2]),
|
||||
validatorIndices,
|
||||
).Return(
|
||||
[]*apimiddleware.AttesterDutyJson{
|
||||
{
|
||||
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[2], 10),
|
||||
Slot: strconv.FormatUint(uint64(subscribeSlots[2]), 10),
|
||||
},
|
||||
},
|
||||
nil,
|
||||
).Times(1)
|
||||
|
||||
validatorClient := &beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
dutiesProvider: dutiesProvider,
|
||||
}
|
||||
err = validatorClient.subscribeCommitteeSubnets(
|
||||
ctx,
|
||||
ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
Slots: subscribeSlots,
|
||||
CommitteeIds: committeeIndices,
|
||||
IsAggregator: isAggregator,
|
||||
},
|
||||
validatorIndices,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
|
||||
const arraySizeMismatchErrorMessage = "arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `validatorIndices` don't have the same length"
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeRequest *ethpb.CommitteeSubnetsSubscribeRequest
|
||||
validatorIndices []types.ValidatorIndex
|
||||
attesterDuty *apimiddleware.AttesterDutyJson
|
||||
dutiesError error
|
||||
expectGetDutiesQuery bool
|
||||
expectSubscribeRestCall bool
|
||||
expectedErrorMessage string
|
||||
}{
|
||||
{
|
||||
name: "nil subscribe request",
|
||||
subscribeRequest: nil,
|
||||
expectedErrorMessage: "committee subnets subscribe request is nil",
|
||||
},
|
||||
{
|
||||
name: "CommitteeIds size mismatch",
|
||||
subscribeRequest: ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
CommitteeIds: []types.CommitteeIndex{1},
|
||||
Slots: []types.Slot{1, 2},
|
||||
IsAggregator: []bool{false, true},
|
||||
},
|
||||
validatorIndices: []types.ValidatorIndex{1, 2},
|
||||
expectedErrorMessage: arraySizeMismatchErrorMessage,
|
||||
},
|
||||
{
|
||||
name: "Slots size mismatch",
|
||||
subscribeRequest: ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
CommitteeIds: []types.CommitteeIndex{1, 2},
|
||||
Slots: []types.Slot{1},
|
||||
IsAggregator: []bool{false, true},
|
||||
},
|
||||
validatorIndices: []types.ValidatorIndex{1, 2},
|
||||
expectedErrorMessage: arraySizeMismatchErrorMessage,
|
||||
},
|
||||
{
|
||||
name: "IsAggregator size mismatch",
|
||||
subscribeRequest: ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
CommitteeIds: []types.CommitteeIndex{1, 2},
|
||||
Slots: []types.Slot{1, 2},
|
||||
IsAggregator: []bool{false},
|
||||
},
|
||||
validatorIndices: []types.ValidatorIndex{1, 2},
|
||||
expectedErrorMessage: arraySizeMismatchErrorMessage,
|
||||
},
|
||||
{
|
||||
name: "ValidatorIndices size mismatch",
|
||||
subscribeRequest: ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
CommitteeIds: []types.CommitteeIndex{1, 2},
|
||||
Slots: []types.Slot{1, 2},
|
||||
IsAggregator: []bool{false, true},
|
||||
},
|
||||
validatorIndices: []types.ValidatorIndex{1},
|
||||
expectedErrorMessage: arraySizeMismatchErrorMessage,
|
||||
},
|
||||
{
|
||||
name: "bad duties query",
|
||||
subscribeRequest: ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
Slots: []types.Slot{1},
|
||||
CommitteeIds: []types.CommitteeIndex{2},
|
||||
IsAggregator: []bool{false},
|
||||
},
|
||||
validatorIndices: []types.ValidatorIndex{3},
|
||||
dutiesError: errors.New("foo error"),
|
||||
expectGetDutiesQuery: true,
|
||||
expectedErrorMessage: "failed to get duties for epoch `0`: foo error",
|
||||
},
|
||||
{
|
||||
name: "bad duty slot",
|
||||
subscribeRequest: ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
Slots: []types.Slot{1},
|
||||
CommitteeIds: []types.CommitteeIndex{2},
|
||||
IsAggregator: []bool{false},
|
||||
},
|
||||
validatorIndices: []types.ValidatorIndex{3},
|
||||
attesterDuty: &apimiddleware.AttesterDutyJson{
|
||||
Slot: "foo",
|
||||
CommitteesAtSlot: "1",
|
||||
},
|
||||
expectGetDutiesQuery: true,
|
||||
expectedErrorMessage: "failed to parse slot `foo`",
|
||||
},
|
||||
{
|
||||
name: "bad duty committees at slot",
|
||||
subscribeRequest: ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
Slots: []types.Slot{1},
|
||||
CommitteeIds: []types.CommitteeIndex{2},
|
||||
IsAggregator: []bool{false},
|
||||
},
|
||||
validatorIndices: []types.ValidatorIndex{3},
|
||||
attesterDuty: &apimiddleware.AttesterDutyJson{
|
||||
Slot: "1",
|
||||
CommitteesAtSlot: "foo",
|
||||
},
|
||||
expectGetDutiesQuery: true,
|
||||
expectedErrorMessage: "failed to parse CommitteesAtSlot `foo`",
|
||||
},
|
||||
{
|
||||
name: "missing slot in duties",
|
||||
subscribeRequest: ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
Slots: []types.Slot{1},
|
||||
CommitteeIds: []types.CommitteeIndex{2},
|
||||
IsAggregator: []bool{false},
|
||||
},
|
||||
validatorIndices: []types.ValidatorIndex{3},
|
||||
attesterDuty: &apimiddleware.AttesterDutyJson{
|
||||
Slot: "2",
|
||||
CommitteesAtSlot: "3",
|
||||
},
|
||||
expectGetDutiesQuery: true,
|
||||
expectedErrorMessage: "failed to get committees for slot `1`",
|
||||
},
|
||||
{
|
||||
name: "bad POST request",
|
||||
subscribeRequest: ðpb.CommitteeSubnetsSubscribeRequest{
|
||||
Slots: []types.Slot{1},
|
||||
CommitteeIds: []types.CommitteeIndex{2},
|
||||
IsAggregator: []bool{false},
|
||||
},
|
||||
validatorIndices: []types.ValidatorIndex{3},
|
||||
attesterDuty: &apimiddleware.AttesterDutyJson{
|
||||
Slot: "1",
|
||||
CommitteesAtSlot: "2",
|
||||
},
|
||||
expectGetDutiesQuery: true,
|
||||
expectSubscribeRestCall: true,
|
||||
expectedErrorMessage: "failed to send POST data to REST endpoint: foo error",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
dutiesProvider := mock.NewMockdutiesProvider(ctrl)
|
||||
if testCase.expectGetDutiesQuery {
|
||||
dutiesProvider.EXPECT().GetAttesterDuties(
|
||||
ctx,
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
[]*apimiddleware.AttesterDutyJson{testCase.attesterDuty},
|
||||
testCase.dutiesError,
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
|
||||
if testCase.expectSubscribeRestCall {
|
||||
jsonRestHandler.EXPECT().PostRestJson(
|
||||
ctx,
|
||||
subscribeCommitteeSubnetsTestEndpoint,
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
nil,
|
||||
errors.New("foo error"),
|
||||
).Times(1)
|
||||
}
|
||||
|
||||
validatorClient := &beaconApiValidatorClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
dutiesProvider: dutiesProvider,
|
||||
}
|
||||
err := validatorClient.subscribeCommitteeSubnets(ctx, testCase.subscribeRequest, testCase.validatorIndices)
|
||||
assert.ErrorContains(t, testCase.expectedErrorMessage, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -4,11 +4,13 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -33,3 +35,94 @@ func (c *beaconApiValidatorClient) submitSyncMessage(ctx context.Context, syncMe
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) getSyncMessageBlockRoot(ctx context.Context) (*ethpb.SyncMessageBlockRootResponse, error) {
|
||||
// Get head beacon block root.
|
||||
var resp apimiddleware.BlockRootResponseJson
|
||||
if _, err := c.jsonRestHandler.GetRestJsonResponse(ctx, "/eth/v1/beacon/blocks/head/root", &resp); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to query GET REST endpoint")
|
||||
}
|
||||
|
||||
// An optimistic validator MUST NOT participate in sync committees
|
||||
// (i.e., sign across the DOMAIN_SYNC_COMMITTEE, DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF or DOMAIN_CONTRIBUTION_AND_PROOF domains).
|
||||
if resp.ExecutionOptimistic {
|
||||
return nil, errors.New("the node is currently optimistic and cannot serve validators")
|
||||
}
|
||||
|
||||
if resp.Data == nil {
|
||||
return nil, errors.New("no data returned")
|
||||
}
|
||||
|
||||
if resp.Data.Root == "" {
|
||||
return nil, errors.New("no root returned")
|
||||
}
|
||||
|
||||
blockRoot, err := hexutil.Decode(resp.Data.Root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decode beacon block root")
|
||||
}
|
||||
|
||||
return ðpb.SyncMessageBlockRootResponse{
|
||||
Root: blockRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) getSyncCommitteeContribution(
|
||||
ctx context.Context,
|
||||
req *ethpb.SyncCommitteeContributionRequest,
|
||||
) (*ethpb.SyncCommitteeContribution, error) {
|
||||
blockRootResponse, err := c.getSyncMessageBlockRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get sync message block root")
|
||||
}
|
||||
|
||||
blockRoot := hexutil.Encode(blockRootResponse.Root)
|
||||
url := fmt.Sprintf("/eth/v1/validator/sync_committee_contribution?slot=%d&subcommittee_index=%d&beacon_block_root=%s",
|
||||
uint64(req.Slot), req.SubnetId, blockRoot)
|
||||
|
||||
var resp apimiddleware.ProduceSyncCommitteeContributionResponseJson
|
||||
if _, err := c.jsonRestHandler.GetRestJsonResponse(ctx, url, &resp); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to query GET REST endpoint")
|
||||
}
|
||||
|
||||
return convertSyncContributionJsonToProto(resp.Data)
|
||||
}
|
||||
|
||||
func convertSyncContributionJsonToProto(contribution *apimiddleware.SyncCommitteeContributionJson) (*ethpb.SyncCommitteeContribution, error) {
if contribution == nil {
return nil, errors.New("sync committee contribution is nil")
}

slot, err := strconv.ParseUint(contribution.Slot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse slot `%s`", contribution.Slot)
}

blockRoot, err := hexutil.Decode(contribution.BeaconBlockRoot)
if err != nil {
return nil, errors.Wrapf(err, "failed to decode beacon block root `%s`", contribution.BeaconBlockRoot)
}

subcommitteeIdx, err := strconv.ParseUint(contribution.SubcommitteeIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse subcommittee index `%s`", contribution.SubcommitteeIndex)
}

aggregationBits, err := hexutil.Decode(contribution.AggregationBits)
if err != nil {
return nil, errors.Wrapf(err, "failed to decode aggregation bits `%s`", contribution.AggregationBits)
}

signature, err := hexutil.Decode(contribution.Signature)
if err != nil {
return nil, errors.Wrapf(err, "failed to decode contribution signature `%s`", contribution.Signature)
}

return &ethpb.SyncCommitteeContribution{
Slot: types.Slot(slot),
BlockRoot: blockRoot,
SubcommitteeIndex: subcommitteeIdx,
AggregationBits: aggregationBits,
Signature: signature,
}, nil
}

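The conversion helper above applies one pattern throughout: numeric fields go through strconv.ParseUint and hex fields through hexutil.Decode, with every failure wrapped together with the offending value. A small standalone sketch of that pattern with hypothetical inputs (hexutil.Decode expects the 0x prefix and an even number of hex digits):

package main

import (
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// Hypothetical raw JSON field values, mirroring what
	// convertSyncContributionJsonToProto receives from the API middleware.
	slotStr := "12345"
	rootStr := "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"

	slot, err := strconv.ParseUint(slotStr, 10, 64)
	if err != nil {
		fmt.Printf("failed to parse slot `%s`: %v\n", slotStr, err)
		return
	}

	root, err := hexutil.Decode(rootStr)
	if err != nil {
		fmt.Printf("failed to decode beacon block root `%s`: %v\n", rootStr, err)
		return
	}

	fmt.Printf("slot=%d, root is %d bytes\n", slot, len(root))
}
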
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"testing"

"github.com/ethereum/go-ethereum/common/hexutil"
@@ -88,3 +89,167 @@ func TestSubmitSyncMessage_BadRequest(t *testing.T) {
assert.ErrorContains(t, "failed to send POST data to `/eth/v1/beacon/pool/sync_committees` REST endpoint", err)
assert.ErrorContains(t, "foo error", err)
}

func TestGetSyncMessageBlockRoot(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

const blockRoot = "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
tests := []struct {
name string
endpointError error
expectedErrorMessage string
expectedResponse apimiddleware.BlockRootResponseJson
}{
{
name: "valid request",
expectedResponse: apimiddleware.BlockRootResponseJson{
Data: &apimiddleware.BlockRootContainerJson{
Root: blockRoot,
},
},
},
{
name: "internal server error",
expectedErrorMessage: "internal server error",
endpointError: errors.New("internal server error"),
},
{
name: "execution optimistic",
expectedResponse: apimiddleware.BlockRootResponseJson{
ExecutionOptimistic: true,
},
expectedErrorMessage: "the node is currently optimistic and cannot serve validators",
},
{
name: "no data",
expectedResponse: apimiddleware.BlockRootResponseJson{},
expectedErrorMessage: "no data returned",
},
{
name: "no root",
expectedResponse: apimiddleware.BlockRootResponseJson{
Data: new(apimiddleware.BlockRootContainerJson),
},
expectedErrorMessage: "no root returned",
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
jsonRestHandler.EXPECT().GetRestJsonResponse(
ctx,
"/eth/v1/beacon/blocks/head/root",
&apimiddleware.BlockRootResponseJson{},
).SetArg(
2,
test.expectedResponse,
).Return(
nil,
test.endpointError,
).Times(1)

validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler}
actualResponse, err := validatorClient.getSyncMessageBlockRoot(ctx)
if test.expectedErrorMessage != "" {
require.ErrorContains(t, test.expectedErrorMessage, err)
return
}

require.NoError(t, err)

expectedRootBytes, err := hexutil.Decode(test.expectedResponse.Data.Root)
require.NoError(t, err)

require.NoError(t, err)
require.DeepEqual(t, expectedRootBytes, actualResponse.Root)
})
}
}

func TestGetSyncCommitteeContribution(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

const blockRoot = "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"

request := &ethpb.SyncCommitteeContributionRequest{
Slot: types.Slot(1),
PublicKey: nil,
SubnetId: 1,
}

contributionJson := &apimiddleware.SyncCommitteeContributionJson{
Slot: "1",
BeaconBlockRoot: blockRoot,
SubcommitteeIndex: "1",
AggregationBits: "0x01",
Signature: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
}

tests := []struct {
name string
contribution apimiddleware.ProduceSyncCommitteeContributionResponseJson
endpointErr error
expectedErrMsg string
}{
{
name: "valid request",
contribution: apimiddleware.ProduceSyncCommitteeContributionResponseJson{Data: contributionJson},
},
{
name: "bad request",
endpointErr: errors.New("internal server error"),
expectedErrMsg: "internal server error",
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
jsonRestHandler.EXPECT().GetRestJsonResponse(
ctx,
"/eth/v1/beacon/blocks/head/root",
&apimiddleware.BlockRootResponseJson{},
).SetArg(
2,
apimiddleware.BlockRootResponseJson{
Data: &apimiddleware.BlockRootContainerJson{
Root: blockRoot,
},
},
).Return(
nil,
nil,
).Times(1)

jsonRestHandler.EXPECT().GetRestJsonResponse(
ctx,
fmt.Sprintf("/eth/v1/validator/sync_committee_contribution?slot=%d&subcommittee_index=%d&beacon_block_root=%s",
uint64(request.Slot), request.SubnetId, blockRoot),
&apimiddleware.ProduceSyncCommitteeContributionResponseJson{},
).SetArg(
2,
test.contribution,
).Return(
nil,
test.endpointErr,
).Times(1)

validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler}
actualResponse, err := validatorClient.getSyncCommitteeContribution(ctx, request)
if test.expectedErrMsg != "" {
require.ErrorContains(t, test.expectedErrMsg, err)
return
}
require.NoError(t, err)

expectedResponse, err := convertSyncContributionJsonToProto(test.contribution.Data)
require.NoError(t, err)
assert.DeepEqual(t, expectedResponse, actualResponse)
})
}
}

@@ -1,41 +0,0 @@
package beacon_api

import (
"context"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

func (c *beaconApiValidatorClient) getSyncMessageBlockRoot(ctx context.Context) (*ethpb.SyncMessageBlockRootResponse, error) {
// Get head beacon block root.
var resp apimiddleware.BlockRootResponseJson
if _, err := c.jsonRestHandler.GetRestJsonResponse(ctx, "/eth/v1/beacon/blocks/head/root", &resp); err != nil {
return nil, errors.Wrap(err, "failed to query GET REST endpoint")
}

// An optimistic validator MUST NOT participate in sync committees
// (i.e., sign across the DOMAIN_SYNC_COMMITTEE, DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF or DOMAIN_CONTRIBUTION_AND_PROOF domains).
if resp.ExecutionOptimistic {
return nil, errors.New("the node is currently optimistic and cannot serve validators")
}

if resp.Data == nil {
return nil, errors.New("no data returned")
}

if resp.Data.Root == "" {
return nil, errors.New("no root returned")
}

blockRoot, err := hexutil.Decode(resp.Data.Root)
if err != nil {
return nil, errors.Wrap(err, "failed to decode beacon block root")
}

return &ethpb.SyncMessageBlockRootResponse{
Root: blockRoot,
}, nil
}

@@ -1,92 +0,0 @@
package beacon_api

import (
"context"
"errors"
"testing"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/mock/gomock"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/validator/client/beacon-api/mock"
)

func TestGetSyncMessageBlockRoot(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

const blockRoot = "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
tests := []struct {
name string
endpointError error
expectedErrorMessage string
expectedResponse apimiddleware.BlockRootResponseJson
}{
{
name: "valid request",
expectedResponse: apimiddleware.BlockRootResponseJson{
Data: &apimiddleware.BlockRootContainerJson{
Root: blockRoot,
},
},
},
{
name: "internal server error",
expectedErrorMessage: "internal server error",
endpointError: errors.New("internal server error"),
},
{
name: "execution optimistic",
expectedResponse: apimiddleware.BlockRootResponseJson{
ExecutionOptimistic: true,
},
expectedErrorMessage: "the node is currently optimistic and cannot serve validators",
},
{
name: "no data",
expectedResponse: apimiddleware.BlockRootResponseJson{},
expectedErrorMessage: "no data returned",
},
{
name: "no root",
expectedResponse: apimiddleware.BlockRootResponseJson{
Data: new(apimiddleware.BlockRootContainerJson),
},
expectedErrorMessage: "no root returned",
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
jsonRestHandler.EXPECT().GetRestJsonResponse(
ctx,
"/eth/v1/beacon/blocks/head/root",
&apimiddleware.BlockRootResponseJson{},
).SetArg(
2,
test.expectedResponse,
).Return(
nil,
test.endpointError,
).Times(1)

validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler}
actualResponse, err := validatorClient.getSyncMessageBlockRoot(ctx)
if test.expectedErrorMessage != "" {
require.ErrorContains(t, test.expectedErrorMessage, err)
return
}

require.NoError(t, err)

expectedRootBytes, err := hexutil.Decode(test.expectedResponse.Data.Root)
require.NoError(t, err)

require.NoError(t, err)
require.DeepEqual(t, expectedRootBytes, actualResponse.Root)
})
}
}

@@ -6,6 +6,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/validator/client/grpc-api",
visibility = ["//validator:__subpackages__"],
deps = [
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//validator/client/iface:go_default_library",
"@com_github_pkg_errors//:go_default_library",

@@ -5,6 +5,7 @@ import (

"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
iface "github.com/prysmaticlabs/prysm/v3/validator/client/iface"
"google.golang.org/grpc"
@@ -98,7 +99,7 @@ func (c *grpcValidatorClient) SubmitValidatorRegistrations(ctx context.Context,
return c.beaconNodeValidatorClient.SubmitValidatorRegistrations(ctx, in)
}

func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest) (*empty.Empty, error) {
func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []types.ValidatorIndex) (*empty.Empty, error) {
return c.beaconNodeValidatorClient.SubscribeCommitteeSubnets(ctx, in)
}

@@ -4,6 +4,7 @@ import (
"context"

"github.com/golang/protobuf/ptypes/empty"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

@@ -25,7 +26,7 @@ type ValidatorClient interface {
SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error)
SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error)
ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error)
SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest) (*empty.Empty, error)
SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []types.ValidatorIndex) (*empty.Empty, error)
CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error)
GetSyncMessageBlockRoot(ctx context.Context, in *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error)
SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error)

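The signature change above threads the duties' validator indices through to every ValidatorClient implementation. A hypothetical stub satisfying the new method shape is sketched below; as with the gRPC client earlier in this diff, implementations that have no use for the indices can simply discard the extra parameter.

package example

import (
	"context"

	"github.com/golang/protobuf/ptypes/empty"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

// noopSubnetSubscriber is an illustrative stub, not part of the diff. It only
// demonstrates the updated SubscribeCommitteeSubnets signature.
type noopSubnetSubscriber struct{}

func (noopSubnetSubscriber) SubscribeCommitteeSubnets(
	_ context.Context,
	_ *ethpb.CommitteeSubnetsSubscribeRequest,
	validatorIndices []types.ValidatorIndex,
) (*empty.Empty, error) {
	// A REST-based implementation could use validatorIndices to look up each
	// validator's committee assignment; this stub just ignores them.
	_ = validatorIndices
	return &empty.Empty{}, nil
}
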
@@ -346,6 +346,8 @@ func (v *validator) ReceiveBlocks(ctx context.Context, connectionErrorChannel ch
blk, err = blocks.NewSignedBeaconBlock(b.AltairBlock)
case *ethpb.StreamBlocksResponse_BellatrixBlock:
blk, err = blocks.NewSignedBeaconBlock(b.BellatrixBlock)
case *ethpb.StreamBlocksResponse_CapellaBlock:
blk, err = blocks.NewSignedBeaconBlock(b.CapellaBlock)
}
if err != nil {
log.WithError(err).Error("Failed to wrap signed block")
@@ -626,6 +628,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
subscribeSlots := make([]types.Slot, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
subscribeCommitteeIndices := make([]types.CommitteeIndex, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
subscribeIsAggregator := make([]bool, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
subscribeValidatorIndices := make([]types.ValidatorIndex, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
alreadySubscribed := make(map[[64]byte]bool)

for _, duty := range res.CurrentEpochDuties {
@@ -633,6 +636,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
if duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING {
attesterSlot := duty.AttesterSlot
committeeIndex := duty.CommitteeIndex
validatorIndex := duty.ValidatorIndex

alreadySubscribedKey := validatorSubscribeKey(attesterSlot, committeeIndex)
if _, ok := alreadySubscribed[alreadySubscribedKey]; ok {
@@ -650,6 +654,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
subscribeSlots = append(subscribeSlots, attesterSlot)
subscribeCommitteeIndices = append(subscribeCommitteeIndices, committeeIndex)
subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
subscribeValidatorIndices = append(subscribeValidatorIndices, validatorIndex)
}
}

@@ -657,6 +662,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
if duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING {
attesterSlot := duty.AttesterSlot
committeeIndex := duty.CommitteeIndex
validatorIndex := duty.ValidatorIndex

alreadySubscribedKey := validatorSubscribeKey(attesterSlot, committeeIndex)
if _, ok := alreadySubscribed[alreadySubscribedKey]; ok {
@@ -674,14 +680,18 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
subscribeSlots = append(subscribeSlots, attesterSlot)
subscribeCommitteeIndices = append(subscribeCommitteeIndices, committeeIndex)
subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
subscribeValidatorIndices = append(subscribeValidatorIndices, validatorIndex)
}
}

_, err := v.validatorClient.SubscribeCommitteeSubnets(ctx, &ethpb.CommitteeSubnetsSubscribeRequest{
Slots: subscribeSlots,
CommitteeIds: subscribeCommitteeIndices,
IsAggregator: subscribeIsAggregator,
})
_, err := v.validatorClient.SubscribeCommitteeSubnets(ctx,
&ethpb.CommitteeSubnetsSubscribeRequest{
Slots: subscribeSlots,
CommitteeIds: subscribeCommitteeIndices,
IsAggregator: subscribeIsAggregator,
},
subscribeValidatorIndices,
)

return err
}

@@ -539,7 +539,8 @@ func TestUpdateDuties_OK(t *testing.T) {
client.EXPECT().SubscribeCommitteeSubnets(
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest) (*emptypb.Empty, error) {
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []types.ValidatorIndex) (*emptypb.Empty, error) {
wg.Done()
return nil, nil
})
@@ -594,7 +595,8 @@ func TestUpdateDuties_OK_FilterBlacklistedPublicKeys(t *testing.T) {
client.EXPECT().SubscribeCommitteeSubnets(
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest) (*emptypb.Empty, error) {
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []types.ValidatorIndex) (*emptypb.Empty, error) {
wg.Done()
return nil, nil
})