Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-11 06:18:05 -05:00
Compare commits
19 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 54ae2b32b2 |  |
|  | 58f4ba758c |  |
|  | 64f64f06bf |  |
|  | e70055733f |  |
|  | 36e4f49af0 |  |
|  | d98428dec4 |  |
|  | 00b92e01d3 |  |
|  | b84c1aa3ea |  |
|  | ca5adbf7e4 |  |
|  | 621e149dce |  |
|  | a083b7a0a5 |  |
|  | dd5995b665 |  |
|  | 6903d52dde |  |
|  | ac8d27bcf1 |  |
|  | 8d6afb3afd |  |
|  | d51f716675 |  |
|  | 0411b7eceb |  |
|  | 8dfc80187d |  |
|  | 3833f78803 |  |
.github/workflows/horusec.yaml (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
name: Horusec Security Scan

on:
  schedule:
    # Runs at 00:00 UTC on Sundays
    - cron: '0 0 * * SUN'

jobs:
  Horusec_Scan:
    name: horusec-Scan
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/develop'
    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with: # Required when commit authors is enabled
          fetch-depth: 0

      - name: Running Security Scan
        run: |
          curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"

@@ -48,6 +48,7 @@ go_library(
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/protoarray:go_default_library",

@@ -19,10 +19,11 @@ func TestHeadSlot_DataRace(t *testing.T) {
    }
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisState(t, 1)
    wait := make(chan struct{})
    go func() {
        defer close(wait)
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b))
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
    }()
    s.HeadSlot()
    <-wait

@@ -37,9 +38,11 @@ func TestHeadRoot_DataRace(t *testing.T) {
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    wait := make(chan struct{})
    st, _ := util.DeterministicGenesisState(t, 1)
    go func() {
        defer close(wait)
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b))
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))

    }()
    _, err = s.HeadRoot(context.Background())
    require.NoError(t, err)

@@ -57,9 +60,11 @@ func TestHeadBlock_DataRace(t *testing.T) {
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    wait := make(chan struct{})
    st, _ := util.DeterministicGenesisState(t, 1)
    go func() {
        defer close(wait)
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b))
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))

    }()
    _, err = s.HeadBlock(context.Background())
    require.NoError(t, err)

@@ -74,9 +79,11 @@ func TestHeadState_DataRace(t *testing.T) {
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    wait := make(chan struct{})
    st, _ := util.DeterministicGenesisState(t, 1)
    go func() {
        defer close(wait)
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b))
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))

    }()
    _, err = s.HeadState(context.Background())
    require.NoError(t, err)

@@ -44,7 +44,11 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
    if err != nil {
        return err
    }
    return s.saveHead(ctx, headRoot, headBlock)
    headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
    if err != nil {
        return errors.Wrap(err, "could not retrieve head state in DB")
    }
    return s.saveHead(ctx, headRoot, headBlock, headState)
}

// This defines the current chain service's view of head.

@@ -101,7 +105,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,

// This saves head info to the local service cache, it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock block.SignedBeaconBlock) error {
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock block.SignedBeaconBlock, headState state.BeaconState) error {
    ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
    defer span.End()

@@ -116,6 +120,9 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock blo
    if err := helpers.BeaconBlockIsNil(headBlock); err != nil {
        return err
    }
    if headState == nil || headState.IsNil() {
        return errors.New("cannot save nil head state")
    }

    // If the head state is not available, just return nil.
    // There's nothing to cache

@@ -123,15 +130,6 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock blo
        return nil
    }

    // Get the new head state from cached state or DB.
    newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
    if err != nil {
        return errors.Wrap(err, "could not retrieve head state in DB")
    }
    if newHeadState == nil || newHeadState.IsNil() {
        return errors.New("cannot save nil head state")
    }

    // A chain re-org occurred, so we fire an event notifying the rest of the services.
    headSlot := s.HeadSlot()
    newHeadSlot := headBlock.Block().Slot()

@@ -170,7 +168,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock blo
    }

    // Cache the new head info.
    s.setHead(headRoot, headBlock, newHeadState)
    s.setHead(headRoot, headBlock, headState)

    // Save the new head root to DB.
    if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {

@@ -180,7 +178,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock blo
    // Forward an event capturing a new chain head over a common event feed
    // done in a goroutine to avoid blocking the critical runtime main routine.
    go func() {
        if err := s.notifyNewHeadEvent(ctx, newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
        if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
            log.WithError(err).Error("Could not notify event feed of new chain head")
        }
    }()

@@ -31,7 +31,8 @@ func TestSaveHead_Same(t *testing.T) {
    service.head = &head{slot: 0, root: r}
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    require.NoError(t, service.saveHead(context.Background(), r, b))
    st, _ := util.DeterministicGenesisState(t, 1)
    require.NoError(t, service.saveHead(context.Background(), r, b, st))
    assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
    assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}

@@ -69,7 +70,7 @@ func TestSaveHead_Different(t *testing.T) {
    require.NoError(t, headState.SetSlot(1))
    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
    require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
    require.NoError(t, service.saveHead(context.Background(), newRoot, wsb))
    require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))

    assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")

@@ -115,7 +116,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
    require.NoError(t, headState.SetSlot(1))
    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
    require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
    require.NoError(t, service.saveHead(context.Background(), newRoot, wsb))
    require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))

    assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")

@@ -159,7 +160,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
    service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
    headRoot, err := service.updateHead(context.Background(), []uint64{})
    require.NoError(t, err)
    require.NoError(t, service.saveHead(context.Background(), headRoot, wsb))
    st, _ := util.DeterministicGenesisState(t, 1)
    require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
}

func Test_notifyNewHeadEvent(t *testing.T) {

@@ -138,6 +138,26 @@ var (
        Name: "state_balance_cache_miss",
        Help: "Count the number of state balance cache hits.",
    })
    newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
        Name: "new_payload_valid_node_count",
        Help: "Count the number of valid nodes after newPayload EE call",
    })
    newPayloadOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
        Name: "new_payload_optimistic_node_count",
        Help: "Count the number of optimistic nodes after newPayload EE call",
    })
    newPayloadInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
        Name: "new_payload_invalid_node_count",
        Help: "Count the number of invalid nodes after newPayload EE call",
    })
    forkchoiceUpdatedValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
        Name: "forkchoice_updated_valid_node_count",
        Help: "Count the number of valid nodes after forkchoiceUpdated EE call",
    })
    forkchoiceUpdatedOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
        Name: "forkchoice_updated_optimistic_node_count",
        Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
    })
)

// reportSlotMetrics reports slot related metrics.

@@ -5,14 +5,21 @@ import (
    "fmt"

    "github.com/pkg/errors"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
    "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/time/slots"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
)

@@ -20,7 +27,7 @@ import (
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.BeaconState, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
    defer span.End()

@@ -66,24 +73,36 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
        FinalizedBlockHash: finalizedHash,
    }

    // payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
    payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
    nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
    hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, headState, nextSlot)
    if err != nil {
        return nil, errors.Wrap(err, "could not get payload attribute")
    }

    payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
    if err != nil {
        switch err {
        case powchain.ErrAcceptedSyncingPayloadStatus:
            forkchoiceUpdatedOptimisticNodeCount.Inc()
            log.WithFields(logrus.Fields{
                "headSlot":      headBlk.Slot(),
                "headHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
                "finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
                "headSlot":                  headBlk.Slot(),
                "headPayloadBlockHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
                "finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
            }).Info("Called fork choice updated with optimistic block")
            return payloadID, nil
        default:
            return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
        }
    }
    forkchoiceUpdatedValidNodeCount.Inc()
    if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
        return nil, errors.Wrap(err, "could not set block to valid")
    }
    if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
        var pId [8]byte
        copy(pId[:], payloadID[:])
        s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
    }
    return payloadID, nil
}

@@ -114,20 +133,35 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
    if err != nil {
        return false, errors.Wrap(err, "could not get execution payload")
    }
    _, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
    lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
    if err != nil {
        switch err {
        case powchain.ErrAcceptedSyncingPayloadStatus:
            newPayloadOptimisticNodeCount.Inc()
            log.WithFields(logrus.Fields{
                "slot":      blk.Block().Slot(),
                "blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
                "slot":             blk.Block().Slot(),
                "payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
            }).Info("Called new payload with optimistic block")
            return false, nil
        case powchain.ErrInvalidPayloadStatus:
            newPayloadInvalidNodeCount.Inc()
            root, err := blk.Block().HashTreeRoot()
            if err != nil {
                return false, err
            }
            invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(lastValidHash))
            if err != nil {
                return false, err
            }
            if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
                return false, err
            }
            return false, errors.New("could not validate an INVALID payload from execution engine")
        default:
            return false, errors.Wrap(err, "could not validate execution payload from execution engine")
        }
    }

    newPayloadValidNodeCount.Inc()
    // During the transition event, the transition block should be verified for sanity.
    if blocks.IsPreBellatrixVersion(preStateVersion) {
        // Handle case where pre-state is Altair but block contains payload.

@@ -174,3 +208,69 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.Beacon
    }
    return parentIsExecutionBlock, nil
}

// getPayloadAttribute returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
    proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
    if !ok { // There's no need to build attribute if there is no proposer for slot.
        return false, nil, 0, nil
    }

    // Get previous randao.
    st = st.Copy()
    st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
    if err != nil {
        return false, nil, 0, err
    }
    prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
    if err != nil {
        return false, nil, 0, nil
    }

    // Get fee recipient.
    feeRecipient := params.BeaconConfig().DefaultFeeRecipient
    recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
    switch {
    case errors.Is(err, kv.ErrNotFoundFeeRecipient):
        if feeRecipient.String() == fieldparams.EthBurnAddressHex {
            logrus.WithFields(logrus.Fields{
                "validatorIndex": proposerID,
                "burnAddress":    fieldparams.EthBurnAddressHex,
            }).Error("Fee recipient not set. Using burn address")
        }
    case err != nil:
        return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
    default:
        feeRecipient = recipient
    }

    // Get timestamp.
    t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
    if err != nil {
        return false, nil, 0, err
    }
    attr := &enginev1.PayloadAttributes{
        Timestamp:             uint64(t.Unix()),
        PrevRandao:            prevRando,
        SuggestedFeeRecipient: feeRecipient.Bytes(),
    }
    return true, attr, proposerID, nil
}

// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
    for _, root := range blkRoots {
        if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
            return err
        }

        // Deleting the block also deletes its state.
        if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
            // TODO(10487): If a caller requests to delete a root that's justified and finalized. We should gracefully shutdown.
            // This is an irreparable condition, it would mean a justified or finalized block has become invalid.
            return err
        }
    }
    return nil
}

@@ -5,6 +5,9 @@ import (
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/common"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"

@@ -24,6 +27,7 @@ import (
    "github.com/prysmaticlabs/prysm/testing/require"
    "github.com/prysmaticlabs/prysm/testing/util"
    "github.com/prysmaticlabs/prysm/time/slots"
    logTest "github.com/sirupsen/logrus/hooks/test"
)

func Test_NotifyForkchoiceUpdate(t *testing.T) {

@@ -44,8 +48,13 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB)),
        WithForkChoiceStore(fcs),
        WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
    }
    service, err := NewService(ctx, opts...)
    st, _ := util.DeterministicGenesisState(t, 1)
    service.head = &head{
        state: st,
    }
    require.NoError(t, err)
    require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))

@@ -157,7 +166,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
            _, err := service.notifyForkchoiceUpdate(ctx, tt.blk, service.headRoot(), tt.finalizedRoot)
            st, _ := util.DeterministicGenesisState(t, 1)
            _, err := service.notifyForkchoiceUpdate(ctx, st, tt.blk, service.headRoot(), tt.finalizedRoot)
            if tt.errString != "" {
                require.ErrorContains(t, tt.errString, err)
            } else {

@@ -183,13 +193,6 @@ func Test_NotifyNewPayload(t *testing.T) {
    phase0State, _ := util.DeterministicGenesisState(t, 1)
    altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
    bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
    blk := &ethpb.SignedBeaconBlockBellatrix{
        Block: &ethpb.BeaconBlockBellatrix{
            Body: &ethpb.BeaconBlockBodyBellatrix{
                ExecutionPayload: &v1.ExecutionPayload{},
            },
        },
    }
    a := &ethpb.SignedBeaconBlockAltair{
        Block: &ethpb.BeaconBlockAltair{
            Body: &ethpb.BeaconBlockBodyAltair{},

@@ -197,10 +200,32 @@
    }
    altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
    require.NoError(t, err)
    bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
    blk := &ethpb.SignedBeaconBlockBellatrix{
        Block: &ethpb.BeaconBlockBellatrix{
            Slot: 1,
            Body: &ethpb.BeaconBlockBodyBellatrix{
                ExecutionPayload: &v1.ExecutionPayload{
                    BlockNumber:   1,
                    ParentHash:    make([]byte, fieldparams.RootLength),
                    FeeRecipient:  make([]byte, fieldparams.FeeRecipientLength),
                    StateRoot:     make([]byte, fieldparams.RootLength),
                    ReceiptsRoot:  make([]byte, fieldparams.RootLength),
                    LogsBloom:     make([]byte, fieldparams.LogsBloomLength),
                    PrevRandao:    make([]byte, fieldparams.RootLength),
                    BaseFeePerGas: make([]byte, fieldparams.RootLength),
                    BlockHash:     make([]byte, fieldparams.RootLength),
                },
            },
        },
    }
    bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
    require.NoError(t, err)
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    r, err := bellatrixBlk.Block().HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
    require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))

    tests := []struct {
        name string

@@ -244,7 +269,7 @@ func Test_NotifyNewPayload(t *testing.T) {
            preState: bellatrixState,
            blk: bellatrixBlk,
            newPayloadErr: powchain.ErrInvalidPayloadStatus,
            errString: "could not validate execution payload from execution engine: payload status is INVALID",
            errString: "could not validate an INVALID payload from execution engine",
            isValidPayload: false,
        },
        {

@@ -573,6 +598,47 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
    require.Equal(t, true, candidate)
}

func Test_GetPayloadAttribute(t *testing.T) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB)),
        WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
    }

    // Cache miss
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    hasPayload, _, vId, err := service.getPayloadAttribute(ctx, nil, 0)
    require.NoError(t, err)
    require.Equal(t, false, hasPayload)
    require.Equal(t, types.ValidatorIndex(0), vId)

    // Cache hit, advance state, no fee recipient
    suggestedVid := types.ValidatorIndex(1)
    slot := types.Slot(1)
    service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
    st, _ := util.DeterministicGenesisState(t, 1)
    hook := logTest.NewGlobal()
    hasPayload, attr, vId, err := service.getPayloadAttribute(ctx, st, slot)
    require.NoError(t, err)
    require.Equal(t, true, hasPayload)
    require.Equal(t, suggestedVid, vId)
    require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
    require.LogsContain(t, hook, "Fee recipient not set. Using burn address")

    // Cache hit, advance state, has fee recipient
    suggestedAddr := common.HexToAddress("123")
    require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
    service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
    hasPayload, attr, vId, err = service.getPayloadAttribute(ctx, st, slot)
    require.NoError(t, err)
    require.Equal(t, true, hasPayload)
    require.Equal(t, suggestedVid, vId)
    require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient))
}

func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    params.OverrideBeaconConfig(params.MainnetConfig())

@@ -588,7 +654,6 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
    }
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)

    genesisStateRoot := [32]byte{}
    genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
    wr, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)

@@ -671,3 +736,59 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
    require.DeepEqual(t, validCheckpoint.Root, cp.Root)
    require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
}

func TestService_removeInvalidBlockAndState(t *testing.T) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB)),
        WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
    }
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)

    // Deleting unknown block should not error.
    require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))

    // Happy case
    b1 := util.NewBeaconBlock()
    b1.Block.Slot = 1
    blk1, err := wrapper.WrappedSignedBeaconBlock(b1)
    require.NoError(t, err)
    r1, err := blk1.Block().HashTreeRoot()
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk1))
    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
        Slot: 1,
        Root: r1[:],
    }))
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r1))

    b2 := util.NewBeaconBlock()
    b2.Block.Slot = 2
    blk2, err := wrapper.WrappedSignedBeaconBlock(b2)
    require.NoError(t, err)
    r2, err := blk2.Block().HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk2))
    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
        Slot: 2,
        Root: r2[:],
    }))
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r2))

    require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{r1, r2}))

    require.Equal(t, false, service.hasBlock(ctx, r1))
    require.Equal(t, false, service.hasBlock(ctx, r2))
    require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r1))
    require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r2))
    has, err := service.cfg.StateGen.HasState(ctx, r1)
    require.NoError(t, err)
    require.Equal(t, false, has)
    has, err = service.cfg.StateGen.HasState(ctx, r2)
    require.NoError(t, err)
    require.Equal(t, false, has)
}

@@ -2,6 +2,7 @@ package blockchain

import (
    "github.com/prysmaticlabs/prysm/async/event"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
    statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/db"

@@ -66,6 +67,14 @@ func WithDepositCache(c *depositcache.DepositCache) Option {
    }
}

// WithProposerIdsCache for proposer id cache.
func WithProposerIdsCache(c *cache.ProposerPayloadIDsCache) Option {
    return func(s *Service) error {
        s.cfg.ProposerSlotIndexCache = c
        return nil
    }
}

// WithAttestationPool for attestation lifecycle after chain inclusion.
func WithAttestationPool(p attestations.Pool) Option {
    return func(s *Service) error {

@@ -127,8 +127,8 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
        }
    }

    if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
        return err
    if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
        return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
    }
    if isValidPayload {
        if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {

@@ -148,6 +148,9 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
        return err
    }

    if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
        return err
    }
    // If slasher is configured, forward the attestations in the block via
    // an event feed for processing.
    if features.Get().EnableSlasher {

@@ -212,10 +215,14 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
    if err != nil {
        return err
    }
    if _, err := s.notifyForkchoiceUpdate(ctx, headBlock.Block(), headRoot, bytesutil.ToBytes32(finalized.Root)); err != nil {
    headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
    if err != nil {
        return err
    }
    if err := s.saveHead(ctx, headRoot, headBlock); err != nil {
    if _, err := s.notifyForkchoiceUpdate(ctx, headState, headBlock.Block(), headRoot, bytesutil.ToBytes32(finalized.Root)); err != nil {
        return err
    }
    if err := s.saveHead(ctx, headRoot, headBlock, headState); err != nil {
        return errors.Wrap(err, "could not save head")
    }

@@ -428,7 +435,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
        }
    }

    if _, err := s.notifyForkchoiceUpdate(ctx, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
    if _, err := s.notifyForkchoiceUpdate(ctx, preState, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
        return nil, nil, err
    }
}

@@ -605,9 +612,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
    if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
        return errors.Wrap(err, "could not save state")
    }
    if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
        return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
    }
    return nil
}

@@ -186,7 +186,13 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
        log.WithError(err).Error("Could not get block from db")
        return
    }
    headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
    if err != nil {
        log.WithError(err).Error("Could not get state from db")
        return
    }
    _, err = s.notifyForkchoiceUpdate(s.ctx,
        headState,
        newHeadBlock.Block(),
        newHeadRoot,
        bytesutil.ToBytes32(finalized.Root),

@@ -194,7 +200,7 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
    if err != nil {
        log.WithError(err).Error("could not notify forkchoice update")
    }
    if err := s.saveHead(ctx, newHeadRoot, newHeadBlock); err != nil {
    if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
        log.WithError(err).Error("could not save head")
    }
}

@@ -6,6 +6,7 @@ import (
    "time"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"

@@ -133,7 +134,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {

    service, err := NewService(ctx, opts...)
    require.NoError(t, err)

    service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
    service.notifyEngineIfChangedHead(ctx, service.headRoot())
    hookErr := "could not notify forkchoice update"
    finalizedErr := "could not get finalized checkpoint"

@@ -156,9 +157,20 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
    r1, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
    finalized := &ethpb.Checkpoint{Root: r1[:], Epoch: 0}

    st, _ := util.DeterministicGenesisState(t, 1)
    service.head = &head{
        slot:  1,
        root:  r1,
        block: wsb,
        state: st,
    }
    service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
    service.store.SetFinalizedCheckpt(finalized)
    service.notifyEngineIfChangedHead(ctx, r1)
    require.LogsDoNotContain(t, hook, finalizedErr)
    require.LogsDoNotContain(t, hook, hookErr)
    vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
    require.Equal(t, true, has)
    require.Equal(t, types.ValidatorIndex(1), vId)
    require.Equal(t, [8]byte{1}, payloadID)
}

@@ -74,6 +74,7 @@ type config struct {
    ChainStartFetcher      powchain.ChainStartFetcher
    BeaconDB               db.HeadAccessDatabase
    DepositCache           *depositcache.DepositCache
    ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
    AttPool                attestations.Pool
    ExitPool               voluntaryexits.PoolManager
    SlashingPool           slashings.PoolManager

@@ -23,9 +23,10 @@ func TestChainService_SaveHead_DataRace(t *testing.T) {
        cfg: &config{BeaconDB: beaconDB},
    }
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    st, _ := util.DeterministicGenesisState(t, 1)
    require.NoError(t, err)
    go func() {
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b))
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
    }()
    require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b))
    require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}

beacon-chain/cache/BUILD.bazel (vendored, 3 changes)
@@ -13,6 +13,7 @@ go_library(
        "common.go",
        "doc.go",
        "error.go",
        "payload_id.go",
        "proposer_indices.go",
        "proposer_indices_disabled.go", # keep
        "proposer_indices_type.go",

@@ -26,6 +27,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//testing/spectest:__subpackages__",
        "//tools:__subpackages__",
    ],
    deps = [

@@ -61,6 +63,7 @@ go_test(
        "checkpoint_state_test.go",
        "committee_fuzz_test.go",
        "committee_test.go",
        "payload_id_test.go",
        "proposer_indices_test.go",
        "skip_slot_cache_test.go",
        "subnet_ids_test.go",

@@ -36,7 +36,7 @@ type DepositFetcher interface {
    DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
    DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
    FinalizedDeposits(ctx context.Context) *FinalizedDeposits
    NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
    NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
}

// FinalizedDeposits stores the trie of deposits that have been included

@@ -246,7 +246,7 @@ func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposit

// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
    defer span.End()
    dc.depositsLock.RLock()

@@ -256,10 +256,9 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
        return dc.allDeposits(untilBlk)
    }

    lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
    var deposits []*ethpb.Deposit
    for _, d := range dc.deposits {
        if (d.Index > lastFinalizedDepositIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
        if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
            deposits = append(deposits, d.Deposit)
        }
    }

@@ -554,7 +554,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
    })
    dc.InsertFinalizedDeposits(context.Background(), 1)

    deps := dc.NonFinalizedDeposits(context.Background(), nil)
    deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
    assert.Equal(t, 2, len(deps))
}

@@ -611,7 +611,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
    })
    dc.InsertFinalizedDeposits(context.Background(), 1)

    deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
    deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
    assert.Equal(t, 1, len(deps))
}

beacon-chain/cache/payload_id.go (vendored, new file, 73 lines)
@@ -0,0 +1,73 @@
package cache

import (
    "sync"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
)

const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength

// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
type ProposerPayloadIDsCache struct {
    slotToProposerAndPayloadIDs map[types.Slot][vpIdsLength]byte
    sync.RWMutex
}

// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
    return &ProposerPayloadIDsCache{
        slotToProposerAndPayloadIDs: make(map[types.Slot][vpIdsLength]byte),
    }
}

// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot types.Slot) (types.ValidatorIndex, [8]byte, bool) {
    f.RLock()
    defer f.RUnlock()
    ids, ok := f.slotToProposerAndPayloadIDs[slot]
    if !ok {
        return 0, [8]byte{}, false
    }
    vId := ids[:vIdLength]

    b := ids[vIdLength:]
    var pId [pIdLength]byte
    copy(pId[:], b)

    return types.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}

// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot types.Slot, vId types.ValidatorIndex, pId [8]byte) {
    f.Lock()
    defer f.Unlock()
    var vIdBytes [vIdLength]byte
    copy(vIdBytes[:], bytesutil.Uint64ToBytesBigEndian(uint64(vId)))

    var bytes [vpIdsLength]byte
    copy(bytes[:], append(vIdBytes[:], pId[:]...))

    _, ok := f.slotToProposerAndPayloadIDs[slot]
    // Ok to overwrite if the slot is already set but the payload ID is not set.
    // This combats the re-org case where the payload assignment could change within the epoch.
    if !ok || (ok && pId != [pIdLength]byte{}) {
        f.slotToProposerAndPayloadIDs[slot] = bytes
    }
}

// PrunePayloadIDs removes payload ID entries older than the input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot types.Slot) {
    f.Lock()
    defer f.Unlock()

    for s := range f.slotToProposerAndPayloadIDs {
        if slot > s {
            delete(f.slotToProposerAndPayloadIDs, s)
        }
    }
}

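An orientation note, not part of the commits above: a minimal usage sketch of the new cache. It assumes only the import paths shown in the file itself; the slot, validator index, and payload ID values are arbitrary.

package main

import (
    "fmt"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

func main() {
    c := cache.NewProposerPayloadIDsCache()

    // Remember that validator 7 proposes at slot 100, with payload ID {1}.
    c.SetProposerAndPayloadIDs(types.Slot(100), types.ValidatorIndex(7), [8]byte{1})

    // Look the pair up when the proposal slot arrives.
    vIdx, pID, ok := c.GetProposerPayloadIDs(types.Slot(100))
    fmt.Println(vIdx, pID, ok) // 7 [1 0 0 0 0 0 0 0] true

    // Prune drops entries for slots strictly older than the input slot.
    c.PrunePayloadIDs(types.Slot(101))
    _, _, ok = c.GetProposerPayloadIDs(types.Slot(100))
    fmt.Println(ok) // false
}

The test file that follows exercises the same behavior, including the rule that an existing entry is only overwritten when the incoming payload ID is non-empty.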
beacon-chain/cache/payload_id_test.go (vendored, new file, 60 lines)
@@ -0,0 +1,60 @@
package cache

import (
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/testing/require"
)

func TestValidatorPayloadIDsCache_GetAndSaveValidatorPayloadIDs(t *testing.T) {
    cache := NewProposerPayloadIDsCache()
    i, p, ok := cache.GetProposerPayloadIDs(0)
    require.Equal(t, false, ok)
    require.Equal(t, types.ValidatorIndex(0), i)
    require.Equal(t, [pIdLength]byte{}, p)

    slot := types.Slot(1234)
    vid := types.ValidatorIndex(34234324)
    pid := [8]byte{1, 2, 3, 3, 7, 8, 7, 8}
    cache.SetProposerAndPayloadIDs(slot, vid, pid)
    i, p, ok = cache.GetProposerPayloadIDs(slot)
    require.Equal(t, true, ok)
    require.Equal(t, vid, i)
    require.Equal(t, pid, p)

    slot = types.Slot(9456456)
    vid = types.ValidatorIndex(6786745)
    cache.SetProposerAndPayloadIDs(slot, vid, [pIdLength]byte{})
    i, p, ok = cache.GetProposerPayloadIDs(slot)
    require.Equal(t, true, ok)
    require.Equal(t, vid, i)
    require.Equal(t, [pIdLength]byte{}, p)

    // reset cache without pid
    slot = types.Slot(9456456)
    vid = types.ValidatorIndex(11111)
    pid = [8]byte{3, 2, 3, 33, 72, 8, 7, 8}
    cache.SetProposerAndPayloadIDs(slot, vid, pid)
    i, p, ok = cache.GetProposerPayloadIDs(slot)
    require.Equal(t, true, ok)
    require.Equal(t, vid, i)
    require.Equal(t, pid, p)

    // reset cache with existing pid
    slot = types.Slot(9456456)
    vid = types.ValidatorIndex(11111)
    newPid := [8]byte{1, 2, 3, 33, 72, 8, 7, 1}
    cache.SetProposerAndPayloadIDs(slot, vid, newPid)
    i, p, ok = cache.GetProposerPayloadIDs(slot)
    require.Equal(t, true, ok)
    require.Equal(t, vid, i)
    require.Equal(t, newPid, p)

    // remove cache entry
    cache.PrunePayloadIDs(slot + 1)
    i, p, ok = cache.GetProposerPayloadIDs(slot)
    require.Equal(t, false, ok)
    require.Equal(t, types.ValidatorIndex(0), i)
    require.Equal(t, [pIdLength]byte{}, p)
}

@@ -175,9 +175,7 @@ func CommitteeAssignments(
        return nil, nil, err
    }
    proposerIndexToSlots := make(map[types.ValidatorIndex][]types.Slot, params.BeaconConfig().SlotsPerEpoch)
    // Proposal epochs do not have a look ahead, so we skip them over here.
    validProposalEpoch := epoch < nextEpoch
    for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch && validProposalEpoch; slot++ {
    for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
        // Skip proposer assignment for genesis slot.
        if slot == 0 {
            continue

@@ -192,6 +190,15 @@ func CommitteeAssignments(
        proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
    }

    // If the previous proposer-indices computation is outside of the current proposal epoch range,
    // we need to reset the state slot back to the start slot so that we can compute the correct committees.
    currentProposalEpoch := epoch < nextEpoch
    if !currentProposalEpoch {
        if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
            return nil, nil, err
        }
    }

    activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
    if err != nil {
        return nil, nil, err

@@ -232,7 +232,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {

    _, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
    require.NoError(t, err)
    require.Equal(t, 0, len(proposerIndxs), "wanted empty proposer index set")
    require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
}

func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {

@@ -233,13 +233,17 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
    defer span.End()

    if err := s.DeleteState(ctx, root); err != nil {
        return errDeleteFinalized
        return err
    }

    if err := s.deleteStateSummary(root); err != nil {
        return err
    }

    return s.db.Update(func(tx *bolt.Tx) error {
        bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
        if b := bkt.Get(root[:]); b != nil {
            return errDeleteFinalized
            return ErrDeleteJustifiedAndFinalized
        }

        if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {

@@ -191,6 +191,16 @@ func TestStore_DeleteBlock(t *testing.T) {
    require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
    blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
    require.NoError(t, db.SaveBlocks(ctx, blks))
    ss := make([]*ethpb.StateSummary, len(blks))
    for i, blk := range blks {
        r, err := blk.Block().HashTreeRoot()
        require.NoError(t, err)
        ss[i] = &ethpb.StateSummary{
            Slot: blk.Block().Slot(),
            Root: r[:],
        }
    }
    require.NoError(t, db.SaveStateSummaries(ctx, ss))

    root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
    require.NoError(t, err)

@@ -216,11 +226,50 @@ func TestStore_DeleteBlock(t *testing.T) {
    b, err = db.Block(ctx, root2)
    require.NoError(t, err)
    require.Equal(t, b, nil)
    require.Equal(t, false, db.HasStateSummary(ctx, root2))

    require.ErrorIs(t, db.DeleteBlock(ctx, root), errDeleteFinalized)

    require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}

func TestStore_DeleteJustifiedBlock(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    b := util.NewBeaconBlock()
    b.Block.Slot = 1
    root, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
    cp := &ethpb.Checkpoint{
        Root: root[:],
    }
    st, err := util.NewBeaconState()
    require.NoError(t, err)
    blk, err := wrapper.WrappedSignedBeaconBlock(b)
    require.NoError(t, err)
    require.NoError(t, db.SaveBlock(ctx, blk))
    require.NoError(t, db.SaveState(ctx, st, root))
    require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
    require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}

func TestStore_DeleteFinalizedBlock(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    b := util.NewBeaconBlock()
    root, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
    cp := &ethpb.Checkpoint{
        Root: root[:],
    }
    st, err := util.NewBeaconState()
    require.NoError(t, err)
    blk, err := wrapper.WrappedSignedBeaconBlock(b)
    require.NoError(t, err)
    require.NoError(t, db.SaveBlock(ctx, blk))
    require.NoError(t, db.SaveState(ctx, st, root))
    require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
    require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
    require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_GenesisBlock(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()

@@ -2,8 +2,8 @@ package kv

import "github.com/pkg/errors"

// errDeleteFinalized is raised when we attempt to delete a finalized block/state
var errDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a justified or finalized block/state
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")

// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.

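An aside, not from the diff: the rename also exports the sentinel, so callers outside package kv can branch on it with errors.Is. A hypothetical caller sketch; deleteIfAllowed is illustrative and not a Prysm API, while DeleteBlock on db.HeadAccessDatabase is used the same way in the blockchain service above.

package example

import (
    "context"
    "errors"
    "fmt"

    "github.com/prysmaticlabs/prysm/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
)

// deleteIfAllowed deletes a block but treats the exported sentinel as a
// distinct, irreparable case (compare the TODO in removeInvalidBlockAndState).
func deleteIfAllowed(ctx context.Context, d db.HeadAccessDatabase, root [32]byte) error {
    err := d.DeleteBlock(ctx, root)
    if errors.Is(err, kv.ErrDeleteJustifiedAndFinalized) {
        // A justified or finalized block must never become deletable,
        // so surface this loudly instead of retrying.
        return fmt.Errorf("refusing to delete justified/finalized block: %w", err)
    }
    return err
}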
@@ -346,19 +346,31 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {

    bkt = tx.Bucket(checkpointBucket)
    enc := bkt.Get(finalizedCheckpointKey)
    checkpoint := &ethpb.Checkpoint{}
    finalized := &ethpb.Checkpoint{}
    if enc == nil {
        checkpoint = &ethpb.Checkpoint{Root: genesisBlockRoot}
    } else if err := decode(ctx, enc, checkpoint); err != nil {
        finalized = &ethpb.Checkpoint{Root: genesisBlockRoot}
    } else if err := decode(ctx, enc, finalized); err != nil {
        return err
    }

    enc = bkt.Get(justifiedCheckpointKey)
    justified := &ethpb.Checkpoint{}
    if enc == nil {
        justified = &ethpb.Checkpoint{Root: genesisBlockRoot}
    } else if err := decode(ctx, enc, justified); err != nil {
        return err
    }

    blockBkt := tx.Bucket(blocksBucket)
    headBlkRoot := blockBkt.Get(headBlockRootKey)
    bkt = tx.Bucket(stateBucket)
    // Safe guard against deleting genesis, finalized, head state.
    if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
        return errors.New("cannot delete genesis, finalized, or head state")
    // Safeguard against deleting genesis, finalized, head state.
    if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
        return ErrDeleteJustifiedAndFinalized
    }

    // Nothing to delete if state doesn't exist.
    enc = bkt.Get(blockRoot[:])
    if enc == nil {
        return nil
    }

    slot, err := s.slotByBlockRoot(ctx, tx, blockRoot[:])

@@ -110,3 +110,12 @@ func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
    s.stateSummaryCache.clear()
    return nil
}

// deleteStateSummary deletes a state summary object from the db using input block root.
func (s *Store) deleteStateSummary(blockRoot [32]byte) error {
    s.stateSummaryCache.delete(blockRoot)
    return s.db.Update(func(tx *bolt.Tx) error {
        bucket := tx.Bucket(stateSummaryBucket)
        return bucket.Delete(blockRoot[:])
    })
}

@@ -37,6 +37,13 @@ func (c *stateSummaryCache) has(r [32]byte) bool {
    return ok
}

// delete state summary in cache.
func (c *stateSummaryCache) delete(r [32]byte) {
    c.initSyncStateSummariesLock.Lock()
    defer c.initSyncStateSummariesLock.Unlock()
    delete(c.initSyncStateSummaries, r)
}

// get retrieves a state summary from the initial sync state summaries cache using the root of
// the block.
func (c *stateSummaryCache) get(r [32]byte) *ethpb.StateSummary {

@@ -62,3 +62,17 @@ func TestStateSummary_CacheToDB(t *testing.T) {
        require.Equal(t, true, db.HasStateSummary(context.Background(), bytesutil.ToBytes32(r)))
    }
}

func TestStateSummary_CanDelete(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    r1 := bytesutil.ToBytes32([]byte{'A'})
    s1 := &ethpb.StateSummary{Slot: 1, Root: r1[:]}

    require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
    require.NoError(t, db.SaveStateSummary(ctx, s1))
    require.Equal(t, true, db.HasStateSummary(ctx, r1), "State summary should be saved")

    require.NoError(t, db.deleteStateSummary(r1))
    require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
}

@@ -412,7 +412,7 @@ func TestStore_DeleteGenesisState(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, st.SetSlot(100))
    require.NoError(t, db.SaveState(ctx, st, genesisBlockRoot))
    wantedErr := "cannot delete genesis, finalized, or head state"
    wantedErr := "cannot delete finalized block or state"
    assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, genesisBlockRoot))
}

@@ -440,7 +440,7 @@ func TestStore_DeleteFinalizedState(t *testing.T) {
    require.NoError(t, db.SaveState(ctx, finalizedState, finalizedBlockRoot))
    finalizedCheckpoint := &ethpb.Checkpoint{Root: finalizedBlockRoot[:]}
    require.NoError(t, db.SaveFinalizedCheckpoint(ctx, finalizedCheckpoint))
    wantedErr := "cannot delete genesis, finalized, or head state"
    wantedErr := "cannot delete finalized block or state"
    assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, finalizedBlockRoot))
}

@@ -465,8 +465,7 @@ func TestStore_DeleteHeadState(t *testing.T) {
    require.NoError(t, st.SetSlot(100))
    require.NoError(t, db.SaveState(ctx, st, headBlockRoot))
    require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
    wantedErr := "cannot delete genesis, finalized, or head state"
    assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, headBlockRoot))
    require.NoError(t, db.DeleteState(ctx, headBlockRoot)) // Ok to delete head state if it's optimistic.
}

func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {

@@ -155,7 +155,7 @@ func (_ *Service) FinalizedDeposits(_ context.Context) *depositcache.FinalizedDe
}

// NonFinalizedDeposits mocks out the deposit cache functionality for interop.
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ int64, _ *big.Int) []*ethpb.Deposit {
    return []*ethpb.Deposit{}
}

@@ -22,11 +22,13 @@ go_library(
        "//beacon-chain/forkchoice/types:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

@@ -3,7 +3,6 @@ package doublylinkedtree

import "errors"

var ErrNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")

@@ -2,12 +2,15 @@ package doublylinkedtree

import (
    "context"
    "fmt"

    "github.com/pkg/errors"
    types "github.com/prysmaticlabs/eth2-types"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
)

@@ -250,9 +253,21 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
                return ErrNilNode
            }
            if currentNode.balance < oldBalance {
                return errInvalidBalance
                f.store.proposerBoostLock.RLock()
                log.WithFields(logrus.Fields{
                    "nodeRoot":                   fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
                    "oldBalance":                 oldBalance,
                    "nodeBalance":                currentNode.balance,
                    "nodeWeight":                 currentNode.weight,
                    "proposerBoostRoot":          fmt.Sprintf("%#x", bytesutil.Trunc(f.store.proposerBoostRoot[:])),
                    "previousProposerBoostRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
                    "previousProposerBoostScore": f.store.previousProposerBoostScore,
                }).Warning("node with invalid balance, setting it to zero")
                f.store.proposerBoostLock.RUnlock()
                currentNode.balance = 0
            } else {
                currentNode.balance -= oldBalance
            }
            currentNode.balance -= oldBalance
        }
    }

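The essence of the change above is swapping a hard error on underflow for a saturating subtraction: node balances are uint64, so subtracting a larger oldBalance would otherwise wrap around to an enormous value and corrupt fork choice weights. A minimal sketch of that guard, assuming nothing beyond unsigned arithmetic:

package main

import "fmt"

// saturatingSub clamps at zero instead of wrapping on unsigned underflow.
func saturatingSub(balance, old uint64) uint64 {
    if balance < old {
        return 0
    }
    return balance - old
}

func main() {
    fmt.Println(saturatingSub(100, 125)) // 0, not 2^64 - 25
    fmt.Println(saturatingSub(100, 70))  // 30
}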
@@ -58,6 +58,30 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
    assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}

func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
    f := setup(0, 0)
    ctx := context.Background()
    require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
    s := f.store
    s.nodeByRoot[indexToHash(1)].balance = 100
    s.nodeByRoot[indexToHash(2)].balance = 100
    s.nodeByRoot[indexToHash(3)].balance = 100

    f.balances = []uint64{125, 125, 125}
    f.votes = []Vote{
        {indexToHash(1), indexToHash(1), 0},
        {indexToHash(2), indexToHash(2), 0},
        {indexToHash(3), indexToHash(3), 0},
    }

    require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
    assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
    assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
    assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
}

func TestForkChoice_IsCanonical(t *testing.T) {
    f := setup(1, 1)
    ctx := context.Background()

@@ -3,9 +3,12 @@ package doublylinkedtree

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/sirupsen/logrus"
)

var (
    log = logrus.WithField("prefix", "forkchoice-doublylinkedtree")

    headSlotNumber = promauto.NewGauge(
        prometheus.GaugeOpts{
            Name: "doublylinkedtree_head_slot",
@@ -48,10 +51,4 @@ var (
            Help: "The number of times pruning happened.",
        },
    )
    validatedCount = promauto.NewGauge(
        prometheus.GaugeOpts{
            Name: "doublylinkedtree_validated_count",
            Help: "The number of blocks that have been fully validated.",
        },
    )
)

@@ -120,7 +120,6 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
    }

    n.optimistic = false
    validatedCount.Inc()
    return n.parent.setNodeAndParentValidated(ctx)
}

@@ -22,12 +22,14 @@ go_library(
        "//beacon-chain/forkchoice/types:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

@@ -3,9 +3,12 @@ package protoarray

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/sirupsen/logrus"
)

var (
    log = logrus.WithField("prefix", "forkchoice-protoarray")

    headSlotNumber = promauto.NewGauge(
        prometheus.GaugeOpts{
            Name: "proto_array_head_slot",
@@ -48,10 +51,4 @@ var (
            Help: "The number of times pruning happened.",
        },
    )
    validatedNodesCount = promauto.NewCounter(
        prometheus.CounterOpts{
            Name: "proto_array_validated_nodes_count",
            Help: "The number of nodes that have been fully validated.",
        },
    )
)

@@ -43,7 +43,6 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) er
        if index == NonExistentNode {
            break
        }
        validatedNodesCount.Inc()
    }
    return nil
}

@@ -9,8 +9,10 @@ import (
    types "github.com/prysmaticlabs/eth2-types"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    pmath "github.com/prysmaticlabs/prysm/math"
    pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
)

@@ -426,6 +428,16 @@ func (s *Store) applyWeightChanges(
        if nodeDelta < 0 {
            d := uint64(-nodeDelta)
            if n.weight < d {
                s.proposerBoostLock.RLock()
                log.WithFields(logrus.Fields{
                    "nodeDelta":                  d,
                    "nodeRoot":                   fmt.Sprintf("%#x", bytesutil.Trunc(n.root[:])),
                    "nodeWeight":                 n.weight,
                    "proposerBoostRoot":          fmt.Sprintf("%#x", bytesutil.Trunc(s.proposerBoostRoot[:])),
                    "previousProposerBoostRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(s.previousProposerBoostRoot[:])),
                    "previousProposerBoostScore": s.previousProposerBoostScore,
                }).Warning("node with invalid weight, setting it to zero")
                s.proposerBoostLock.RUnlock()
                n.weight = 0
            } else {
                n.weight -= d
@@ -602,16 +614,22 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
        node := copyNode(s.nodes[idx])
        parentIdx, ok := canonicalNodesMap[node.parent]
        if ok {
            s.nodesIndices[node.root] = uint64(len(canonicalNodes))
            canonicalNodesMap[idx] = uint64(len(canonicalNodes))
            currentIndex := uint64(len(canonicalNodes))
            s.nodesIndices[node.root] = currentIndex
            s.payloadIndices[node.payloadHash] = currentIndex
            canonicalNodesMap[idx] = currentIndex
            node.parent = parentIdx
            canonicalNodes = append(canonicalNodes, node)
        } else {
            // Remove node and synced tip that is not part of finalized branch.
            // Remove node that is not part of finalized branch.
            delete(s.nodesIndices, node.root)
            delete(s.canonicalNodes, node.root)
            delete(s.payloadIndices, node.payloadHash)
        }
    }
    s.nodesIndices[finalizedRoot] = uint64(0)
    s.canonicalNodes[finalizedRoot] = true
    s.payloadIndices[finalizedNode.payloadHash] = uint64(0)

    // Recompute the best child and descendant for each canonical node.
    for _, node := range canonicalNodes {

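The invariant this hunk restores can be shown in isolation: when the node slice is compacted, every auxiliary index (root to index, payload hash to index) must be rewritten for surviving nodes and deleted for pruned ones, otherwise stale entries point at reused slots. A simplified sketch, using strings in place of 32-byte roots:

package main

import "fmt"

type node struct {
    root    string
    payload string
    keep    bool
}

// prune compacts the slice and rebuilds both index maps in lockstep.
func prune(nodes []node) ([]node, map[string]int, map[string]int) {
    kept := make([]node, 0, len(nodes))
    rootIdx := make(map[string]int)
    payloadIdx := make(map[string]int)
    for _, n := range nodes {
        if !n.keep {
            continue // a dropped node must vanish from every index
        }
        i := len(kept)
        rootIdx[n.root] = i
        payloadIdx[n.payload] = i
        kept = append(kept, n)
    }
    return kept, rootIdx, payloadIdx
}

func main() {
    nodes := []node{{"a", "A", false}, {"b", "B", true}, {"c", "C", true}}
    kept, roots, payloads := prune(nodes)
    fmt.Println(len(kept), roots["b"], payloads["C"]) // 2 0 1
}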
@@ -375,7 +375,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
        parent: uint64(numOfNodes - 2),
    })
    indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
    s := &Store{nodes: nodes, nodesIndices: indices}
    s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}

    // Finalized root is at index 99 so everything before 99 should be pruned.
    require.NoError(t, s.prune(context.Background(), indexToHash(99)))
@@ -413,7 +413,7 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
        parent: uint64(numOfNodes - 2),
    })

    s := &Store{nodes: nodes, nodesIndices: indices}
    s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}

    // Finalized root is at index 11 so everything before 11 should be pruned.
    require.NoError(t, s.prune(context.Background(), indexToHash(10)))
@@ -441,6 +441,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
            bestDescendant: 1,
            root:           indexToHash(uint64(0)),
            parent:         NonExistentNode,
            payloadHash:    [32]byte{'A'},
        },
        {
            slot: 101,
@@ -448,6 +449,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
            bestChild:      NonExistentNode,
            bestDescendant: NonExistentNode,
            parent:         0,
            payloadHash:    [32]byte{'B'},
        },
        {
            slot: 101,
@@ -455,6 +457,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
            parent:         0,
            bestChild:      NonExistentNode,
            bestDescendant: NonExistentNode,
            payloadHash:    [32]byte{'C'},
        },
    }
    s := &Store{
@@ -465,9 +468,22 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
            indexToHash(uint64(1)): 1,
            indexToHash(uint64(2)): 2,
        },
        canonicalNodes: map[[32]byte]bool{
            indexToHash(uint64(0)): true,
            indexToHash(uint64(1)): true,
            indexToHash(uint64(2)): true,
        },
        payloadIndices: map[[32]byte]uint64{
            [32]byte{'A'}: 0,
            [32]byte{'B'}: 1,
            [32]byte{'C'}: 2,
        },
    }
    require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
    require.Equal(t, len(s.nodes), 1)
    require.Equal(t, 1, len(s.nodes))
    require.Equal(t, 1, len(s.nodesIndices))
    require.Equal(t, 1, len(s.canonicalNodes))
    require.Equal(t, 1, len(s.payloadIndices))
}

// This test starts with the following branching diagram
@@ -482,25 +498,74 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
//	J -- K -- L
//
//
func TestStore_PruneSyncedTips(t *testing.T) {
func TestStore_PruneBranched(t *testing.T) {
    ctx := context.Background()
    f := setup(1, 1)

    require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
    require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
    f.store.pruneThreshold = 0
    require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
    require.Equal(t, 1, f.NodeCount())
    tests := []struct {
        finalizedRoot      [32]byte
        wantedCanonical    [32]byte
        wantedNonCanonical [32]byte
        canonicalCount     int
        payloadHash        [32]byte
        payloadIndex       uint64
        nonExistentPayload [32]byte
    }{
        {
            [32]byte{'f'},
            [32]byte{'f'},
            [32]byte{'a'},
            1,
            [32]byte{'F'},
            0,
            [32]byte{'H'},
        },
        {
            [32]byte{'d'},
            [32]byte{'e'},
            [32]byte{'i'},
            3,
            [32]byte{'E'},
            1,
            [32]byte{'C'},
        },
        {
            [32]byte{'b'},
            [32]byte{'f'},
            [32]byte{'h'},
            5,
            [32]byte{'D'},
            3,
            [32]byte{'A'},
        },
    }

    for _, tc := range tests {
        f := setup(1, 1)
        require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
        require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
        f.store.pruneThreshold = 0
        require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'f'}))
        require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
        require.Equal(t, true, f.IsCanonical([32]byte{'f'}))

        require.NoError(t, f.Prune(ctx, tc.finalizedRoot))
        require.Equal(t, tc.canonicalCount, len(f.store.canonicalNodes))
        require.Equal(t, true, f.IsCanonical(tc.wantedCanonical))
        require.Equal(t, false, f.IsCanonical(tc.wantedNonCanonical))
        require.Equal(t, tc.payloadIndex, f.store.payloadIndices[tc.payloadHash])
        _, ok := f.store.payloadIndices[tc.nonExistentPayload]
        require.Equal(t, false, ok)
    }
}

func TestStore_LeadsToViableHead(t *testing.T) {

@@ -18,6 +18,7 @@ go_library(
        "//api/gateway:go_default_library",
        "//async/event:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/kv:go_default_library",

@@ -21,6 +21,7 @@ import (
    apigateway "github.com/prysmaticlabs/prysm/api/gateway"
    "github.com/prysmaticlabs/prysm/async/event"
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
    "github.com/prysmaticlabs/prysm/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
@@ -94,6 +95,7 @@ type BeaconNode struct {
    slashingsPool     slashings.PoolManager
    syncCommitteePool synccommittee.Pool
    depositCache      *depositcache.DepositCache
    proposerIdsCache  *cache.ProposerPayloadIDsCache
    stateFeed         *event.Feed
    blockFeed         *event.Feed
    opFeed            *event.Feed
@@ -152,6 +154,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
        slasherBlockHeadersFeed: new(event.Feed),
        slasherAttestationsFeed: new(event.Feed),
        serviceFlagOpts:         &serviceFlagOpts{},
        proposerIdsCache:        cache.NewProposerPayloadIDsCache(),
    }

    for _, opt := range opts {
@@ -585,6 +588,7 @@ func (b *BeaconNode) registerBlockchainService() error {
        blockchain.WithStateGen(b.stateGen),
        blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
        blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
        blockchain.WithProposerIdsCache(b.proposerIdsCache),
    )
    blockchainService, err := blockchain.NewService(b.ctx, opts...)
    if err != nil {
@@ -801,6 +805,7 @@ func (b *BeaconNode) registerRPCService() error {
        StateGen:                b.stateGen,
        EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
        MaxMsgSize:              maxMsgSize,
        ProposerIdsCache:        b.proposerIdsCache,
        ExecutionEngineCaller:   web3Service,
    })

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "auth.go",
        "block_cache.go",
        "block_reader.go",
        "check_transition_config.go",
@@ -16,6 +15,7 @@ go_library(
        "options.go",
        "prometheus.go",
        "provider.go",
        "rpc_connection.go",
        "service.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain",
@@ -59,7 +59,6 @@ go_library(
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_ethereum_go_ethereum//ethclient:go_default_library",
        "@com_github_ethereum_go_ethereum//rpc:go_default_library",
        "@com_github_golang_jwt_jwt_v4//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -74,7 +73,6 @@ go_test(
    name = "go_default_test",
    size = "medium",
    srcs = [
        "auth_test.go",
        "block_cache_test.go",
        "block_reader_test.go",
        "check_transition_config_test.go",
@@ -125,7 +123,6 @@ go_test(
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_ethereum_go_ethereum//rpc:go_default_library",
        "@com_github_ethereum_go_ethereum//trie:go_default_library",
        "@com_github_golang_jwt_jwt_v4//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",

@@ -13,6 +13,7 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
    statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/network"
    pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
    "github.com/sirupsen/logrus"
)
@@ -81,7 +82,7 @@ func (s *Service) checkTransitionConfiguration(
            return
        }
    case tm := <-ticker.C:
        ctx, cancel := context.WithDeadline(ctx, tm.Add(DefaultRPCHTTPTimeout))
        ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
        err = s.ExchangeTransitionConfiguration(ctx, cfg)
        s.handleExchangeConfigurationError(err)
        if !hasTtdReached {

@@ -1,9 +1,6 @@
package powchain

import (
    "net/http"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
    statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
@@ -11,11 +8,9 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
    "github.com/prysmaticlabs/prysm/network"
    "github.com/prysmaticlabs/prysm/network/authorization"
)

// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
const DefaultRPCHTTPTimeout = time.Second * 6

type Option func(s *Service) error

// WithHttpEndpoints deduplicates and parses http endpoints for the powchain service to use,
@@ -38,20 +33,29 @@ func WithHttpEndpoints(endpointStrings []string) Option {
    }
}

// WithJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithJWTSecret(secret []byte) Option {
    return func(c *Service) error {
// WithHttpEndpointsAndJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithHttpEndpointsAndJWTSecret(endpointStrings []string, secret []byte) Option {
    return func(s *Service) error {
        if len(secret) == 0 {
            return nil
        }
        authTransport := &jwtTransport{
            underlyingTransport: http.DefaultTransport,
            jwtSecret:           secret,
        stringEndpoints := dedupEndpoints(endpointStrings)
        endpoints := make([]network.Endpoint, len(stringEndpoints))
        // Overwrite authorization type for all endpoints to be of a bearer
        // type.
        for i, e := range stringEndpoints {
            hEndpoint := HttpEndpoint(e)
            hEndpoint.Auth.Method = authorization.Bearer
            hEndpoint.Auth.Value = string(secret)
            endpoints[i] = hEndpoint
        }
        c.cfg.httpRPCClient = &http.Client{
            Timeout:   DefaultRPCHTTPTimeout,
            Transport: authTransport,
        // Select first http endpoint in the provided list.
        var currEndpoint network.Endpoint
        if len(endpointStrings) > 0 {
            currEndpoint = endpoints[0]
        }
        s.cfg.httpEndpoints = endpoints
        s.cfg.currHttpEndpoint = currEndpoint
        return nil
    }
}

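For context on what the bearer secret above eventually becomes on the wire: the engine API authenticates each request with an HS256-signed JWT whose iat claim must be near the current time. Below is a hedged sketch using github.com/golang-jwt/jwt/v4; prysm's real header is produced by endpoint.Auth.ToHeaderValue and its transport, so this helper is illustrative only and the example secret is a placeholder.

package main

import (
    "fmt"
    "time"

    "github.com/golang-jwt/jwt/v4"
)

// bearerToken mints an engine-API style Authorization header value from a
// shared JWT secret.
func bearerToken(secret []byte) (string, error) {
    token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
        "iat": time.Now().Unix(), // issued-at must be fresh
    })
    signed, err := token.SignedString(secret)
    if err != nil {
        return "", err
    }
    return "Bearer " + signed, nil
}

func main() {
    header, err := bearerToken([]byte("0123456789abcdef0123456789abcdef"))
    if err != nil {
        panic(err)
    }
    fmt.Println(header[:7]) // "Bearer "
}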
176
beacon-chain/powchain/rpc_connection.go
Normal file
@@ -0,0 +1,176 @@
package powchain

import (
    "context"
    "fmt"
    "net/url"
    "time"

    "github.com/ethereum/go-ethereum/ethclient"
    gethRPC "github.com/ethereum/go-ethereum/rpc"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/config/params"
    contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
    "github.com/prysmaticlabs/prysm/io/logs"
    "github.com/prysmaticlabs/prysm/network"
    "github.com/prysmaticlabs/prysm/network/authorization"
)

func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
    client, err := s.newRPCClientWithAuth(ctx, currEndpoint)
    if err != nil {
        return errors.Wrap(err, "could not dial execution node")
    }
    // Attach the clients to the service struct.
    fetcher := ethclient.NewClient(client)
    s.rpcClient = client
    s.httpLogger = fetcher
    s.eth1DataFetcher = fetcher

    depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, fetcher)
    if err != nil {
        client.Close()
        return errors.Wrap(err, "could not initialize deposit contract caller")
    }
    s.depositContractCaller = depositContractCaller

    // Ensure we have the correct chain and deposit IDs.
    if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
        client.Close()
        return errors.Wrap(err, "could not make initial request to verify execution chain ID")
    }
    s.updateConnectedETH1(true)
    s.runError = nil
    return nil
}

// Every backOffPeriod seconds, this attempts to re-establish an execution client
// connection; if that does not work, it falls back to the next endpoint, if defined.
func (s *Service) pollConnectionStatus(ctx context.Context) {
    // Use a custom logger to only log errors
    logCounter := 0
    errorLogger := func(err error, msg string) {
        if logCounter > logThreshold {
            log.Errorf("%s: %v", msg, err)
            logCounter = 0
        }
        logCounter++
    }
    ticker := time.NewTicker(backOffPeriod)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
            if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
                errorLogger(err, "Could not connect to execution client endpoint")
                s.runError = err
                s.fallbackToNextEndpoint()
            }
        case <-s.ctx.Done():
            log.Debug("Received cancelled context, closing existing powchain service")
            return
        }
    }
}

// Forces a retry of the execution client connection.
func (s *Service) retryExecutionClientConnection(ctx context.Context, err error) {
    s.runError = err
    s.updateConnectedETH1(false)
    // Back off for a while before redialing.
    time.Sleep(backOffPeriod)
    if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
        s.runError = err
        return
    }
    // Reset run error in the event of a successful connection.
    s.runError = nil
}

// This performs a health check on our primary endpoint, and if it
// is ready to serve we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint(ctx context.Context) {
    primaryEndpoint := s.cfg.httpEndpoints[0]
    // Return early if we are running on our primary
    // endpoint.
    if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
        return
    }

    if err := s.setupExecutionClientConnections(ctx, primaryEndpoint); err != nil {
        log.Debugf("Primary endpoint not ready: %v", err)
        return
    }
    s.updateCurrHttpEndpoint(primaryEndpoint)
}

// This is an inefficient way to search for the next endpoint, but given N is
// expected to be small, it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
    currEndpoint := s.cfg.currHttpEndpoint
    currIndex := 0
    totalEndpoints := len(s.cfg.httpEndpoints)

    for i, endpoint := range s.cfg.httpEndpoints {
        if endpoint.Equals(currEndpoint) {
            currIndex = i
            break
        }
    }
    nextIndex := currIndex + 1
    if nextIndex >= totalEndpoints {
        nextIndex = 0
    }
    s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
    if nextIndex != currIndex {
        log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
    }
}

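The fallback above is plain round-robin over the configured endpoint list. A self-contained sketch of the same index arithmetic, with plain strings standing in for network.Endpoint values (the URLs are placeholders):

package main

import "fmt"

// nextEndpoint advances one slot past the current endpoint and wraps back to
// the first (primary) endpoint at the end of the list.
func nextEndpoint(endpoints []string, curr string) string {
    currIndex := 0
    for i, e := range endpoints {
        if e == curr {
            currIndex = i
            break
        }
    }
    next := currIndex + 1
    if next >= len(endpoints) {
        next = 0
    }
    return endpoints[next]
}

func main() {
    eps := []string{"http://primary:8551", "http://backup:8551"}
    fmt.Println(nextEndpoint(eps, "http://primary:8551")) // http://backup:8551
    fmt.Println(nextEndpoint(eps, "http://backup:8551"))  // http://primary:8551
}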
// Initializes an RPC connection with authentication headers.
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
    // Need to handle ipc and http
    var client *gethRPC.Client
    u, err := url.Parse(endpoint.Url)
    if err != nil {
        return nil, err
    }
    switch u.Scheme {
    case "http", "https":
        client, err = gethRPC.DialHTTPWithClient(endpoint.Url, endpoint.HttpClient())
        if err != nil {
            return nil, err
        }
    case "":
        client, err = gethRPC.DialIPC(ctx, endpoint.Url)
        if err != nil {
            return nil, err
        }
    default:
        return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
    }
    if endpoint.Auth.Method != authorization.None {
        header, err := endpoint.Auth.ToHeaderValue()
        if err != nil {
            return nil, err
        }
        client.SetHeader("Authorization", header)
    }
    return client, nil
}

// Checks the chain ID of the execution client to ensure
// it matches local parameters of what Prysm expects.
func ensureCorrectExecutionChain(ctx context.Context, client *ethclient.Client) error {
    cID, err := client.ChainID(ctx)
    if err != nil {
        return err
    }
    wantChainID := params.BeaconConfig().DepositChainID
    if cID.Uint64() != wantChainID {
        return fmt.Errorf("wanted chain ID %d, got %d", wantChainID, cID.Uint64())
    }
    return nil
}
@@ -7,7 +7,6 @@ import (
    "context"
    "fmt"
    "math/big"
    "net/http"
    "reflect"
    "runtime/debug"
    "sort"
@@ -39,10 +38,8 @@ import (
    "github.com/prysmaticlabs/prysm/container/trie"
    contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/io/logs"
    "github.com/prysmaticlabs/prysm/monitoring/clientstats"
    "github.com/prysmaticlabs/prysm/network"
    "github.com/prysmaticlabs/prysm/network/authorization"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    prysmTime "github.com/prysmaticlabs/prysm/time"
    "github.com/prysmaticlabs/prysm/time/slots"
@@ -114,6 +111,7 @@ type Chain interface {
// RPCDataFetcher defines a subset of methods conformed to by ETH1.0 RPC clients for
// fetching eth1 data from the clients.
type RPCDataFetcher interface {
    Close()
    HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
    HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
    SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
@@ -121,6 +119,7 @@ type RPCDataFetcher interface {

// RPCClient defines the rpc methods required to interact with the eth1 node.
type RPCClient interface {
    Close()
    BatchCall(b []gethRPC.BatchElem) error
    CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
}
@@ -135,7 +134,6 @@ type config struct {
    eth1HeaderReqLimit      uint64
    beaconNodeStatsUpdater  BeaconNodeStatsUpdater
    httpEndpoints           []network.Endpoint
    httpRPCClient           *http.Client
    currHttpEndpoint        network.Endpoint
    finalizedStateAtStartup state.BeaconState
}
@@ -228,14 +226,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

// Start a web3 service's main event loop.
func (s *Service) Start() {

    if err := s.connectToPowChain(); err != nil {
        log.WithError(err).Fatal("Could not connect to execution endpoint")
    if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
        log.WithError(err).Error("Could not connect to execution endpoint")
    }

    log.WithFields(logrus.Fields{
        "endpoint": logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url),
    }).Info("Connected to Ethereum execution client RPC")
    // If the chain has not started already and we don't have access to eth1 nodes, we will not be
    // able to generate the genesis state.
    if !s.chainStartData.Chainstarted && s.cfg.currHttpEndpoint.Url == "" {
@@ -253,7 +246,7 @@ func (s *Service) Start() {
    s.isRunning = true

    // Poll the execution client connection and fallback if errors occur.
    go s.pollConnectionStatus()
    go s.pollConnectionStatus(s.ctx)

    // Check transition configuration for the engine API client in the background.
    go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))
@@ -266,7 +259,12 @@ func (s *Service) Stop() error {
    if s.cancel != nil {
        defer s.cancel()
    }
    s.closeClients()
    if s.rpcClient != nil {
        s.rpcClient.Close()
    }
    if s.eth1DataFetcher != nil {
        s.eth1DataFetcher.Close()
    }
    return nil
}

@@ -338,10 +336,7 @@ func (s *Service) CurrentETH1Endpoint() string {

// CurrentETH1ConnectionError returns the error (if any) of the current connection.
func (s *Service) CurrentETH1ConnectionError() error {
    httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
    httpClient.Close()
    rpcClient.Close()
    return err
    return s.runError
}

// ETH1Endpoints returns the slice of HTTP endpoint URLs (default is 0th element).
@@ -358,10 +353,17 @@ func (s *Service) ETH1Endpoints() []string {
func (s *Service) ETH1ConnectionErrors() []error {
    var errs []error
    for _, ep := range s.cfg.httpEndpoints {
        httpClient, rpcClient, err := s.dialETH1Nodes(ep)
        httpClient.Close()
        rpcClient.Close()
        errs = append(errs, err)
        client, err := s.newRPCClientWithAuth(s.ctx, ep)
        if err != nil {
            errs = append(errs, err)
            continue
        }
        if err := ensureCorrectExecutionChain(s.ctx, ethclient.NewClient(client)); err != nil {
            client.Close()
            errs = append(errs, err)
            continue
        }
        client.Close()
    }
    return errs
}
@@ -376,146 +378,6 @@ func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
    return latestValidBlock, nil
}

func (s *Service) connectToPowChain() error {
    httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
    if err != nil {
        return errors.Wrap(err, "could not dial execution node")
    }

    depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, httpClient)
    if err != nil {
        return errors.Wrap(err, "could not initialize deposit contract caller")
    }

    if httpClient == nil || rpcClient == nil || depositContractCaller == nil {
        return errors.New("execution client RPC is nil")
    }
    s.httpLogger = httpClient
    s.eth1DataFetcher = httpClient
    s.depositContractCaller = depositContractCaller
    s.rpcClient = rpcClient

    s.updateConnectedETH1(true)
    s.runError = nil
    return nil
}

func (s *Service) dialETH1Nodes(endpoint network.Endpoint) (*ethclient.Client, *gethRPC.Client, error) {
    httpRPCClient, err := gethRPC.Dial(endpoint.Url)
    if err != nil {
        return nil, nil, err
    }
    if endpoint.Auth.Method != authorization.None {
        header, err := endpoint.Auth.ToHeaderValue()
        if err != nil {
            return nil, nil, err
        }
        httpRPCClient.SetHeader("Authorization", header)
    }
    httpClient := ethclient.NewClient(httpRPCClient)
    // Add a method to clean-up and close clients in the event
    // of any connection failure.
    closeClients := func() {
        httpRPCClient.Close()
        httpClient.Close()
    }
    // Make a simple call to ensure we are actually connected to a working node.
    cID, err := httpClient.ChainID(s.ctx)
    if err != nil {
        closeClients()
        return nil, nil, err
    }
    nID, err := httpClient.NetworkID(s.ctx)
    if err != nil {
        closeClients()
        return nil, nil, err
    }
    if cID.Uint64() != params.BeaconConfig().DepositChainID {
        closeClients()
        return nil, nil, fmt.Errorf("eth1 node using incorrect chain id, %d != %d", cID.Uint64(), params.BeaconConfig().DepositChainID)
    }
    if nID.Uint64() != params.BeaconConfig().DepositNetworkID {
        closeClients()
        return nil, nil, fmt.Errorf("eth1 node using incorrect network id, %d != %d", nID.Uint64(), params.BeaconConfig().DepositNetworkID)
    }

    return httpClient, httpRPCClient, nil
}

// closes down our active eth1 clients.
func (s *Service) closeClients() {
    gethClient, ok := s.rpcClient.(*gethRPC.Client)
    if ok {
        gethClient.Close()
    }
    httpClient, ok := s.eth1DataFetcher.(*ethclient.Client)
    if ok {
        httpClient.Close()
    }
}

|
||||
// Use a custom logger to only log errors
|
||||
logCounter := 0
|
||||
errorLogger := func(err error, msg string) {
|
||||
if logCounter > logThreshold {
|
||||
log.Errorf("%s: %v", msg, err)
|
||||
logCounter = 0
|
||||
}
|
||||
logCounter++
|
||||
}
|
||||
ticker := time.NewTicker(backOffPeriod)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
|
||||
errConnect := s.connectToPowChain()
|
||||
if errConnect != nil {
|
||||
errorLogger(errConnect, "Could not connect to powchain endpoint")
|
||||
s.runError = errConnect
|
||||
s.fallbackToNextEndpoint()
|
||||
continue
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Received cancelled context,closing existing powchain service")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checks if the eth1 node is healthy and ready to serve before
|
||||
// fetching data from it.
|
||||
func (s *Service) isEth1NodeSynced() (bool, error) {
|
||||
syncProg, err := s.eth1DataFetcher.SyncProgress(s.ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if syncProg != nil {
|
||||
return false, nil
|
||||
}
|
||||
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !eth1HeadIsBehind(head.Time), nil
|
||||
}
|
||||
|
||||
// Reconnect to eth1 node in case of any failure.
|
||||
func (s *Service) retryETH1Node(err error) {
|
||||
s.runError = err
|
||||
s.updateConnectedETH1(false)
|
||||
// Back off for a while before
|
||||
// resuming dialing the eth1 node.
|
||||
time.Sleep(backOffPeriod)
|
||||
if err := s.connectToPowChain(); err != nil {
|
||||
s.runError = err
|
||||
return
|
||||
}
|
||||
// Reset run error in the event of a successful connection.
|
||||
s.runError = nil
|
||||
}
|
||||
|
||||
func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositContainer) error {
|
||||
if len(ctrs) == 0 {
|
||||
return nil
|
||||
@@ -650,7 +512,7 @@ func (s *Service) handleETH1FollowDistance() {
|
||||
fiveMinutesTimeout := prysmTime.Now().Add(-5 * time.Minute)
|
||||
// check that web3 client is syncing
|
||||
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) {
|
||||
log.Warn("eth1 client is not syncing")
|
||||
log.Warn("Execution client is not syncing")
|
||||
}
|
||||
if !s.chainStartData.Chainstarted {
|
||||
if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
|
||||
@@ -680,6 +542,15 @@ func (s *Service) handleETH1FollowDistance() {
|
||||
}
|
||||
|
||||
func (s *Service) initPOWService() {
|
||||
// Use a custom logger to only log errors
|
||||
logCounter := 0
|
||||
errorLogger := func(err error, msg string) {
|
||||
if logCounter > logThreshold {
|
||||
log.Errorf("%s: %v", msg, err)
|
||||
logCounter = 0
|
||||
}
|
||||
logCounter++
|
||||
}
|
||||
|
||||
// Run in a select loop to retry in the event of any failures.
|
||||
for {
|
||||
@@ -690,8 +561,8 @@ func (s *Service) initPOWService() {
|
||||
ctx := s.ctx
|
||||
header, err := s.eth1DataFetcher.HeaderByNumber(ctx, nil)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to retrieve latest ETH1.0 chain header: %v", err)
|
||||
s.retryETH1Node(err)
|
||||
s.retryExecutionClientConnection(ctx, err)
|
||||
errorLogger(err, "Unable to retrieve latest execution client header")
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -700,14 +571,14 @@ func (s *Service) initPOWService() {
        s.latestEth1Data.BlockTime = header.Time

        if err := s.processPastLogs(ctx); err != nil {
            log.Errorf("Unable to process past logs %v", err)
            s.retryETH1Node(err)
            s.retryExecutionClientConnection(ctx, err)
            errorLogger(err, "Unable to process past deposit contract logs")
            continue
        }
        // Cache eth1 headers from our voting period.
        if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
            log.Errorf("Unable to process past headers %v", err)
            s.retryETH1Node(err)
            s.retryExecutionClientConnection(ctx, err)
            errorLogger(err, "Unable to cache headers for execution client votes")
            continue
        }
        // Handle edge case with embedded genesis state by fetching genesis header to determine
@@ -720,15 +591,15 @@ func (s *Service) initPOWService() {
        if genHash != [32]byte{} {
            genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
            if err != nil {
                log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
                s.retryETH1Node(err)
                s.retryExecutionClientConnection(ctx, err)
                errorLogger(err, "Unable to retrieve proof-of-stake genesis block data")
                continue
            }
            genBlock = genHeader.Number.Uint64()
        }
        s.chainStartData.GenesisBlock = genBlock
        if err := s.savePowchainData(ctx); err != nil {
            log.Errorf("Unable to save powchain data: %v", err)
            errorLogger(err, "Unable to save execution client data")
        }
    }
    return
@@ -757,17 +628,16 @@ func (s *Service) run(done <-chan struct{}) {
            head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
            if err != nil {
                log.WithError(err).Debug("Could not fetch latest eth1 header")
                s.retryETH1Node(err)
                continue
            }
            if eth1HeadIsBehind(head.Time) {
                s.retryExecutionClientConnection(s.ctx, err)
                log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
                s.retryETH1Node(errFarBehind)
                continue
            }
            s.processBlockHeader(head)
            s.handleETH1FollowDistance()
            s.checkDefaultEndpoint()
            s.checkDefaultEndpoint(s.ctx)
        case <-chainstartTicker.C:
            if s.chainStartData.Chainstarted {
                chainstartTicker.Stop()
@@ -854,59 +724,6 @@ func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock
    return hdr.Number.Uint64(), nil
}

// This performs a health check on our primary endpoint, and if it
// is ready to serve we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint() {
    primaryEndpoint := s.cfg.httpEndpoints[0]
    // Return early if we are running on our primary
    // endpoint.
    if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
        return
    }

    httpClient, rpcClient, err := s.dialETH1Nodes(primaryEndpoint)
    if err != nil {
        log.Debugf("Primary endpoint not ready: %v", err)
        return
    }
    log.Info("Primary endpoint ready again, switching back to it")
    // Close the clients and let our main connection routine
    // properly connect with it.
    httpClient.Close()
    rpcClient.Close()
    // Close current active clients.
    s.closeClients()

    // Switch back to primary endpoint and try connecting
    // to it again.
    s.updateCurrHttpEndpoint(primaryEndpoint)
    s.retryETH1Node(nil)
}

// This is an inefficient way to search for the next endpoint, but given N is expected to be
// small (< 25), it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
    currEndpoint := s.cfg.currHttpEndpoint
    currIndex := 0
    totalEndpoints := len(s.cfg.httpEndpoints)

    for i, endpoint := range s.cfg.httpEndpoints {
        if endpoint.Equals(currEndpoint) {
            currIndex = i
            break
        }
    }
    nextIndex := currIndex + 1
    if nextIndex >= totalEndpoints {
        nextIndex = 0
    }
    s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
    if nextIndex != currIndex {
        log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
    }
}

// initializes our service from the provided eth1data object by initializing all the relevant
// fields and data.
func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ETH1ChainData) error {

@@ -42,6 +42,8 @@ type goodLogger struct {
    backend *backends.SimulatedBackend
}

func (_ *goodLogger) Close() {}

func (g *goodLogger) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
    if g.backend == nil {
        return new(event.Feed).Subscribe(ch), nil
@@ -80,6 +82,8 @@ type goodFetcher struct {
    backend *backends.SimulatedBackend
}

func (_ *goodFetcher) Close() {}

func (g *goodFetcher) HeaderByHash(_ context.Context, hash common.Hash) (*gethTypes.Header, error) {
    if bytes.Equal(hash.Bytes(), common.BytesToHash([]byte{0}).Bytes()) {
        return nil, fmt.Errorf("expected block hash to be nonzero %v", hash)
@@ -225,10 +229,6 @@ func TestService_Eth1Synced(t *testing.T) {
    now := time.Now()
    assert.NoError(t, testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0))))
    testAcc.Backend.Commit()

    synced, err := web3Service.isEth1NodeSynced()
    require.NoError(t, err)
    assert.Equal(t, true, synced, "Expected eth1 nodes to be synced")
}

func TestFollowBlock_OK(t *testing.T) {
@@ -480,8 +480,8 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {

    s.chainStartData.Chainstarted = true
    require.NoError(t, s.initDepositCaches(context.Background(), ctrs))

    deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), nil)
    fDeposits := s.cfg.depositCache.FinalizedDeposits(ctx)
    deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), fDeposits.MerkleTrieIndex, nil)
    assert.Equal(t, 0, len(deps))
}

@@ -144,6 +144,8 @@ type RPCClient struct {
    Backend *backends.SimulatedBackend
}

func (_ *RPCClient) Close() {}

func (*RPCClient) CallContext(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
    return nil
}

@@ -443,7 +443,7 @@ type executionPayloadHeaderJson struct {
    ParentHash   string `json:"parent_hash" hex:"true"`
    FeeRecipient string `json:"fee_recipient" hex:"true"`
    StateRoot    string `json:"state_root" hex:"true"`
    ReceiptsRoot string `json:"receipt_root" hex:"true"`
    ReceiptsRoot string `json:"receipts_root" hex:"true"`
    LogsBloom    string `json:"logs_bloom" hex:"true"`
    PrevRandao   string `json:"prev_randao" hex:"true"`
    BlockNumber  string `json:"block_number"`

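Why the tag rename matters: encoding/json emits the key from the struct tag, not the Go field name, so a field tagged receipt_root would never satisfy a client looking for the spec's receipts_root key. A tiny demonstration:

package main

import (
    "encoding/json"
    "fmt"
)

type header struct {
    ReceiptsRoot string `json:"receipts_root"`
}

func main() {
    b, _ := json.Marshal(header{ReceiptsRoot: "0xabc"})
    fmt.Println(string(b)) // {"receipts_root":"0xabc"}
}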
@@ -1024,19 +1024,20 @@ func TestProduceBlockV2(t *testing.T) {
                TotalDifficulty: "0x1",
            },
        },
        TimeFetcher: &mockChain.ChainService{},
        HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        BlockReceiver: &mockChain.ChainService{},
        ChainStartFetcher: &mockPOW.POWChain{},
        Eth1InfoFetcher: &mockPOW.POWChain{},
        Eth1BlockFetcher: &mockPOW.POWChain{},
        MockEth1Votes: true,
        AttPool: attestations.NewPool(),
        SlashingsPool: slashings.NewPool(),
        ExitPool: voluntaryexits.NewPool(),
        StateGen: stategen.New(db),
        SyncCommitteePool: synccommittee.NewStore(),
        TimeFetcher:            &mockChain.ChainService{},
        HeadFetcher:            &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
        SyncChecker:            &mockSync.Sync{IsSyncing: false},
        BlockReceiver:          &mockChain.ChainService{},
        ChainStartFetcher:      &mockPOW.POWChain{},
        Eth1InfoFetcher:        &mockPOW.POWChain{},
        Eth1BlockFetcher:       &mockPOW.POWChain{},
        MockEth1Votes:          true,
        AttPool:                attestations.NewPool(),
        SlashingsPool:          slashings.NewPool(),
        ExitPool:               voluntaryexits.NewPool(),
        StateGen:               stategen.New(db),
        SyncCommitteePool:      synccommittee.NewStore(),
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

    proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)

@@ -79,6 +79,8 @@ go_library(
        "@com_github_ferranbt_fastssz//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",

@@ -138,7 +138,7 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
        return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
    }
    // Query the next epoch assignments for committee subnet subscriptions.
    nextCommitteeAssignments, _, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1)
    nextCommitteeAssignments, nextProposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not compute next committee assignments: %v", err)
    }
@@ -180,6 +180,16 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
            nextAssignment.AttesterSlot = ca.AttesterSlot
            nextAssignment.CommitteeIndex = ca.CommitteeIndex
        }
        // Cache proposer assignment for the current epoch.
        for _, slot := range proposerIndexToSlots[idx] {
            vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, idx, [8]byte{} /* payloadID */)
        }
        // Cache proposer assignment for the next epoch.
        for _, slot := range nextProposerIndexToSlots[idx] {
            vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, idx, [8]byte{} /* payloadID */)
        }
        // Prune payload ID cache for any slots before request slot.
        vs.ProposerSlotIndexCache.PrunePayloadIDs(epochStartSlot)
    } else {
        // If the validator isn't in the beacon state, try finding their deposit to determine their status.
        vStatus, _ := vs.validatorStatus(ctx, s, pubKey)

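A compact sketch of the bookkeeping the duty handler now performs: each proposer slot maps to a validator index plus a payload ID, and pruning drops entries below the epoch start slot so the cache stays bounded. The type below is a simplified stand-in for prysm's cache.ProposerPayloadIDsCache, not its real implementation.

package main

import "fmt"

type entry struct {
    proposerIndex uint64
    payloadID     [8]byte
}

type proposerCache struct {
    bySlot map[uint64]entry
}

func newProposerCache() *proposerCache {
    return &proposerCache{bySlot: make(map[uint64]entry)}
}

// set records the proposer (and an as-yet-empty payload ID) for a slot.
func (c *proposerCache) set(slot, idx uint64, pid [8]byte) {
    c.bySlot[slot] = entry{proposerIndex: idx, payloadID: pid}
}

// prune removes every entry scheduled before the given slot.
func (c *proposerCache) prune(before uint64) {
    for slot := range c.bySlot {
        if slot < before {
            delete(c.bySlot, slot)
        }
    }
}

func main() {
    c := newProposerCache()
    c.set(31, 7, [8]byte{})
    c.set(32, 9, [8]byte{})
    c.prune(32) // epoch start slot
    fmt.Println(len(c.bySlot)) // 1
}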
@@ -60,9 +60,10 @@ func TestGetDuties_OK(t *testing.T) {
        State: bs, Root: genesisRoot[:], Genesis: time.Now(),
    }
    vs := &Server{
        HeadFetcher: chain,
        TimeFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        HeadFetcher:            chain,
        TimeFetcher:            chain,
        SyncChecker:            &mockSync.Sync{IsSyncing: false},
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

    // Test the first validator in registry.
@@ -144,10 +145,11 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
        State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
    }
    vs := &Server{
        HeadFetcher: chain,
        TimeFetcher: chain,
        Eth1InfoFetcher: &mockPOW.POWChain{},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        HeadFetcher:            chain,
        TimeFetcher:            chain,
        Eth1InfoFetcher:        &mockPOW.POWChain{},
        SyncChecker:            &mockSync.Sync{IsSyncing: false},
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

    // Test the first validator in registry.
@@ -181,12 +183,12 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
    res, err = vs.GetDuties(context.Background(), req)
    require.NoError(t, err, "Could not call epoch committee assignment")
    for i := 0; i < len(res.CurrentEpochDuties); i++ {
        assert.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
        require.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
    }
    for i := 0; i < len(res.CurrentEpochDuties); i++ {
        assert.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
        require.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
        // Current epoch and next epoch duties should be equal before the sync period epoch boundary.
        assert.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
        require.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
    }

    // Current epoch and next epoch duties should not be equal at the sync period epoch boundary.
@@ -197,7 +199,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
    res, err = vs.GetDuties(context.Background(), req)
    require.NoError(t, err, "Could not call epoch committee assignment")
    for i := 0; i < len(res.CurrentEpochDuties); i++ {
        assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
        require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
    }
}

@@ -249,10 +251,11 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
        State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
    }
    vs := &Server{
        HeadFetcher: chain,
        TimeFetcher: chain,
        Eth1InfoFetcher: &mockPOW.POWChain{},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        HeadFetcher:            chain,
        TimeFetcher:            chain,
        Eth1InfoFetcher:        &mockPOW.POWChain{},
        SyncChecker:            &mockSync.Sync{IsSyncing: false},
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

    // Test the first validator in registry.
@@ -302,7 +305,7 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
    res, err = vs.GetDuties(context.Background(), req)
    require.NoError(t, err, "Could not call epoch committee assignment")
    for i := 0; i < len(res.CurrentEpochDuties); i++ {
        assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
        require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
    }
}

@@ -340,11 +343,12 @@ func TestGetAltairDuties_UnknownPubkey(t *testing.T) {
    require.NoError(t, err)

    vs := &Server{
        HeadFetcher: chain,
        TimeFetcher: chain,
        Eth1InfoFetcher: &mockPOW.POWChain{},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        DepositFetcher: depositCache,
        HeadFetcher:            chain,
        TimeFetcher:            chain,
        Eth1InfoFetcher:        &mockPOW.POWChain{},
        SyncChecker:            &mockSync.Sync{IsSyncing: false},
        DepositFetcher:         depositCache,
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

    unknownPubkey := bytesutil.PadTo([]byte{'u'}, 48)
@@ -399,9 +403,10 @@ func TestGetDuties_CurrentEpoch_ShouldNotFail(t *testing.T) {
        State: bState, Root: genesisRoot[:], Genesis: time.Now(),
    }
    vs := &Server{
        HeadFetcher: chain,
        TimeFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        HeadFetcher:            chain,
        TimeFetcher:            chain,
        SyncChecker:            &mockSync.Sync{IsSyncing: false},
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

    // Test the first validator in registry.
@@ -437,9 +442,10 @@ func TestGetDuties_MultipleKeys_OK(t *testing.T) {
        State: bs, Root: genesisRoot[:], Genesis: time.Now(),
    }
    vs := &Server{
        HeadFetcher: chain,
        TimeFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        HeadFetcher:            chain,
        TimeFetcher:            chain,
        SyncChecker:            &mockSync.Sync{IsSyncing: false},
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

    pubkey0 := deposits[0].Data.PublicKey
@@ -503,11 +509,12 @@ func TestStreamDuties_OK(t *testing.T) {
        Genesis: time.Now(),
    }
    vs := &Server{
        Ctx: ctx,
        HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        TimeFetcher: c,
        StateNotifier: &mockChain.MockStateNotifier{},
        Ctx:                    ctx,
        HeadFetcher:            &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
        SyncChecker:            &mockSync.Sync{IsSyncing: false},
        TimeFetcher:            c,
        StateNotifier:          &mockChain.MockStateNotifier{},
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

    // Test the first validator in registry.
@@ -560,11 +567,12 @@ func TestStreamDuties_OK_ChainReorg(t *testing.T) {
        Genesis: time.Now(),
    }
    vs := &Server{
        Ctx: ctx,
        HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        TimeFetcher: c,
        StateNotifier: &mockChain.MockStateNotifier{},
        Ctx:         ctx,
        HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]}
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: c,
|
||||
StateNotifier: &mockChain.MockStateNotifier{},
|
||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||
}
|
||||
|
||||
// Test the first validator in registry.
|
||||
|
||||
@@ -37,17 +37,24 @@ func (vs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethp
|
||||
case version.Phase0:
|
||||
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
|
||||
if !ok {
|
||||
log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
|
||||
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlock")
|
||||
continue
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
|
||||
case version.Altair:
|
||||
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
|
||||
if !ok {
|
||||
log.Warn("Mismatch between version and block type, was expecting *v2.SignedBeaconBlockAltair")
|
||||
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockAltair")
|
||||
continue
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
|
||||
case version.Bellatrix:
|
||||
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockBellatrix)
|
||||
if !ok {
|
||||
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockBellatrix")
|
||||
continue
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: phBlk}
|
||||
}
|
||||
|
||||
if err := stream.Send(b); err != nil {
|
||||
|
||||
@@ -161,7 +161,7 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1
|
||||
|
||||
finalizedDeposits := vs.DepositFetcher.FinalizedDeposits(ctx)
|
||||
depositTrie = finalizedDeposits.Deposits
|
||||
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, canonicalEth1DataHeight)
|
||||
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, finalizedDeposits.MerkleTrieIndex, canonicalEth1DataHeight)
|
||||
insertIndex := finalizedDeposits.MerkleTrieIndex + 1
|
||||
|
||||
for _, dep := range upToEth1DataDeposits {
|
||||
|
||||
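The NonFinalizedDeposits change above threads the finalized Merkle trie index through, so only deposits past the finalized portion of the trie are replayed. A sketch of the insertion loop that follows the hunk, written against an assumed minimal trie interface (the real types are depositcache's FinalizedDeposits and Prysm's sparse Merkle trie):

package deposits

// merkleInserter is the one method of the deposit trie this sketch needs.
type merkleInserter interface {
	Insert(item []byte, index int) error
}

// insertNonFinalized appends deposits returned by NonFinalizedDeposits, which
// start right after the finalized trie index, at consecutive positions.
func insertNonFinalized(trie merkleInserter, depositHashes [][]byte, insertIndex int64) error {
	for _, dep := range depositHashes {
		if err := trie.Insert(dep, int(insertIndex)); err != nil {
			return err
		}
		insertIndex++
	}
	return nil
}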
@@ -9,6 +9,8 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -24,9 +26,31 @@ import (
"github.com/sirupsen/logrus"
)

var (
// payloadIDCacheMiss tracks the number of payload ID requests that aren't present in the cache.
payloadIDCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_id_cache_miss",
Help: "The number of payload id get requests that aren't present in the cache.",
})
// payloadIDCacheHit tracks the number of payload ID requests that are present in the cache.
payloadIDCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_id_cache_hit",
Help: "The number of payload id get requests that are present in the cache.",
})
)

// This returns the execution payload of a given slot. The function has full awareness of pre and post merge.
// The payload is computed with respect to the time of the merge.
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex) (*enginev1.ExecutionPayload, error) {
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
if ok && proposerID == vIdx && payloadId != [8]byte{} { // Payload ID is a cache hit. Fetch the payload using the cached ID.
var pid [8]byte
copy(pid[:], payloadId[:])
payloadIDCacheHit.Inc()
return vs.ExecutionEngineCaller.GetPayload(ctx, pid)
}
payloadIDCacheMiss.Inc()

st, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, err
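Set and get on the new cache pair up as below; the method names come straight from the hunks in this diff, while the surrounding wrapper is only a sketch:

package main

import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

// cachedPayloadID mirrors the hit path of getExecutionPayload: a hit requires
// a present entry, a matching proposer index, and a non-zero payload ID.
// Entries are written with c.SetProposerAndPayloadIDs(slot, proposer, pid).
func cachedPayloadID(c *cache.ProposerPayloadIDsCache, slot types.Slot, proposer types.ValidatorIndex) ([8]byte, bool) {
	idx, pid, ok := c.GetProposerPayloadIDs(slot)
	if !ok || idx != proposer || pid == [8]byte{} {
		return [8]byte{}, false // miss: fall through to forkchoiceUpdated + GetPayload
	}
	return pid, true // hit: call the engine's GetPayload with pid directly
}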
@@ -9,6 +9,7 @@ import (
"github.com/holiman/uint256"
types "github.com/prysmaticlabs/eth2-types"
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
powtesting "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -106,6 +107,11 @@ func TestServer_getExecutionPayload(t *testing.T) {
payloadID: &pb.PayloadIDBytes{0x1},
validatorIndx: 1,
},
{
name: "transition completed, happy case, payload ID cached",
st: transitionSt,
validatorIndx: 100,
},
{
name: "transition completed, could not prepare payload",
st: transitionSt,
@@ -132,10 +138,12 @@ func TestServer_getExecutionPayload(t *testing.T) {
params.OverrideBeaconConfig(cfg)

vs := &Server{
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr},
HeadFetcher: &chainMock.ChainService{State: tt.st},
BeaconDB: beaconDB,
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr},
HeadFetcher: &chainMock.ChainService{State: tt.st},
BeaconDB: beaconDB,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100})
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)

@@ -10,6 +10,7 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -2344,7 +2345,8 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
ExecutionPayload: payload,
},
BeaconDB: db,
BeaconDB: db,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)

@@ -41,6 +41,7 @@ import (
type Server struct {
Ctx context.Context
AttestationCache *cache.AttestationCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
FinalizationFetcher blockchain.FinalizationFetcher

@@ -109,6 +109,7 @@ type Config struct {
StateGen *stategen.State
MaxMsgSize int
ExecutionEngineCaller powchain.EngineCaller
ProposerIdsCache *cache.ProposerPayloadIDsCache
}

// NewService instantiates a new RPC service instance that will
@@ -207,6 +208,7 @@ func (s *Service) Start() {
ReplayerBuilder: ch,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,

@@ -151,6 +151,15 @@ func (e *epochBoundaryState) put(r [32]byte, s state.BeaconState) error {
return nil
}

// delete the state from the epoch boundary state cache.
func (e *epochBoundaryState) delete(r [32]byte) error {
e.lock.Lock()
defer e.lock.Unlock()
return e.rootStateCache.Delete(&rootStateInfo{
root: r,
})
}

// trim the FIFO queue to the maxSize.
func trim(queue *cache.FIFO, maxSize uint64) {
for s := uint64(len(queue.ListKeys())); s > maxSize; s-- {
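The delete above hands cache.FIFO a stub rootStateInfo carrying only the root; that works because client-go's FIFO addresses items purely through its key function. A sketch of the assumed setup:

package cache

import (
	"errors"

	k8scache "k8s.io/client-go/tools/cache"
)

var errNotRootStateInfo = errors.New("obj is not a *rootStateInfo")

type rootStateInfo struct {
	root [32]byte
}

// rootKeyFn derives the FIFO key from the block root alone, so Delete can be
// called with a stub that sets nothing but the root.
func rootKeyFn(obj interface{}) (string, error) {
	info, ok := obj.(*rootStateInfo)
	if !ok {
		return "", errNotRootStateInfo
	}
	return string(info.root[:]), nil
}

func newRootStateFIFO() *k8scache.FIFO {
	return k8scache.NewFIFO(rootKeyFn)
}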
@@ -19,7 +19,7 @@ func TestEpochBoundaryStateCache_BadRootKey(t *testing.T) {
assert.ErrorContains(t, errNotRootStateInfo.Error(), err, "Did not get wanted error")
}

func TestEpochBoundaryStateCache_CanSave(t *testing.T) {
func TestEpochBoundaryStateCache_CanSaveAndDelete(t *testing.T) {
e := newBoundaryStateCache()
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -46,6 +46,17 @@ func TestEpochBoundaryStateCache_CanSave(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, true, exists, "Should exist")
assert.DeepSSZEqual(t, s.InnerStateUnsafe(), got.state.InnerStateUnsafe(), "Should have the same state")

require.NoError(t, e.delete(r))
got, exists, err = e.getByRoot([32]byte{'b'})
require.NoError(t, err)
assert.Equal(t, false, exists, "Should not exist")
assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")

got, exists, err = e.getBySlot(1)
require.NoError(t, err)
assert.Equal(t, false, exists, "Should not exist")
assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")
}

func TestEpochBoundaryStateCache_CanTrim(t *testing.T) {

@@ -39,7 +39,7 @@ func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool,
return has, nil
}

// StateByRootIfCached retrieves a state using the input block root only if the state is already in the cache
// StateByRootIfCachedNoCopy retrieves a state using the input block root only if the state is already in the cache
func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState {
if !s.hotStateCache.has(blockRoot) {
return nil
@@ -146,6 +146,12 @@ func (s *State) RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*e
return nil, errors.New("could not find block in DB")
}

// DeleteStateFromCaches deletes the state from the caches.
func (s *State) DeleteStateFromCaches(_ context.Context, blockRoot [32]byte) error {
s.hotStateCache.delete(blockRoot)
return s.epochBoundaryStateCache.delete(blockRoot)
}

// This loads a beacon state from either the cache or the DB, then replays blocks up to the requested block root.
func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadStateByRoot")
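A plausible caller-side pattern for the new eviction hook (an assumption for illustration, not a hunk in this diff): after a block fails verification, drop its state so neither the hot cache nor the epoch boundary cache keeps serving it.

package blockchain

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

// onInvalidBlock is a hypothetical caller: evict the bad block's state so the
// hot and epoch-boundary caches stop serving it.
func onInvalidBlock(ctx context.Context, sg stategen.StateManager, blockRoot [32]byte) error {
	if err := sg.DeleteStateFromCaches(ctx, blockRoot); err != nil {
		return errors.Wrap(err, "could not evict cached state for invalid block")
	}
	return nil
}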
@@ -165,6 +165,35 @@ func TestStateByRoot_HotStateCached(t *testing.T) {
require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
}

func TestDeleteStateFromCaches(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

service := New(beaconDB)
beaconState, _ := util.DeterministicGenesisState(t, 32)
r := [32]byte{'A'}

require.Equal(t, false, service.hotStateCache.has(r))
_, has, err := service.epochBoundaryStateCache.getByRoot(r)
require.NoError(t, err)
require.Equal(t, false, has)

service.hotStateCache.put(r, beaconState)
require.NoError(t, service.epochBoundaryStateCache.put(r, beaconState))

require.Equal(t, true, service.hotStateCache.has(r))
_, has, err = service.epochBoundaryStateCache.getByRoot(r)
require.NoError(t, err)
require.Equal(t, true, has)

require.NoError(t, service.DeleteStateFromCaches(ctx, r))

require.Equal(t, false, service.hotStateCache.has(r))
_, has, err = service.epochBoundaryStateCache.getByRoot(r)
require.NoError(t, err)
require.Equal(t, false, has)
}

func TestStateByRoot_StateByRootInitialSync(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

@@ -23,7 +23,7 @@ func NewMockService() *MockStateManager {
}
}

// StateByRootIfCached
// StateByRootIfCachedNoCopy
func (_ *MockStateManager) StateByRootIfCachedNoCopy(_ [32]byte) state.BeaconState {
panic("implement me")
}
@@ -124,3 +124,8 @@ func (m *MockStateManager) AddStateForRoot(state state.BeaconState, blockRoot [3
func (m *MockStateManager) AddStateForSlot(state state.BeaconState, slot types.Slot) {
m.StatesBySlot[slot] = state
}

// DeleteStateFromCaches --
func (m *MockStateManager) DeleteStateFromCaches(context.Context, [32]byte) error {
return nil
}

@@ -39,6 +39,7 @@ type StateManager interface {
ForceCheckpoint(ctx context.Context, root []byte) error
EnableSaveHotStateToDB(_ context.Context)
DisableSaveHotStateToDB(ctx context.Context) error
DeleteStateFromCaches(ctx context.Context, blockRoot [32]byte) error
}

// State is a concrete implementation of StateManager.

@@ -15,7 +15,7 @@ var (
HTTPWeb3ProviderFlag = &cli.StringFlag{
Name: "http-web3provider",
Usage: "A mainchain web3 provider string http endpoint. Can contain auth header as well in the format --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Basic xxx\" for project secret (base64 encoded) and --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Bearer xxx\" for jwt use",
Value: "",
Value: "http://localhost:8545",
}
// ExecutionJWTSecretFlag provides a path to a file containing a hex-encoded string representing a 32 byte secret
// used to authenticate with an execution node via HTTP. This is required if using an HTTP connection, otherwise all requests

@@ -27,7 +27,7 @@ func FlagOptions(c *cli.Context) ([]powchain.Option, error) {
powchain.WithEth1HeaderRequestLimit(c.Uint64(flags.Eth1HeaderReqLimit.Name)),
}
if len(jwtSecret) > 0 {
opts = append(opts, powchain.WithJWTSecret(jwtSecret))
opts = append(opts, powchain.WithHttpEndpointsAndJWTSecret(endpoints, jwtSecret))
}
return opts, nil
}
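WithHttpEndpointsAndJWTSecret replaces the old secret-only option so each endpoint is paired with bearer-auth data up front. A rough sketch of what such a functional option could look like (the Service and config internals here are assumptions; Endpoint and AuthorizationData come from the network package changed later in this diff):

package powchain

import (
	"github.com/prysmaticlabs/prysm/network"
	"github.com/prysmaticlabs/prysm/network/authorization"
)

// Option mutates the powchain service during construction (assumed shape).
type Option func(s *Service) error

func WithHttpEndpointsAndJWTSecret(endpoints []string, secret []byte) Option {
	return func(s *Service) error {
		eps := make([]network.Endpoint, len(endpoints))
		for i, url := range endpoints {
			eps[i] = network.Endpoint{
				Url:  url,
				Auth: network.AuthorizationData{Method: authorization.Bearer, Value: string(secret)},
			}
		}
		s.cfg.httpEndpoints = eps // hypothetical config field
		return nil
	}
}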
@@ -45,7 +45,7 @@ func E2ETestConfig() *BeaconChainConfig {
e2eConfig.DepositChainID = 1337 // Chain ID of eth1 dev net.
e2eConfig.DepositNetworkID = 1337 // Network ID of eth1 dev net.

// Altair Fork Parameters.
// Fork Parameters.
e2eConfig.AltairForkEpoch = altairE2EForkEpoch
e2eConfig.BellatrixForkEpoch = bellatrixE2EForkEpoch

@@ -3,24 +3,32 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"auth.go",
"endpoint.go",
"external_ip.go",
],
importpath = "github.com/prysmaticlabs/prysm/network",
visibility = ["//visibility:public"],
deps = ["//network/authorization:go_default_library"],
deps = [
"//network/authorization:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"auth_test.go",
"endpoint_test.go",
"external_ip_test.go",
],
embed = [":go_default_library"],
deps = [
"//encoding/bytesutil:go_default_library",
"//network/authorization:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
],
)

@@ -1,4 +1,4 @@
package powchain
package network

import (
"net/http"
@@ -8,6 +9,9 @@ import (
"github.com/pkg/errors"
)

// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
const DefaultRPCHTTPTimeout = time.Second * 6

// This creates a custom HTTP transport which we can attach to our HTTP client
// in order to inject JWT auth strings into our HTTP request headers. Authentication
// is required when interacting with an Ethereum engine API server via HTTP, and JWT
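The transport the comment above introduces pairs with the golang-jwt/jwt/v4 dependency added to the network BUILD file. A sketch of the round tripper, using the struct fields shown in the endpoint.go hunk below (the iat-only claim contents are an assumption):

package network

import (
	"net/http"
	"time"

	"github.com/golang-jwt/jwt/v4"
	"github.com/pkg/errors"
)

type jwtTransport struct {
	underlyingTransport http.RoundTripper
	jwtSecret           []byte
}

// RoundTrip signs a fresh HS256 token per request and injects it as a bearer
// Authorization header before delegating to the real transport.
func (t *jwtTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"iat": time.Now().Unix(), // issued-at must be fresh on every request
	})
	signed, err := token.SignedString(t.jwtSecret)
	if err != nil {
		return nil, errors.Wrap(err, "could not sign JWT for engine API request")
	}
	req.Header.Set("Authorization", "Bearer "+signed)
	return t.underlyingTransport.RoundTrip(req)
}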
@@ -1,4 +1,4 @@
package powchain
package network

import (
"net/http"
@@ -2,6 +2,7 @@ package network

import (
"errors"
"net/http"
"strings"

"github.com/prysmaticlabs/prysm/network/authorization"
@@ -24,6 +25,22 @@ func (e Endpoint) Equals(other Endpoint) bool {
return e.Url == other.Url && e.Auth.Equals(other.Auth)
}

// HttpClient creates an HTTP client whose behavior depends
// on the properties of the network endpoint.
func (e Endpoint) HttpClient() *http.Client {
if e.Auth.Method != authorization.Bearer {
return http.DefaultClient
}
authTransport := &jwtTransport{
underlyingTransport: http.DefaultTransport,
jwtSecret: []byte(e.Auth.Value),
}
return &http.Client{
Timeout: DefaultRPCHTTPTimeout,
Transport: authTransport,
}
}

// Equals compares two authorization data objects for equality.
func (d AuthorizationData) Equals(other AuthorizationData) bool {
return d.Method == other.Method && d.Value == other.Value
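Putting the pieces together, a caller gets an authenticated client from an Endpoint like so (the URL and secret below are placeholders):

package main

import (
	"bytes"
	"net/http"

	"github.com/prysmaticlabs/prysm/network"
	"github.com/prysmaticlabs/prysm/network/authorization"
)

func engineClient(secret []byte) (*http.Client, string) {
	ep := network.Endpoint{
		Url: "http://localhost:8551", // placeholder engine API endpoint
		Auth: network.AuthorizationData{
			Method: authorization.Bearer,
			Value:  string(secret),
		},
	}
	// Bearer auth yields a client wired with jwtTransport and the 6s default
	// timeout; any other method falls back to http.DefaultClient.
	return ep.HttpClient(), ep.Url
}

func post(c *http.Client, url string, body []byte) (*http.Response, error) {
	return c.Post(url, "application/json", bytes.NewReader(body))
}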
proto/eth/service/key_management.pb.go (generated, 1042 lines changed; diff suppressed because it is too large)
@@ -123,6 +123,92 @@ func local_request_KeyManagement_DeleteKeystores_0(ctx context.Context, marshale

}

func request_KeyManagement_ListRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata

msg, err := client.ListRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err

}

func local_request_KeyManagement_ListRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata

msg, err := server.ListRemoteKeys(ctx, &protoReq)
return msg, metadata, err

}

func request_KeyManagement_ImportRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ImportRemoteKeysRequest
var metadata runtime.ServerMetadata

newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}

msg, err := client.ImportRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err

}

func local_request_KeyManagement_ImportRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ImportRemoteKeysRequest
var metadata runtime.ServerMetadata

newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}

msg, err := server.ImportRemoteKeys(ctx, &protoReq)
return msg, metadata, err

}

func request_KeyManagement_DeleteRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteRemoteKeysRequest
var metadata runtime.ServerMetadata

newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}

msg, err := client.DeleteRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err

}

func local_request_KeyManagement_DeleteRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteRemoteKeysRequest
var metadata runtime.ServerMetadata

newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}

msg, err := server.DeleteRemoteKeys(ctx, &protoReq)
return msg, metadata, err

}

// RegisterKeyManagementHandlerServer registers the http handlers for service KeyManagement to "mux".
// UnaryRPC :call KeyManagementServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -198,6 +284,75 @@ func RegisterKeyManagementHandlerServer(ctx context.Context, mux *runtime.ServeM

})

mux.Handle("GET", pattern_KeyManagement_ListRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_ListRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}

forward_KeyManagement_ListRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

})

mux.Handle("POST", pattern_KeyManagement_ImportRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ImportRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_ImportRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}

forward_KeyManagement_ImportRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

})

mux.Handle("DELETE", pattern_KeyManagement_DeleteRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_DeleteRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}

forward_KeyManagement_DeleteRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

})

return nil
}

@@ -299,6 +454,66 @@ func RegisterKeyManagementHandlerClient(ctx context.Context, mux *runtime.ServeM

})

mux.Handle("GET", pattern_KeyManagement_ListRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_ListRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}

forward_KeyManagement_ListRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

})

mux.Handle("POST", pattern_KeyManagement_ImportRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ImportRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_ImportRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}

forward_KeyManagement_ImportRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

})

mux.Handle("DELETE", pattern_KeyManagement_DeleteRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_DeleteRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}

forward_KeyManagement_DeleteRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

})

return nil
}

@@ -308,6 +523,12 @@ var (
pattern_KeyManagement_ImportKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "keystores"}, ""))

pattern_KeyManagement_DeleteKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "keystores"}, ""))

pattern_KeyManagement_ListRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))

pattern_KeyManagement_ImportRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))

pattern_KeyManagement_DeleteRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
)

var (
@@ -316,4 +537,10 @@ var (
forward_KeyManagement_ImportKeystores_0 = runtime.ForwardResponseMessage

forward_KeyManagement_DeleteKeystores_0 = runtime.ForwardResponseMessage

forward_KeyManagement_ListRemoteKeys_0 = runtime.ForwardResponseMessage

forward_KeyManagement_ImportRemoteKeys_0 = runtime.ForwardResponseMessage

forward_KeyManagement_DeleteRemoteKeys_0 = runtime.ForwardResponseMessage
)

@@ -84,6 +84,27 @@ service KeyManagement {
body: "*"
};
}

rpc ListRemoteKeys(google.protobuf.Empty) returns (ListRemoteKeysResponse) {
option (google.api.http) = {
get: "/internal/eth/v1/remotekeys"
};
}

rpc ImportRemoteKeys(ImportRemoteKeysRequest) returns (ImportRemoteKeysResponse) {
option (google.api.http) = {
post: "/internal/eth/v1/remotekeys",
body: "*"
};
}

rpc DeleteRemoteKeys(DeleteRemoteKeysRequest) returns (DeleteRemoteKeysResponse) {
option (google.api.http) = {
delete: "/internal/eth/v1/remotekeys",
body: "*"
};
}
}

message ListKeystoresResponse {
@@ -133,3 +154,55 @@ message DeletedKeystoreStatus {
Status status = 1;
string message = 2;
}

message ListRemoteKeysResponse {
message Keystore {
bytes pubkey = 1;
string url = 2;
bool readonly = 3;
}
repeated Keystore data = 1;
}

message ImportRemoteKeysRequest {
message Keystore {
bytes pubkey = 1;
string url = 2;
}
repeated Keystore remote_keys = 1;
}

message ImportRemoteKeysResponse {
repeated ImportedRemoteKeysStatus data = 1;
}

message DeleteRemoteKeysRequest {
repeated bytes pubkeys = 1;
}

message DeleteRemoteKeysResponse {
repeated DeletedRemoteKeysStatus data = 1;
}

message ImportedRemoteKeysStatus {
enum Status {
UNKNOWN = 0;
IMPORTED = 1;
DUPLICATE = 2;
ERROR = 3;
}
Status status = 1;
string message = 2;
}

message DeletedRemoteKeysStatus {
enum Status {
NOT_FOUND = 0;
DELETED = 1;
ERROR = 3; // Skips 2 to match DeletedKeystoreStatus, which also uses ERROR = 3.
}
Status status = 1;
string message = 2;
}
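The new RPCs surface as plain REST routes through the gateway handlers registered above. A minimal Go call against the list endpoint (the gateway host and port are placeholders; the response field names follow the proto messages):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type remoteKey struct {
	Pubkey   string `json:"pubkey"`
	Url      string `json:"url"`
	Readonly bool   `json:"readonly"`
}

func listRemoteKeys() ([]remoteKey, error) {
	resp, err := http.Get("http://localhost:7500/internal/eth/v1/remotekeys") // placeholder host:port
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	var out struct {
		Data []remoteKey `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return nil, err
	}
	return out.Data, nil
}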
proto/prysm/v1alpha1/validator.pb.go (generated, 1546 lines changed; diff suppressed because it is too large)
@@ -360,6 +360,9 @@ message StreamBlocksResponse {

// Representing an altair block.
SignedBeaconBlockAltair altair_block = 2;

// Representing a bellatrix block.
SignedBeaconBlockBellatrix bellatrix_block = 3;
}
}

@@ -48,6 +48,7 @@ go_test(
"//testing/require:go_default_library",
"//testing/slasher/simulator:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

@@ -9,6 +9,7 @@ go_library(
"lighthouse_beacon.go",
"lighthouse_validator.go",
"log.go",
"proxy.go",
"tracing_sink.go",
"validator.go",
"web3remotesigner.go",
@@ -29,6 +30,7 @@ go_library(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//proto/engine/v1:go_default_library",
"//runtime/interop:go_default_library",
"//testing/endtoend/components/eth1:go_default_library",
"//testing/endtoend/helpers:go_default_library",

@@ -117,8 +117,10 @@ func (node *BeaconNode) Start(ctx context.Context) error {
expectedNumOfPeers += 1
}
jwtPath := path.Join(e2e.TestParams.TestPath, "eth1data/"+strconv.Itoa(node.index)+"/")
eth1Port := e2e.TestParams.Ports.Eth1AuthRPCPort
if index == 0 {
jwtPath = path.Join(e2e.TestParams.TestPath, "eth1data/miner/")
eth1Port = e2e.TestParams.Ports.Eth1ProxyPort
}
jwtPath = path.Join(jwtPath, "geth/jwtsecret")
args := []string{
@@ -126,7 +128,7 @@ func (node *BeaconNode) Start(ctx context.Context) error {
fmt.Sprintf("--%s=%s", cmdshared.LogFileName.Name, stdOutFile.Name()),
fmt.Sprintf("--%s=%s", flags.DepositContractFlag.Name, e2e.TestParams.ContractAddress.Hex()),
fmt.Sprintf("--%s=%d", flags.RPCPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeRPCPort+index),
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, e2e.TestParams.Ports.Eth1RPCPort),
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, eth1Port+index),
fmt.Sprintf("--%s=%s", flags.ExecutionJWTSecretFlag.Name, jwtPath),
fmt.Sprintf("--%s=%d", flags.MinSyncPeers.Name, 1),
fmt.Sprintf("--%s=%d", cmdshared.P2PUDPPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeUDPPort+index),

@@ -89,7 +89,11 @@ func (node *Node) Start(ctx context.Context) error {
"--ipcdisable",
"--verbosity=4",
}

// If we are testing sync, geth needs to be run via full sync as snap sync does not
// work in our setup.
if node.index == e2e.TestParams.BeaconNodeCount+e2e.TestParams.LighthouseBeaconNodeCount {
args = append(args, []string{"--syncmode=full"}...)
}
runCmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Safe
file, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, "eth1_"+strconv.Itoa(node.index)+".log")
if err != nil {
testing/endtoend/components/proxy.go (new file, 511 lines)
@@ -0,0 +1,511 @@
package components
|
||||
|
||||
// Package main provides a proxy middleware for engine API requests between Ethereum
|
||||
// consensus clients and execution clients accordingly. Allows for configuration of various
|
||||
// test cases using yaml files as detailed in the README.md of the document.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
|
||||
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
spoofingConfigFile = flag.String("spoofing-config", "tools/engine-proxy/spoofing_config.yaml", "")
|
||||
executionEndpoint = flag.String("execution-endpoint", "http://localhost:8545", "")
|
||||
fuzz = flag.Bool(
|
||||
"fuzz",
|
||||
false,
|
||||
"fuzzes requests and responses, overrides -spoofing-config if -fuzz is set",
|
||||
)
|
||||
)
|
||||
|
||||
type spoofingConfig struct {
|
||||
Requests []*spoof `yaml:"requests"`
|
||||
Responses []*spoof `yaml:"responses"`
|
||||
}
|
||||
|
||||
type spoof struct {
|
||||
Method string
|
||||
Fields map[string]interface{}
|
||||
}
|
||||
|
||||
type jsonRPCObject struct {
|
||||
Jsonrpc string `json:"jsonrpc"`
|
||||
Method string `json:"method"`
|
||||
Params []interface{} `json:"params"`
|
||||
ID uint64 `json:"id"`
|
||||
Result interface{} `json:"result"`
|
||||
}
|
||||
|
||||
type forkchoiceUpdatedResponse struct {
|
||||
Status *pb.PayloadStatus `json:"payloadStatus"`
|
||||
PayloadId *pb.PayloadIDBytes `json:"payloadId"`
|
||||
}
|
||||
|
||||
type InterceptorFunc func(reqBytes []byte, w http.ResponseWriter, r *http.Request) bool
|
||||
|
||||
type ProxyNode struct {
|
||||
address string
|
||||
srv *http.Server
|
||||
destAddress string
|
||||
logger *logrus.Logger
|
||||
logEntry *logrus.Entry
|
||||
interceptor InterceptorFunc
|
||||
backedUpRequests []*http.Request
|
||||
}
|
||||
|
||||
func NewProxyNode(proxyPort int, destAddress string) *ProxyNode {
|
||||
pn := &ProxyNode{}
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/", pn.proxyHandler())
|
||||
addr := "127.0.0.1:" + strconv.Itoa(proxyPort)
|
||||
srv := &http.Server{
|
||||
Handler: mux,
|
||||
Addr: addr,
|
||||
}
|
||||
pn.address = addr
|
||||
pn.srv = srv
|
||||
pn.destAddress = destAddress
|
||||
return pn
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) Start(ctx context.Context) error {
|
||||
stdOutFile, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, "eth1-proxy.log")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pn.logger = logrus.New()
|
||||
pn.logger.SetOutput(stdOutFile)
|
||||
pn.logEntry = pn.logger.WithField("prefix", "engine-proxy")
|
||||
pn.logEntry.Infof("Engine proxy now listening on address %s", pn.address)
|
||||
pn.srv.BaseContext = func(listener net.Listener) context.Context {
|
||||
return ctx
|
||||
}
|
||||
go func() {
|
||||
if err := pn.srv.ListenAndServe(); err != nil {
|
||||
pn.logEntry.Error(err)
|
||||
}
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return pn.srv.Shutdown(context.Background())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) AddInterceptor(icptr InterceptorFunc) {
|
||||
pn.interceptor = icptr
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) SyncingInterceptor() InterceptorFunc {
|
||||
return func(reqBytes []byte, w http.ResponseWriter, r *http.Request) bool {
|
||||
if !pn.checkIfValid(reqBytes) {
|
||||
return false
|
||||
}
|
||||
pn.returnSyncingResponse(reqBytes, w, r)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Proxies requests from a consensus client to an execution client, spoofing requests
|
||||
// and/or responses as desired. Acts as a middleware useful for testing different merge scenarios.
|
||||
func (pn *ProxyNode) proxyHandler() func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestBytes, err := pn.parseRequestBytes(r)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not parse request")
|
||||
return
|
||||
}
|
||||
if pn.interceptor != nil && pn.interceptor(requestBytes, w, r) {
|
||||
return
|
||||
}
|
||||
|
||||
for _, rq := range pn.backedUpRequests {
|
||||
requestB, err := pn.parseRequestBytes(rq)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not parse request")
|
||||
return
|
||||
}
|
||||
destAddr := "http://" + pn.destAddress
|
||||
|
||||
// Create a new proxy request to the execution client.
|
||||
url := rq.URL
|
||||
url.Host = destAddr
|
||||
proxyReq, err := http.NewRequest(rq.Method, destAddr, rq.Body)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could create new request")
|
||||
return
|
||||
}
|
||||
|
||||
// Set the modified request as the proxy request body.
|
||||
proxyReq.Body = ioutil.NopCloser(bytes.NewBuffer(requestB))
|
||||
|
||||
// Required proxy headers for forwarding JSON-RPC requests to the execution client.
|
||||
proxyReq.Header.Set("Host", rq.Host)
|
||||
proxyReq.Header.Set("X-Forwarded-For", rq.RemoteAddr)
|
||||
proxyReq.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
proxyRes, err := client.Do(proxyReq)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not do client proxy")
|
||||
return
|
||||
}
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
// Pipe the proxy response to the original caller.
|
||||
if _, err = io.Copy(buf, proxyRes.Body); err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not copy proxy request body")
|
||||
return
|
||||
}
|
||||
if err = proxyRes.Body.Close(); err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not do client proxy")
|
||||
return
|
||||
}
|
||||
pn.logEntry.Infof("Queued Request Response: %s", buf.String())
|
||||
}
|
||||
pn.backedUpRequests = []*http.Request{}
|
||||
|
||||
/*
|
||||
var modifiedReq []byte
|
||||
if *fuzz {
|
||||
modifiedReq, err = fuzzRequest(requestBytes)
|
||||
} else {
|
||||
// We optionally spoof the request as desired.
|
||||
modifiedReq, err = spoofRequest(config, requestBytes)
|
||||
}
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to spoof request")
|
||||
return
|
||||
}
|
||||
*/
|
||||
|
||||
destAddr := "http://" + pn.destAddress
|
||||
|
||||
// Create a new proxy request to the execution client.
|
||||
url := r.URL
|
||||
url.Host = destAddr
|
||||
proxyReq, err := http.NewRequest(r.Method, destAddr, r.Body)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could create new request")
|
||||
return
|
||||
}
|
||||
|
||||
// Set the modified request as the proxy request body.
|
||||
proxyReq.Body = ioutil.NopCloser(bytes.NewBuffer(requestBytes))
|
||||
|
||||
// Required proxy headers for forwarding JSON-RPC requests to the execution client.
|
||||
proxyReq.Header.Set("Host", r.Host)
|
||||
proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
|
||||
proxyReq.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
proxyRes, err := client.Do(proxyReq)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not do client proxy")
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
// We optionally spoof the response as desired.
|
||||
var modifiedResp []byte
|
||||
if *fuzz {
|
||||
modifiedResp, err = fuzzResponse(proxyRes.Body)
|
||||
} else {
|
||||
modifiedResp, err = spoofResponse(config, requestBytes, proxyRes.Body)
|
||||
}
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to spoof response")
|
||||
return
|
||||
}
|
||||
*/
|
||||
|
||||
// Pipe the proxy response to the original caller.
|
||||
if _, err = io.Copy(w, proxyRes.Body); err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not copy proxy request body")
|
||||
return
|
||||
}
|
||||
if err = proxyRes.Body.Close(); err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not do client proxy")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) parseRequestBytes(req *http.Request) ([]byte, error) {
|
||||
requestBytes, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = req.Body.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pn.logEntry.Infof("%s", string(requestBytes))
|
||||
req.Body = ioutil.NopCloser(bytes.NewBuffer(requestBytes))
|
||||
return requestBytes, nil
|
||||
}
|
||||
|
||||
func fuzzRequest(requestBytes []byte) ([]byte, error) {
|
||||
// If the JSON request is not a JSON-RPC object, return the request as-is.
|
||||
jsonRequest, err := unmarshalRPCObject(requestBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return requestBytes, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if len(jsonRequest.Params) == 0 {
|
||||
return requestBytes, nil
|
||||
}
|
||||
params := make(map[string]interface{})
|
||||
if err := extractObjectFromJSONRPC(jsonRequest.Params[0], ¶ms); err != nil {
|
||||
return requestBytes, nil
|
||||
}
|
||||
// 10% chance to fuzz a field.
|
||||
fuzzProbability := uint64(5) // TODO: Do not hardcode.
|
||||
for k, v := range params {
|
||||
// Each field has a probability of of being fuzzed.
|
||||
r, err := rand.Int(rand.Reader, big.NewInt(100))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch obj := v.(type) {
|
||||
case string:
|
||||
if strings.Contains(obj, "0x") && r.Uint64() <= fuzzProbability {
|
||||
// Ignore hex strings of odd length.
|
||||
if len(obj)%2 != 0 {
|
||||
continue
|
||||
}
|
||||
// Generate some random hex string with same length and set it.
|
||||
// TODO: Experiment with length < current field, or junk in general.
|
||||
hexBytes, err := hexutil.Decode(obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fuzzedStr, err := randomHex(len(hexBytes))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("method", jsonRequest.Method).Warnf(
|
||||
"Fuzzing field %s, modifying value from %s to %s",
|
||||
k,
|
||||
obj,
|
||||
fuzzedStr,
|
||||
)
|
||||
params[k] = fuzzedStr
|
||||
}
|
||||
}
|
||||
}
|
||||
jsonRequest.Params[0] = params
|
||||
return json.Marshal(jsonRequest)
|
||||
}
|
||||
|
||||
// Parses the request from thec consensus client and checks if user desires
|
||||
// to spoof it based on the JSON-RPC method. If so, it returns the modified
|
||||
// request bytes which will be proxied to the execution client.
|
||||
func spoofRequest(config *spoofingConfig, requestBytes []byte) ([]byte, error) {
|
||||
// If the JSON request is not a JSON-RPC object, return the request as-is.
|
||||
jsonRequest, err := unmarshalRPCObject(requestBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return requestBytes, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if len(jsonRequest.Params) == 0 {
|
||||
return requestBytes, nil
|
||||
}
|
||||
desiredMethodsToSpoof := make(map[string]*spoof)
|
||||
for _, spoofReq := range config.Requests {
|
||||
desiredMethodsToSpoof[spoofReq.Method] = spoofReq
|
||||
}
|
||||
// If we don't want to spoof the request, just return the request as-is.
|
||||
spoofDetails, ok := desiredMethodsToSpoof[jsonRequest.Method]
|
||||
if !ok {
|
||||
return requestBytes, nil
|
||||
}
|
||||
|
||||
// TODO: Support methods with multiple params.
|
||||
params := make(map[string]interface{})
|
||||
if err := extractObjectFromJSONRPC(jsonRequest.Params[0], ¶ms); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for fieldToModify, fieldValue := range spoofDetails.Fields {
|
||||
if _, ok := params[fieldToModify]; !ok {
|
||||
continue
|
||||
}
|
||||
params[fieldToModify] = fieldValue
|
||||
}
|
||||
log.WithField("method", jsonRequest.Method).Infof("Modified request %v", params)
|
||||
jsonRequest.Params[0] = params
|
||||
return json.Marshal(jsonRequest)
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) checkIfValid(reqBytes []byte) bool {
|
||||
jsonRequest, err := unmarshalRPCObject(reqBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return false
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
if strings.Contains(jsonRequest.Method, "engine_forkchoiceUpdatedV1") ||
|
||||
strings.Contains(jsonRequest.Method, "engine_newPayloadV1") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) returnSyncingResponse(reqBytes []byte, w http.ResponseWriter, r *http.Request) {
|
||||
jsonRequest, err := unmarshalRPCObject(reqBytes)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
switch {
|
||||
case strings.Contains(jsonRequest.Method, "engine_forkchoiceUpdatedV1"):
|
||||
resp := &forkchoiceUpdatedResponse{
|
||||
Status: &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_SYNCING,
|
||||
},
|
||||
PayloadId: nil,
|
||||
}
|
||||
jResp := &jsonRPCObject{
|
||||
Method: jsonRequest.Method,
|
||||
ID: jsonRequest.ID,
|
||||
Result: resp,
|
||||
}
|
||||
rawResp, err := json.Marshal(jResp)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = w.Write(rawResp)
|
||||
_ = err
|
||||
return
|
||||
case strings.Contains(jsonRequest.Method, "engine_newPayloadV1"):
|
||||
resp := &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_SYNCING,
|
||||
}
|
||||
jResp := &jsonRPCObject{
|
||||
Method: jsonRequest.Method,
|
||||
ID: jsonRequest.ID,
|
||||
Result: resp,
|
||||
}
|
||||
rawResp, err := json.Marshal(jResp)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = w.Write(rawResp)
|
||||
_ = err
|
||||
pn.backedUpRequests = append(pn.backedUpRequests, r)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fuzzResponse(responseBody io.Reader) ([]byte, error) {
|
||||
responseBytes, err := ioutil.ReadAll(responseBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return responseBytes, nil
|
||||
}
|
||||
|
||||
// Parses the response body from the execution client and checks if user desires
|
||||
// to spoof it based on the JSON-RPC method. If so, it returns the modified
|
||||
// response bytes which will be proxied to the consensus client.
|
||||
func spoofResponse(config *spoofingConfig, requestBytes []byte, responseBody io.Reader) ([]byte, error) {
|
||||
responseBytes, err := ioutil.ReadAll(responseBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If the JSON request is not a JSON-RPC object, return the request as-is.
|
||||
jsonRequest, err := unmarshalRPCObject(requestBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return responseBytes, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
jsonResponse, err := unmarshalRPCObject(responseBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return responseBytes, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
desiredMethodsToSpoof := make(map[string]*spoof)
|
||||
for _, spoofReq := range config.Responses {
|
||||
desiredMethodsToSpoof[spoofReq.Method] = spoofReq
|
||||
}
|
||||
// If we don't want to spoof the request, just return the request as-is.
|
||||
spoofDetails, ok := desiredMethodsToSpoof[jsonRequest.Method]
|
||||
if !ok {
|
||||
return responseBytes, nil
|
||||
}
|
||||
|
||||
// TODO: Support nested objects.
|
||||
params := make(map[string]interface{})
|
||||
if err := extractObjectFromJSONRPC(jsonResponse.Result, ¶ms); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for fieldToModify, fieldValue := range spoofDetails.Fields {
|
||||
if _, ok := params[fieldToModify]; !ok {
|
||||
continue
|
||||
}
|
||||
params[fieldToModify] = fieldValue
|
||||
}
|
||||
log.WithField("method", jsonRequest.Method).Infof("Modified response %v", params)
|
||||
jsonResponse.Result = params
|
||||
return json.Marshal(jsonResponse)
|
||||
}
|
||||

// unmarshalRPCObject decodes raw bytes into a jsonRPCObject.
func unmarshalRPCObject(rawBytes []byte) (*jsonRPCObject, error) {
    jsonObj := &jsonRPCObject{}
    if err := json.Unmarshal(rawBytes, jsonObj); err != nil {
        return nil, err
    }
    return jsonObj, nil
}

// extractObjectFromJSONRPC round-trips src through JSON to decode the generic
// JSON-RPC result into the typed value pointed to by dst.
func extractObjectFromJSONRPC(src interface{}, dst interface{}) error {
    rawResp, err := json.Marshal(src)
    if err != nil {
        return err
    }
    return json.Unmarshal(rawResp, dst)
}

// randomHex returns a 0x-prefixed hex string encoding n random bytes.
func randomHex(n int) (string, error) {
    b := make([]byte, n)
    if _, err := rand.Read(b); err != nil {
        return "", err
    }
    return "0x" + hex.EncodeToString(b), nil
}

@@ -7,8 +7,10 @@ package endtoend
import (
    "context"
    "fmt"
    "net/http"
    "os"
    "path"
    "strconv"
    "strings"
    "sync"
    "testing"
@@ -27,6 +29,7 @@ import (
    e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
    e2etypes "github.com/prysmaticlabs/prysm/testing/endtoend/types"
    "github.com/prysmaticlabs/prysm/testing/require"
    "github.com/prysmaticlabs/prysm/time/slots"
    log "github.com/sirupsen/logrus"
    "golang.org/x/sync/errgroup"
    "google.golang.org/grpc"
@@ -79,7 +82,10 @@ func (r *testRunner) run() {
    g.Go(func() error {
        return tracingSink.Start(ctx)
    })

    proxyNode := components.NewProxyNode(e2e.TestParams.Ports.Eth1ProxyPort, "127.0.0.1:"+strconv.Itoa(e2e.TestParams.Ports.Eth1RPCPort))
    g.Go(func() error {
        return proxyNode.Start(ctx)
    })
    if multiClientActive {
        keyGen = components.NewKeystoreGenerator()

@@ -254,6 +260,17 @@ func (r *testRunner) run() {
    require.NoError(t, err)
    tickingStartTime := helpers.EpochTickerStartTime(genesis)

    proxyNode.AddInterceptor(func(reqBytes []byte, w http.ResponseWriter, r *http.Request) bool {
        currSlot := slots.CurrentSlot(uint64(genesis.GenesisTime.AsTime().Unix()))
        if currSlot < params.BeaconConfig().SlotsPerEpoch.Mul(uint64(9)) {
            return false
        }
        if currSlot >= params.BeaconConfig().SlotsPerEpoch.Mul(uint64(10)) {
            return false
        }
        return proxyNode.SyncingInterceptor()(reqBytes, w, r)
    })

    // Run assigned evaluators.
    if err := r.runEvaluators(conns, tickingStartTime); err != nil {
        return errors.Wrap(err, "one or more evaluators failed")
@@ -299,6 +316,9 @@ func (r *testRunner) runEvaluators(conns []*grpc.ClientConn, tickingStartTime ti
    secondsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
    ticker := helpers.NewEpochTicker(tickingStartTime, secondsPerEpoch)
    for currentEpoch := range ticker.C() {
        if currentEpoch == 9 || currentEpoch == 10 {
            continue
        }
        wg := new(sync.WaitGroup)
        for _, eval := range config.Evaluators {
            // Fix reference to evaluator as it will be running
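
The two hunks above work in tandem: the interceptor forces SYNCING responses only while the current slot lies in [9*SlotsPerEpoch, 10*SlotsPerEpoch), i.e. exactly during epoch 9, and the evaluator loop skips epochs 9 and 10 so that nothing fails while the node is deliberately held back and then catching up again. As a worked example, if SlotsPerEpoch were 32 (an assumption; the e2e config value is not shown in this diff), the forced-syncing window would cover slots 288 through 319.
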
@@ -369,7 +389,7 @@ func (r *testRunner) testTxGeneration(ctx context.Context, g *errgroup.Group, ke
func (r *testRunner) testBeaconChainSync(ctx context.Context, g *errgroup.Group,
    conns []*grpc.ClientConn, tickingStartTime time.Time, bootnodeEnr, minerEnr string) error {
    t, config := r.t, r.config
    index := e2e.TestParams.BeaconNodeCount
    index := e2e.TestParams.BeaconNodeCount + e2e.TestParams.LighthouseBeaconNodeCount
    ethNode := eth1.NewNode(index, minerEnr)
    g.Go(func() error {
        return ethNode.Start(ctx)

@@ -7,6 +7,7 @@ go_library(
        "api_gateway_v1alpha1.go",
        "api_middleware.go",
        "data.go",
        "execution_engine.go",
        "finality.go",
        "fork.go",
        "metrics.go",

54
testing/endtoend/evaluators/execution_engine.go
Normal file
@@ -0,0 +1,54 @@
package evaluators

import (
    "context"
    "math"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/config/params"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
    e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
    "github.com/prysmaticlabs/prysm/testing/endtoend/policies"
    "github.com/prysmaticlabs/prysm/testing/endtoend/types"
    "google.golang.org/grpc"
    "google.golang.org/protobuf/types/known/emptypb"
)

// TransactionsPresent is an evaluator to make sure transactions sent to the execution engine
// appear in consensus client blocks' execution payloads.
var TransactionsPresent = types.Evaluator{
    Name: "transactions_present_at_epoch_%d",
    Policy: policies.AfterNthEpoch(helpers.BellatrixE2EForkEpoch),
    Evaluation: transactionsPresent,
}

func transactionsPresent(conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewBeaconChainClient(conn)
    chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
    if err != nil {
        return errors.Wrap(err, "failed to get chain head")
    }
    req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch.Sub(1)}}
    blks, err := client.ListBeaconBlocks(context.Background(), req)
    if err != nil {
        return errors.Wrap(err, "failed to get blocks from beacon-chain")
    }
    expectedTxNum := int(math.Round(float64(params.E2ETestConfig().SlotsPerEpoch) * float64(e2e.NumOfExecEngineTxs) * e2e.ExpectedExecEngineTxsThreshold))
    var numberOfTxs int
    for _, ctr := range blks.BlockContainers {
        switch ctr.Block.(type) {
        case *ethpb.BeaconBlockContainer_BellatrixBlock:
            numberOfTxs += len(ctr.GetBellatrixBlock().Block.Body.ExecutionPayload.Transactions)
        }
    }
    if numberOfTxs < expectedTxNum {
        return errors.Errorf(
            "not enough transactions in execution payload, expected=%d vs actual=%d",
            expectedTxNum,
            numberOfTxs,
        )
    }
    return nil
}
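
A quick worked example of the threshold arithmetic: NumOfExecEngineTxs is 200 and ExpectedExecEngineTxsThreshold is 0.7 (both defined in the params change further down), so with a hypothetical SlotsPerEpoch of 32 the evaluator would demand at least round(32 * 200 * 0.7) = 4480 transactions across the inspected epoch's Bellatrix payloads.
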
@@ -14,14 +14,21 @@ import (
    "google.golang.org/grpc"
)

// ForkTransition ensures that the hard fork has occurred successfully.
var ForkTransition = types.Evaluator{
    Name: "fork_transition_%d",
// AltairForkTransition ensures that the Altair hard fork has occurred successfully.
var AltairForkTransition = types.Evaluator{
    Name: "altair_fork_transition_%d",
    Policy: policies.OnEpoch(helpers.AltairE2EForkEpoch),
    Evaluation: forkOccurs,
    Evaluation: altairForkOccurs,
}

func forkOccurs(conns ...*grpc.ClientConn) error {
// BellatrixForkTransition ensures that the Bellatrix hard fork has occurred successfully.
var BellatrixForkTransition = types.Evaluator{
    Name: "bellatrix_fork_transition_%d",
    Policy: policies.OnEpoch(helpers.BellatrixE2EForkEpoch),
    Evaluation: bellatrixForkOccurs,
}

func altairForkOccurs(conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewBeaconNodeValidatorClient(conn)
    ctx, cancel := context.WithCancel(context.Background())
@@ -62,3 +69,48 @@ func forkOccurs(conns ...*grpc.ClientConn) error {
    }
    return nil
}

func bellatrixForkOccurs(conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewBeaconNodeValidatorClient(conn)
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    stream, err := client.StreamBlocksAltair(ctx, &ethpb.StreamBlocksRequest{VerifiedOnly: true})
    if err != nil {
        return errors.Wrap(err, "failed to get stream")
    }
    fSlot, err := slots.EpochStart(helpers.BellatrixE2EForkEpoch)
    if err != nil {
        return err
    }
    if ctx.Err() == context.Canceled {
        return errors.New("context canceled prematurely")
    }
    res, err := stream.Recv()
    if err != nil {
        return err
    }
    if res == nil || res.Block == nil {
        return errors.New("nil block returned by beacon node")
    }
    if res.GetPhase0Block() == nil && res.GetAltairBlock() == nil && res.GetBellatrixBlock() == nil {
        return errors.New("nil block returned by beacon node")
    }
    if res.GetPhase0Block() != nil {
        return errors.New("phase 0 block returned after bellatrix fork has occurred")
    }
    if res.GetAltairBlock() != nil {
        return errors.New("altair block returned after bellatrix fork has occurred")
    }
    blk, err := wrapperv2.WrappedSignedBeaconBlock(res.GetBellatrixBlock())
    if err != nil {
        return err
    }
    if err := coreHelper.BeaconBlockIsNil(blk); err != nil {
        return err
    }
    if blk.Block().Slot() < fSlot {
        return errors.Errorf("wanted a block >= %d but received %d", fSlot, blk.Block().Slot())
    }
    return nil
}

@@ -419,5 +419,8 @@ func convertToBlockInterface(obj *ethpb.BeaconBlockContainer) (block.SignedBeaco
    if obj.GetBellatrixBlock() != nil {
        return wrapper.WrappedSignedBeaconBlock(obj.GetBellatrixBlock())
    }
    if obj.GetBellatrixBlock() != nil {
        return wrapper.WrappedSignedBeaconBlock(obj.GetBellatrixBlock())
    }
    return nil, errors.New("container has no block")
}

@@ -54,11 +54,13 @@ func e2eMainnet(t *testing.T, usePrysmSh bool) {
        ev.ProposeVoluntaryExit,
        ev.ValidatorHasExited,
        ev.ColdStateCheckpoint,
        ev.ForkTransition,
        ev.AltairForkTransition,
        ev.BellatrixForkTransition,
        ev.APIMiddlewareVerifyIntegrity,
        ev.APIGatewayV1Alpha1VerifyIntegrity,
        ev.FinishedSyncing,
        ev.AllNodesHaveSameHead,
        ev.TransactionsPresent,
    }
    testConfig := &types.E2EConfig{
        BeaconFlags: []string{

@@ -19,7 +19,7 @@ type testArgs struct {
    useWeb3RemoteSigner bool
}

func TestEndToEnd_MinimalConfig(t *testing.T) {
func TestEndToEnd_MinimalConfigV(t *testing.T) {
    e2eMinimal(t, &testArgs{
        usePrysmSh: false,
        useWeb3RemoteSigner: false,
@@ -83,12 +83,14 @@ func e2eMinimal(t *testing.T, args *testArgs) {
        ev.ValidatorHasExited,
        ev.ValidatorsVoteWithTheMajority,
        ev.ColdStateCheckpoint,
        ev.ForkTransition,
        ev.AltairForkTransition,
        ev.BellatrixForkTransition,
        ev.APIMiddlewareVerifyIntegrity,
        ev.APIGatewayV1Alpha1VerifyIntegrity,
        ev.FinishedSyncing,
        ev.AllNodesHaveSameHead,
        ev.ValidatorSyncParticipation,
        ev.TransactionsPresent,
    }
    testConfig := &types.E2EConfig{
        BeaconFlags: []string{

@@ -31,6 +31,7 @@ type ports struct {
    Eth1RPCPort int
    Eth1AuthRPCPort int
    Eth1WSPort int
    Eth1ProxyPort int
    PrysmBeaconNodeRPCPort int
    PrysmBeaconNodeUDPPort int
    PrysmBeaconNodeTCPPort int
@@ -69,6 +70,12 @@ var StandardLighthouseNodeCount = 2
// DepositCount is the number of deposits E2E makes on a separate validator client.
var DepositCount = uint64(64)

// NumOfExecEngineTxs is the number of transactions sent to the execution engine.
var NumOfExecEngineTxs = uint64(200)

// ExpectedExecEngineTxsThreshold is the portion of execution engine transactions we expect to find in blocks.
var ExpectedExecEngineTxsThreshold = 0.7

// Base port values.
const (
    portSpan = 50
@@ -80,6 +87,7 @@ const (
    Eth1RPCPort = Eth1Port + portSpan
    Eth1WSPort = Eth1Port + 2*portSpan
    Eth1AuthRPCPort = Eth1Port + 3*portSpan
    Eth1ProxyPort = Eth1Port + 4*portSpan

    PrysmBeaconNodeRPCPort = 4150
    PrysmBeaconNodeUDPPort = PrysmBeaconNodeRPCPort + portSpan
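
To make the derived port layout concrete: if, hypothetically, Eth1Port were 3100 (its actual value is defined elsewhere in this file and not shown here), then with portSpan = 50 the offsets give Eth1RPCPort = 3150, Eth1WSPort = 3200, Eth1AuthRPCPort = 3250, and the newly added Eth1ProxyPort = 3300, keeping every eth1-related service a fixed span apart.
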
@@ -227,6 +235,10 @@ func initializeStandardPorts(shardCount, shardIndex int, ports *ports, existingR
    if err != nil {
        return err
    }
    eth1ProxyPort, err := port(Eth1ProxyPort, shardCount, shardIndex, existingRegistrations)
    if err != nil {
        return err
    }
    beaconNodeRPCPort, err := port(PrysmBeaconNodeRPCPort, shardCount, shardIndex, existingRegistrations)
    if err != nil {
        return err
@@ -269,6 +281,7 @@ func initializeStandardPorts(shardCount, shardIndex int, ports *ports, existingR
    ports.Eth1RPCPort = eth1RPCPort
    ports.Eth1AuthRPCPort = eth1AuthPort
    ports.Eth1WSPort = eth1WSPort
    ports.Eth1ProxyPort = eth1ProxyPort
    ports.PrysmBeaconNodeRPCPort = beaconNodeRPCPort
    ports.PrysmBeaconNodeUDPPort = beaconNodeUDPPort
    ports.PrysmBeaconNodeTCPPort = beaconNodeTCPPort

@@ -13,6 +13,7 @@ go_library(
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/db/testing:go_default_library",

@@ -9,6 +9,7 @@ import (
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
    coreTime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -57,6 +58,7 @@ func startChainService(t *testing.T, st state.BeaconState, block block.SignedBea
    blockchain.WithStateNotifier(&mock.MockStateNotifier{}),
    blockchain.WithAttestationPool(attestations.NewPool()),
    blockchain.WithDepositCache(depositCache),
    blockchain.WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
)
service, err := blockchain.NewService(context.Background(), opts...)
require.NoError(t, err)

@@ -69,7 +69,7 @@ type ValidatorService struct {
    db db.Database
    grpcHeaders []string
    graffiti []byte
    web3SignerConfig *remote_web3signer.SetupConfig
    Web3SignerConfig *remote_web3signer.SetupConfig
    feeRecipientConfig *validator_service_config.FeeRecipientConfig
}

@@ -122,7 +122,7 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e
    interopKeysConfig: cfg.InteropKeysConfig,
    graffitiStruct: cfg.GraffitiStruct,
    logDutyCountDown: cfg.LogDutyCountDown,
    web3SignerConfig: cfg.Web3SignerConfig,
    Web3SignerConfig: cfg.Web3SignerConfig,
    feeRecipientConfig: cfg.FeeRecipientConfig,
}, nil
}
@@ -204,7 +204,7 @@ func (v *ValidatorService) Start() {
    graffitiOrderedIndex: graffitiOrderedIndex,
    eipImportBlacklistedPublicKeys: slashablePublicKeys,
    logDutyCountDown: v.logDutyCountDown,
    Web3SignerConfig: v.web3SignerConfig,
    Web3SignerConfig: v.Web3SignerConfig,
    feeRecipientConfig: v.feeRecipientConfig,
    walletIntializedChannel: make(chan *wallet.Wallet, 1),
}

@@ -117,6 +117,7 @@ func (v *validator) WaitForKeymanagerInitialization(ctx context.Context) error {
    }

    if v.useWeb && v.wallet == nil {
        log.Info("Waiting for keymanager to initialize validator client with web UI")
        // if wallet is not set, wait for it to be set through the UI
        km, err := waitForWebWalletInitialization(ctx, v.walletInitializedFeed, v.walletIntializedChannel)
        if err != nil {

@@ -32,5 +32,6 @@ go_test(
        "//validator/keymanager/derived:go_default_library",
        "//validator/keymanager/local:go_default_library",
        "//validator/keymanager/remote:go_default_library",
        "//validator/keymanager/remote-web3signer:go_default_library",
    ],
)

@@ -28,6 +28,7 @@ go_library(
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

@@ -36,8 +37,10 @@ go_test(
    srcs = ["keymanager_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//config/fieldparams:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/eth/service:go_default_library",
        "//proto/prysm/v1alpha1/validator-client:go_default_library",
        "//testing/require:go_default_library",
        "//validator/keymanager/remote-web3signer/internal:go_default_library",

@@ -1,6 +1,7 @@
package remote_web3signer

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
@@ -20,6 +21,7 @@ import (
    remote_utils "github.com/prysmaticlabs/prysm/validator/keymanager/remote-utils"
    "github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer/internal"
    v1 "github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer/v1"
    log "github.com/sirupsen/logrus"
)

// SetupConfig includes configuration values for initializing.
@@ -48,6 +50,7 @@ type Keymanager struct {
    providedPublicKeys [][48]byte
    accountsChangedFeed *event.Feed
    validator *validator.Validate
    publicKeysUrlCalled bool
}

// NewKeymanager instantiates a new web3signer key manager.
@@ -55,12 +58,6 @@ func NewKeymanager(_ context.Context, cfg *SetupConfig) (*Keymanager, error) {
    if cfg.BaseEndpoint == "" || !bytesutil.IsValidRoot(cfg.GenesisValidatorsRoot) {
        return nil, fmt.Errorf("invalid setup config, one or more configs are empty: BaseEndpoint: %v, GenesisValidatorsRoot: %#x", cfg.BaseEndpoint, cfg.GenesisValidatorsRoot)
    }
    if cfg.PublicKeysURL != "" && len(cfg.ProvidedPublicKeys) != 0 {
        return nil, errors.New("Either a provided list of public keys or a URL to a list of public keys must be provided, but not both")
    }
    if cfg.PublicKeysURL == "" && len(cfg.ProvidedPublicKeys) == 0 {
        return nil, errors.New("no valid public key options provided")
    }
    client, err := internal.NewApiClient(cfg.BaseEndpoint)
    if err != nil {
        return nil, errors.Wrap(err, "could not create apiClient")
@@ -72,6 +69,7 @@ func NewKeymanager(_ context.Context, cfg *SetupConfig) (*Keymanager, error) {
    publicKeysURL: cfg.PublicKeysURL,
    providedPublicKeys: cfg.ProvidedPublicKeys,
    validator: validator.New(),
    publicKeysUrlCalled: false,
}, nil
}

@@ -79,12 +77,14 @@ func NewKeymanager(_ context.Context, cfg *SetupConfig) (*Keymanager, error) {
// from the remote server or from the provided keys if there are no existing public keys set
// or provides the existing keys in the keymanager.
func (km *Keymanager) FetchValidatingPublicKeys(ctx context.Context) ([][fieldparams.BLSPubkeyLength]byte, error) {
    if km.publicKeysURL != "" && len(km.providedPublicKeys) == 0 {
    if km.publicKeysURL != "" && !km.publicKeysUrlCalled {
        providedPublicKeys, err := km.client.GetPublicKeys(ctx, km.publicKeysURL)
        if err != nil {
            erroredResponsesTotal.Inc()
            return nil, errors.Wrap(err, fmt.Sprintf("could not get public keys from remote server url: %v", km.publicKeysURL))
        }
        // Make sure that if the public keys are deleted, the validator does not call the URL again.
        km.publicKeysUrlCalled = true
        km.providedPublicKeys = providedPublicKeys
    }
    return km.providedPublicKeys, nil
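
The new publicKeysUrlCalled flag changes observable behavior: previously, a key list emptied via deletion would be silently repopulated from the URL on the next fetch. A hedged sketch of the new flow, using the DeletePublicKeys method added below (the test wiring here is assumed, not taken from this diff):

keys, _ := km.FetchValidatingPublicKeys(ctx) // First call fetches from publicKeysURL.
_, _ = km.DeletePublicKeys(ctx, keys)        // Empties km.providedPublicKeys.
keys, _ = km.FetchValidatingPublicKeys(ctx)  // publicKeysUrlCalled is true: no re-fetch.
// len(keys) == 0, so the deletion sticks.
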
@@ -231,13 +231,8 @@ func getSignRequestJson(ctx context.Context, validator *validator.Validate, requ
}

// SubscribeAccountChanges returns the event subscription for changes to public keys.
func (*Keymanager) SubscribeAccountChanges(_ chan [][48]byte) event.Subscription {
    // Not used right now.
    // Returns a stub for the time being as there is a danger of being slashed if the apiClient reloads keys dynamically.
    // Because there is no way to dynamically reload keys, add or remove remote keys we are returning a stub without any event updates for the time being.
    return event.NewSubscription(func(i <-chan struct{}) error {
        return nil
    })
func (km *Keymanager) SubscribeAccountChanges(pubKeysChan chan [][fieldparams.BLSPubkeyLength]byte) event.Subscription {
    return km.accountsChangedFeed.Subscribe(pubKeysChan)
}

// ExtractKeystores is not supported for the remote-web3signer keymanager type.
@@ -278,3 +273,73 @@ func (km *Keymanager) ListKeymanagerAccounts(ctx context.Context, cfg keymanager
    remote_utils.DisplayRemotePublicKeys(validatingPubKeys)
    return nil
}

// AddPublicKeys imports a list of public keys into the keymanager for web3signer use. Returns a status with message for each key.
func (km *Keymanager) AddPublicKeys(ctx context.Context, pubKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.ImportedRemoteKeysStatus, error) {
    if ctx == nil {
        return nil, errors.New("context is nil")
    }
    importedRemoteKeysStatuses := make([]*ethpbservice.ImportedRemoteKeysStatus, len(pubKeys))
    for i, pubKey := range pubKeys {
        found := false
        for _, key := range km.providedPublicKeys {
            if bytes.Equal(key[:], pubKey[:]) {
                found = true
                break
            }
        }
        if found {
            importedRemoteKeysStatuses[i] = &ethpbservice.ImportedRemoteKeysStatus{
                Status: ethpbservice.ImportedRemoteKeysStatus_DUPLICATE,
                Message: fmt.Sprintf("Duplicate pubkey: %v, already in use", hexutil.Encode(pubKey[:])),
            }
            continue
        }
        km.providedPublicKeys = append(km.providedPublicKeys, pubKey)
        importedRemoteKeysStatuses[i] = &ethpbservice.ImportedRemoteKeysStatus{
            Status: ethpbservice.ImportedRemoteKeysStatus_IMPORTED,
            Message: fmt.Sprintf("Successfully added pubkey: %v", hexutil.Encode(pubKey[:])),
        }
        log.Debug("Added pubkey to keymanager for web3signer", "pubkey", hexutil.Encode(pubKey[:]))
    }
    km.accountsChangedFeed.Send(km.providedPublicKeys)
    return importedRemoteKeysStatuses, nil
}

// DeletePublicKeys removes a list of public keys from the keymanager for web3signer use. Returns a status with message for each key.
func (km *Keymanager) DeletePublicKeys(ctx context.Context, pubKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.DeletedRemoteKeysStatus, error) {
    if ctx == nil {
        return nil, errors.New("context is nil")
    }
    deletedRemoteKeysStatuses := make([]*ethpbservice.DeletedRemoteKeysStatus, len(pubKeys))
    if len(km.providedPublicKeys) == 0 {
        for i := range deletedRemoteKeysStatuses {
            deletedRemoteKeysStatuses[i] = &ethpbservice.DeletedRemoteKeysStatus{
                Status: ethpbservice.DeletedRemoteKeysStatus_NOT_FOUND,
                Message: "No pubkeys are set in validator",
            }
        }
        return deletedRemoteKeysStatuses, nil
    }
    for i, pubkey := range pubKeys {
        for in, key := range km.providedPublicKeys {
            if bytes.Equal(key[:], pubkey[:]) {
                km.providedPublicKeys = append(km.providedPublicKeys[:in], km.providedPublicKeys[in+1:]...)
                deletedRemoteKeysStatuses[i] = &ethpbservice.DeletedRemoteKeysStatus{
                    Status: ethpbservice.DeletedRemoteKeysStatus_DELETED,
                    Message: fmt.Sprintf("Successfully deleted pubkey: %v", hexutil.Encode(pubkey[:])),
                }
                log.Debug("Deleted pubkey from keymanager for web3signer", "pubkey", hexutil.Encode(pubkey[:]))
                break
            }
        }
        if deletedRemoteKeysStatuses[i] == nil {
            deletedRemoteKeysStatuses[i] = &ethpbservice.DeletedRemoteKeysStatus{
                Status: ethpbservice.DeletedRemoteKeysStatus_NOT_FOUND,
                Message: fmt.Sprintf("Pubkey: %v not found", hexutil.Encode(pubkey[:])),
            }
        }
    }
    km.accountsChangedFeed.Send(km.providedPublicKeys)
    return deletedRemoteKeysStatuses, nil
}
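
A short usage sketch of the two new methods' semantics (the surrounding wiring is assumed): each input key maps positionally to a returned status, so callers can report per-key outcomes:

statuses, err := km.AddPublicKeys(ctx, pubKeys)
if err != nil {
    return err
}
for i, s := range statuses {
    // Status is IMPORTED for new keys and DUPLICATE for keys already present;
    // DeletePublicKeys returns DELETED or NOT_FOUND analogously.
    log.WithField("pubkey", hexutil.Encode(pubKeys[i][:])).Info(s.Message)
}

Both methods also push the updated key list onto accountsChangedFeed, which SubscribeAccountChanges now exposes, so listeners observe additions and deletions immediately.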

Some files were not shown because too many files have changed in this diff.