Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 14:28:09 -05:00)

Compare commits (12 commits): 54ae2b32b2, 58f4ba758c, 64f64f06bf, e70055733f, 36e4f49af0, d98428dec4, 00b92e01d3, b84c1aa3ea, ca5adbf7e4, 621e149dce, a083b7a0a5, dd5995b665
@@ -138,6 +138,26 @@ var (
		Name: "state_balance_cache_miss",
		Help: "Count the number of state balance cache hits.",
	})
+	newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "new_payload_valid_node_count",
+		Help: "Count the number of valid nodes after newPayload EE call",
+	})
+	newPayloadOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "new_payload_optimistic_node_count",
+		Help: "Count the number of optimistic nodes after newPayload EE call",
+	})
+	newPayloadInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "new_payload_invalid_node_count",
+		Help: "Count the number of invalid nodes after newPayload EE call",
+	})
+	forkchoiceUpdatedValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "forkchoice_updated_valid_node_count",
+		Help: "Count the number of valid nodes after forkchoiceUpdated EE call",
+	})
+	forkchoiceUpdatedOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "forkchoice_updated_optimistic_node_count",
+		Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
+	})
 )

// reportSlotMetrics reports slot related metrics.
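Editorial note: the counters added above are registered on the default Prometheus registry through promauto, so they show up on the node's metrics endpoint without extra wiring. A minimal, self-contained sketch of that mechanism (the counter name, port, and program below are illustrative, not part of this diff):

	package main

	import (
		"net/http"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promauto"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	// exampleCounter is a hypothetical counter, registered on the default
	// registry the same way the counters in the hunk above are.
	var exampleCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "example_new_payload_valid_node_count",
		Help: "Illustration only.",
	})

	func main() {
		exampleCounter.Inc() // increments appear in the scrape output
		http.Handle("/metrics", promhttp.Handler())
		_ = http.ListenAndServe(":2112", nil)
	}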
@@ -83,16 +83,18 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
	if err != nil {
		switch err {
		case powchain.ErrAcceptedSyncingPayloadStatus:
			forkchoiceUpdatedOptimisticNodeCount.Inc()
			log.WithFields(logrus.Fields{
-				"headSlot":      headBlk.Slot(),
-				"headHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
-				"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
+				"headSlot":                  headBlk.Slot(),
+				"headPayloadBlockHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
+				"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
			}).Info("Called fork choice updated with optimistic block")
			return payloadID, nil
		default:
			return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
		}
	}
	forkchoiceUpdatedValidNodeCount.Inc()
	if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
		return nil, errors.Wrap(err, "could not set block to valid")
	}
@@ -135,12 +137,14 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
	if err != nil {
		switch err {
		case powchain.ErrAcceptedSyncingPayloadStatus:
			newPayloadOptimisticNodeCount.Inc()
			log.WithFields(logrus.Fields{
-				"slot":      blk.Block().Slot(),
-				"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
+				"slot":             blk.Block().Slot(),
+				"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
			}).Info("Called new payload with optimistic block")
			return false, nil
		case powchain.ErrInvalidPayloadStatus:
			newPayloadInvalidNodeCount.Inc()
			root, err := blk.Block().HashTreeRoot()
			if err != nil {
				return false, err
@@ -157,7 +161,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
			return false, errors.Wrap(err, "could not validate execution payload from execution engine")
		}
	}

	newPayloadValidNodeCount.Inc()
	// During the transition event, the transition block should be verified for sanity.
	if blocks.IsPreBellatrixVersion(preStateVersion) {
		// Handle case where pre-state is Altair but block contains payload.
@@ -127,8 +127,8 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
		}
	}

-	if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
-		return err
+	if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
+		return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
	}
	if isValidPayload {
		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -148,6 +148,9 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
		return err
	}

+	if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
+		return err
+	}
	// If slasher is configured, forward the attestations in the block via
	// an event feed for processing.
	if features.Get().EnableSlasher {
@@ -609,9 +612,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
	if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
		return errors.Wrap(err, "could not save state")
	}
-	if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
-		return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
-	}
	return nil
}
@@ -36,7 +36,7 @@ type DepositFetcher interface {
	DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
	DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
	FinalizedDeposits(ctx context.Context) *FinalizedDeposits
-	NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
+	NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
}

// FinalizedDeposits stores the trie of deposits that have been included
@@ -246,7 +246,7 @@ func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposit

// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
-func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
+func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
	ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
	defer span.End()
	dc.depositsLock.RLock()
@@ -256,10 +256,9 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
		return dc.allDeposits(untilBlk)
	}

-	lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
	var deposits []*ethpb.Deposit
	for _, d := range dc.deposits {
-		if (d.Index > lastFinalizedDepositIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
+		if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
			deposits = append(deposits, d.Deposit)
		}
	}
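For orientation, a sketch of the call pattern implied by the new signature: the caller now reads the finalized Merkle trie index once and passes it in, instead of the cache re-deriving it under its own lock. The helper name and package qualifiers below are assumptions for illustration, not code from this diff:

	// pendingDeposits is a hypothetical helper. DepositFetcher is the interface
	// from the hunk above; the import path is assumed.
	func pendingDeposits(ctx context.Context, fetcher depositcache.DepositFetcher, untilBlk *big.Int) []*ethpb.Deposit {
		finalized := fetcher.FinalizedDeposits(ctx)
		return fetcher.NonFinalizedDeposits(ctx, finalized.MerkleTrieIndex, untilBlk)
	}

The test hunks and the depositTrie change later in this comparison follow the same pattern.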
@@ -554,7 +554,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
	})
	dc.InsertFinalizedDeposits(context.Background(), 1)

-	deps := dc.NonFinalizedDeposits(context.Background(), nil)
+	deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
	assert.Equal(t, 2, len(deps))
}

@@ -611,7 +611,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
	})
	dc.InsertFinalizedDeposits(context.Background(), 1)

-	deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
+	deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
	assert.Equal(t, 1, len(deps))
}
@@ -155,7 +155,7 @@ func (_ *Service) FinalizedDeposits(_ context.Context) *depositcache.FinalizedDe
}

// NonFinalizedDeposits mocks out the deposit cache functionality for interop.
-func (_ *Service) NonFinalizedDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
+func (_ *Service) NonFinalizedDeposits(_ context.Context, _ int64, _ *big.Int) []*ethpb.Deposit {
	return []*ethpb.Deposit{}
}
@@ -51,10 +51,4 @@ var (
			Help: "The number of times pruning happened.",
		},
	)
-	validatedCount = promauto.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "doublylinkedtree_validated_count",
-			Help: "The number of blocks that have been fully validated.",
-		},
-	)
)

@@ -120,7 +120,6 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
	}

	n.optimistic = false
-	validatedCount.Inc()
	return n.parent.setNodeAndParentValidated(ctx)
}
@@ -51,10 +51,4 @@ var (
			Help: "The number of times pruning happened.",
		},
	)
-	validatedNodesCount = promauto.NewCounter(
-		prometheus.CounterOpts{
-			Name: "proto_array_validated_nodes_count",
-			Help: "The number of nodes that have been fully validated.",
-		},
-	)
)

@@ -43,7 +43,6 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) er
		if index == NonExistentNode {
			break
		}
-		validatedNodesCount.Inc()
	}
	return nil
}
@@ -614,16 +614,22 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
		node := copyNode(s.nodes[idx])
		parentIdx, ok := canonicalNodesMap[node.parent]
		if ok {
-			s.nodesIndices[node.root] = uint64(len(canonicalNodes))
-			canonicalNodesMap[idx] = uint64(len(canonicalNodes))
+			currentIndex := uint64(len(canonicalNodes))
+			s.nodesIndices[node.root] = currentIndex
+			s.payloadIndices[node.payloadHash] = currentIndex
+			canonicalNodesMap[idx] = currentIndex
			node.parent = parentIdx
			canonicalNodes = append(canonicalNodes, node)
		} else {
-			// Remove node and synced tip that is not part of finalized branch.
+			// Remove node that is not part of finalized branch.
			delete(s.nodesIndices, node.root)
+			delete(s.canonicalNodes, node.root)
+			delete(s.payloadIndices, node.payloadHash)
		}
	}
	s.nodesIndices[finalizedRoot] = uint64(0)
+	s.canonicalNodes[finalizedRoot] = true
+	s.payloadIndices[finalizedNode.payloadHash] = uint64(0)

	// Recompute the best child and descendant for each canonical nodes.
	for _, node := range canonicalNodes {
@@ -375,7 +375,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
		parent: uint64(numOfNodes - 2),
	})
	indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
-	s := &Store{nodes: nodes, nodesIndices: indices}
+	s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}

	// Finalized root is at index 99 so everything before 99 should be pruned.
	require.NoError(t, s.prune(context.Background(), indexToHash(99)))
@@ -413,7 +413,7 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
		parent: uint64(numOfNodes - 2),
	})

-	s := &Store{nodes: nodes, nodesIndices: indices}
+	s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}

	// Finalized root is at index 11 so everything before 11 should be pruned.
	require.NoError(t, s.prune(context.Background(), indexToHash(10)))
@@ -441,6 +441,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
			bestDescendant: 1,
			root:           indexToHash(uint64(0)),
			parent:         NonExistentNode,
+			payloadHash:    [32]byte{'A'},
		},
		{
			slot: 101,
@@ -448,6 +449,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
			bestChild:      NonExistentNode,
			bestDescendant: NonExistentNode,
			parent:         0,
+			payloadHash:    [32]byte{'B'},
		},
		{
			slot: 101,
@@ -455,6 +457,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
			parent:         0,
			bestChild:      NonExistentNode,
			bestDescendant: NonExistentNode,
+			payloadHash:    [32]byte{'C'},
		},
	}
	s := &Store{
@@ -465,9 +468,22 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
			indexToHash(uint64(1)): 1,
			indexToHash(uint64(2)): 2,
		},
+		canonicalNodes: map[[32]byte]bool{
+			indexToHash(uint64(0)): true,
+			indexToHash(uint64(1)): true,
+			indexToHash(uint64(2)): true,
+		},
+		payloadIndices: map[[32]byte]uint64{
+			[32]byte{'A'}: 0,
+			[32]byte{'B'}: 1,
+			[32]byte{'C'}: 2,
+		},
	}
	require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
-	require.Equal(t, len(s.nodes), 1)
+	require.Equal(t, 1, len(s.nodes))
+	require.Equal(t, 1, len(s.nodesIndices))
+	require.Equal(t, 1, len(s.canonicalNodes))
+	require.Equal(t, 1, len(s.payloadIndices))
}
// This test starts with the following branching diagram
@@ -482,25 +498,74 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
// J -- K -- L
//
//
-func TestStore_PruneSyncedTips(t *testing.T) {
+func TestStore_PruneBranched(t *testing.T) {
	ctx := context.Background()
-	f := setup(1, 1)
-
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
-	require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
-	f.store.pruneThreshold = 0
-	require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
-	require.Equal(t, 1, f.NodeCount())
+	tests := []struct {
+		finalizedRoot      [32]byte
+		wantedCanonical    [32]byte
+		wantedNonCanonical [32]byte
+		canonicalCount     int
+		payloadHash        [32]byte
+		payloadIndex       uint64
+		nonExistentPayload [32]byte
+	}{
+		{
+			[32]byte{'f'},
+			[32]byte{'f'},
+			[32]byte{'a'},
+			1,
+			[32]byte{'F'},
+			0,
+			[32]byte{'H'},
+		},
+		{
+			[32]byte{'d'},
+			[32]byte{'e'},
+			[32]byte{'i'},
+			3,
+			[32]byte{'E'},
+			1,
+			[32]byte{'C'},
+		},
+		{
+			[32]byte{'b'},
+			[32]byte{'f'},
+			[32]byte{'h'},
+			5,
+			[32]byte{'D'},
+			3,
+			[32]byte{'A'},
+		},
+	}
+
+	for _, tc := range tests {
+		f := setup(1, 1)
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
+		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
+		f.store.pruneThreshold = 0
+		require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'f'}))
+		require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
+		require.Equal(t, true, f.IsCanonical([32]byte{'f'}))
+
+		require.NoError(t, f.Prune(ctx, tc.finalizedRoot))
+		require.Equal(t, tc.canonicalCount, len(f.store.canonicalNodes))
+		require.Equal(t, true, f.IsCanonical(tc.wantedCanonical))
+		require.Equal(t, false, f.IsCanonical(tc.wantedNonCanonical))
+		require.Equal(t, tc.payloadIndex, f.store.payloadIndices[tc.payloadHash])
+		_, ok := f.store.payloadIndices[tc.nonExistentPayload]
+		require.Equal(t, false, ok)
+	}
}

func TestStore_LeadsToViableHead(t *testing.T) {
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
-        "auth.go",
        "block_cache.go",
        "block_reader.go",
        "check_transition_config.go",
@@ -16,6 +15,7 @@ go_library(
        "options.go",
        "prometheus.go",
        "provider.go",
+        "rpc_connection.go",
        "service.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain",
@@ -59,7 +59,6 @@ go_library(
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_ethereum_go_ethereum//ethclient:go_default_library",
        "@com_github_ethereum_go_ethereum//rpc:go_default_library",
-        "@com_github_golang_jwt_jwt_v4//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -74,7 +73,6 @@ go_test(
    name = "go_default_test",
    size = "medium",
    srcs = [
-        "auth_test.go",
        "block_cache_test.go",
        "block_reader_test.go",
        "check_transition_config_test.go",
@@ -125,7 +123,6 @@ go_test(
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_ethereum_go_ethereum//rpc:go_default_library",
        "@com_github_ethereum_go_ethereum//trie:go_default_library",
-        "@com_github_golang_jwt_jwt_v4//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -13,6 +13,7 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/config/params"
+	"github.com/prysmaticlabs/prysm/network"
	pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
	"github.com/sirupsen/logrus"
)
@@ -81,7 +82,7 @@ func (s *Service) checkTransitionConfiguration(
			return
		}
	case tm := <-ticker.C:
-		ctx, cancel := context.WithDeadline(ctx, tm.Add(DefaultRPCHTTPTimeout))
+		ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
		err = s.ExchangeTransitionConfiguration(ctx, cfg)
		s.handleExchangeConfigurationError(err)
		if !hasTtdReached {
@@ -1,9 +1,6 @@
package powchain

import (
-	"net/http"
-	"time"
-
	"github.com/ethereum/go-ethereum/common"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
@@ -11,11 +8,9 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/network"
	"github.com/prysmaticlabs/prysm/network/authorization"
)

-// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
-const DefaultRPCHTTPTimeout = time.Second * 6
-
type Option func(s *Service) error

// WithHttpEndpoints deduplicates and parses http endpoints for the powchain service to use,
@@ -38,20 +33,29 @@ func WithHttpEndpoints(endpointStrings []string) Option {
	}
}

-// WithJWTSecret for authenticating the execution node JSON-RPC endpoint.
-func WithJWTSecret(secret []byte) Option {
-	return func(c *Service) error {
+// WithHttpEndpointsAndJWTSecret for authenticating the execution node JSON-RPC endpoint.
+func WithHttpEndpointsAndJWTSecret(endpointStrings []string, secret []byte) Option {
+	return func(s *Service) error {
		if len(secret) == 0 {
			return nil
		}
-		authTransport := &jwtTransport{
-			underlyingTransport: http.DefaultTransport,
-			jwtSecret:           secret,
+		stringEndpoints := dedupEndpoints(endpointStrings)
+		endpoints := make([]network.Endpoint, len(stringEndpoints))
+		// Overwrite authorization type for all endpoints to be of a bearer
+		// type.
+		for i, e := range stringEndpoints {
+			hEndpoint := HttpEndpoint(e)
+			hEndpoint.Auth.Method = authorization.Bearer
+			hEndpoint.Auth.Value = string(secret)
+			endpoints[i] = hEndpoint
		}
-		c.cfg.httpRPCClient = &http.Client{
-			Timeout:   DefaultRPCHTTPTimeout,
-			Transport: authTransport,
+		// Select first http endpoint in the provided list.
+		var currEndpoint network.Endpoint
+		if len(endpointStrings) > 0 {
+			currEndpoint = endpoints[0]
		}
+		s.cfg.httpEndpoints = endpoints
+		s.cfg.currHttpEndpoint = currEndpoint
		return nil
	}
}
beacon-chain/powchain/rpc_connection.go (new file, 176 lines)
@@ -0,0 +1,176 @@
package powchain

import (
	"context"
	"fmt"
	"net/url"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
	gethRPC "github.com/ethereum/go-ethereum/rpc"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/config/params"
	contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
	"github.com/prysmaticlabs/prysm/io/logs"
	"github.com/prysmaticlabs/prysm/network"
	"github.com/prysmaticlabs/prysm/network/authorization"
)

func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
	client, err := s.newRPCClientWithAuth(ctx, currEndpoint)
	if err != nil {
		return errors.Wrap(err, "could not dial execution node")
	}
	// Attach the clients to the service struct.
	fetcher := ethclient.NewClient(client)
	s.rpcClient = client
	s.httpLogger = fetcher
	s.eth1DataFetcher = fetcher

	depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, fetcher)
	if err != nil {
		client.Close()
		return errors.Wrap(err, "could not initialize deposit contract caller")
	}
	s.depositContractCaller = depositContractCaller

	// Ensure we have the correct chain and deposit IDs.
	if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
		client.Close()
		return errors.Wrap(err, "could not make initial request to verify execution chain ID")
	}
	s.updateConnectedETH1(true)
	s.runError = nil
	return nil
}

// Every N seconds, defined as a backoffPeriod, attempts to re-establish an execution client
// connection and if this does not work, we fallback to the next endpoint if defined.
func (s *Service) pollConnectionStatus(ctx context.Context) {
	// Use a custom logger to only log errors
	logCounter := 0
	errorLogger := func(err error, msg string) {
		if logCounter > logThreshold {
			log.Errorf("%s: %v", msg, err)
			logCounter = 0
		}
		logCounter++
	}
	ticker := time.NewTicker(backOffPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
			if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
				errorLogger(err, "Could not connect to execution client endpoint")
				s.runError = err
				s.fallbackToNextEndpoint()
			}
		case <-s.ctx.Done():
			log.Debug("Received cancelled context,closing existing powchain service")
			return
		}
	}
}

// Forces to retry an execution client connection.
func (s *Service) retryExecutionClientConnection(ctx context.Context, err error) {
	s.runError = err
	s.updateConnectedETH1(false)
	// Back off for a while before redialing.
	time.Sleep(backOffPeriod)
	if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
		s.runError = err
		return
	}
	// Reset run error in the event of a successful connection.
	s.runError = nil
}

// This performs a health check on our primary endpoint, and if it
// is ready to serve we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint(ctx context.Context) {
	primaryEndpoint := s.cfg.httpEndpoints[0]
	// Return early if we are running on our primary
	// endpoint.
	if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
		return
	}

	if err := s.setupExecutionClientConnections(ctx, primaryEndpoint); err != nil {
		log.Debugf("Primary endpoint not ready: %v", err)
		return
	}
	s.updateCurrHttpEndpoint(primaryEndpoint)
}

// This is an inefficient way to search for the next endpoint, but given N is
// expected to be small, it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
	currEndpoint := s.cfg.currHttpEndpoint
	currIndex := 0
	totalEndpoints := len(s.cfg.httpEndpoints)

	for i, endpoint := range s.cfg.httpEndpoints {
		if endpoint.Equals(currEndpoint) {
			currIndex = i
			break
		}
	}
	nextIndex := currIndex + 1
	if nextIndex >= totalEndpoints {
		nextIndex = 0
	}
	s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
	if nextIndex != currIndex {
		log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
	}
}

// Initializes an RPC connection with authentication headers.
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
	// Need to handle ipc and http
	var client *gethRPC.Client
	u, err := url.Parse(endpoint.Url)
	if err != nil {
		return nil, err
	}
	switch u.Scheme {
	case "http", "https":
		client, err = gethRPC.DialHTTPWithClient(endpoint.Url, endpoint.HttpClient())
		if err != nil {
			return nil, err
		}
	case "":
		client, err = gethRPC.DialIPC(ctx, endpoint.Url)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
	}
	if endpoint.Auth.Method != authorization.None {
		header, err := endpoint.Auth.ToHeaderValue()
		if err != nil {
			return nil, err
		}
		client.SetHeader("Authorization", header)
	}
	return client, nil
}

// Checks the chain ID of the execution client to ensure
// it matches local parameters of what Prysm expects.
func ensureCorrectExecutionChain(ctx context.Context, client *ethclient.Client) error {
	cID, err := client.ChainID(ctx)
	if err != nil {
		return err
	}
	wantChainID := params.BeaconConfig().DepositChainID
	if cID.Uint64() != wantChainID {
		return fmt.Errorf("wanted chain ID %d, got %d", wantChainID, cID.Uint64())
	}
	return nil
}
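Editorial note: newRPCClientWithAuth above relies on url.Parse to tell HTTP(S) endpoints apart from IPC paths, since a bare filesystem path parses with an empty scheme. A small standalone illustration (not part of the diff):

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		for _, raw := range []string{"http://localhost:8551", "https://example.org:8551", "/tmp/geth.ipc"} {
			u, err := url.Parse(raw)
			if err != nil {
				panic(err)
			}
			// An HTTP(S) endpoint keeps its scheme; a plain IPC path parses with
			// an empty scheme, which selects the DialIPC branch above.
			fmt.Printf("%-28s scheme=%q\n", raw, u.Scheme)
		}
	}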
@@ -7,7 +7,6 @@ import (
	"context"
	"fmt"
	"math/big"
-	"net/http"
	"reflect"
	"runtime/debug"
	"sort"
@@ -39,10 +38,8 @@ import (
	"github.com/prysmaticlabs/prysm/container/trie"
	contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/io/logs"
	"github.com/prysmaticlabs/prysm/monitoring/clientstats"
	"github.com/prysmaticlabs/prysm/network"
	"github.com/prysmaticlabs/prysm/network/authorization"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	prysmTime "github.com/prysmaticlabs/prysm/time"
	"github.com/prysmaticlabs/prysm/time/slots"
@@ -114,6 +111,7 @@ type Chain interface {
// RPCDataFetcher defines a subset of methods conformed to by ETH1.0 RPC clients for
// fetching eth1 data from the clients.
type RPCDataFetcher interface {
+	Close()
	HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
	HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
	SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
@@ -121,6 +119,7 @@ type RPCDataFetcher interface {

// RPCClient defines the rpc methods required to interact with the eth1 node.
type RPCClient interface {
+	Close()
	BatchCall(b []gethRPC.BatchElem) error
	CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
}
@@ -135,7 +134,6 @@ type config struct {
	eth1HeaderReqLimit      uint64
	beaconNodeStatsUpdater  BeaconNodeStatsUpdater
	httpEndpoints           []network.Endpoint
-	httpRPCClient           *http.Client
	currHttpEndpoint        network.Endpoint
	finalizedStateAtStartup state.BeaconState
}
@@ -228,14 +226,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

// Start a web3 service's main event loop.
func (s *Service) Start() {

-	if err := s.connectToPowChain(); err != nil {
-		log.WithError(err).Fatal("Could not connect to execution endpoint")
+	if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
+		log.WithError(err).Error("Could not connect to execution endpoint")
	}

	log.WithFields(logrus.Fields{
		"endpoint": logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url),
	}).Info("Connected to Ethereum execution client RPC")
	// If the chain has not started already and we don't have access to eth1 nodes, we will not be
	// able to generate the genesis state.
	if !s.chainStartData.Chainstarted && s.cfg.currHttpEndpoint.Url == "" {
@@ -253,7 +246,7 @@ func (s *Service) Start() {
	s.isRunning = true

	// Poll the execution client connection and fallback if errors occur.
-	go s.pollConnectionStatus()
+	go s.pollConnectionStatus(s.ctx)

	// Check transition configuration for the engine API client in the background.
	go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))
@@ -266,7 +259,12 @@ func (s *Service) Stop() error {
	if s.cancel != nil {
		defer s.cancel()
	}
-	s.closeClients()
+	if s.rpcClient != nil {
+		s.rpcClient.Close()
+	}
+	if s.eth1DataFetcher != nil {
+		s.eth1DataFetcher.Close()
+	}
	return nil
}
@@ -338,10 +336,7 @@ func (s *Service) CurrentETH1Endpoint() string {

// CurrentETH1ConnectionError returns the error (if any) of the current connection.
func (s *Service) CurrentETH1ConnectionError() error {
-	httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
-	httpClient.Close()
-	rpcClient.Close()
-	return err
+	return s.runError
}

// ETH1Endpoints returns the slice of HTTP endpoint URLs (default is 0th element).
@@ -358,10 +353,17 @@ func (s *Service) ETH1Endpoints() []string {
func (s *Service) ETH1ConnectionErrors() []error {
	var errs []error
	for _, ep := range s.cfg.httpEndpoints {
-		httpClient, rpcClient, err := s.dialETH1Nodes(ep)
-		httpClient.Close()
-		rpcClient.Close()
-		errs = append(errs, err)
+		client, err := s.newRPCClientWithAuth(s.ctx, ep)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		if err := ensureCorrectExecutionChain(s.ctx, ethclient.NewClient(client)); err != nil {
+			client.Close()
+			errs = append(errs, err)
+			continue
+		}
+		client.Close()
	}
	return errs
}
@@ -376,146 +378,6 @@ func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
	return latestValidBlock, nil
}

func (s *Service) connectToPowChain() error {
	httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
	if err != nil {
		return errors.Wrap(err, "could not dial execution node")
	}

	depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, httpClient)
	if err != nil {
		return errors.Wrap(err, "could not initialize deposit contract caller")
	}

	if httpClient == nil || rpcClient == nil || depositContractCaller == nil {
		return errors.New("execution client RPC is nil")
	}
	s.httpLogger = httpClient
	s.eth1DataFetcher = httpClient
	s.depositContractCaller = depositContractCaller
	s.rpcClient = rpcClient

	s.updateConnectedETH1(true)
	s.runError = nil
	return nil
}

func (s *Service) dialETH1Nodes(endpoint network.Endpoint) (*ethclient.Client, *gethRPC.Client, error) {
	httpRPCClient, err := gethRPC.Dial(endpoint.Url)
	if err != nil {
		return nil, nil, err
	}
	if endpoint.Auth.Method != authorization.None {
		header, err := endpoint.Auth.ToHeaderValue()
		if err != nil {
			return nil, nil, err
		}
		httpRPCClient.SetHeader("Authorization", header)
	}
	httpClient := ethclient.NewClient(httpRPCClient)
	// Add a method to clean-up and close clients in the event
	// of any connection failure.
	closeClients := func() {
		httpRPCClient.Close()
		httpClient.Close()
	}
	// Make a simple call to ensure we are actually connected to a working node.
	cID, err := httpClient.ChainID(s.ctx)
	if err != nil {
		closeClients()
		return nil, nil, err
	}
	nID, err := httpClient.NetworkID(s.ctx)
	if err != nil {
		closeClients()
		return nil, nil, err
	}
	if cID.Uint64() != params.BeaconConfig().DepositChainID {
		closeClients()
		return nil, nil, fmt.Errorf("eth1 node using incorrect chain id, %d != %d", cID.Uint64(), params.BeaconConfig().DepositChainID)
	}
	if nID.Uint64() != params.BeaconConfig().DepositNetworkID {
		closeClients()
		return nil, nil, fmt.Errorf("eth1 node using incorrect network id, %d != %d", nID.Uint64(), params.BeaconConfig().DepositNetworkID)
	}

	return httpClient, httpRPCClient, nil
}

// closes down our active eth1 clients.
func (s *Service) closeClients() {
	gethClient, ok := s.rpcClient.(*gethRPC.Client)
	if ok {
		gethClient.Close()
	}
	httpClient, ok := s.eth1DataFetcher.(*ethclient.Client)
	if ok {
		httpClient.Close()
	}
}

func (s *Service) pollConnectionStatus() {
	// Use a custom logger to only log errors
	logCounter := 0
	errorLogger := func(err error, msg string) {
		if logCounter > logThreshold {
			log.Errorf("%s: %v", msg, err)
			logCounter = 0
		}
		logCounter++
	}
	ticker := time.NewTicker(backOffPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
			errConnect := s.connectToPowChain()
			if errConnect != nil {
				errorLogger(errConnect, "Could not connect to powchain endpoint")
				s.runError = errConnect
				s.fallbackToNextEndpoint()
				continue
			}
		case <-s.ctx.Done():
			log.Debug("Received cancelled context,closing existing powchain service")
			return
		}
	}
}

// checks if the eth1 node is healthy and ready to serve before
// fetching data from it.
func (s *Service) isEth1NodeSynced() (bool, error) {
	syncProg, err := s.eth1DataFetcher.SyncProgress(s.ctx)
	if err != nil {
		return false, err
	}
	if syncProg != nil {
		return false, nil
	}
	head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
	if err != nil {
		return false, err
	}
	return !eth1HeadIsBehind(head.Time), nil
}

// Reconnect to eth1 node in case of any failure.
func (s *Service) retryETH1Node(err error) {
	s.runError = err
	s.updateConnectedETH1(false)
	// Back off for a while before
	// resuming dialing the eth1 node.
	time.Sleep(backOffPeriod)
	if err := s.connectToPowChain(); err != nil {
		s.runError = err
		return
	}
	// Reset run error in the event of a successful connection.
	s.runError = nil
}

func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositContainer) error {
	if len(ctrs) == 0 {
		return nil
@@ -650,7 +512,7 @@ func (s *Service) handleETH1FollowDistance() {
	fiveMinutesTimeout := prysmTime.Now().Add(-5 * time.Minute)
	// check that web3 client is syncing
	if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) {
-		log.Warn("eth1 client is not syncing")
+		log.Warn("Execution client is not syncing")
	}
	if !s.chainStartData.Chainstarted {
		if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
@@ -680,6 +542,15 @@ func (s *Service) handleETH1FollowDistance() {
}

func (s *Service) initPOWService() {
+	// Use a custom logger to only log errors
+	logCounter := 0
+	errorLogger := func(err error, msg string) {
+		if logCounter > logThreshold {
+			log.Errorf("%s: %v", msg, err)
+			logCounter = 0
+		}
+		logCounter++
+	}

	// Run in a select loop to retry in the event of any failures.
	for {
@@ -690,8 +561,8 @@ func (s *Service) initPOWService() {
		ctx := s.ctx
		header, err := s.eth1DataFetcher.HeaderByNumber(ctx, nil)
		if err != nil {
-			log.Errorf("Unable to retrieve latest ETH1.0 chain header: %v", err)
-			s.retryETH1Node(err)
+			s.retryExecutionClientConnection(ctx, err)
+			errorLogger(err, "Unable to retrieve latest execution client header")
			continue
		}

@@ -700,14 +571,14 @@ func (s *Service) initPOWService() {
		s.latestEth1Data.BlockTime = header.Time

		if err := s.processPastLogs(ctx); err != nil {
-			log.Errorf("Unable to process past logs %v", err)
-			s.retryETH1Node(err)
+			s.retryExecutionClientConnection(ctx, err)
+			errorLogger(err, "Unable to process past deposit contract logs")
			continue
		}
		// Cache eth1 headers from our voting period.
		if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
-			log.Errorf("Unable to process past headers %v", err)
-			s.retryETH1Node(err)
+			s.retryExecutionClientConnection(ctx, err)
+			errorLogger(err, "Unable to cache headers for execution client votes")
			continue
		}
		// Handle edge case with embedded genesis state by fetching genesis header to determine
@@ -720,15 +591,15 @@ func (s *Service) initPOWService() {
			if genHash != [32]byte{} {
				genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
				if err != nil {
-					log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
-					s.retryETH1Node(err)
+					s.retryExecutionClientConnection(ctx, err)
+					errorLogger(err, "Unable to retrieve proof-of-stake genesis block data")
					continue
				}
				genBlock = genHeader.Number.Uint64()
			}
			s.chainStartData.GenesisBlock = genBlock
			if err := s.savePowchainData(ctx); err != nil {
-				log.Errorf("Unable to save powchain data: %v", err)
+				errorLogger(err, "Unable to save execution client data")
			}
		}
		return
@@ -757,17 +628,16 @@ func (s *Service) run(done <-chan struct{}) {
			head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
			if err != nil {
				log.WithError(err).Debug("Could not fetch latest eth1 header")
-				s.retryETH1Node(err)
				continue
			}
			if eth1HeadIsBehind(head.Time) {
+				s.retryExecutionClientConnection(s.ctx, err)
				log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
-				s.retryETH1Node(errFarBehind)
				continue
			}
			s.processBlockHeader(head)
			s.handleETH1FollowDistance()
-			s.checkDefaultEndpoint()
+			s.checkDefaultEndpoint(s.ctx)
		case <-chainstartTicker.C:
			if s.chainStartData.Chainstarted {
				chainstartTicker.Stop()
@@ -854,59 +724,6 @@ func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock
	return hdr.Number.Uint64(), nil
}

// This performs a health check on our primary endpoint, and if it
// is ready to serve we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint() {
	primaryEndpoint := s.cfg.httpEndpoints[0]
	// Return early if we are running on our primary
	// endpoint.
	if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
		return
	}

	httpClient, rpcClient, err := s.dialETH1Nodes(primaryEndpoint)
	if err != nil {
		log.Debugf("Primary endpoint not ready: %v", err)
		return
	}
	log.Info("Primary endpoint ready again, switching back to it")
	// Close the clients and let our main connection routine
	// properly connect with it.
	httpClient.Close()
	rpcClient.Close()
	// Close current active clients.
	s.closeClients()

	// Switch back to primary endpoint and try connecting
	// to it again.
	s.updateCurrHttpEndpoint(primaryEndpoint)
	s.retryETH1Node(nil)
}

// This is an inefficient way to search for the next endpoint, but given N is expected to be
// small ( < 25), it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
	currEndpoint := s.cfg.currHttpEndpoint
	currIndex := 0
	totalEndpoints := len(s.cfg.httpEndpoints)

	for i, endpoint := range s.cfg.httpEndpoints {
		if endpoint.Equals(currEndpoint) {
			currIndex = i
			break
		}
	}
	nextIndex := currIndex + 1
	if nextIndex >= totalEndpoints {
		nextIndex = 0
	}
	s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
	if nextIndex != currIndex {
		log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
	}
}

// initializes our service from the provided eth1data object by initializing all the relevant
// fields and data.
func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ETH1ChainData) error {
@@ -42,6 +42,8 @@ type goodLogger struct {
	backend *backends.SimulatedBackend
}

+func (_ *goodLogger) Close() {}
+
func (g *goodLogger) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
	if g.backend == nil {
		return new(event.Feed).Subscribe(ch), nil
@@ -80,6 +82,8 @@ type goodFetcher struct {
	backend *backends.SimulatedBackend
}

+func (_ *goodFetcher) Close() {}
+
func (g *goodFetcher) HeaderByHash(_ context.Context, hash common.Hash) (*gethTypes.Header, error) {
	if bytes.Equal(hash.Bytes(), common.BytesToHash([]byte{0}).Bytes()) {
		return nil, fmt.Errorf("expected block hash to be nonzero %v", hash)
@@ -225,10 +229,6 @@ func TestService_Eth1Synced(t *testing.T) {
	now := time.Now()
	assert.NoError(t, testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0))))
	testAcc.Backend.Commit()
-
-	synced, err := web3Service.isEth1NodeSynced()
-	require.NoError(t, err)
-	assert.Equal(t, true, synced, "Expected eth1 nodes to be synced")
}

func TestFollowBlock_OK(t *testing.T) {
@@ -480,8 +480,8 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {

	s.chainStartData.Chainstarted = true
	require.NoError(t, s.initDepositCaches(context.Background(), ctrs))

-	deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), nil)
+	fDeposits := s.cfg.depositCache.FinalizedDeposits(ctx)
+	deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), fDeposits.MerkleTrieIndex, nil)
	assert.Equal(t, 0, len(deps))
}
@@ -144,6 +144,8 @@ type RPCClient struct {
	Backend *backends.SimulatedBackend
}

+func (_ *RPCClient) Close() {}
+
func (*RPCClient) CallContext(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
	return nil
}
@@ -37,17 +37,24 @@ func (vs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethp
		case version.Phase0:
			phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
			if !ok {
-				log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
+				log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlock")
				continue
			}
			b.Block = &ethpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
		case version.Altair:
			phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
			if !ok {
-				log.Warn("Mismatch between version and block type, was expecting *v2.SignedBeaconBlockAltair")
+				log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockAltair")
				continue
			}
			b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
+		case version.Bellatrix:
+			phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockBellatrix)
+			if !ok {
+				log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockBellatrix")
+				continue
+			}
+			b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: phBlk}
		}

		if err := stream.Send(b); err != nil {
@@ -161,7 +161,7 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1

	finalizedDeposits := vs.DepositFetcher.FinalizedDeposits(ctx)
	depositTrie = finalizedDeposits.Deposits
-	upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, canonicalEth1DataHeight)
+	upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, finalizedDeposits.MerkleTrieIndex, canonicalEth1DataHeight)
	insertIndex := finalizedDeposits.MerkleTrieIndex + 1

	for _, dep := range upToEth1DataDeposits {
@@ -15,7 +15,7 @@ var (
	HTTPWeb3ProviderFlag = &cli.StringFlag{
		Name:  "http-web3provider",
		Usage: "A mainchain web3 provider string http endpoint. Can contain auth header as well in the format --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Basic xxx\" for project secret (base64 encoded) and --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Bearer xxx\" for jwt use",
-		Value: "",
+		Value: "http://localhost:8545",
	}
	// ExecutionJWTSecretFlag provides a path to a file containing a hex-encoded string representing a 32 byte secret
	// used to authenticate with an execution node via HTTP. This is required if using an HTTP connection, otherwise all requests
@@ -27,7 +27,7 @@ func FlagOptions(c *cli.Context) ([]powchain.Option, error) {
		powchain.WithEth1HeaderRequestLimit(c.Uint64(flags.Eth1HeaderReqLimit.Name)),
	}
	if len(jwtSecret) > 0 {
-		opts = append(opts, powchain.WithJWTSecret(jwtSecret))
+		opts = append(opts, powchain.WithHttpEndpointsAndJWTSecret(endpoints, jwtSecret))
	}
	return opts, nil
}
@@ -45,7 +45,7 @@ func E2ETestConfig() *BeaconChainConfig {
	e2eConfig.DepositChainID = 1337   // Chain ID of eth1 dev net.
	e2eConfig.DepositNetworkID = 1337 // Network ID of eth1 dev net.

-	// Altair Fork Parameters.
+	// Fork Parameters.
	e2eConfig.AltairForkEpoch = altairE2EForkEpoch
	e2eConfig.BellatrixForkEpoch = bellatrixE2EForkEpoch
@@ -3,24 +3,32 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
+        "auth.go",
        "endpoint.go",
        "external_ip.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/network",
    visibility = ["//visibility:public"],
-    deps = ["//network/authorization:go_default_library"],
+    deps = [
+        "//network/authorization:go_default_library",
+        "@com_github_golang_jwt_jwt_v4//:go_default_library",
+        "@com_github_pkg_errors//:go_default_library",
+    ],
)

go_test(
    name = "go_default_test",
    srcs = [
+        "auth_test.go",
        "endpoint_test.go",
        "external_ip_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//encoding/bytesutil:go_default_library",
        "//network/authorization:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
+        "@com_github_golang_jwt_jwt_v4//:go_default_library",
    ],
)
@@ -1,4 +1,4 @@
-package powchain
+package network

import (
	"net/http"
@@ -8,6 +8,9 @@ import (
	"github.com/pkg/errors"
)

+// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
+const DefaultRPCHTTPTimeout = time.Second * 6
+
// This creates a custom HTTP transport which we can attach to our HTTP client
// in order to inject JWT auth strings into our HTTP request headers. Authentication
// is required when interacting with an Ethereum engine API server via HTTP, and JWT
@@ -1,4 +1,4 @@
-package powchain
+package network

import (
	"net/http"
@@ -2,6 +2,7 @@ package network

import (
	"errors"
+	"net/http"
	"strings"

	"github.com/prysmaticlabs/prysm/network/authorization"
@@ -24,6 +25,22 @@ func (e Endpoint) Equals(other Endpoint) bool {
	return e.Url == other.Url && e.Auth.Equals(other.Auth)
}

+// HttpClient creates a http client object dependant
+// on the properties of the network endpoint.
+func (e Endpoint) HttpClient() *http.Client {
+	if e.Auth.Method != authorization.Bearer {
+		return http.DefaultClient
+	}
+	authTransport := &jwtTransport{
+		underlyingTransport: http.DefaultTransport,
+		jwtSecret:           []byte(e.Auth.Value),
+	}
+	return &http.Client{
+		Timeout:   DefaultRPCHTTPTimeout,
+		Transport: authTransport,
+	}
+}
+
// Equals compares two authorization data objects for equality.
func (d AuthorizationData) Equals(other AuthorizationData) bool {
	return d.Method == other.Method && d.Value == other.Value
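The jwtTransport referenced by HttpClient above lives in network/auth.go and its body is not shown in this comparison. As a rough sketch of the underlying Go pattern (illustrative names only; simplified to attach a static token rather than a freshly signed JWT derived from the secret, which is what the real transport does):

	// exampleJWTTransport is an illustrative http.RoundTripper that injects a
	// bearer token into every outgoing request.
	type exampleJWTTransport struct {
		underlying http.RoundTripper
		token      string
	}

	func (t *exampleJWTTransport) RoundTrip(req *http.Request) (*http.Response, error) {
		clone := req.Clone(req.Context())
		clone.Header.Set("Authorization", "Bearer "+t.token)
		return t.underlying.RoundTrip(clone)
	}

Wrapping http.DefaultTransport this way is what lets the engine API client authenticate without touching any of the JSON-RPC call sites.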
proto/eth/service/key_management.pb.go (generated, 1042 lines): file diff suppressed because it is too large.
@@ -123,6 +123,92 @@ func local_request_KeyManagement_DeleteKeystores_0(ctx context.Context, marshale

}

func request_KeyManagement_ListRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq emptypb.Empty
	var metadata runtime.ServerMetadata

	msg, err := client.ListRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KeyManagement_ListRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq emptypb.Empty
	var metadata runtime.ServerMetadata

	msg, err := server.ListRemoteKeys(ctx, &protoReq)
	return msg, metadata, err

}

func request_KeyManagement_ImportRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq ImportRemoteKeysRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.ImportRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KeyManagement_ImportRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq ImportRemoteKeysRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := server.ImportRemoteKeys(ctx, &protoReq)
	return msg, metadata, err

}

func request_KeyManagement_DeleteRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq DeleteRemoteKeysRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.DeleteRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KeyManagement_DeleteRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq DeleteRemoteKeysRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := server.DeleteRemoteKeys(ctx, &protoReq)
	return msg, metadata, err

}

// RegisterKeyManagementHandlerServer registers the http handlers for service KeyManagement to "mux".
// UnaryRPC :call KeyManagementServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -198,6 +284,75 @@ func RegisterKeyManagementHandlerServer(ctx context.Context, mux *runtime.ServeM

	})

	mux.Handle("GET", pattern_KeyManagement_ListRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListRemoteKeys")
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_KeyManagement_ListRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_ListRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_KeyManagement_ImportRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ImportRemoteKeys")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_KeyManagement_ImportRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_ImportRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("DELETE", pattern_KeyManagement_DeleteRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteRemoteKeys")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_KeyManagement_DeleteRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_DeleteRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -299,6 +454,66 @@ func RegisterKeyManagementHandlerClient(ctx context.Context, mux *runtime.ServeM
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_KeyManagement_ListRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListRemoteKeys")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_KeyManagement_ListRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_ListRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_KeyManagement_ImportRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ImportRemoteKeys")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_KeyManagement_ImportRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_ImportRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("DELETE", pattern_KeyManagement_DeleteRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteRemoteKeys")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_KeyManagement_DeleteRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_DeleteRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -308,6 +523,12 @@ var (
|
||||
pattern_KeyManagement_ImportKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "keystores"}, ""))
|
||||
|
||||
pattern_KeyManagement_DeleteKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "keystores"}, ""))
|
||||
|
||||
pattern_KeyManagement_ListRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
|
||||
|
||||
pattern_KeyManagement_ImportRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
|
||||
|
||||
pattern_KeyManagement_DeleteRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -316,4 +537,10 @@ var (
|
||||
forward_KeyManagement_ImportKeystores_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_KeyManagement_DeleteKeystores_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_KeyManagement_ListRemoteKeys_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_KeyManagement_ImportRemoteKeys_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_KeyManagement_DeleteRemoteKeys_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
|
||||
@@ -84,6 +84,27 @@ service KeyManagement {
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
rpc ListRemoteKeys(google.protobuf.Empty) returns (ListRemoteKeysResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/internal/eth/v1/remotekeys"
|
||||
};
|
||||
}
|
||||
|
||||
rpc ImportRemoteKeys(ImportRemoteKeysRequest) returns (ImportRemoteKeysResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/internal/eth/v1/remotekeys",
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
rpc DeleteRemoteKeys(DeleteRemoteKeysRequest) returns (DeleteRemoteKeysResponse) {
|
||||
option (google.api.http) = {
|
||||
delete: "/internal/eth/v1/remotekeys",
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
message ListKeystoresResponse {
|
||||
@@ -133,3 +154,55 @@ message DeletedKeystoreStatus {
|
||||
Status status = 1;
|
||||
string message = 2;
|
||||
}
|
||||
|
||||
|
||||
message ListRemoteKeysResponse {
|
||||
message Keystore {
|
||||
bytes pubkey = 1;
|
||||
string url = 2;
|
||||
bool readonly = 3;
|
||||
}
|
||||
repeated Keystore data = 1;
|
||||
}
|
||||
|
||||
message ImportRemoteKeysRequest {
|
||||
message Keystore {
|
||||
bytes pubkey = 1;
|
||||
string url = 2;
|
||||
}
|
||||
repeated Keystore remote_keys = 1;
|
||||
}
|
||||
|
||||
message ImportRemoteKeysResponse {
|
||||
repeated ImportedRemoteKeysStatus data = 1;
|
||||
}
|
||||
|
||||
message DeleteRemoteKeysRequest {
|
||||
repeated bytes pubkeys = 1;
|
||||
}
|
||||
|
||||
message DeleteRemoteKeysResponse {
|
||||
repeated DeletedRemoteKeysStatus data = 1;
|
||||
}
|
||||
|
||||
message ImportedRemoteKeysStatus {
|
||||
enum Status {
|
||||
UNKNOWN = 0;
|
||||
IMPORTED = 1;
|
||||
DUPLICATE = 2;
|
||||
ERROR = 3;
|
||||
}
|
||||
Status status = 1;
|
||||
string message = 2;
|
||||
}
|
||||
|
||||
message DeletedRemoteKeysStatus {
|
||||
enum Status {
|
||||
NOT_FOUND = 0;
|
||||
DELETED = 1;
|
||||
ERROR = 3; // Skips 2 to match DeletedKeystoreStatus, where ERROR = 3.
|
||||
}
|
||||
Status status = 1;
|
||||
string message = 2;
|
||||
}
|
||||
|
||||
|
||||
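The remote-keys RPCs added above are also exposed through the generated Go client. A hedged sketch of listing them over gRPC; the dial address is a placeholder and the generated field names assume standard protoc-gen-go casing:

package main

import (
	"context"
	"log"

	ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Dial the validator client's gRPC endpoint (placeholder address).
	conn, err := grpc.Dial("127.0.0.1:7000", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := ethpbservice.NewKeyManagementClient(conn)

	// ListRemoteKeys takes an empty request and returns the pubkeys, signer URLs
	// and readonly flags tracked by the web3signer keymanager.
	resp, err := client.ListRemoteKeys(context.Background(), &emptypb.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range resp.Data {
		log.Printf("pubkey=%#x url=%s readonly=%v", k.Pubkey, k.Url, k.Readonly)
	}
}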
1546	proto/prysm/v1alpha1/validator.pb.go (generated)
File diff suppressed because it is too large.
@@ -360,6 +360,9 @@ message StreamBlocksResponse {
|
||||
|
||||
// Representing an altair block.
|
||||
SignedBeaconBlockAltair altair_block = 2;
|
||||
|
||||
// Representing a bellatrix block.
|
||||
SignedBeaconBlockBellatrix bellatrix_block = 3;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -48,6 +48,7 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/slasher/simulator:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
|
||||
@@ -9,6 +9,7 @@ go_library(
|
||||
"lighthouse_beacon.go",
|
||||
"lighthouse_validator.go",
|
||||
"log.go",
|
||||
"proxy.go",
|
||||
"tracing_sink.go",
|
||||
"validator.go",
|
||||
"web3remotesigner.go",
|
||||
@@ -29,6 +30,7 @@ go_library(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//runtime/interop:go_default_library",
|
||||
"//testing/endtoend/components/eth1:go_default_library",
|
||||
"//testing/endtoend/helpers:go_default_library",
|
||||
|
||||
@@ -117,8 +117,10 @@ func (node *BeaconNode) Start(ctx context.Context) error {
|
||||
expectedNumOfPeers += 1
|
||||
}
|
||||
jwtPath := path.Join(e2e.TestParams.TestPath, "eth1data/"+strconv.Itoa(node.index)+"/")
|
||||
eth1Port := e2e.TestParams.Ports.Eth1AuthRPCPort
|
||||
if index == 0 {
|
||||
jwtPath = path.Join(e2e.TestParams.TestPath, "eth1data/miner/")
|
||||
eth1Port = e2e.TestParams.Ports.Eth1ProxyPort
|
||||
}
|
||||
jwtPath = path.Join(jwtPath, "geth/jwtsecret")
|
||||
args := []string{
|
||||
@@ -126,7 +128,7 @@ func (node *BeaconNode) Start(ctx context.Context) error {
|
||||
fmt.Sprintf("--%s=%s", cmdshared.LogFileName.Name, stdOutFile.Name()),
|
||||
fmt.Sprintf("--%s=%s", flags.DepositContractFlag.Name, e2e.TestParams.ContractAddress.Hex()),
|
||||
fmt.Sprintf("--%s=%d", flags.RPCPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeRPCPort+index),
|
||||
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, e2e.TestParams.Ports.Eth1RPCPort+index),
|
||||
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, eth1Port+index),
|
||||
fmt.Sprintf("--%s=%s", flags.ExecutionJWTSecretFlag.Name, jwtPath),
|
||||
fmt.Sprintf("--%s=%d", flags.MinSyncPeers.Name, 1),
|
||||
fmt.Sprintf("--%s=%d", cmdshared.P2PUDPPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeUDPPort+index),
|
||||
|
||||
511	testing/endtoend/components/proxy.go (new file)
@@ -0,0 +1,511 @@
|
||||
package components
|
||||
|
||||
// This file provides a proxy middleware for engine API requests between Ethereum
|
||||
// consensus clients and execution clients. It allows configuration of various
|
||||
// test cases using YAML files, as detailed in the accompanying README.md.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
|
||||
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
spoofingConfigFile = flag.String("spoofing-config", "tools/engine-proxy/spoofing_config.yaml", "")
|
||||
executionEndpoint = flag.String("execution-endpoint", "http://localhost:8545", "")
|
||||
fuzz = flag.Bool(
|
||||
"fuzz",
|
||||
false,
|
||||
"fuzzes requests and responses, overrides -spoofing-config if -fuzz is set",
|
||||
)
|
||||
)
|
||||
|
||||
type spoofingConfig struct {
|
||||
Requests []*spoof `yaml:"requests"`
|
||||
Responses []*spoof `yaml:"responses"`
|
||||
}
|
||||
|
||||
type spoof struct {
|
||||
Method string
|
||||
Fields map[string]interface{}
|
||||
}
|
||||
|
||||
type jsonRPCObject struct {
|
||||
Jsonrpc string `json:"jsonrpc"`
|
||||
Method string `json:"method"`
|
||||
Params []interface{} `json:"params"`
|
||||
ID uint64 `json:"id"`
|
||||
Result interface{} `json:"result"`
|
||||
}
|
||||
|
||||
type forkchoiceUpdatedResponse struct {
|
||||
Status *pb.PayloadStatus `json:"payloadStatus"`
|
||||
PayloadId *pb.PayloadIDBytes `json:"payloadId"`
|
||||
}
|
||||
|
||||
type InterceptorFunc func(reqBytes []byte, w http.ResponseWriter, r *http.Request) bool
|
||||
|
||||
type ProxyNode struct {
|
||||
address string
|
||||
srv *http.Server
|
||||
destAddress string
|
||||
logger *logrus.Logger
|
||||
logEntry *logrus.Entry
|
||||
interceptor InterceptorFunc
|
||||
backedUpRequests []*http.Request
|
||||
}
|
||||
|
||||
func NewProxyNode(proxyPort int, destAddress string) *ProxyNode {
|
||||
pn := &ProxyNode{}
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/", pn.proxyHandler())
|
||||
addr := "127.0.0.1:" + strconv.Itoa(proxyPort)
|
||||
srv := &http.Server{
|
||||
Handler: mux,
|
||||
Addr: addr,
|
||||
}
|
||||
pn.address = addr
|
||||
pn.srv = srv
|
||||
pn.destAddress = destAddress
|
||||
return pn
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) Start(ctx context.Context) error {
|
||||
stdOutFile, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, "eth1-proxy.log")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pn.logger = logrus.New()
|
||||
pn.logger.SetOutput(stdOutFile)
|
||||
pn.logEntry = pn.logger.WithField("prefix", "engine-proxy")
|
||||
pn.logEntry.Infof("Engine proxy now listening on address %s", pn.address)
|
||||
pn.srv.BaseContext = func(listener net.Listener) context.Context {
|
||||
return ctx
|
||||
}
|
||||
go func() {
|
||||
if err := pn.srv.ListenAndServe(); err != nil {
|
||||
pn.logEntry.Error(err)
|
||||
}
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return pn.srv.Shutdown(context.Background())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) AddInterceptor(icptr InterceptorFunc) {
|
||||
pn.interceptor = icptr
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) SyncingInterceptor() InterceptorFunc {
|
||||
return func(reqBytes []byte, w http.ResponseWriter, r *http.Request) bool {
|
||||
if !pn.checkIfValid(reqBytes) {
|
||||
return false
|
||||
}
|
||||
pn.returnSyncingResponse(reqBytes, w, r)
|
||||
return true
|
||||
}
|
||||
}
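A sketch of another interceptor built on the same InterceptorFunc hook; the dropped method and the 204 reply are purely illustrative and not part of this change:

// dropNewPayloadInterceptor swallows engine_newPayloadV1 calls instead of
// forwarding them, simulating an unresponsive execution client. Returning true
// tells proxyHandler the request was fully handled.
func dropNewPayloadInterceptor() InterceptorFunc {
	return func(reqBytes []byte, w http.ResponseWriter, r *http.Request) bool {
		if !strings.Contains(string(reqBytes), "engine_newPayloadV1") {
			return false // fall through to normal proxying
		}
		w.WriteHeader(http.StatusNoContent)
		return true
	}
}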
|
||||
|
||||
// Proxies requests from a consensus client to an execution client, spoofing requests
|
||||
// and/or responses as desired. Acts as a middleware useful for testing different merge scenarios.
|
||||
func (pn *ProxyNode) proxyHandler() func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestBytes, err := pn.parseRequestBytes(r)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not parse request")
|
||||
return
|
||||
}
|
||||
if pn.interceptor != nil && pn.interceptor(requestBytes, w, r) {
|
||||
return
|
||||
}
|
||||
|
||||
for _, rq := range pn.backedUpRequests {
|
||||
requestB, err := pn.parseRequestBytes(rq)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not parse request")
|
||||
return
|
||||
}
|
||||
destAddr := "http://" + pn.destAddress
|
||||
|
||||
// Create a new proxy request to the execution client.
|
||||
url := rq.URL
|
||||
url.Host = destAddr
|
||||
proxyReq, err := http.NewRequest(rq.Method, destAddr, rq.Body)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could create new request")
|
||||
return
|
||||
}
|
||||
|
||||
// Set the modified request as the proxy request body.
|
||||
proxyReq.Body = ioutil.NopCloser(bytes.NewBuffer(requestB))
|
||||
|
||||
// Required proxy headers for forwarding JSON-RPC requests to the execution client.
|
||||
proxyReq.Header.Set("Host", rq.Host)
|
||||
proxyReq.Header.Set("X-Forwarded-For", rq.RemoteAddr)
|
||||
proxyReq.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
proxyRes, err := client.Do(proxyReq)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not do client proxy")
|
||||
return
|
||||
}
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
// Pipe the proxy response to the original caller.
|
||||
if _, err = io.Copy(buf, proxyRes.Body); err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not copy proxy request body")
|
||||
return
|
||||
}
|
||||
if err = proxyRes.Body.Close(); err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not do client proxy")
|
||||
return
|
||||
}
|
||||
pn.logEntry.Infof("Queued Request Response: %s", buf.String())
|
||||
}
|
||||
pn.backedUpRequests = []*http.Request{}
|
||||
|
||||
/*
|
||||
var modifiedReq []byte
|
||||
if *fuzz {
|
||||
modifiedReq, err = fuzzRequest(requestBytes)
|
||||
} else {
|
||||
// We optionally spoof the request as desired.
|
||||
modifiedReq, err = spoofRequest(config, requestBytes)
|
||||
}
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to spoof request")
|
||||
return
|
||||
}
|
||||
*/
|
||||
|
||||
destAddr := "http://" + pn.destAddress
|
||||
|
||||
// Create a new proxy request to the execution client.
|
||||
url := r.URL
|
||||
url.Host = destAddr
|
||||
proxyReq, err := http.NewRequest(r.Method, destAddr, r.Body)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could create new request")
|
||||
return
|
||||
}
|
||||
|
||||
// Set the modified request as the proxy request body.
|
||||
proxyReq.Body = ioutil.NopCloser(bytes.NewBuffer(requestBytes))
|
||||
|
||||
// Required proxy headers for forwarding JSON-RPC requests to the execution client.
|
||||
proxyReq.Header.Set("Host", r.Host)
|
||||
proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
|
||||
proxyReq.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
proxyRes, err := client.Do(proxyReq)
|
||||
if err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not do client proxy")
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
// We optionally spoof the response as desired.
|
||||
var modifiedResp []byte
|
||||
if *fuzz {
|
||||
modifiedResp, err = fuzzResponse(proxyRes.Body)
|
||||
} else {
|
||||
modifiedResp, err = spoofResponse(config, requestBytes, proxyRes.Body)
|
||||
}
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to spoof response")
|
||||
return
|
||||
}
|
||||
*/
|
||||
|
||||
// Pipe the proxy response to the original caller.
|
||||
if _, err = io.Copy(w, proxyRes.Body); err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not copy proxy request body")
|
||||
return
|
||||
}
|
||||
if err = proxyRes.Body.Close(); err != nil {
|
||||
pn.logEntry.WithError(err).Error("Could not do client proxy")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) parseRequestBytes(req *http.Request) ([]byte, error) {
|
||||
requestBytes, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = req.Body.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pn.logEntry.Infof("%s", string(requestBytes))
|
||||
req.Body = ioutil.NopCloser(bytes.NewBuffer(requestBytes))
|
||||
return requestBytes, nil
|
||||
}
|
||||
|
||||
func fuzzRequest(requestBytes []byte) ([]byte, error) {
|
||||
// If the JSON request is not a JSON-RPC object, return the request as-is.
|
||||
jsonRequest, err := unmarshalRPCObject(requestBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return requestBytes, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if len(jsonRequest.Params) == 0 {
|
||||
return requestBytes, nil
|
||||
}
|
||||
params := make(map[string]interface{})
|
||||
if err := extractObjectFromJSONRPC(jsonRequest.Params[0], ¶ms); err != nil {
|
||||
return requestBytes, nil
|
||||
}
|
||||
// Roughly 5% chance to fuzz each field (r in [0,100) is compared against fuzzProbability).
|
||||
fuzzProbability := uint64(5) // TODO: Do not hardcode.
|
||||
for k, v := range params {
|
||||
// Each field has a probability of being fuzzed.
|
||||
r, err := rand.Int(rand.Reader, big.NewInt(100))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch obj := v.(type) {
|
||||
case string:
|
||||
if strings.Contains(obj, "0x") && r.Uint64() <= fuzzProbability {
|
||||
// Ignore hex strings of odd length.
|
||||
if len(obj)%2 != 0 {
|
||||
continue
|
||||
}
|
||||
// Generate some random hex string with same length and set it.
|
||||
// TODO: Experiment with length < current field, or junk in general.
|
||||
hexBytes, err := hexutil.Decode(obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fuzzedStr, err := randomHex(len(hexBytes))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("method", jsonRequest.Method).Warnf(
|
||||
"Fuzzing field %s, modifying value from %s to %s",
|
||||
k,
|
||||
obj,
|
||||
fuzzedStr,
|
||||
)
|
||||
params[k] = fuzzedStr
|
||||
}
|
||||
}
|
||||
}
|
||||
jsonRequest.Params[0] = params
|
||||
return json.Marshal(jsonRequest)
|
||||
}
|
||||
|
||||
// Parses the request from the consensus client and checks if the user desires
|
||||
// to spoof it based on the JSON-RPC method. If so, it returns the modified
|
||||
// request bytes which will be proxied to the execution client.
|
||||
func spoofRequest(config *spoofingConfig, requestBytes []byte) ([]byte, error) {
|
||||
// If the JSON request is not a JSON-RPC object, return the request as-is.
|
||||
jsonRequest, err := unmarshalRPCObject(requestBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return requestBytes, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if len(jsonRequest.Params) == 0 {
|
||||
return requestBytes, nil
|
||||
}
|
||||
desiredMethodsToSpoof := make(map[string]*spoof)
|
||||
for _, spoofReq := range config.Requests {
|
||||
desiredMethodsToSpoof[spoofReq.Method] = spoofReq
|
||||
}
|
||||
// If we don't want to spoof the request, just return the request as-is.
|
||||
spoofDetails, ok := desiredMethodsToSpoof[jsonRequest.Method]
|
||||
if !ok {
|
||||
return requestBytes, nil
|
||||
}
|
||||
|
||||
// TODO: Support methods with multiple params.
|
||||
params := make(map[string]interface{})
|
||||
if err := extractObjectFromJSONRPC(jsonRequest.Params[0], ¶ms); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for fieldToModify, fieldValue := range spoofDetails.Fields {
|
||||
if _, ok := params[fieldToModify]; !ok {
|
||||
continue
|
||||
}
|
||||
params[fieldToModify] = fieldValue
|
||||
}
|
||||
log.WithField("method", jsonRequest.Method).Infof("Modified request %v", params)
|
||||
jsonRequest.Params[0] = params
|
||||
return json.Marshal(jsonRequest)
|
||||
}
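A hedged sketch of exercising spoofRequest from a _test.go file in this package; the method name, field and values are illustrative only:

func TestSpoofRequest_ReplacesConfiguredField(t *testing.T) {
	cfg := &spoofingConfig{
		Requests: []*spoof{{
			Method: "engine_forkchoiceUpdatedV1",
			Fields: map[string]interface{}{"headBlockHash": "0xdeadbeef"},
		}},
	}
	body := []byte(`{"jsonrpc":"2.0","method":"engine_forkchoiceUpdatedV1",` +
		`"params":[{"headBlockHash":"0x01","safeBlockHash":"0x02"}],"id":1}`)
	out, err := spoofRequest(cfg, body)
	if err != nil {
		t.Fatal(err)
	}
	// The configured field is overwritten; all other params pass through untouched.
	if !strings.Contains(string(out), "0xdeadbeef") {
		t.Fatalf("expected spoofed headBlockHash, got %s", out)
	}
}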
|
||||
|
||||
func (pn *ProxyNode) checkIfValid(reqBytes []byte) bool {
|
||||
jsonRequest, err := unmarshalRPCObject(reqBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return false
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
if strings.Contains(jsonRequest.Method, "engine_forkchoiceUpdatedV1") ||
|
||||
strings.Contains(jsonRequest.Method, "engine_newPayloadV1") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (pn *ProxyNode) returnSyncingResponse(reqBytes []byte, w http.ResponseWriter, r *http.Request) {
|
||||
jsonRequest, err := unmarshalRPCObject(reqBytes)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
switch {
|
||||
case strings.Contains(jsonRequest.Method, "engine_forkchoiceUpdatedV1"):
|
||||
resp := &forkchoiceUpdatedResponse{
|
||||
Status: &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_SYNCING,
|
||||
},
|
||||
PayloadId: nil,
|
||||
}
|
||||
jResp := &jsonRPCObject{
|
||||
Method: jsonRequest.Method,
|
||||
ID: jsonRequest.ID,
|
||||
Result: resp,
|
||||
}
|
||||
rawResp, err := json.Marshal(jResp)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = w.Write(rawResp)
|
||||
_ = err
|
||||
return
|
||||
case strings.Contains(jsonRequest.Method, "engine_newPayloadV1"):
|
||||
resp := &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_SYNCING,
|
||||
}
|
||||
jResp := &jsonRPCObject{
|
||||
Method: jsonRequest.Method,
|
||||
ID: jsonRequest.ID,
|
||||
Result: resp,
|
||||
}
|
||||
rawResp, err := json.Marshal(jResp)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = w.Write(rawResp)
|
||||
_ = err
|
||||
pn.backedUpRequests = append(pn.backedUpRequests, r)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fuzzResponse(responseBody io.Reader) ([]byte, error) {
|
||||
responseBytes, err := ioutil.ReadAll(responseBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return responseBytes, nil
|
||||
}
|
||||
|
||||
// Parses the response body from the execution client and checks if the user desires
|
||||
// to spoof it based on the JSON-RPC method. If so, it returns the modified
|
||||
// response bytes which will be proxied to the consensus client.
|
||||
func spoofResponse(config *spoofingConfig, requestBytes []byte, responseBody io.Reader) ([]byte, error) {
|
||||
responseBytes, err := ioutil.ReadAll(responseBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If the JSON request is not a JSON-RPC object, return the request as-is.
|
||||
jsonRequest, err := unmarshalRPCObject(requestBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return responseBytes, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
jsonResponse, err := unmarshalRPCObject(responseBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return responseBytes, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
desiredMethodsToSpoof := make(map[string]*spoof)
|
||||
for _, spoofReq := range config.Responses {
|
||||
desiredMethodsToSpoof[spoofReq.Method] = spoofReq
|
||||
}
|
||||
// If we don't want to spoof the request, just return the request as-is.
|
||||
spoofDetails, ok := desiredMethodsToSpoof[jsonRequest.Method]
|
||||
if !ok {
|
||||
return responseBytes, nil
|
||||
}
|
||||
|
||||
// TODO: Support nested objects.
|
||||
params := make(map[string]interface{})
|
||||
if err := extractObjectFromJSONRPC(jsonResponse.Result, ¶ms); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for fieldToModify, fieldValue := range spoofDetails.Fields {
|
||||
if _, ok := params[fieldToModify]; !ok {
|
||||
continue
|
||||
}
|
||||
params[fieldToModify] = fieldValue
|
||||
}
|
||||
log.WithField("method", jsonRequest.Method).Infof("Modified response %v", params)
|
||||
jsonResponse.Result = params
|
||||
return json.Marshal(jsonResponse)
|
||||
}
|
||||
|
||||
func unmarshalRPCObject(rawBytes []byte) (*jsonRPCObject, error) {
|
||||
jsonObj := &jsonRPCObject{}
|
||||
if err := json.Unmarshal(rawBytes, jsonObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return jsonObj, nil
|
||||
}
|
||||
|
||||
func extractObjectFromJSONRPC(src interface{}, dst interface{}) error {
|
||||
rawResp, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(rawResp, dst)
|
||||
}
|
||||
|
||||
func randomHex(n int) (string, error) {
|
||||
b := make([]byte, n)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return "0x" + hex.EncodeToString(b), nil
|
||||
}
|
||||
@@ -7,8 +7,10 @@ package endtoend
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -27,6 +29,7 @@ import (
|
||||
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
|
||||
e2etypes "github.com/prysmaticlabs/prysm/testing/endtoend/types"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc"
|
||||
@@ -79,7 +82,10 @@ func (r *testRunner) run() {
|
||||
g.Go(func() error {
|
||||
return tracingSink.Start(ctx)
|
||||
})
|
||||
|
||||
proxyNode := components.NewProxyNode(e2e.TestParams.Ports.Eth1ProxyPort, "127.0.0.1:"+strconv.Itoa(e2e.TestParams.Ports.Eth1RPCPort))
|
||||
g.Go(func() error {
|
||||
return proxyNode.Start(ctx)
|
||||
})
|
||||
if multiClientActive {
|
||||
keyGen = components.NewKeystoreGenerator()
|
||||
|
||||
@@ -254,6 +260,17 @@ func (r *testRunner) run() {
|
||||
require.NoError(t, err)
|
||||
tickingStartTime := helpers.EpochTickerStartTime(genesis)
|
||||
|
||||
proxyNode.AddInterceptor(func(reqBytes []byte, w http.ResponseWriter, r *http.Request) bool {
|
||||
currSlot := slots.CurrentSlot(uint64(genesis.GenesisTime.AsTime().Unix()))
|
||||
if currSlot < params.BeaconConfig().SlotsPerEpoch.Mul(uint64(9)) {
|
||||
return false
|
||||
}
|
||||
if currSlot >= params.BeaconConfig().SlotsPerEpoch.Mul(uint64(10)) {
|
||||
return false
|
||||
}
|
||||
return proxyNode.SyncingInterceptor()(reqBytes, w, r)
|
||||
})
|
||||
|
||||
// Run assigned evaluators.
|
||||
if err := r.runEvaluators(conns, tickingStartTime); err != nil {
|
||||
return errors.Wrap(err, "one or more evaluators failed")
|
||||
@@ -299,6 +316,9 @@ func (r *testRunner) runEvaluators(conns []*grpc.ClientConn, tickingStartTime ti
|
||||
secondsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
ticker := helpers.NewEpochTicker(tickingStartTime, secondsPerEpoch)
|
||||
for currentEpoch := range ticker.C() {
|
||||
if currentEpoch == 9 || currentEpoch == 10 {
|
||||
continue
|
||||
}
|
||||
wg := new(sync.WaitGroup)
|
||||
for _, eval := range config.Evaluators {
|
||||
// Fix reference to evaluator as it will be running
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"api_gateway_v1alpha1.go",
|
||||
"api_middleware.go",
|
||||
"data.go",
|
||||
"execution_engine.go",
|
||||
"finality.go",
|
||||
"fork.go",
|
||||
"metrics.go",
|
||||
|
||||
54	testing/endtoend/evaluators/execution_engine.go (new file)
@@ -0,0 +1,54 @@
|
||||
package evaluators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
|
||||
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/endtoend/policies"
|
||||
"github.com/prysmaticlabs/prysm/testing/endtoend/types"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// TransactionsPresent is an evaluator to make sure transactions sent to the execution engine
|
||||
// appear in consensus client blocks' execution payload.
|
||||
var TransactionsPresent = types.Evaluator{
|
||||
Name: "transactions_present_at_epoch_%d",
|
||||
Policy: policies.AfterNthEpoch(helpers.BellatrixE2EForkEpoch),
|
||||
Evaluation: transactionsPresent,
|
||||
}
|
||||
|
||||
func transactionsPresent(conns ...*grpc.ClientConn) error {
|
||||
conn := conns[0]
|
||||
client := ethpb.NewBeaconChainClient(conn)
|
||||
chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get chain head")
|
||||
}
|
||||
req := ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch.Sub(1)}}
|
||||
blks, err := client.ListBeaconBlocks(context.Background(), req)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get blocks from beacon-chain")
|
||||
}
|
||||
expectedTxNum := int(math.Round(float64(params.E2ETestConfig().SlotsPerEpoch) * float64(e2e.NumOfExecEngineTxs) * e2e.ExpectedExecEngineTxsThreshold))
|
||||
var numberOfTxs int
|
||||
for _, ctr := range blks.BlockContainers {
|
||||
switch ctr.Block.(type) {
|
||||
case *ethpb.BeaconBlockContainer_BellatrixBlock:
|
||||
numberOfTxs += len(ctr.GetBellatrixBlock().Block.Body.ExecutionPayload.Transactions)
|
||||
}
|
||||
}
|
||||
if numberOfTxs < expectedTxNum {
|
||||
return errors.Errorf(
|
||||
"not enough transactions in execution payload, expected=%d vs actual=%d",
|
||||
expectedTxNum,
|
||||
numberOfTxs,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
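A quick check of the threshold arithmetic used above; the slots-per-epoch value is a placeholder, the E2E config's actual value may differ:

// With 32 slots per epoch (placeholder), e2e.NumOfExecEngineTxs = 200 and
// e2e.ExpectedExecEngineTxsThreshold = 0.7, the evaluator requires at least:
expectedTxNum := int(math.Round(32 * 200 * 0.7)) // = 4480 payload transactions in the inspected epoch
_ = expectedTxNum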
|
||||
@@ -14,14 +14,21 @@ import (
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// ForkTransition ensures that the hard fork has occurred successfully.
|
||||
var ForkTransition = types.Evaluator{
|
||||
Name: "fork_transition_%d",
|
||||
// AltairForkTransition ensures that the Altair hard fork has occurred successfully.
|
||||
var AltairForkTransition = types.Evaluator{
|
||||
Name: "altair_fork_transition_%d",
|
||||
Policy: policies.OnEpoch(helpers.AltairE2EForkEpoch),
|
||||
Evaluation: forkOccurs,
|
||||
Evaluation: altairForkOccurs,
|
||||
}
|
||||
|
||||
func forkOccurs(conns ...*grpc.ClientConn) error {
|
||||
// BellatrixForkTransition ensures that the Bellatrix hard fork has occurred successfully.
|
||||
var BellatrixForkTransition = types.Evaluator{
|
||||
Name: "bellatrix_fork_transition_%d",
|
||||
Policy: policies.OnEpoch(helpers.BellatrixE2EForkEpoch),
|
||||
Evaluation: bellatrixForkOccurs,
|
||||
}
|
||||
|
||||
func altairForkOccurs(conns ...*grpc.ClientConn) error {
|
||||
conn := conns[0]
|
||||
client := ethpb.NewBeaconNodeValidatorClient(conn)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@@ -62,3 +69,48 @@ func forkOccurs(conns ...*grpc.ClientConn) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func bellatrixForkOccurs(conns ...*grpc.ClientConn) error {
|
||||
conn := conns[0]
|
||||
client := ethpb.NewBeaconNodeValidatorClient(conn)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
stream, err := client.StreamBlocksAltair(ctx, ðpb.StreamBlocksRequest{VerifiedOnly: true})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get stream")
|
||||
}
|
||||
fSlot, err := slots.EpochStart(helpers.BellatrixE2EForkEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.Err() == context.Canceled {
|
||||
return errors.New("context canceled prematurely")
|
||||
}
|
||||
res, err := stream.Recv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res == nil || res.Block == nil {
|
||||
return errors.New("nil block returned by beacon node")
|
||||
}
|
||||
if res.GetPhase0Block() == nil && res.GetAltairBlock() == nil && res.GetBellatrixBlock() == nil {
|
||||
return errors.New("nil block returned by beacon node")
|
||||
}
|
||||
if res.GetPhase0Block() != nil {
|
||||
return errors.New("phase 0 block returned after bellatrix fork has occurred")
|
||||
}
|
||||
if res.GetAltairBlock() != nil {
|
||||
return errors.New("altair block returned after bellatrix fork has occurred")
|
||||
}
|
||||
blk, err := wrapperv2.WrappedSignedBeaconBlock(res.GetBellatrixBlock())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := coreHelper.BeaconBlockIsNil(blk); err != nil {
|
||||
return err
|
||||
}
|
||||
if blk.Block().Slot() < fSlot {
|
||||
return errors.Errorf("wanted a block >= %d but received %d", fSlot, blk.Block().Slot())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -419,5 +419,8 @@ func convertToBlockInterface(obj *ethpb.BeaconBlockContainer) (block.SignedBeaco
|
||||
if obj.GetBellatrixBlock() != nil {
|
||||
return wrapper.WrappedSignedBeaconBlock(obj.GetBellatrixBlock())
|
||||
}
|
||||
return nil, errors.New("container has no block")
|
||||
}
|
||||
|
||||
@@ -54,11 +54,13 @@ func e2eMainnet(t *testing.T, usePrysmSh bool) {
|
||||
ev.ProposeVoluntaryExit,
|
||||
ev.ValidatorHasExited,
|
||||
ev.ColdStateCheckpoint,
|
||||
ev.ForkTransition,
|
||||
ev.AltairForkTransition,
|
||||
ev.BellatrixForkTransition,
|
||||
ev.APIMiddlewareVerifyIntegrity,
|
||||
ev.APIGatewayV1Alpha1VerifyIntegrity,
|
||||
ev.FinishedSyncing,
|
||||
ev.AllNodesHaveSameHead,
|
||||
ev.TransactionsPresent,
|
||||
}
|
||||
testConfig := &types.E2EConfig{
|
||||
BeaconFlags: []string{
|
||||
|
||||
@@ -19,7 +19,7 @@ type testArgs struct {
|
||||
useWeb3RemoteSigner bool
|
||||
}
|
||||
|
||||
func TestEndToEnd_MinimalConfig(t *testing.T) {
|
||||
func TestEndToEnd_MinimalConfigV(t *testing.T) {
|
||||
e2eMinimal(t, &testArgs{
|
||||
usePrysmSh: false,
|
||||
useWeb3RemoteSigner: false,
|
||||
@@ -83,12 +83,14 @@ func e2eMinimal(t *testing.T, args *testArgs) {
|
||||
ev.ValidatorHasExited,
|
||||
ev.ValidatorsVoteWithTheMajority,
|
||||
ev.ColdStateCheckpoint,
|
||||
ev.ForkTransition,
|
||||
ev.AltairForkTransition,
|
||||
ev.BellatrixForkTransition,
|
||||
ev.APIMiddlewareVerifyIntegrity,
|
||||
ev.APIGatewayV1Alpha1VerifyIntegrity,
|
||||
ev.FinishedSyncing,
|
||||
ev.AllNodesHaveSameHead,
|
||||
ev.ValidatorSyncParticipation,
|
||||
ev.TransactionsPresent,
|
||||
}
|
||||
testConfig := &types.E2EConfig{
|
||||
BeaconFlags: []string{
|
||||
|
||||
@@ -31,6 +31,7 @@ type ports struct {
|
||||
Eth1RPCPort int
|
||||
Eth1AuthRPCPort int
|
||||
Eth1WSPort int
|
||||
Eth1ProxyPort int
|
||||
PrysmBeaconNodeRPCPort int
|
||||
PrysmBeaconNodeUDPPort int
|
||||
PrysmBeaconNodeTCPPort int
|
||||
@@ -69,6 +70,12 @@ var StandardLighthouseNodeCount = 2
|
||||
// DepositCount is the amount of deposits E2E makes on a separate validator client.
|
||||
var DepositCount = uint64(64)
|
||||
|
||||
// NumOfExecEngineTxs is the number of transactions sent to the execution engine.
|
||||
var NumOfExecEngineTxs = uint64(200)
|
||||
|
||||
// ExpectedExecEngineTxsThreshold is the portion of execution engine transactions we expect to find in blocks.
|
||||
var ExpectedExecEngineTxsThreshold = 0.7
|
||||
|
||||
// Base port values.
|
||||
const (
|
||||
portSpan = 50
|
||||
@@ -80,6 +87,7 @@ const (
|
||||
Eth1RPCPort = Eth1Port + portSpan
|
||||
Eth1WSPort = Eth1Port + 2*portSpan
|
||||
Eth1AuthRPCPort = Eth1Port + 3*portSpan
|
||||
Eth1ProxyPort = Eth1Port + 4*portSpan
|
||||
|
||||
PrysmBeaconNodeRPCPort = 4150
|
||||
PrysmBeaconNodeUDPPort = PrysmBeaconNodeRPCPort + portSpan
|
||||
@@ -227,6 +235,10 @@ func initializeStandardPorts(shardCount, shardIndex int, ports *ports, existingR
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eth1ProxyPort, err := port(Eth1ProxyPort, shardCount, shardIndex, existingRegistrations)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
beaconNodeRPCPort, err := port(PrysmBeaconNodeRPCPort, shardCount, shardIndex, existingRegistrations)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -269,6 +281,7 @@ func initializeStandardPorts(shardCount, shardIndex int, ports *ports, existingR
|
||||
ports.Eth1RPCPort = eth1RPCPort
|
||||
ports.Eth1AuthRPCPort = eth1AuthPort
|
||||
ports.Eth1WSPort = eth1WSPort
|
||||
ports.Eth1ProxyPort = eth1ProxyPort
|
||||
ports.PrysmBeaconNodeRPCPort = beaconNodeRPCPort
|
||||
ports.PrysmBeaconNodeUDPPort = beaconNodeUDPPort
|
||||
ports.PrysmBeaconNodeTCPPort = beaconNodeTCPPort
|
||||
|
||||
@@ -69,7 +69,7 @@ type ValidatorService struct {
|
||||
db db.Database
|
||||
grpcHeaders []string
|
||||
graffiti []byte
|
||||
web3SignerConfig *remote_web3signer.SetupConfig
|
||||
Web3SignerConfig *remote_web3signer.SetupConfig
|
||||
feeRecipientConfig *validator_service_config.FeeRecipientConfig
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e
|
||||
interopKeysConfig: cfg.InteropKeysConfig,
|
||||
graffitiStruct: cfg.GraffitiStruct,
|
||||
logDutyCountDown: cfg.LogDutyCountDown,
|
||||
web3SignerConfig: cfg.Web3SignerConfig,
|
||||
Web3SignerConfig: cfg.Web3SignerConfig,
|
||||
feeRecipientConfig: cfg.FeeRecipientConfig,
|
||||
}, nil
|
||||
}
|
||||
@@ -204,7 +204,7 @@ func (v *ValidatorService) Start() {
|
||||
graffitiOrderedIndex: graffitiOrderedIndex,
|
||||
eipImportBlacklistedPublicKeys: slashablePublicKeys,
|
||||
logDutyCountDown: v.logDutyCountDown,
|
||||
Web3SignerConfig: v.web3SignerConfig,
|
||||
Web3SignerConfig: v.Web3SignerConfig,
|
||||
feeRecipientConfig: v.feeRecipientConfig,
|
||||
walletIntializedChannel: make(chan *wallet.Wallet, 1),
|
||||
}
|
||||
|
||||
@@ -117,6 +117,7 @@ func (v *validator) WaitForKeymanagerInitialization(ctx context.Context) error {
|
||||
}
|
||||
|
||||
if v.useWeb && v.wallet == nil {
|
||||
log.Info("Waiting for keymanager to initialize validator client with web UI")
|
||||
// if wallet is not set, wait for it to be set through the UI
|
||||
km, err := waitForWebWalletInitialization(ctx, v.walletInitializedFeed, v.walletIntializedChannel)
|
||||
if err != nil {
|
||||
|
||||
@@ -32,5 +32,6 @@ go_test(
|
||||
"//validator/keymanager/derived:go_default_library",
|
||||
"//validator/keymanager/local:go_default_library",
|
||||
"//validator/keymanager/remote:go_default_library",
|
||||
"//validator/keymanager/remote-web3signer:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -28,6 +28,7 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -36,8 +37,10 @@ go_test(
|
||||
srcs = ["keymanager_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/prysm/v1alpha1/validator-client:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//validator/keymanager/remote-web3signer/internal:go_default_library",
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package remote_web3signer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
@@ -20,6 +21,7 @@ import (
|
||||
remote_utils "github.com/prysmaticlabs/prysm/validator/keymanager/remote-utils"
|
||||
"github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer/internal"
|
||||
v1 "github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer/v1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SetupConfig includes configuration values for initializing.
|
||||
@@ -48,6 +50,7 @@ type Keymanager struct {
|
||||
providedPublicKeys [][48]byte
|
||||
accountsChangedFeed *event.Feed
|
||||
validator *validator.Validate
|
||||
publicKeysUrlCalled bool
|
||||
}
|
||||
|
||||
// NewKeymanager instantiates a new web3signer key manager.
|
||||
@@ -55,12 +58,6 @@ func NewKeymanager(_ context.Context, cfg *SetupConfig) (*Keymanager, error) {
|
||||
if cfg.BaseEndpoint == "" || !bytesutil.IsValidRoot(cfg.GenesisValidatorsRoot) {
|
||||
return nil, fmt.Errorf("invalid setup config, one or more configs are empty: BaseEndpoint: %v, GenesisValidatorsRoot: %#x", cfg.BaseEndpoint, cfg.GenesisValidatorsRoot)
|
||||
}
|
||||
if cfg.PublicKeysURL != "" && len(cfg.ProvidedPublicKeys) != 0 {
|
||||
return nil, errors.New("Either a provided list of public keys or a URL to a list of public keys must be provided, but not both")
|
||||
}
|
||||
if cfg.PublicKeysURL == "" && len(cfg.ProvidedPublicKeys) == 0 {
|
||||
return nil, errors.New("no valid public key options provided")
|
||||
}
|
||||
client, err := internal.NewApiClient(cfg.BaseEndpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create apiClient")
|
||||
@@ -72,6 +69,7 @@ func NewKeymanager(_ context.Context, cfg *SetupConfig) (*Keymanager, error) {
|
||||
publicKeysURL: cfg.PublicKeysURL,
|
||||
providedPublicKeys: cfg.ProvidedPublicKeys,
|
||||
validator: validator.New(),
|
||||
publicKeysUrlCalled: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -79,12 +77,14 @@ func NewKeymanager(_ context.Context, cfg *SetupConfig) (*Keymanager, error) {
|
||||
// from the remote server or from the provided keys if there are no existing public keys set
|
||||
// or provides the existing keys in the keymanager.
|
||||
func (km *Keymanager) FetchValidatingPublicKeys(ctx context.Context) ([][fieldparams.BLSPubkeyLength]byte, error) {
|
||||
if km.publicKeysURL != "" && len(km.providedPublicKeys) == 0 {
|
||||
if km.publicKeysURL != "" && !km.publicKeysUrlCalled {
|
||||
providedPublicKeys, err := km.client.GetPublicKeys(ctx, km.publicKeysURL)
|
||||
if err != nil {
|
||||
erroredResponsesTotal.Inc()
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("could not get public keys from remote server url: %v", km.publicKeysURL))
|
||||
}
|
||||
// Ensures that if the public keys are deleted, the validator does not call the URL again.
|
||||
km.publicKeysUrlCalled = true
|
||||
km.providedPublicKeys = providedPublicKeys
|
||||
}
|
||||
return km.providedPublicKeys, nil
|
||||
@@ -231,13 +231,8 @@ func getSignRequestJson(ctx context.Context, validator *validator.Validate, requ
|
||||
}
|
||||
|
||||
// SubscribeAccountChanges returns the event subscription for changes to public keys.
|
||||
func (*Keymanager) SubscribeAccountChanges(_ chan [][48]byte) event.Subscription {
|
||||
// Not used right now.
|
||||
// Returns a stub for the time being as there is a danger of being slashed if the apiClient reloads keys dynamically.
|
||||
// Because there is no way to dynamically reload keys, add or remove remote keys we are returning a stub without any event updates for the time being.
|
||||
return event.NewSubscription(func(i <-chan struct{}) error {
|
||||
return nil
|
||||
})
|
||||
func (km *Keymanager) SubscribeAccountChanges(pubKeysChan chan [][fieldparams.BLSPubkeyLength]byte) event.Subscription {
|
||||
return km.accountsChangedFeed.Subscribe(pubKeysChan)
|
||||
}
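A short sketch of consuming the new feed-backed subscription (assuming the keymanager's accountsChangedFeed has been initialized); the channel buffer size and variable names are illustrative:

// Subscribe to key-set changes triggered by AddPublicKeys / DeletePublicKeys below.
ch := make(chan [][fieldparams.BLSPubkeyLength]byte, 1)
sub := km.SubscribeAccountChanges(ch)
defer sub.Unsubscribe()

updatedKeys := <-ch // receives the full provided key set after each add/delete
_ = updatedKeys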
|
||||
|
||||
// ExtractKeystores is not supported for the remote-web3signer keymanager type.
|
||||
@@ -278,3 +273,73 @@ func (km *Keymanager) ListKeymanagerAccounts(ctx context.Context, cfg keymanager
|
||||
remote_utils.DisplayRemotePublicKeys(validatingPubKeys)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddPublicKeys imports a list of public keys into the keymanager for web3signer use. Returns status with message.
|
||||
func (km *Keymanager) AddPublicKeys(ctx context.Context, pubKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.ImportedRemoteKeysStatus, error) {
|
||||
if ctx == nil {
|
||||
return nil, errors.New("context is nil")
|
||||
}
|
||||
importedRemoteKeysStatuses := make([]*ethpbservice.ImportedRemoteKeysStatus, len(pubKeys))
|
||||
for i, pubKey := range pubKeys {
|
||||
found := false
|
||||
for _, key := range km.providedPublicKeys {
|
||||
if bytes.Equal(key[:], pubKey[:]) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
importedRemoteKeysStatuses[i] = ðpbservice.ImportedRemoteKeysStatus{
|
||||
Status: ethpbservice.ImportedRemoteKeysStatus_DUPLICATE,
|
||||
Message: fmt.Sprintf("Duplicate pubkey: %v, already in use", hexutil.Encode(pubKey[:])),
|
||||
}
|
||||
continue
|
||||
}
|
||||
km.providedPublicKeys = append(km.providedPublicKeys, pubKey)
|
||||
importedRemoteKeysStatuses[i] = ðpbservice.ImportedRemoteKeysStatus{
|
||||
Status: ethpbservice.ImportedRemoteKeysStatus_IMPORTED,
|
||||
Message: fmt.Sprintf("Successfully added pubkey: %v", hexutil.Encode(pubKey[:])),
|
||||
}
|
||||
log.Debug("Added pubkey to keymanager for web3signer", "pubkey", hexutil.Encode(pubKey[:]))
|
||||
}
|
||||
km.accountsChangedFeed.Send(km.providedPublicKeys)
|
||||
return importedRemoteKeysStatuses, nil
|
||||
}
|
||||
|
||||
// DeletePublicKeys removes a list of public keys from the keymanager for web3signer use. Returns status with message.
|
||||
func (km *Keymanager) DeletePublicKeys(ctx context.Context, pubKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.DeletedRemoteKeysStatus, error) {
|
||||
if ctx == nil {
|
||||
return nil, errors.New("context is nil")
|
||||
}
|
||||
deletedRemoteKeysStatuses := make([]*ethpbservice.DeletedRemoteKeysStatus, len(pubKeys))
|
||||
if len(km.providedPublicKeys) == 0 {
|
||||
for i := range deletedRemoteKeysStatuses {
|
||||
deletedRemoteKeysStatuses[i] = ðpbservice.DeletedRemoteKeysStatus{
|
||||
Status: ethpbservice.DeletedRemoteKeysStatus_NOT_FOUND,
|
||||
Message: "No pubkeys are set in validator",
|
||||
}
|
||||
}
|
||||
return deletedRemoteKeysStatuses, nil
|
||||
}
|
||||
for i, pubkey := range pubKeys {
|
||||
for in, key := range km.providedPublicKeys {
|
||||
if bytes.Equal(key[:], pubkey[:]) {
|
||||
km.providedPublicKeys = append(km.providedPublicKeys[:in], km.providedPublicKeys[in+1:]...)
|
||||
deletedRemoteKeysStatuses[i] = ðpbservice.DeletedRemoteKeysStatus{
|
||||
Status: ethpbservice.DeletedRemoteKeysStatus_DELETED,
|
||||
Message: fmt.Sprintf("Successfully deleted pubkey: %v", hexutil.Encode(pubkey[:])),
|
||||
}
|
||||
log.Debug("Deleted pubkey from keymanager for web3signer", "pubkey", hexutil.Encode(pubkey[:]))
|
||||
break
|
||||
}
|
||||
}
|
||||
if deletedRemoteKeysStatuses[i] == nil {
|
||||
deletedRemoteKeysStatuses[i] = ðpbservice.DeletedRemoteKeysStatus{
|
||||
Status: ethpbservice.DeletedRemoteKeysStatus_NOT_FOUND,
|
||||
Message: fmt.Sprintf("Pubkey: %v not found", hexutil.Encode(pubkey[:])),
|
||||
}
|
||||
}
|
||||
}
|
||||
km.accountsChangedFeed.Send(km.providedPublicKeys)
|
||||
return deletedRemoteKeysStatuses, nil
|
||||
}
|
||||
|
||||
@@ -8,8 +8,10 @@ import (
    "testing"

    "github.com/ethereum/go-ethereum/common/hexutil"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/crypto/bls"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service"
    validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client"
    "github.com/prysmaticlabs/prysm/testing/require"
    "github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer/internal"

@@ -266,3 +268,72 @@ func TestKeymanager_FetchValidatingPublicKeys_WithExternalURL_ThrowsError(t *tes
    assert.Nil(t, resp)
    assert.Equal(t, "could not get public keys from remote server url: http://example2.com/api/v1/eth2/publicKeys: mock error", fmt.Sprintf("%v", err))
}

func TestKeymanager_AddPublicKeys(t *testing.T) {
    ctx := context.Background()
    root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69")
    if err != nil {
        fmt.Printf("error: %v", err)
    }
    config := &SetupConfig{
        BaseEndpoint:          "http://example.com",
        GenesisValidatorsRoot: root,
    }
    km, err := NewKeymanager(ctx, config)
    if err != nil {
        fmt.Printf("error: %v", err)
    }
    pubkey, err := hexutil.Decode("0xa2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820")
    require.NoError(t, err)
    publicKeys := [][fieldparams.BLSPubkeyLength]byte{
        bytesutil.ToBytes48(pubkey),
    }
    statuses, err := km.AddPublicKeys(ctx, publicKeys)
    require.NoError(t, err)
    for _, status := range statuses {
        require.Equal(t, ethpbservice.ImportedRemoteKeysStatus_IMPORTED, status.Status)
    }
    statuses, err = km.AddPublicKeys(ctx, publicKeys)
    require.NoError(t, err)
    for _, status := range statuses {
        require.Equal(t, ethpbservice.ImportedRemoteKeysStatus_DUPLICATE, status.Status)
    }
}

func TestKeymanager_DeletePublicKeys(t *testing.T) {
    ctx := context.Background()
    root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69")
    if err != nil {
        fmt.Printf("error: %v", err)
    }
    config := &SetupConfig{
        BaseEndpoint:          "http://example.com",
        GenesisValidatorsRoot: root,
    }
    km, err := NewKeymanager(ctx, config)
    if err != nil {
        fmt.Printf("error: %v", err)
    }
    pubkey, err := hexutil.Decode("0xa2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820")
    require.NoError(t, err)
    publicKeys := [][fieldparams.BLSPubkeyLength]byte{
        bytesutil.ToBytes48(pubkey),
    }
    statuses, err := km.AddPublicKeys(ctx, publicKeys)
    require.NoError(t, err)
    for _, status := range statuses {
        require.Equal(t, ethpbservice.ImportedRemoteKeysStatus_IMPORTED, status.Status)
    }

    s, err := km.DeletePublicKeys(ctx, publicKeys)
    require.NoError(t, err)
    for _, status := range s {
        require.Equal(t, ethpbservice.DeletedRemoteKeysStatus_DELETED, status.Status)
    }

    s, err = km.DeletePublicKeys(ctx, publicKeys)
    require.NoError(t, err)
    for _, status := range s {
        require.Equal(t, ethpbservice.DeletedRemoteKeysStatus_NOT_FOUND, status.Status)
    }
}

@@ -60,6 +60,16 @@ type KeyStoreExtractor interface {
    ExtractKeystores(ctx context.Context, publicKeys []bls.PublicKey, password string) ([]*Keystore, error)
}

// PublicKeyAdder allows adding public keys to the keymanager.
type PublicKeyAdder interface {
    AddPublicKeys(ctx context.Context, publicKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.ImportedRemoteKeysStatus, error)
}

// PublicKeyDeleter allows deleting public keys set in keymanager.
type PublicKeyDeleter interface {
    DeletePublicKeys(ctx context.Context, publicKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.DeletedRemoteKeysStatus, error)
}

type ListKeymanagerAccountConfig struct {
    ShowDepositData bool
    ShowPrivateKeys bool

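These narrow interfaces let callers feature-detect remote-key support at runtime instead of depending on a concrete keymanager type. A hedged sketch of that check follows; addRemoteKeysIfSupported is a hypothetical helper mirroring what the keymanager API server does later in this change, and keymanager.IKeymanager is assumed to be the usual keymanager interface:

// Sketch only: capability check against the PublicKeyAdder interface above.
func addRemoteKeysIfSupported(ctx context.Context, km keymanager.IKeymanager, pubKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.ImportedRemoteKeysStatus, error) {
    adder, ok := km.(keymanager.PublicKeyAdder)
    if !ok {
        return nil, errors.New("keymanager kind cannot import remote public keys")
    }
    return adder.AddPublicKeys(ctx, pubKeys)
}
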
@@ -11,6 +11,7 @@ import (
    "github.com/prysmaticlabs/prysm/validator/keymanager/derived"
    "github.com/prysmaticlabs/prysm/validator/keymanager/local"
    "github.com/prysmaticlabs/prysm/validator/keymanager/remote"
    remote_web3signer "github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer"
)

var (
@@ -25,6 +26,9 @@ var (
    _ = keymanager.Importer(&derived.Keymanager{})
    _ = keymanager.Deleter(&local.Keymanager{})
    _ = keymanager.Deleter(&derived.Keymanager{})

    _ = keymanager.PublicKeyAdder(&remote_web3signer.Keymanager{})
    _ = keymanager.PublicKeyDeleter(&remote_web3signer.Keymanager{})
)

func TestKeystoreContainsPath(t *testing.T) {

@@ -117,7 +117,7 @@ func NewValidatorClient(cliCtx *cli.Context) (*ValidatorClient, error) {
    // client via a web portal, we start the validator client in a different way.
    if cliCtx.IsSet(flags.EnableWebFlag.Name) {
        if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) || cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
            return nil, errors.New("web3signer cannot be used with --web")
            log.Warn("Remote Keymanager API enabled. Prysm web does not properly support web3signer at this time")
        }
        log.Info("Enabling web portal to manage the validator client")
        if err := validatorClient.initializeForWeb(cliCtx); err != nil {

@@ -191,12 +191,8 @@ func (c *ValidatorClient) initializeFromCLI(cliCtx *cli.Context) error {
    dataDir := cliCtx.String(flags.WalletDirFlag.Name)
    if !cliCtx.IsSet(flags.InteropNumValidators.Name) {
        // Custom Check For Web3Signer
        if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) || cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
            if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) && cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
                c.wallet = wallet.NewWalletForWeb3Signer()
            } else {
                return errors.New("--validators-external-signer-url and --validators-external-signer-public-keys must be used together")
            }
        if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) {
            c.wallet = wallet.NewWalletForWeb3Signer()
        } else {
            w, err := wallet.OpenWalletOrElseCli(cliCtx, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
                return nil, wallet.ErrNoWalletFound

@@ -275,24 +271,28 @@ func (c *ValidatorClient) initializeFromCLI(cliCtx *cli.Context) error {

func (c *ValidatorClient) initializeForWeb(cliCtx *cli.Context) error {
    var err error

    // Read the wallet password file from the cli context.
    if err = setWalletPasswordFilePath(cliCtx); err != nil {
        return errors.Wrap(err, "could not read wallet password file")
    }

    // Read the wallet from the specified path.
    w, err := wallet.OpenWalletOrElseCli(cliCtx, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
        return nil, nil
    })
    if err != nil {
        return errors.Wrap(err, "could not open wallet")
    }
    c.wallet = w
    dataDir := cliCtx.String(flags.WalletDirFlag.Name)
    if c.wallet != nil {
        dataDir = c.wallet.AccountsDir()
    if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) {
        c.wallet = wallet.NewWalletForWeb3Signer()
    } else {
        // Read the wallet password file from the cli context.
        if err = setWalletPasswordFilePath(cliCtx); err != nil {
            return errors.Wrap(err, "could not read wallet password file")
        }

        // Read the wallet from the specified path.
        w, err := wallet.OpenWalletOrElseCli(cliCtx, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
            return nil, nil
        })
        if err != nil {
            return errors.Wrap(err, "could not open wallet")
        }
        c.wallet = w
        if c.wallet != nil {
            dataDir = c.wallet.AccountsDir()
        }
    }

    if cliCtx.String(cmd.DataDirFlag.Name) != cmd.DefaultDataDir() {
        dataDir = cliCtx.String(cmd.DataDirFlag.Name)
    }

@@ -439,9 +439,8 @@ func (c *ValidatorClient) registerValidatorService(cliCtx *cli.Context) error {

func web3SignerConfig(cliCtx *cli.Context) (*remote_web3signer.SetupConfig, error) {
    var web3signerConfig *remote_web3signer.SetupConfig
    if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) && cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
    if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) {
        urlStr := cliCtx.String(flags.Web3SignerURLFlag.Name)
        publicKeysStr := cliCtx.String(flags.Web3SignerPublicValidatorKeysFlag.Name)
        u, err := url.ParseRequestURI(urlStr)
        if err != nil {
            return nil, errors.Wrapf(err, "web3signer url %s is invalid", urlStr)

@@ -453,19 +452,22 @@ func web3SignerConfig(cliCtx *cli.Context) (*remote_web3signer.SetupConfig, erro
            BaseEndpoint:          u.String(),
            GenesisValidatorsRoot: nil,
        }
        pURL, err := url.ParseRequestURI(publicKeysStr)
        if err == nil && pURL.Scheme != "" && pURL.Host != "" {
            web3signerConfig.PublicKeysURL = publicKeysStr
        } else {
            var validatorKeys [][48]byte
            for _, key := range strings.Split(publicKeysStr, ",") {
                decodedKey, decodeErr := hexutil.Decode(key)
                if decodeErr != nil {
                    return nil, errors.Wrapf(decodeErr, "could not decode public key for web3signer: %s", key)
        if cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
            publicKeysStr := cliCtx.String(flags.Web3SignerPublicValidatorKeysFlag.Name)
            pURL, err := url.ParseRequestURI(publicKeysStr)
            if err == nil && pURL.Scheme != "" && pURL.Host != "" {
                web3signerConfig.PublicKeysURL = publicKeysStr
            } else {
                var validatorKeys [][48]byte
                for _, key := range strings.Split(publicKeysStr, ",") {
                    decodedKey, decodeErr := hexutil.Decode(key)
                    if decodeErr != nil {
                        return nil, errors.Wrapf(decodeErr, "could not decode public key for web3signer: %s", key)
                    }
                    validatorKeys = append(validatorKeys, bytesutil.ToBytes48(decodedKey))
                }
                validatorKeys = append(validatorKeys, bytesutil.ToBytes48(decodedKey))
                web3signerConfig.ProvidedPublicKeys = validatorKeys
            }
            web3signerConfig.ProvidedPublicKeys = validatorKeys
        }
    }
    return web3signerConfig, nil

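The flag handling above accepts --validators-external-signer-public-keys in two shapes; a hedged sketch of the resulting config for each (values are placeholders):

// Sketch only: the two accepted shapes of --validators-external-signer-public-keys.
// 1) A URL, fetched remotely:
//      --validators-external-signer-public-keys=http://localhost:9000/api/v1/eth2/publicKeys
//      => web3signerConfig.PublicKeysURL is set to that URL.
// 2) A comma-separated hex list, decoded locally:
//      --validators-external-signer-public-keys=0xa2b5...8820,0x9324...f74a
//      => web3signerConfig.ProvidedPublicKeys holds the decoded 48-byte keys.
// With this change the URL flag alone is sufficient; the public-keys flag is optional.
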
@@ -112,6 +112,7 @@ go_test(
        "//validator/keymanager/remote-web3signer:go_default_library",
        "//validator/slashing-protection-history/format:go_default_library",
        "//validator/testing:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_golang_jwt_jwt_v4//:go_default_library",
        "@com_github_golang_mock//gomock:go_default_library",
        "@com_github_google_uuid//:go_default_library",

@@ -17,6 +17,7 @@ func (f *ValidatorEndpointFactory) IsNil() bool {
func (*ValidatorEndpointFactory) Paths() []string {
    return []string{
        "/eth/v1/keystores",
        "/eth/v1/remotekeys",
    }
}

@@ -30,6 +31,12 @@ func (*ValidatorEndpointFactory) Create(path string) (*apimiddleware.Endpoint, e
        endpoint.PostResponse = &importKeystoresResponseJson{}
        endpoint.DeleteRequest = &deleteKeystoresRequestJson{}
        endpoint.DeleteResponse = &deleteKeystoresResponseJson{}
    case "/eth/v1/remotekeys":
        endpoint.GetResponse = &listRemoteKeysResponseJson{}
        endpoint.PostRequest = &importRemoteKeysRequestJson{}
        endpoint.PostResponse = &importRemoteKeysResponseJson{}
        endpoint.DeleteRequest = &deleteRemoteKeysRequestJson{}
        endpoint.DeleteResponse = &deleteRemoteKeysResponseJson{}
    default:
        return nil, errors.New("invalid path")
    }

@@ -32,3 +32,37 @@ type deleteKeystoresResponseJson struct {
    Statuses           []*statusJson `json:"data"`
    SlashingProtection string        `json:"slashing_protection"`
}

// remote keymanager api

type listRemoteKeysResponseJson struct {
    Keystores []*remoteKeysListJson `json:"data"`
}

type remoteKeysListJson struct {
    Pubkey   string `json:"pubkey" hex:"true"`
    Url      string `json:"url"`
    Readonly bool   `json:"readonly"`
}

type remoteKeysJson struct {
    Pubkey   string `json:"pubkey" hex:"true"`
    Url      string `json:"url"`
    Readonly bool   `json:"readonly"`
}

type importRemoteKeysRequestJson struct {
    Keystores []*remoteKeysJson `json:"remote_keys"`
}

type importRemoteKeysResponseJson struct {
    Statuses []*statusJson `json:"data"`
}

type deleteRemoteKeysRequestJson struct {
    PublicKeys []string `json:"pubkeys" hex:"true"`
}

type deleteRemoteKeysResponseJson struct {
    Statuses []*statusJson `json:"data"`
}

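A hedged sketch of the wire format these middleware types imply for the new /eth/v1/remotekeys endpoint (field names come from the json tags above; the concrete values and status strings are placeholders):

// Sketch only: request/response bodies implied by the json tags above.
// GET    /eth/v1/remotekeys   -> {"data": [{"pubkey": "0x...", "url": "...", "readonly": true}]}
// POST   /eth/v1/remotekeys   <- {"remote_keys": [{"pubkey": "0x...", "url": "", "readonly": false}]}
//                             -> {"data": [{"status": "...", "message": "..."}]}
// DELETE /eth/v1/remotekeys   <- {"pubkeys": ["0x..."]}
//                             -> {"data": [{"status": "...", "message": "..."}]}
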
@@ -118,6 +118,116 @@ func TestDeleteKeystores_JSONisEqual(t *testing.T) {

}

func TestListRemoteKeys_JSONisEqual(t *testing.T) {
    middlewareResponse := &listRemoteKeysResponseJson{
        Keystores: []*remoteKeysListJson{
            &remoteKeysListJson{
                Pubkey:   "0x0",
                Url:      "http://localhost:8080",
                Readonly: true,
            },
        },
    }

    protoResponse := &service.ListRemoteKeysResponse{
        Data: []*service.ListRemoteKeysResponse_Keystore{
            &service.ListRemoteKeysResponse_Keystore{
                Pubkey:   make([]byte, fieldparams.BLSPubkeyLength),
                Url:      "http://localhost:8080",
                Readonly: true,
            },
        },
    }

    listResp, err := areJsonPropertyNamesEqual(middlewareResponse, protoResponse)
    require.NoError(t, err)
    require.Equal(t, listResp, true)

    resp, err := areJsonPropertyNamesEqual(middlewareResponse.Keystores[0], protoResponse.Data[0])
    require.NoError(t, err)
    require.Equal(t, resp, true)
}

func TestImportRemoteKeys_JSONisEqual(t *testing.T) {
    importKeystoresRequest := &importRemoteKeysRequestJson{}

    protoImportRequest := &service.ImportRemoteKeysRequest{
        RemoteKeys: []*service.ImportRemoteKeysRequest_Keystore{
            &service.ImportRemoteKeysRequest_Keystore{
                Pubkey: make([]byte, fieldparams.BLSPubkeyLength),
                Url:    "http://localhost:8080",
            },
        },
    }

    requestResp, err := areJsonPropertyNamesEqual(importKeystoresRequest, protoImportRequest)
    require.NoError(t, err)
    require.Equal(t, requestResp, true)

    importKeystoresResponse := &importRemoteKeysResponseJson{
        Statuses: []*statusJson{
            &statusJson{
                Status:  "Error",
                Message: "a",
            },
        },
    }

    protoImportKeystoresResponse := &service.ImportRemoteKeysResponse{
        Data: []*service.ImportedRemoteKeysStatus{
            &service.ImportedRemoteKeysStatus{
                Status:  service.ImportedRemoteKeysStatus_ERROR,
                Message: "a",
            },
        },
    }

    ImportResp, err := areJsonPropertyNamesEqual(importKeystoresResponse, protoImportKeystoresResponse)
    require.NoError(t, err)
    require.Equal(t, ImportResp, true)

    resp, err := areJsonPropertyNamesEqual(importKeystoresResponse.Statuses[0], protoImportKeystoresResponse.Data[0])
    require.NoError(t, err)
    require.Equal(t, resp, true)
}

func TestDeleteRemoteKeys_JSONisEqual(t *testing.T) {
    deleteKeystoresRequest := &deleteRemoteKeysRequestJson{}

    protoDeleteRequest := &service.DeleteRemoteKeysRequest{
        Pubkeys: [][]byte{[]byte{}},
    }

    requestResp, err := areJsonPropertyNamesEqual(deleteKeystoresRequest, protoDeleteRequest)
    require.NoError(t, err)
    require.Equal(t, requestResp, true)

    deleteKeystoresResponse := &deleteRemoteKeysResponseJson{
        Statuses: []*statusJson{
            &statusJson{
                Status:  "Error",
                Message: "a",
            },
        },
    }
    protoDeleteResponse := &service.DeleteRemoteKeysResponse{
        Data: []*service.DeletedRemoteKeysStatus{
            &service.DeletedRemoteKeysStatus{
                Status:  service.DeletedRemoteKeysStatus_ERROR,
                Message: "a",
            },
        },
    }

    deleteResp, err := areJsonPropertyNamesEqual(deleteKeystoresResponse, protoDeleteResponse)
    require.NoError(t, err)
    require.Equal(t, deleteResp, true)

    resp, err := areJsonPropertyNamesEqual(deleteKeystoresResponse.Statuses[0], protoDeleteResponse.Data[0])
    require.NoError(t, err)
    require.Equal(t, resp, true)
}

// note: this does not do a deep comparison of the structs
func areJsonPropertyNamesEqual(internal, proto interface{}) (bool, error) {
    internalJSON, err := json.Marshal(internal)

@@ -30,7 +30,7 @@ func (s *Server) ListKeystores(
    }
    km, err := s.validatorService.Keymanager()
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get Prysm keymanager: %v", err)
        return nil, status.Errorf(codes.Internal, "Could not get Prysm keymanager (possibly due to beacon node unavailable): %v", err)
    }
    if s.wallet.KeymanagerKind() != keymanager.Derived && s.wallet.KeymanagerKind() != keymanager.Local {
        return nil, status.Errorf(codes.FailedPrecondition, "Prysm validator keys are not stored locally with this keymanager type.")

@@ -67,7 +67,7 @@ func (s *Server) ImportKeystores(
    }
    km, err := s.validatorService.Keymanager()
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get keymanager: %v", err)
        return nil, status.Errorf(codes.Internal, "Could not get keymanager (possibly due to beacon node unavailable): %v", err)
    }
    importer, ok := km.(keymanager.Importer)
    if !ok {

@@ -150,7 +150,7 @@ func (s *Server) DeleteKeystores(
    }
    km, err := s.validatorService.Keymanager()
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get keymanager: %v", err)
        return nil, status.Errorf(codes.Internal, "Could not get keymanager (possibly due to beacon node unavailable): %v", err)
    }
    if len(req.Pubkeys) == 0 {
        return &ethpbservice.DeleteKeystoresResponse{Data: make([]*ethpbservice.DeletedKeystoreStatus, 0)}, nil

@@ -251,3 +251,134 @@ func (s *Server) slashingProtectionHistoryForDeletedKeys(
    }
    return slashingprotection.ExportStandardProtectionJSON(ctx, s.valDB, filteredKeys...)
}

// ListRemoteKeys returns a list of all public keys defined for web3signer keymanager type.
func (s *Server) ListRemoteKeys(ctx context.Context, _ *empty.Empty) (*ethpbservice.ListRemoteKeysResponse, error) {
    if !s.walletInitialized {
        return nil, status.Error(codes.FailedPrecondition, "Prysm Wallet not initialized. Please create a new wallet.")
    }
    if s.validatorService == nil {
        return nil, status.Error(codes.FailedPrecondition, "Validator service not ready.")
    }
    km, err := s.validatorService.Keymanager()
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get Prysm keymanager (possibly due to beacon node unavailable): %v", err)
    }
    if s.wallet.KeymanagerKind() != keymanager.Web3Signer {
        return nil, status.Errorf(codes.FailedPrecondition, "Prysm Wallet is not of type Web3Signer. Please execute validator client with web3signer flags.")
    }
    pubKeys, err := km.FetchValidatingPublicKeys(ctx)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not retrieve keystores: %v", err)
    }
    keystoreResponse := make([]*ethpbservice.ListRemoteKeysResponse_Keystore, len(pubKeys))
    for i := 0; i < len(pubKeys); i++ {
        keystoreResponse[i] = &ethpbservice.ListRemoteKeysResponse_Keystore{
            Pubkey:   pubKeys[i][:],
            Url:      s.validatorService.Web3SignerConfig.BaseEndpoint,
            Readonly: true,
        }
    }
    return &ethpbservice.ListRemoteKeysResponse{
        Data: keystoreResponse,
    }, nil
}

// ImportRemoteKeys imports a list of public keys defined for web3signer keymanager type.
func (s *Server) ImportRemoteKeys(ctx context.Context, req *ethpbservice.ImportRemoteKeysRequest) (*ethpbservice.ImportRemoteKeysResponse, error) {
    if !s.walletInitialized {
        return nil, status.Error(codes.FailedPrecondition, "Prysm Wallet not initialized. Please create a new wallet.")
    }
    if s.validatorService == nil {
        return nil, status.Error(codes.FailedPrecondition, "Validator service not ready.")
    }
    km, err := s.validatorService.Keymanager()
    if err != nil {
        return nil, status.Errorf(codes.Internal, fmt.Sprintf("Could not get Prysm keymanager (possibly due to beacon node unavailable): %v", err))
    }
    if s.wallet.KeymanagerKind() != keymanager.Web3Signer {
        return nil, status.Errorf(codes.FailedPrecondition, "Prysm Wallet is not of type Web3Signer. Please execute validator client with web3signer flags.")
    }
    adder, ok := km.(keymanager.PublicKeyAdder)
    if !ok {
        statuses := groupImportRemoteKeysErrors(req, "Keymanager kind cannot import public keys for web3signer keymanager type.")
        return &ethpbservice.ImportRemoteKeysResponse{Data: statuses}, nil
    }

    remoteKeys := make([][fieldparams.BLSPubkeyLength]byte, len(req.RemoteKeys))
    isUrlUsed := false
    for i, obj := range req.RemoteKeys {
        remoteKeys[i] = bytesutil.ToBytes48(obj.Pubkey)
        if obj.Url != "" {
            isUrlUsed = true
        }
    }
    if isUrlUsed {
        log.Warnf("Setting web3signer base url for imported keys is not supported. Prysm only uses the url from --validators-external-signer-url flag for web3signer.")
    }

    statuses, err := adder.AddPublicKeys(ctx, remoteKeys)
    if err != nil {
        sts := groupImportRemoteKeysErrors(req, fmt.Sprintf("Could not add keys;error: %v", err))
        return &ethpbservice.ImportRemoteKeysResponse{Data: sts}, nil
    }
    return &ethpbservice.ImportRemoteKeysResponse{
        Data: statuses,
    }, nil
}

func groupImportRemoteKeysErrors(req *ethpbservice.ImportRemoteKeysRequest, errorMessage string) []*ethpbservice.ImportedRemoteKeysStatus {
    statuses := make([]*ethpbservice.ImportedRemoteKeysStatus, len(req.RemoteKeys))
    for i := 0; i < len(req.RemoteKeys); i++ {
        statuses[i] = &ethpbservice.ImportedRemoteKeysStatus{
            Status:  ethpbservice.ImportedRemoteKeysStatus_ERROR,
            Message: errorMessage,
        }
    }
    return statuses
}

// DeleteRemoteKeys deletes a list of public keys defined for web3signer keymanager type.
func (s *Server) DeleteRemoteKeys(ctx context.Context, req *ethpbservice.DeleteRemoteKeysRequest) (*ethpbservice.DeleteRemoteKeysResponse, error) {
    if !s.walletInitialized {
        return nil, status.Error(codes.FailedPrecondition, "Prysm Wallet not initialized. Please create a new wallet.")
    }
    if s.validatorService == nil {
        return nil, status.Error(codes.FailedPrecondition, "Validator service not ready.")
    }
    km, err := s.validatorService.Keymanager()
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get Prysm keymanager (possibly due to beacon node unavailable): %v", err)
    }
    if s.wallet.KeymanagerKind() != keymanager.Web3Signer {
        return nil, status.Errorf(codes.FailedPrecondition, "Prysm Wallet is not of type Web3Signer. Please execute validator client with web3signer flags.")
    }
    deleter, ok := km.(keymanager.PublicKeyDeleter)
    if !ok {
        statuses := groupDeleteRemoteKeysErrors(req, "Keymanager kind cannot delete public keys for web3signer keymanager type.")
        return &ethpbservice.DeleteRemoteKeysResponse{Data: statuses}, nil
    }
    remoteKeys := make([][fieldparams.BLSPubkeyLength]byte, len(req.Pubkeys))
    for i, key := range req.Pubkeys {
        remoteKeys[i] = bytesutil.ToBytes48(key)
    }
    statuses, err := deleter.DeletePublicKeys(ctx, remoteKeys)
    if err != nil {
        sts := groupDeleteRemoteKeysErrors(req, fmt.Sprintf("Could not delete keys;error: %v", err))
        return &ethpbservice.DeleteRemoteKeysResponse{Data: sts}, nil
    }
    return &ethpbservice.DeleteRemoteKeysResponse{
        Data: statuses,
    }, nil
}

func groupDeleteRemoteKeysErrors(req *ethpbservice.DeleteRemoteKeysRequest, errorMessage string) []*ethpbservice.DeletedRemoteKeysStatus {
    statuses := make([]*ethpbservice.DeletedRemoteKeysStatus, len(req.Pubkeys))
    for i := 0; i < len(req.Pubkeys); i++ {
        statuses[i] = &ethpbservice.DeletedRemoteKeysStatus{
            Status:  ethpbservice.DeletedRemoteKeysStatus_ERROR,
            Message: errorMessage,
        }
    }
    return statuses
}

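For orientation, a hedged sketch of driving the three new RPCs through an initialized Server, as the tests below do; pubkeyBytes is a placeholder 48-byte key, and, per the warning above, any per-key url in the import request is ignored in favor of the --validators-external-signer-url endpoint:

// Sketch only: exercising the new remote-key RPCs on a web3signer-backed Server s.
importResp, err := s.ImportRemoteKeys(ctx, &ethpbservice.ImportRemoteKeysRequest{
    RemoteKeys: []*ethpbservice.ImportRemoteKeysRequest_Keystore{
        {Pubkey: pubkeyBytes}, // Url deliberately left empty; Prysm ignores it anyway
    },
})
listResp, err := s.ListRemoteKeys(ctx, &empty.Empty{})
delResp, err := s.DeleteRemoteKeys(ctx, &ethpbservice.DeleteRemoteKeysRequest{Pubkeys: [][]byte{pubkeyBytes}})
_, _, _, _ = importResp, listResp, delResp, err
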
@@ -7,6 +7,7 @@ import (
    "fmt"
    "testing"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/golang/protobuf/ptypes/empty"
    "github.com/google/uuid"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"

@@ -524,3 +525,158 @@ func createRandomKeystore(t testing.TB, password string) *keymanager.Keystore {
        Name:    encryptor.Name(),
    }
}

func TestServer_ListRemoteKeys(t *testing.T) {
    t.Run("wallet not ready", func(t *testing.T) {
        s := Server{}
        _, err := s.ListKeystores(context.Background(), &empty.Empty{})
        require.ErrorContains(t, "Prysm Wallet not initialized. Please create a new wallet.", err)
    })
    ctx := context.Background()
    w := wallet.NewWalletForWeb3Signer()
    root := make([]byte, fieldparams.RootLength)
    root[0] = 1
    bytevalue, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
    require.NoError(t, err)
    pubkeys := [][fieldparams.BLSPubkeyLength]byte{bytesutil.ToBytes48(bytevalue)}
    config := &remote_web3signer.SetupConfig{
        BaseEndpoint:          "http://example.com",
        GenesisValidatorsRoot: root,
        ProvidedPublicKeys:    pubkeys,
    }
    km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false, Web3SignerConfig: config})
    require.NoError(t, err)
    vs, err := client.NewValidatorService(ctx, &client.Config{
        Wallet: w,
        Validator: &mock.MockValidator{
            Km: km,
        },
        Web3SignerConfig: config,
    })
    require.NoError(t, err)
    s := &Server{
        walletInitialized: true,
        wallet:            w,
        validatorService:  vs,
    }
    expectedKeys, err := km.FetchValidatingPublicKeys(ctx)
    require.NoError(t, err)

    t.Run("returns proper data with existing pub keystores", func(t *testing.T) {
        resp, err := s.ListRemoteKeys(context.Background(), &empty.Empty{})
        require.NoError(t, err)
        for i := 0; i < len(resp.Data); i++ {
            require.DeepEqual(t, expectedKeys[i][:], resp.Data[i].Pubkey)
        }
    })
}

func TestServer_ImportRemoteKeys(t *testing.T) {
    t.Run("wallet not ready", func(t *testing.T) {
        s := Server{}
        _, err := s.ListKeystores(context.Background(), &empty.Empty{})
        require.ErrorContains(t, "Prysm Wallet not initialized. Please create a new wallet.", err)
    })
    ctx := context.Background()
    w := wallet.NewWalletForWeb3Signer()
    root := make([]byte, fieldparams.RootLength)
    root[0] = 1
    config := &remote_web3signer.SetupConfig{
        BaseEndpoint:          "http://example.com",
        GenesisValidatorsRoot: root,
        ProvidedPublicKeys:    nil,
    }
    km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false, Web3SignerConfig: config})
    require.NoError(t, err)
    vs, err := client.NewValidatorService(ctx, &client.Config{
        Wallet: w,
        Validator: &mock.MockValidator{
            Km: km,
        },
        Web3SignerConfig: config,
    })
    require.NoError(t, err)
    s := &Server{
        walletInitialized: true,
        wallet:            w,
        validatorService:  vs,
    }
    bytevalue, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
    require.NoError(t, err)
    remoteKeys := []*ethpbservice.ImportRemoteKeysRequest_Keystore{
        {
            Pubkey: bytevalue,
        },
    }

    t.Run("returns proper data with existing pub keystores", func(t *testing.T) {
        resp, err := s.ImportRemoteKeys(context.Background(), &ethpbservice.ImportRemoteKeysRequest{
            RemoteKeys: remoteKeys,
        })
        expectedStatuses := []*ethpbservice.ImportedRemoteKeysStatus{
            &ethpbservice.ImportedRemoteKeysStatus{
                Status:  ethpbservice.ImportedRemoteKeysStatus_IMPORTED,
                Message: fmt.Sprintf("Successfully added pubkey: %v", hexutil.Encode(bytevalue)),
            },
        }
        require.NoError(t, err)
        for i := 0; i < len(resp.Data); i++ {
            require.DeepEqual(t, expectedStatuses[i], resp.Data[i])
        }
    })
}

func TestServer_DeleteRemoteKeys(t *testing.T) {
    t.Run("wallet not ready", func(t *testing.T) {
        s := Server{}
        _, err := s.ListKeystores(context.Background(), &empty.Empty{})
        require.ErrorContains(t, "Prysm Wallet not initialized. Please create a new wallet.", err)
    })
    ctx := context.Background()
    w := wallet.NewWalletForWeb3Signer()
    root := make([]byte, fieldparams.RootLength)
    root[0] = 1
    bytevalue, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
    require.NoError(t, err)
    pubkeys := [][fieldparams.BLSPubkeyLength]byte{bytesutil.ToBytes48(bytevalue)}
    config := &remote_web3signer.SetupConfig{
        BaseEndpoint:          "http://example.com",
        GenesisValidatorsRoot: root,
        ProvidedPublicKeys:    pubkeys,
    }
    km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false, Web3SignerConfig: config})
    require.NoError(t, err)
    vs, err := client.NewValidatorService(ctx, &client.Config{
        Wallet: w,
        Validator: &mock.MockValidator{
            Km: km,
        },
        Web3SignerConfig: config,
    })
    require.NoError(t, err)
    s := &Server{
        walletInitialized: true,
        wallet:            w,
        validatorService:  vs,
    }

    t.Run("returns proper data with existing pub keystores", func(t *testing.T) {
        resp, err := s.DeleteRemoteKeys(context.Background(), &ethpbservice.DeleteRemoteKeysRequest{
            Pubkeys: [][]byte{bytevalue},
        })
        expectedStatuses := []*ethpbservice.DeletedRemoteKeysStatus{
            &ethpbservice.DeletedRemoteKeysStatus{
                Status:  ethpbservice.DeletedRemoteKeysStatus_DELETED,
                Message: fmt.Sprintf("Successfully deleted pubkey: %v", hexutil.Encode(bytevalue)),
            },
        }
        require.NoError(t, err)
        for i := 0; i < len(resp.Data); i++ {
            require.DeepEqual(t, expectedStatuses[i], resp.Data[i])
        }
        expectedKeys, err := km.FetchValidatingPublicKeys(ctx)
        require.NoError(t, err)
        require.Equal(t, 0, len(expectedKeys))
    })
}