Compare commits

...

17 Commits

Author SHA1 Message Date
terence tsao
a6442987f9 fix: test errors 2022-04-12 14:12:37 -07:00
terence tsao
4197da13ad Add: wss payload block hash as last default 2022-04-12 14:10:29 -07:00
Preston Van Loon
ecad5bbffc e2e: Provide e2e config yaml to web3signer (#10123)
* e2e: Provide e2e config yaml to web3signer

* fix build for //testing/endtoend:go_default_test

* Update with web3signer with teku fixes

* buildifier

* Add slasher case for web3signer

* Update testing/endtoend/minimal_e2e_test.go

* Update web3signer to 21.10.6

* Revert "Update web3signer to 21.10.6"

This reverts commit bdf3c408f2.

* Remove slasher part of web3signer e2e tests

* Revert "Remove slasher part of web3signer e2e tests"

This reverts commit 24249802ae.

* fix slasher web3signer test

* fixing build

* updating yaml to match testnet_e2e_config.go

* trying a different order to the e2e test and adding a log

* trying different way to kill process

* handling unhandled error

* testing changes to config WIP

* fixing bazel WIP

* fixing build

* ignoring test for now to test

* fixing bazel

* Test only web3signer e2e

* rolling back some commits to test

* fixing bazel

* trying an updated web3signer version

* changing flag to match version

* trying current version of develop for web3signer

* testing not using the --network property

* addressing build error

* testing config change

* reverting to go back to using the network file

* testing adding epochs per sync committee period

* rolling back configs

* removing check to test

* adding log to get sync committee duties and changing bellatrix fork epoch to something large and altair to epoch 1

* fixing bazel

* updating epoch in config file

* fixing more discrepancies between the configurations

* removing unused yaml

* removing GoLand-added duplicates

* reverting using network minimal

* fixing bug

* rolling back some changes

* rolling back some changes

* rolling back changes

* making sure web3signer test doesn't make it to bellatrix fork yet

* reverting changes I did not touch

* undo comment

* Update testing/endtoend/deps.bzl

* Apply suggestions from code review

* rm nl

* fix //testing/endtoend:go_mainnet_test

* Remove unnecessary dep

* fix //testing/endtoend:go_mainnet_test

* addressing review comments

* fixing build and internal conflict

* removing web3signer slasher test as it's unneeded due to the interface nature of key signing; the regular slashing test is enough

* fix: the validator we fetch from the binary can only run before Altair; if you add that and the web3signer, these things can never run together, as the web3signer sets it to before Bellatrix

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: James He <james@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-04-12 16:57:46 +00:00
Nishant Das
407182387b fix it all (#10511) 2022-04-12 19:50:29 +08:00
terence tsao
ad0b0b503d Move GetTerminalBlockHash to powchain engine (#10500)
* Move GetTerminalBlockHash to powchain engine

* Update service.go

* Update service.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-12 10:19:07 +00:00
terence tsao
58f4ba758c Metrics tracking EE VALID/SYNCING/INVALID response counter (#10504)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-04-12 09:51:13 +00:00
james-prysm
64f64f06bf Remote Key Manager API (web3signer) (#10302)
* removing flag requirement, can run web3signer without predefined public keys

* placeholders for remote-keymanager-api

* adding proto and accountschangedfeed

* updating generated code

* fix imports

* fixing interface

* adding work in progress apimiddleware code

* started implementing functions for remote keymanager api

* fixing generated code from proto

* fixing protos

* fixing import format

* fixing proto generation again, didn't fix the first time

* fixing imports again

* continuing on implementing functions

* implementing add function

* implementing delete API function

* handling errors for API

* removing unusedcode and fixing format

* fixing bazel

* wip enable --web when running web3signer

* fixing wallet check for web3signer

* fixing apis

* adding list remote keys unit test

* import remote keys test

* delete pubkeys tests

* moving location of tests

* adding unit tests

* adding placeholder functions

* adding more unit tests

* fixing bazel

* fixing build

* fixing already slice issue with unit test

* fixing linting

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/keymanager/types.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/node/node.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/keymanager/types.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* adding comment on proto based on review

* Update validator/keymanager/remote-web3signer/keymanager.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/keymanager/remote-web3signer/keymanager.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* adding generated code based on review

* updating based on feedback

* fixing imports

* fixing formatting

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fixing event call

* fixing dependency

* updating bazel

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing comment from review

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-04-11 16:05:40 -04:00
terence tsao
e70055733f Save state to DB after proposer boost (#10509) 2022-04-11 16:51:49 +00:00
Radosław Kapka
36e4f49af0 Bellatrix evaluators (#10506)
* defensive nil check

* separate ExecutionPayload/Header from codegen

* tell bazel about this new file

* Merge: support terminal difficulty override (#9769)

* Fix finding terminal block hash calculation

* Update mainnet_config.go

* Update beacon_block.pb.go

* Various fixes to pass all spec tests for Merge (#9777)

* Proper upgrade altair to merge state

* Use uint64 for ttd

* Correctly upgrade to merge state + object mapping fixes

* Use proper receive block path for initial syncing

* Disable contract lookback

* Disable deposit contract lookback

* Go fmt

* Merge: switch from go bindings to raw rpc calls (#9803)

* Disable genesis ETH1.0 chain header logging

* Update htrutils.go

* all gossip tests passing

* Remove gas validations

* Update penalty params for Merge

* Fix gossip and tx size limits for the merge part 1

* Remove extraneous p2p condition

* Add and use

* Add and use TBH_ACTIVATION_EPOCH

* Update WORKSPACE

* Update Kintsugi engine API (#9865)

* Kintsugi ssz (#9867)

* All spec tests pass

* Update spec test shas

* Update Kintsugi consensus implementations (#9872)

* Remove secp256k1

* Remove unused merge genesis state gen tool

* Manually override nil transaction field. M2 works

* Fix bad hex conversion

* Change Gossip message size and Chunk Size from 1 MB to 10 MB (#9860)

* change gossip size and chunk size after merge

* change ssz to accommodate both changes

* gofmt config file

* add testcase for merge MsgId

* Update beacon-chain/p2p/message_id.go

Change MB to Mib in comment

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* change function name from altairMsgID to postAltairMsgID

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* Sync with develop

* Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi

* Update state_trie.go

* Clean up conflicts

* Fix build

* Update config to devnet1

* Fix state merge

* Handle merge test case for update balance

* Fix build

* State pkg cleanup

* Fix a bug with loading mainnet state

* Fix transactions root

* Add v2 endpoint for merge blocks (#9802)

* Add V2 blocks endpoint for merge blocks

* Update beacon-chain/rpc/apimiddleware/structs.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* go mod

* fix transactions

* Terence's comments

* add missing file

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Sync

* Go mod tidy

* change EP field names

* latest kintsugi execution api

* fix conflicts

* converting base fee to big endian format (#10018)

* ReverseByteOrder function does not mess the input

* sync with develop

* use merge gossip sizes

* correct gossip sizes this time

* visibility

* clean ups

* Sync with develop, fix payload nil check bug

* Speed up syncing, hide cosmetic errors

* Sync with develop

* Clean up after sync

* Update generate_keys.go

* sync with develop

* Update mainnet_config.go

* Clean ups

* Sync optimistically candidate blocks (#10193)

* Revert "Sync optimistically candidate blocks (#10193)"

This reverts commit f99a0419ef.

* Sync optimistically candidate blocks (#10193)

* allow optimistic sync

* Fix merge transition block validation

* Update proposer.go

* Sync with develop

* delete deprecated client, update testnet flag

* Change optimistic logic (#10194)

* Logs and err handling

* Fix build

* Clean ups

* Add back get payload

* c

* Done

* Rm uncommented

* Optimistic sync: prysm validator rpcs (#10200)

* Logs to reproduce

* Use pointers

* Use pointers

* Use pointers

* Update json_marshal_unmarshal.go

* Fix marshal

* Update json_marshal_unmarshal.go

* Log

* string total diff

* str

* marshal un

* set string

* json

* gaz

* Comment out optimistic status

* remove kiln flag here (#10269)

* Sync with develop

* Sync with develop

* clean ups

* refactor engine calls

* Update process_block.go

* Fix deadlock, uncomment duty opt sync

* Update proposer_execution_payload.go

* Sync with develop

* Rm post state check

* Bypass eth1 data checks

* Update proposer_execution_payload.go

* Return early if ttd is not reached

* Sync with develop

* Update process_block.go

* Update receive_block.go

* Update bzl

* Revert "Update receive_block.go"

This reverts commit 5b4a87c512.

* Fix run time

* add in all the fixes

* fix evaluator bugs

* latest fixes

* sum

* fix to be configurable

* Update go.mod

* Fix AltairCompatible to account for future state version

* Update proposer_execution_payload.go

* fix broken conditional checks

* fix all issues

* Handle pre state Altair with valid payload

* Handle pre state Altair with valid payload

* Log bellatrix fields

* Update log.go

* Revert "fix broken conditional checks"

This reverts commit e118db6c20.

* LH multiclient working

* Friendly fee recipient log

* Remove extra SetOptimisticToValid

* fix race

* fix test

* Fix base fee per gas

* Fix notifypayload headroot

* tx fuzzer

* clean up with develop branch

* save progress

* 200tx/block

* add LH flags

* Sync with develop

* cleanup

* cleanup

* hash

* fix build

* fix test

* fix go check

* fmt

* gosec

* Blocked stream

(cherry picked from commit f362af9862db680b6352692217ad5c08d44a1e86)

# Conflicts:
#	proto/prysm/v1alpha1/validator.pb.go

* remove duplicate param

* test

* revert some test changes

* Initial version of EE tx count

* evaluate all txs in epoch

* remove logs

* uncomment tests

* remove unwanted change

* parameterize ExpectedExecEngineTxsThreshold

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@zahoor.in>
Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-11 13:45:22 +00:00
terence tsao
d98428dec4 Can prune nodes from canonical and payload maps (#10496)
* Can prune nodes from canonical and payload maps

* Update store_test.go

* prune payload hashes, canonical nodes and better testing

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-04-11 11:08:50 +00:00
Nishant Das
00b92e01d3 Fetch Non Finalized Deposits Better (#10505) 2022-04-11 09:59:22 +02:00
Potuz
ca5adbf7e4 Fix optimistic logging (#10503) 2022-04-09 23:09:04 +00:00
Nishant Das
a083b7a0a5 Set Auth Differently In Our Powchain Constructor (#10501) 2022-04-09 11:03:05 +02:00
Raul Jordan
dd5995b665 Proper Connection Management for ETH JSON-RPC Client for Startup and Runtime (#10498)
* begin connection management revamp

* kiln runs

* retry

* reconnect

* add

* rpc connect fix

* remove logging

* logs

* retry

* default value for web3flag

* test pass

* comments

* ensure auth works
2022-04-09 09:28:40 +08:00
Taranpreet26311
6903d52dde Added horusec (#10499)
* Added horusec

* Improving aesthetics

* Restrict branch
2022-04-08 10:52:04 -04:00
Potuz
ac8d27bcf1 Deal with node balance underflow (#10492)
* deal with balance underflow

* fix log format

* preston's review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-04-07 17:38:48 +00:00
Nishant Das
8d6afb3afd Fix Issues From Multiclient E2E (#10486)
* fix it

* fix index addition

* fix sync issues

* make it nicer
2022-04-07 01:52:24 +00:00
86 changed files with 4112 additions and 1594 deletions

.github/workflows/horusec.yaml (new file, 22 lines)
View File

@@ -0,0 +1,22 @@
name: Horusec Security Scan
on:
  schedule:
    # Runs at 00:00 UTC every Sunday
    - cron: '0 0 * * SUN'
jobs:
  Horusec_Scan:
    name: horusec-Scan
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/develop'
    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with: # Required when commit authors is enabled
          fetch-depth: 0
      - name: Running Security Scan
        run: |
          curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"

View File

@@ -66,6 +66,7 @@ go_library(
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",

View File

@@ -138,6 +138,26 @@ var (
Name: "state_balance_cache_miss",
Help: "Count the number of state balance cache hits.",
})
newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_valid_node_count",
Help: "Count the number of valid nodes after newPayload EE call",
})
newPayloadOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_optimistic_node_count",
Help: "Count the number of optimistic nodes after newPayload EE call",
})
newPayloadInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_invalid_node_count",
Help: "Count the number of invalid nodes after newPayload EE call",
})
forkchoiceUpdatedValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_valid_node_count",
Help: "Count the number of valid nodes after forkchoiceUpdated EE call",
})
forkchoiceUpdatedOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_optimistic_node_count",
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
})
)
// reportSlotMetrics reports slot related metrics.

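The counters added in this hunk follow the standard Prometheus client pattern: each is created once at package scope via promauto and incremented after the corresponding engine API outcome (the matching Inc() calls appear in the forkchoice diff below). A minimal, self-contained sketch of that pattern, reusing one counter name from the hunk; the main wrapper is illustrative only, not Prysm source:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Created once at package scope; promauto registers it with the default registry,
// so it is exported wherever the process serves its metrics endpoint.
var newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
	Name: "new_payload_valid_node_count",
	Help: "Count the number of valid nodes after newPayload EE call",
})

func main() {
	// In the service code this increment happens after the execution engine
	// returns VALID for a newPayload call; here we simply bump it once.
	newPayloadValidNodeCount.Inc()
	fmt.Println("valid-node counter incremented")
}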
View File

@@ -46,31 +46,15 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
finalizedHash, err := s.getFinalizedPayloadHash(ctx, finalizedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block")
}
if finalizedBlock == nil || finalizedBlock.IsNil() {
finalizedBlock = s.getInitSyncBlock(s.ensureRootNotZeros(finalizedRoot))
if finalizedBlock == nil || finalizedBlock.IsNil() {
return nil, errors.Errorf("finalized block with root %#x does not exist in the db or our cache", s.ensureRootNotZeros(finalizedRoot))
}
}
var finalizedHash []byte
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
finalizedHash = params.BeaconConfig().ZeroHash[:]
} else {
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block execution payload")
}
finalizedHash = payload.BlockHash
return nil, errors.Wrap(err, "could not get finalized payload hash")
}
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: headPayload.BlockHash,
FinalizedBlockHash: finalizedHash,
FinalizedBlockHash: finalizedHash[:],
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
@@ -83,16 +67,18 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
default:
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
}
}
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
}
@@ -104,6 +90,55 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
return payloadID, nil
}
// getFinalizedPayloadHash returns the finalized payload hash for the given finalized block root.
// It checks the following in order:
// 1. The finalized block exists in db
// 2. The finalized block exists in initial sync block cache
// 3. The finalized block is the weak subjectivity block and exists in db
// Error is returned if the finalized block is not found from above.
func (s *Service) getFinalizedPayloadHash(ctx context.Context, finalizedRoot [32]byte) ([32]byte, error) {
b, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
if b != nil {
return getPayloadHash(b.Block())
}
b = s.getInitSyncBlock(finalizedRoot)
if b != nil {
return getPayloadHash(b.Block())
}
r, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
b, err = s.cfg.BeaconDB.Block(ctx, r)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
if b != nil {
return getPayloadHash(b.Block())
}
return [32]byte{}, errors.Errorf("finalized block with root %#x does not exist in the db or our cache", s.ensureRootNotZeros(finalizedRoot))
}
// getPayloadHash returns the payload hash for the input given block.
// zeros are returned if the block is older than bellatrix.
func getPayloadHash(b block.BeaconBlock) ([32]byte, error) {
if blocks.IsPreBellatrixVersion(b.Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := b.Body().ExecutionPayload()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block execution payload")
}
return bytesutil.ToBytes32(payload.BlockHash), nil
}
// notifyNewPayload signals the execution engine of a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postStateVersion int,
@@ -135,12 +170,14 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return false, nil
case powchain.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
@@ -157,7 +194,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
return false, errors.Wrap(err, "could not validate execution payload from execution engine")
}
}
newPayloadValidNodeCount.Inc()
// During the transition event, the transition block should be verified for sanity.
if blocks.IsPreBellatrixVersion(preStateVersion) {
// Handle case where pre-state is Altair but block contains payload.

View File

@@ -792,3 +792,74 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
require.NoError(t, err)
require.Equal(t, false, has)
}
func TestService_getFinalizedPayloadHash(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Use the block in DB
b := util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
h, err := service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// Use the block in init sync cache
b = util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hello"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
service.initSyncBlocks[r] = blk
h, err = service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// Use the weak subjectivity sync block
b = util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("howdy"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, r))
h, err = service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// None of the above should error
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, [32]byte{'a'}))
_, err = service.getFinalizedPayloadHash(ctx, [32]byte{'a'})
require.ErrorContains(t, "does not exist in the db or our cache", err)
}
func TestService_getPayloadHash(t *testing.T) {
// Pre-bellatrix
blk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
h, err := getPayloadHash(blk.Block())
require.NoError(t, err)
require.Equal(t, [32]byte{}, h)
// Post bellatrix
b := util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
h, err = getPayloadHash(blk.Block())
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(bytesutil.PadTo([]byte("hi"), 32)), h)
}

View File

@@ -127,8 +127,8 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -148,6 +148,9 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return err
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -609,9 +612,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/math"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
@@ -402,10 +403,13 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
return errors.Wrap(err, "could not cast eth1 deposit index")
}
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
return nil

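The hunk above switches the cache update to the state's Eth1DepositIndex and guards the uint64-to-int conversion with a checked cast (mathutil.Int) so an out-of-range value surfaces as an error instead of silently wrapping. A small, self-contained sketch of that guard; int64FromUint64 is a hypothetical stand-in for Prysm's math.Int helper, not its actual implementation:

package main

import (
	"errors"
	"fmt"
	"math"
)

// int64FromUint64 converts only when the value fits in a signed 64-bit integer.
func int64FromUint64(v uint64) (int64, error) {
	if v > uint64(math.MaxInt64) {
		return 0, errors.New("uint64 value overflows int64")
	}
	return int64(v), nil
}

func main() {
	// Eth1DepositIndex is a uint64 field on the beacon state.
	idx, err := int64FromUint64(7)
	if err != nil {
		panic(err)
	}
	fmt.Println("finalized eth1 deposit index:", idx)
}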
View File

@@ -1516,6 +1516,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(7))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
@@ -1529,8 +1530,64 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 9, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(109))
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(5))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
gs2 := gs.Copy()
assert.NoError(t, gs2.SetEth1Data(&ethpb.Eth1Data{DepositCount: 15}))
assert.NoError(t, gs2.SetEth1DepositIndex(12))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))
assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
// Insert 3 deposits before hand.
depositCache.InsertFinalizedDeposits(ctx, 2)
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(105))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
// Insert New Finalized State with higher deposit count.
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
fDeposits = depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}

View File

@@ -36,7 +36,7 @@ type DepositFetcher interface {
DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
FinalizedDeposits(ctx context.Context) *FinalizedDeposits
NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
}
// FinalizedDeposits stores the trie of deposits that have been included
@@ -246,7 +246,7 @@ func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposit
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
@@ -256,10 +256,9 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
return dc.allDeposits(untilBlk)
}
lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
var deposits []*ethpb.Deposit
for _, d := range dc.deposits {
if (d.Index > lastFinalizedDepositIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
deposits = append(deposits, d.Deposit)
}
}

View File

@@ -554,7 +554,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
})
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), nil)
deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
assert.Equal(t, 2, len(deps))
}
@@ -611,7 +611,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
})
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
assert.Equal(t, 1, len(deps))
}

View File

@@ -105,6 +105,7 @@ type HeadAccessDatabase interface {
// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
}

View File

@@ -155,7 +155,7 @@ func (_ *Service) FinalizedDeposits(_ context.Context) *depositcache.FinalizedDe
}
// NonFinalizedDeposits mocks out the deposit cache functionality for interop.
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ int64, _ *big.Int) []*ethpb.Deposit {
return []*ethpb.Deposit{}
}

View File

@@ -22,11 +22,13 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -3,7 +3,6 @@ package doublylinkedtree
import "errors"
var ErrNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")

View File

@@ -2,12 +2,15 @@ package doublylinkedtree
import (
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -250,9 +253,21 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
return ErrNilNode
}
if currentNode.balance < oldBalance {
return errInvalidBalance
f.store.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
"oldBalance": oldBalance,
"nodeBalance": currentNode.balance,
"nodeWeight": currentNode.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
"previousProposerBoostScore": f.store.previousProposerBoostScore,
}).Warning("node with invalid balance, setting it to zero")
f.store.proposerBoostLock.RUnlock()
currentNode.balance = 0
} else {
currentNode.balance -= oldBalance
}
currentNode.balance -= oldBalance
}
}

View File

@@ -58,6 +58,30 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{125, 125, 125}
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0},
{indexToHash(2), indexToHash(2), 0},
{indexToHash(3), indexToHash(3), 0},
}
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_IsCanonical(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()

View File

@@ -3,9 +3,12 @@ package doublylinkedtree
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
var (
log = logrus.WithField("prefix", "forkchoice-doublylinkedtree")
headSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_head_slot",
@@ -48,10 +51,4 @@ var (
Help: "The number of times pruning happened.",
},
)
validatedCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_validated_count",
Help: "The number of blocks that have been fully validated.",
},
)
)

View File

@@ -120,7 +120,6 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
}
n.optimistic = false
validatedCount.Inc()
return n.parent.setNodeAndParentValidated(ctx)
}

View File

@@ -22,12 +22,14 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -3,9 +3,12 @@ package protoarray
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
var (
log = logrus.WithField("prefix", "forkchoice-protoarray")
headSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "proto_array_head_slot",
@@ -48,10 +51,4 @@ var (
Help: "The number of times pruning happened.",
},
)
validatedNodesCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proto_array_validated_nodes_count",
Help: "The number of nodes that have been fully validated.",
},
)
)

View File

@@ -43,7 +43,6 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) er
if index == NonExistentNode {
break
}
validatedNodesCount.Inc()
}
return nil
}

View File

@@ -9,8 +9,10 @@ import (
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pmath "github.com/prysmaticlabs/prysm/math"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -426,6 +428,16 @@ func (s *Store) applyWeightChanges(
if nodeDelta < 0 {
d := uint64(-nodeDelta)
if n.weight < d {
s.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeDelta": d,
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(n.root[:])),
"nodeWeight": n.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(s.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(s.previousProposerBoostRoot[:])),
"previousProposerBoostScore": s.previousProposerBoostScore,
}).Warning("node with invalid weight, setting it to zero")
s.proposerBoostLock.RUnlock()
n.weight = 0
} else {
n.weight -= d
@@ -602,16 +614,22 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
node := copyNode(s.nodes[idx])
parentIdx, ok := canonicalNodesMap[node.parent]
if ok {
s.nodesIndices[node.root] = uint64(len(canonicalNodes))
canonicalNodesMap[idx] = uint64(len(canonicalNodes))
currentIndex := uint64(len(canonicalNodes))
s.nodesIndices[node.root] = currentIndex
s.payloadIndices[node.payloadHash] = currentIndex
canonicalNodesMap[idx] = currentIndex
node.parent = parentIdx
canonicalNodes = append(canonicalNodes, node)
} else {
// Remove node and synced tip that is not part of finalized branch.
// Remove node that is not part of finalized branch.
delete(s.nodesIndices, node.root)
delete(s.canonicalNodes, node.root)
delete(s.payloadIndices, node.payloadHash)
}
}
s.nodesIndices[finalizedRoot] = uint64(0)
s.canonicalNodes[finalizedRoot] = true
s.payloadIndices[finalizedNode.payloadHash] = uint64(0)
// Recompute the best child and descendant for each canonical nodes.
for _, node := range canonicalNodes {

View File

@@ -375,7 +375,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
parent: uint64(numOfNodes - 2),
})
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
s := &Store{nodes: nodes, nodesIndices: indices}
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
@@ -413,7 +413,7 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
parent: uint64(numOfNodes - 2),
})
s := &Store{nodes: nodes, nodesIndices: indices}
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
// Finalized root is at index 11 so everything before 11 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
@@ -441,6 +441,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
bestDescendant: 1,
root: indexToHash(uint64(0)),
parent: NonExistentNode,
payloadHash: [32]byte{'A'},
},
{
slot: 101,
@@ -448,6 +449,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
parent: 0,
payloadHash: [32]byte{'B'},
},
{
slot: 101,
@@ -455,6 +457,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
parent: 0,
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
payloadHash: [32]byte{'C'},
},
}
s := &Store{
@@ -465,9 +468,22 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
indexToHash(uint64(1)): 1,
indexToHash(uint64(2)): 2,
},
canonicalNodes: map[[32]byte]bool{
indexToHash(uint64(0)): true,
indexToHash(uint64(1)): true,
indexToHash(uint64(2)): true,
},
payloadIndices: map[[32]byte]uint64{
[32]byte{'A'}: 0,
[32]byte{'B'}: 1,
[32]byte{'C'}: 2,
},
}
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
require.Equal(t, len(s.nodes), 1)
require.Equal(t, 1, len(s.nodes))
require.Equal(t, 1, len(s.nodesIndices))
require.Equal(t, 1, len(s.canonicalNodes))
require.Equal(t, 1, len(s.payloadIndices))
}
// This test starts with the following branching diagram
@@ -482,25 +498,74 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
// J -- K -- L
//
//
func TestStore_PruneSyncedTips(t *testing.T) {
func TestStore_PruneBranched(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
f.store.pruneThreshold = 0
require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
require.Equal(t, 1, f.NodeCount())
tests := []struct {
finalizedRoot [32]byte
wantedCanonical [32]byte
wantedNonCanonical [32]byte
canonicalCount int
payloadHash [32]byte
payloadIndex uint64
nonExistentPayload [32]byte
}{
{
[32]byte{'f'},
[32]byte{'f'},
[32]byte{'a'},
1,
[32]byte{'F'},
0,
[32]byte{'H'},
},
{
[32]byte{'d'},
[32]byte{'e'},
[32]byte{'i'},
3,
[32]byte{'E'},
1,
[32]byte{'C'},
},
{
[32]byte{'b'},
[32]byte{'f'},
[32]byte{'h'},
5,
[32]byte{'D'},
3,
[32]byte{'A'},
},
}
for _, tc := range tests {
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
f.store.pruneThreshold = 0
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'f'}))
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
require.Equal(t, true, f.IsCanonical([32]byte{'f'}))
require.NoError(t, f.Prune(ctx, tc.finalizedRoot))
require.Equal(t, tc.canonicalCount, len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical(tc.wantedCanonical))
require.Equal(t, false, f.IsCanonical(tc.wantedNonCanonical))
require.Equal(t, tc.payloadIndex, f.store.payloadIndices[tc.payloadHash])
_, ok := f.store.payloadIndices[tc.nonExistentPayload]
require.Equal(t, false, ok)
}
}
func TestStore_LeadsToViableHead(t *testing.T) {

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"auth.go",
"block_cache.go",
"block_reader.go",
"check_transition_config.go",
@@ -16,6 +15,7 @@ go_library(
"options.go",
"prometheus.go",
"provider.go",
"rpc_connection.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain",
@@ -59,7 +59,6 @@ go_library(
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -74,7 +73,6 @@ go_test(
name = "go_default_test",
size = "medium",
srcs = [
"auth_test.go",
"block_cache_test.go",
"block_reader_test.go",
"check_transition_config_test.go",
@@ -125,7 +123,6 @@ go_test(
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_ethereum_go_ethereum//trie:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/network"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/sirupsen/logrus"
)
@@ -81,7 +82,7 @@ func (s *Service) checkTransitionConfiguration(
return
}
case tm := <-ticker.C:
ctx, cancel := context.WithDeadline(ctx, tm.Add(DefaultRPCHTTPTimeout))
ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
err = s.ExchangeTransitionConfiguration(ctx, cfg)
s.handleExchangeConfigurationError(err)
if !hasTtdReached {

View File

@@ -10,9 +10,12 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -49,8 +52,8 @@ type EngineCaller interface {
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error)
}
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
@@ -174,6 +177,78 @@ func (s *Service) ExchangeTransitionConfiguration(
return nil
}
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
//
// Spec code:
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
// # `pow_chain` abstractly represents all blocks in the PoW chain
// for block in pow_chain:
// parent = pow_chain[block.parent_hash]
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// if block_reached_ttd and not parent_reached_ttd:
// return block
//
// return None
func (s *Service) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := s.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
if ctx.Err() != nil {
return nil, false, ctx.Err()
}
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if parentBlk == nil {
return nil, false, errors.New("parent execution block is nil")
}
if blockReachedTTD {
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
log.WithFields(logrus.Fields{
"number": blk.Number,
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
"td": blk.TotalDifficulty,
"parentTd": parentBlk.TotalDifficulty,
"ttd": terminalTotalDifficulty,
}).Info("Retrieved terminal block hash")
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
}
// LatestExecutionBlock fetches the latest execution engine block by calling
// eth_blockByNumber via JSON-RPC.
func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
@@ -251,3 +326,15 @@ func isTimeout(e error) bool {
t, ok := e.(httpTimeoutError)
return ok && t.Timeout()
}
func tDStringToUint256(td string) (*uint256.Int, error) {
b, err := hexutil.DecodeBig(td)
if err != nil {
return nil, err
}
i, overflows := uint256.FromBig(b)
if overflows {
return nil, errors.New("total difficulty overflowed")
}
return i, nil
}

View File

@@ -418,6 +418,155 @@ func TestClient_HTTP(t *testing.T) {
})
}
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
tests := []struct {
name string
paramsTd string
currentPowBlock *pb.ExecutionBlock
parentPowBlock *pb.ExecutionBlock
errLatestExecutionBlk error
wantTerminalBlockHash []byte
wantExists bool
errString string
}{
{
name: "config td overflows",
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
errString: "could not convert terminal total difficulty to uint256",
},
{
name: "could not get latest execution block",
paramsTd: "1",
errLatestExecutionBlk: errors.New("blah"),
errString: "could not get latest execution block",
},
{
name: "nil latest execution block",
paramsTd: "1",
errString: "latest execution block is nil",
},
{
name: "current execution block invalid TD",
paramsTd: "1",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "current execution block has zero hash parent",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: params.BeaconConfig().ZeroHash[:],
TotalDifficulty: "0x3",
},
},
{
name: "could not get parent block",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
errString: "could not get parent execution block",
},
{
name: "parent execution block invalid TD",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "1",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "happy case",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
wantExists: true,
wantTerminalBlockHash: []byte{'a'},
},
{
name: "ttd not reached",
paramsTd: "3",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x2",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = tt.paramsTd
params.OverrideBeaconConfig(cfg)
var m map[[32]byte]*pb.ExecutionBlock
if tt.parentPowBlock != nil {
m = map[[32]byte]*pb.ExecutionBlock{
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
}
}
client := mocks.EngineClient{
ErrLatestExecBlock: tt.errLatestExecutionBlk,
ExecutionBlock: tt.currentPowBlock,
BlockByHashMap: m,
}
b, e, err := client.GetTerminalBlockHash(context.Background())
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tt.wantExists, e)
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
}
})
}
}
func Test_tDStringToUint256(t *testing.T) {
i, err := tDStringToUint256("0x0")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(0), i)
i, err = tDStringToUint256("0x10000")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(65536), i)
_, err = tDStringToUint256("100")
require.ErrorContains(t, "hex string without 0x prefix", err)
_, err = tDStringToUint256("0xzzzzzz")
require.ErrorContains(t, "invalid hex string", err)
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
require.ErrorContains(t, "hex number > 256 bits", err)
}
func TestExchangeTransitionConfiguration(t *testing.T) {
fix := fixtures()
ctx := context.Background()

View File

@@ -1,9 +1,6 @@
package powchain
import (
"net/http"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
@@ -11,11 +8,9 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
)
// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
const DefaultRPCHTTPTimeout = time.Second * 6
type Option func(s *Service) error
// WithHttpEndpoints deduplicates and parses http endpoints for the powchain service to use,
@@ -38,20 +33,29 @@ func WithHttpEndpoints(endpointStrings []string) Option {
}
}
// WithJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithJWTSecret(secret []byte) Option {
return func(c *Service) error {
// WithHttpEndpointsAndJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithHttpEndpointsAndJWTSecret(endpointStrings []string, secret []byte) Option {
return func(s *Service) error {
if len(secret) == 0 {
return nil
}
authTransport := &jwtTransport{
underlyingTransport: http.DefaultTransport,
jwtSecret: secret,
stringEndpoints := dedupEndpoints(endpointStrings)
endpoints := make([]network.Endpoint, len(stringEndpoints))
// Overwrite authorization type for all endpoints to be of a bearer
// type.
for i, e := range stringEndpoints {
hEndpoint := HttpEndpoint(e)
hEndpoint.Auth.Method = authorization.Bearer
hEndpoint.Auth.Value = string(secret)
endpoints[i] = hEndpoint
}
c.cfg.httpRPCClient = &http.Client{
Timeout: DefaultRPCHTTPTimeout,
Transport: authTransport,
// Select first http endpoint in the provided list.
var currEndpoint network.Endpoint
if len(endpointStrings) > 0 {
currEndpoint = endpoints[0]
}
s.cfg.httpEndpoints = endpoints
s.cfg.currHttpEndpoint = currEndpoint
return nil
}
}
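For orientation (not part of the diff), a minimal sketch of how this option is expected to be wired up at service construction, mirroring the FlagOptions change later in this diff; it assumes a context.Context ctx and logger in scope, and the endpoint URL and secret below are placeholder values:
endpoints := []string{"http://localhost:8551"}          // placeholder endpoint URL
jwtSecret := []byte("0123456789abcdef0123456789abcdef") // placeholder 32-byte secret
svc, err := powchain.NewService(ctx,
	powchain.WithHttpEndpointsAndJWTSecret(endpoints, jwtSecret),
)
if err != nil {
	log.WithError(err).Fatal("Could not initialize powchain service")
}
_ = svc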

View File

@@ -0,0 +1,176 @@
package powchain
import (
"context"
"fmt"
"net/url"
"time"
"github.com/ethereum/go-ethereum/ethclient"
gethRPC "github.com/ethereum/go-ethereum/rpc"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
"github.com/prysmaticlabs/prysm/io/logs"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
)
func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
client, err := s.newRPCClientWithAuth(ctx, currEndpoint)
if err != nil {
return errors.Wrap(err, "could not dial execution node")
}
// Attach the clients to the service struct.
fetcher := ethclient.NewClient(client)
s.rpcClient = client
s.httpLogger = fetcher
s.eth1DataFetcher = fetcher
depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, fetcher)
if err != nil {
client.Close()
return errors.Wrap(err, "could not initialize deposit contract caller")
}
s.depositContractCaller = depositContractCaller
// Ensure we have the correct chain and deposit IDs.
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
client.Close()
return errors.Wrap(err, "could not make initial request to verify execution chain ID")
}
s.updateConnectedETH1(true)
s.runError = nil
return nil
}
// Every backOffPeriod seconds, this attempts to re-establish the execution client
// connection. If that fails, we fall back to the next endpoint, if one is defined.
func (s *Service) pollConnectionStatus(ctx context.Context) {
// Use a custom logger to only log errors
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
ticker := time.NewTicker(backOffPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
errorLogger(err, "Could not connect to execution client endpoint")
s.runError = err
s.fallbackToNextEndpoint()
}
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")
return
}
}
}
// Forces a retry of the execution client connection.
func (s *Service) retryExecutionClientConnection(ctx context.Context, err error) {
s.runError = err
s.updateConnectedETH1(false)
// Back off for a while before redialing.
time.Sleep(backOffPeriod)
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
s.runError = err
return
}
// Reset run error in the event of a successful connection.
s.runError = nil
}
// This performs a health check on our primary endpoint, and if it
// is ready to serve, we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint(ctx context.Context) {
primaryEndpoint := s.cfg.httpEndpoints[0]
// Return early if we are running on our primary
// endpoint.
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
return
}
if err := s.setupExecutionClientConnections(ctx, primaryEndpoint); err != nil {
log.Debugf("Primary endpoint not ready: %v", err)
return
}
s.updateCurrHttpEndpoint(primaryEndpoint)
}
// This is an inefficient way to search for the next endpoint, but given N is
// expected to be small, it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
currEndpoint := s.cfg.currHttpEndpoint
currIndex := 0
totalEndpoints := len(s.cfg.httpEndpoints)
for i, endpoint := range s.cfg.httpEndpoints {
if endpoint.Equals(currEndpoint) {
currIndex = i
break
}
}
nextIndex := currIndex + 1
if nextIndex >= totalEndpoints {
nextIndex = 0
}
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
if nextIndex != currIndex {
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
}
}
// Initializes an RPC connection with authentication headers.
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
// Handle both IPC and HTTP URL schemes.
var client *gethRPC.Client
u, err := url.Parse(endpoint.Url)
if err != nil {
return nil, err
}
switch u.Scheme {
case "http", "https":
client, err = gethRPC.DialHTTPWithClient(endpoint.Url, endpoint.HttpClient())
if err != nil {
return nil, err
}
case "":
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {
return nil, err
}
client.SetHeader("Authorization", header)
}
return client, nil
}
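A brief illustrative sketch (not part of the diff) of the endpoint shape this dialer expects, built with the same helpers used in the options change above; the URL and secret are placeholders and the call is assumed to run inside a method returning error:
ep := HttpEndpoint("http://localhost:8551") // placeholder URL
ep.Auth.Method = authorization.Bearer
ep.Auth.Value = "0123456789abcdef0123456789abcdef" // placeholder JWT secret
client, err := s.newRPCClientWithAuth(ctx, ep)
if err != nil {
	return errors.Wrap(err, "could not dial execution node")
}
defer client.Close()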
// Checks the chain ID of the execution client to ensure
// it matches the chain ID Prysm expects from its local configuration.
func ensureCorrectExecutionChain(ctx context.Context, client *ethclient.Client) error {
cID, err := client.ChainID(ctx)
if err != nil {
return err
}
wantChainID := params.BeaconConfig().DepositChainID
if cID.Uint64() != wantChainID {
return fmt.Errorf("wanted chain ID %d, got %d", wantChainID, cID.Uint64())
}
return nil
}

View File

@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"math/big"
"net/http"
"reflect"
"runtime/debug"
"sort"
@@ -39,10 +38,8 @@ import (
"github.com/prysmaticlabs/prysm/container/trie"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/io/logs"
"github.com/prysmaticlabs/prysm/monitoring/clientstats"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
@@ -114,6 +111,7 @@ type Chain interface {
// RPCDataFetcher defines a subset of methods conformed to by ETH1.0 RPC clients for
// fetching eth1 data from the clients.
type RPCDataFetcher interface {
Close()
HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
@@ -121,6 +119,7 @@ type RPCDataFetcher interface {
// RPCClient defines the rpc methods required to interact with the eth1 node.
type RPCClient interface {
Close()
BatchCall(b []gethRPC.BatchElem) error
CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
}
@@ -135,7 +134,6 @@ type config struct {
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
httpEndpoints []network.Endpoint
httpRPCClient *http.Client
currHttpEndpoint network.Endpoint
finalizedStateAtStartup state.BeaconState
}
@@ -228,14 +226,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Start a web3 service's main event loop.
func (s *Service) Start() {
if err := s.connectToPowChain(); err != nil {
log.WithError(err).Fatal("Could not connect to execution endpoint")
if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
log.WithError(err).Error("Could not connect to execution endpoint")
}
log.WithFields(logrus.Fields{
"endpoint": logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url),
}).Info("Connected to Ethereum execution client RPC")
// If the chain has not started already and we don't have access to eth1 nodes, we will not be
// able to generate the genesis state.
if !s.chainStartData.Chainstarted && s.cfg.currHttpEndpoint.Url == "" {
@@ -253,7 +246,7 @@ func (s *Service) Start() {
s.isRunning = true
// Poll the execution client connection and fallback if errors occur.
go s.pollConnectionStatus()
go s.pollConnectionStatus(s.ctx)
// Check transition configuration for the engine API client in the background.
go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))
@@ -266,7 +259,12 @@ func (s *Service) Stop() error {
if s.cancel != nil {
defer s.cancel()
}
s.closeClients()
if s.rpcClient != nil {
s.rpcClient.Close()
}
if s.eth1DataFetcher != nil {
s.eth1DataFetcher.Close()
}
return nil
}
@@ -338,10 +336,7 @@ func (s *Service) CurrentETH1Endpoint() string {
// CurrentETH1ConnectionError returns the error (if any) of the current connection.
func (s *Service) CurrentETH1ConnectionError() error {
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
httpClient.Close()
rpcClient.Close()
return err
return s.runError
}
// ETH1Endpoints returns the slice of HTTP endpoint URLs (default is 0th element).
@@ -358,10 +353,17 @@ func (s *Service) ETH1Endpoints() []string {
func (s *Service) ETH1ConnectionErrors() []error {
var errs []error
for _, ep := range s.cfg.httpEndpoints {
httpClient, rpcClient, err := s.dialETH1Nodes(ep)
httpClient.Close()
rpcClient.Close()
errs = append(errs, err)
client, err := s.newRPCClientWithAuth(s.ctx, ep)
if err != nil {
errs = append(errs, err)
continue
}
if err := ensureCorrectExecutionChain(s.ctx, ethclient.NewClient(client)); err != nil {
client.Close()
errs = append(errs, err)
continue
}
client.Close()
}
return errs
}
@@ -376,146 +378,6 @@ func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
return latestValidBlock, nil
}
func (s *Service) connectToPowChain() error {
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
if err != nil {
return errors.Wrap(err, "could not dial execution node")
}
depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, httpClient)
if err != nil {
return errors.Wrap(err, "could not initialize deposit contract caller")
}
if httpClient == nil || rpcClient == nil || depositContractCaller == nil {
return errors.New("execution client RPC is nil")
}
s.httpLogger = httpClient
s.eth1DataFetcher = httpClient
s.depositContractCaller = depositContractCaller
s.rpcClient = rpcClient
s.updateConnectedETH1(true)
s.runError = nil
return nil
}
func (s *Service) dialETH1Nodes(endpoint network.Endpoint) (*ethclient.Client, *gethRPC.Client, error) {
httpRPCClient, err := gethRPC.Dial(endpoint.Url)
if err != nil {
return nil, nil, err
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {
return nil, nil, err
}
httpRPCClient.SetHeader("Authorization", header)
}
httpClient := ethclient.NewClient(httpRPCClient)
// Add a method to clean-up and close clients in the event
// of any connection failure.
closeClients := func() {
httpRPCClient.Close()
httpClient.Close()
}
// Make a simple call to ensure we are actually connected to a working node.
cID, err := httpClient.ChainID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
nID, err := httpClient.NetworkID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
if cID.Uint64() != params.BeaconConfig().DepositChainID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect chain id, %d != %d", cID.Uint64(), params.BeaconConfig().DepositChainID)
}
if nID.Uint64() != params.BeaconConfig().DepositNetworkID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect network id, %d != %d", nID.Uint64(), params.BeaconConfig().DepositNetworkID)
}
return httpClient, httpRPCClient, nil
}
// closes down our active eth1 clients.
func (s *Service) closeClients() {
gethClient, ok := s.rpcClient.(*gethRPC.Client)
if ok {
gethClient.Close()
}
httpClient, ok := s.eth1DataFetcher.(*ethclient.Client)
if ok {
httpClient.Close()
}
}
func (s *Service) pollConnectionStatus() {
// Use a custom logger to only log errors
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
ticker := time.NewTicker(backOffPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
errConnect := s.connectToPowChain()
if errConnect != nil {
errorLogger(errConnect, "Could not connect to powchain endpoint")
s.runError = errConnect
s.fallbackToNextEndpoint()
continue
}
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")
return
}
}
}
// checks if the eth1 node is healthy and ready to serve before
// fetching data from it.
func (s *Service) isEth1NodeSynced() (bool, error) {
syncProg, err := s.eth1DataFetcher.SyncProgress(s.ctx)
if err != nil {
return false, err
}
if syncProg != nil {
return false, nil
}
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
return false, err
}
return !eth1HeadIsBehind(head.Time), nil
}
// Reconnect to eth1 node in case of any failure.
func (s *Service) retryETH1Node(err error) {
s.runError = err
s.updateConnectedETH1(false)
// Back off for a while before
// resuming dialing the eth1 node.
time.Sleep(backOffPeriod)
if err := s.connectToPowChain(); err != nil {
s.runError = err
return
}
// Reset run error in the event of a successful connection.
s.runError = nil
}
func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositContainer) error {
if len(ctrs) == 0 {
return nil
@@ -650,7 +512,7 @@ func (s *Service) handleETH1FollowDistance() {
fiveMinutesTimeout := prysmTime.Now().Add(-5 * time.Minute)
// check that web3 client is syncing
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) {
log.Warn("eth1 client is not syncing")
log.Warn("Execution client is not syncing")
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
@@ -680,6 +542,15 @@ func (s *Service) handleETH1FollowDistance() {
}
func (s *Service) initPOWService() {
// Use a custom logger to only log errors
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
// Run in a select loop to retry in the event of any failures.
for {
@@ -690,8 +561,8 @@ func (s *Service) initPOWService() {
ctx := s.ctx
header, err := s.eth1DataFetcher.HeaderByNumber(ctx, nil)
if err != nil {
log.Errorf("Unable to retrieve latest ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to retrieve latest execution client header")
continue
}
@@ -700,14 +571,14 @@ func (s *Service) initPOWService() {
s.latestEth1Data.BlockTime = header.Time
if err := s.processPastLogs(ctx); err != nil {
log.Errorf("Unable to process past logs %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to process past deposit contract logs")
continue
}
// Cache eth1 headers from our voting period.
if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
log.Errorf("Unable to process past headers %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to cache headers for execution client votes")
continue
}
// Handle edge case with embedded genesis state by fetching genesis header to determine
@@ -720,15 +591,15 @@ func (s *Service) initPOWService() {
if genHash != [32]byte{} {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
if err != nil {
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to retrieve proof-of-stake genesis block data")
continue
}
genBlock = genHeader.Number.Uint64()
}
s.chainStartData.GenesisBlock = genBlock
if err := s.savePowchainData(ctx); err != nil {
log.Errorf("Unable to save powchain data: %v", err)
errorLogger(err, "Unable to save execution client data")
}
}
return
@@ -757,17 +628,16 @@ func (s *Service) run(done <-chan struct{}) {
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
log.WithError(err).Debug("Could not fetch latest eth1 header")
s.retryETH1Node(err)
continue
}
if eth1HeadIsBehind(head.Time) {
s.retryExecutionClientConnection(s.ctx, err)
log.WithError(errFarBehind).Debug("Could not get an up-to-date eth1 header")
s.retryETH1Node(errFarBehind)
continue
}
s.processBlockHeader(head)
s.handleETH1FollowDistance()
s.checkDefaultEndpoint()
s.checkDefaultEndpoint(s.ctx)
case <-chainstartTicker.C:
if s.chainStartData.Chainstarted {
chainstartTicker.Stop()
@@ -854,59 +724,6 @@ func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock
return hdr.Number.Uint64(), nil
}
// This performs a health check on our primary endpoint, and if it
// is ready to serve we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint() {
primaryEndpoint := s.cfg.httpEndpoints[0]
// Return early if we are running on our primary
// endpoint.
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
return
}
httpClient, rpcClient, err := s.dialETH1Nodes(primaryEndpoint)
if err != nil {
log.Debugf("Primary endpoint not ready: %v", err)
return
}
log.Info("Primary endpoint ready again, switching back to it")
// Close the clients and let our main connection routine
// properly connect with it.
httpClient.Close()
rpcClient.Close()
// Close current active clients.
s.closeClients()
// Switch back to primary endpoint and try connecting
// to it again.
s.updateCurrHttpEndpoint(primaryEndpoint)
s.retryETH1Node(nil)
}
// This is an inefficient way to search for the next endpoint, but given N is expected to be
// small ( < 25), it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
currEndpoint := s.cfg.currHttpEndpoint
currIndex := 0
totalEndpoints := len(s.cfg.httpEndpoints)
for i, endpoint := range s.cfg.httpEndpoints {
if endpoint.Equals(currEndpoint) {
currIndex = i
break
}
}
nextIndex := currIndex + 1
if nextIndex >= totalEndpoints {
nextIndex = 0
}
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
if nextIndex != currIndex {
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
}
}
// initializes our service from the provided eth1data object, populating all the relevant
// fields and data.
func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ETH1ChainData) error {

View File

@@ -42,6 +42,8 @@ type goodLogger struct {
backend *backends.SimulatedBackend
}
func (_ *goodLogger) Close() {}
func (g *goodLogger) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
if g.backend == nil {
return new(event.Feed).Subscribe(ch), nil
@@ -80,6 +82,8 @@ type goodFetcher struct {
backend *backends.SimulatedBackend
}
func (_ *goodFetcher) Close() {}
func (g *goodFetcher) HeaderByHash(_ context.Context, hash common.Hash) (*gethTypes.Header, error) {
if bytes.Equal(hash.Bytes(), common.BytesToHash([]byte{0}).Bytes()) {
return nil, fmt.Errorf("expected block hash to be nonzero %v", hash)
@@ -225,10 +229,6 @@ func TestService_Eth1Synced(t *testing.T) {
now := time.Now()
assert.NoError(t, testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0))))
testAcc.Backend.Commit()
synced, err := web3Service.isEth1NodeSynced()
require.NoError(t, err)
assert.Equal(t, true, synced, "Expected eth1 nodes to be synced")
}
func TestFollowBlock_OK(t *testing.T) {
@@ -480,8 +480,8 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
s.chainStartData.Chainstarted = true
require.NoError(t, s.initDepositCaches(context.Background(), ctrs))
deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), nil)
fDeposits := s.cfg.depositCache.FinalizedDeposits(ctx)
deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), fDeposits.MerkleTrieIndex, nil)
assert.Equal(t, 0, len(deps))
}

View File

@@ -26,6 +26,7 @@ go_library(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -2,25 +2,32 @@ package testing
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
// EngineClient --
type EngineClient struct {
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
ErrExecBlockByHash error
ErrForkchoiceUpdated error
ErrNewPayload error
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
ErrExecBlockByHash error
ErrForkchoiceUpdated error
ErrNewPayload error
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
TerminalBlockHash []byte
TerminalBlockHashExists bool
}
// NewPayload --
@@ -58,3 +65,52 @@ func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash) (*
}
return b, e.ErrExecBlockByHash
}
// GetTerminalBlockHash --
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := e.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
b, err := hexutil.DecodeBig(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
currentTotalDifficulty, _ := uint256.FromBig(b)
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if blockReachedTTD {
b, err := hexutil.DecodeBig(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentTotalDifficulty, _ := uint256.FromBig(b)
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
}

View File

@@ -144,6 +144,8 @@ type RPCClient struct {
Backend *backends.SimulatedBackend
}
func (_ *RPCClient) Close() {}
func (*RPCClient) CallContext(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
return nil
}

View File

@@ -77,7 +77,6 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -162,7 +161,6 @@ go_test(
"@com_github_d4l3k_messagediff//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -37,17 +37,24 @@ func (vs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethp
case version.Phase0:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlock")
continue
}
b.Block = &ethpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
case version.Altair:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *v2.SignedBeaconBlockAltair")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockAltair")
continue
}
b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
case version.Bellatrix:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockBellatrix)
if !ok {
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockBellatrix")
continue
}
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: phBlk}
}
if err := stream.Send(b); err != nil {

View File

@@ -161,7 +161,7 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1
finalizedDeposits := vs.DepositFetcher.FinalizedDeposits(ctx)
depositTrie = finalizedDeposits.Deposits
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, canonicalEth1DataHeight)
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, finalizedDeposits.MerkleTrieIndex, canonicalEth1DataHeight)
insertIndex := finalizedDeposits.MerkleTrieIndex + 1
for _, dep := range upToEth1DataDeposits {

View File

@@ -3,11 +3,7 @@ package validator
import (
"bytes"
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -181,79 +177,7 @@ func (vs *Server) getTerminalBlockHashIfExists(ctx context.Context) ([]byte, boo
return terminalBlockHash.Bytes(), true, nil
}
return vs.getPowBlockHashAtTerminalTotalDifficulty(ctx)
}
// This returns the valid terminal block hash based on total difficulty.
//
// Spec code:
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
// # `pow_chain` abstractly represents all blocks in the PoW chain
// for block in pow_chain:
// parent = pow_chain[block.parent_hash]
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// if block_reached_ttd and not parent_reached_ttd:
// return block
//
// return None
func (vs *Server) getPowBlockHashAtTerminalTotalDifficulty(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := vs.ExecutionEngineCaller.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
if ctx.Err() != nil {
return nil, false, ctx.Err()
}
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := vs.ExecutionEngineCaller.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if parentBlk == nil {
return nil, false, errors.New("parent execution block is nil")
}
if blockReachedTTD {
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
log.WithFields(logrus.Fields{
"number": blk.Number,
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
"td": blk.TotalDifficulty,
"parentTd": parentBlk.TotalDifficulty,
"ttd": terminalTotalDifficulty,
}).Info("Retrieved terminal block hash")
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
return vs.ExecutionEngineCaller.GetTerminalBlockHash(ctx)
}
// activationEpochNotReached returns true if the activation epoch has not been reached.
@@ -270,18 +194,6 @@ func activationEpochNotReached(slot types.Slot) bool {
return false
}
func tDStringToUint256(td string) (*uint256.Int, error) {
b, err := hexutil.DecodeBig(td)
if err != nil {
return nil, err
}
i, overflows := uint256.FromBig(b)
if overflows {
return nil, errors.New("total difficulty overflowed")
}
return i, nil
}
func emptyPayload() *enginev1.ExecutionPayload {
return &enginev1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
types "github.com/prysmaticlabs/eth2-types"
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
@@ -22,26 +21,6 @@ import (
"github.com/prysmaticlabs/prysm/testing/util"
)
func Test_tDStringToUint256(t *testing.T) {
i, err := tDStringToUint256("0x0")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(0), i)
i, err = tDStringToUint256("0x10000")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(65536), i)
_, err = tDStringToUint256("100")
require.ErrorContains(t, "hex string without 0x prefix", err)
_, err = tDStringToUint256("0xzzzzzz")
require.ErrorContains(t, "invalid hex string", err)
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
require.ErrorContains(t, "hex number > 256 bits", err)
}
func TestServer_activationEpochNotReached(t *testing.T) {
require.Equal(t, false, activationEpochNotReached(0))
@@ -154,137 +133,6 @@ func TestServer_getExecutionPayload(t *testing.T) {
}
}
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
tests := []struct {
name string
paramsTd string
currentPowBlock *pb.ExecutionBlock
parentPowBlock *pb.ExecutionBlock
errLatestExecutionBlk error
wantTerminalBlockHash []byte
wantExists bool
errString string
}{
{
name: "config td overflows",
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
errString: "could not convert terminal total difficulty to uint256",
},
{
name: "could not get latest execution block",
paramsTd: "1",
errLatestExecutionBlk: errors.New("blah"),
errString: "could not get latest execution block",
},
{
name: "nil latest execution block",
paramsTd: "1",
errString: "latest execution block is nil",
},
{
name: "current execution block invalid TD",
paramsTd: "1",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "current execution block has zero hash parent",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: params.BeaconConfig().ZeroHash[:],
TotalDifficulty: "0x3",
},
},
{
name: "could not get parent block",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
errString: "could not get parent execution block",
},
{
name: "parent execution block invalid TD",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "1",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "happy case",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
wantExists: true,
wantTerminalBlockHash: []byte{'a'},
},
{
name: "ttd not reached",
paramsTd: "3",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x2",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = tt.paramsTd
params.OverrideBeaconConfig(cfg)
var m map[[32]byte]*pb.ExecutionBlock
if tt.parentPowBlock != nil {
m = map[[32]byte]*pb.ExecutionBlock{
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
}
}
vs := &Server{
ExecutionEngineCaller: &powtesting.EngineClient{
ErrLatestExecBlock: tt.errLatestExecutionBlk,
ExecutionBlock: tt.currentPowBlock,
BlockByHashMap: m,
},
}
b, e, err := vs.getPowBlockHashAtTerminalTotalDifficulty(context.Background())
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tt.wantExists, e)
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
}
})
}
}
func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
tests := []struct {
name string

View File

@@ -15,7 +15,7 @@ var (
HTTPWeb3ProviderFlag = &cli.StringFlag{
Name: "http-web3provider",
Usage: "A mainchain web3 provider string http endpoint. Can contain auth header as well in the format --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Basic xxx\" for project secret (base64 encoded) and --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Bearer xxx\" for jwt use",
Value: "",
Value: "http://localhost:8545",
}
// ExecutionJWTSecretFlag provides a path to a file containing a hex-encoded string representing a 32 byte secret
// used to authenticate with an execution node via HTTP. This is required if using an HTTP connection, otherwise all requests

View File

@@ -27,7 +27,7 @@ func FlagOptions(c *cli.Context) ([]powchain.Option, error) {
powchain.WithEth1HeaderRequestLimit(c.Uint64(flags.Eth1HeaderReqLimit.Name)),
}
if len(jwtSecret) > 0 {
opts = append(opts, powchain.WithJWTSecret(jwtSecret))
opts = append(opts, powchain.WithHttpEndpointsAndJWTSecret(endpoints, jwtSecret))
}
return opts, nil
}

View File

@@ -48,6 +48,7 @@ go_test(
"@consensus_spec_tests_mainnet//:test_data",
"@consensus_spec_tests_minimal//:test_data",
"@eth2_networks//:configs",
"testdata/e2e_config.yaml",
],
gotags = ["develop"],
race = "on",
@@ -61,3 +62,9 @@ go_test(
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)
filegroup(
name = "custom_configs",
srcs = glob(["testdata/*.yaml"]),
visibility = ["//testing:__subpackages__"],
)

View File

@@ -18,94 +18,94 @@ import (
var placeholderFields = []string{"UPDATE_TIMEOUT", "INTERVALS_PER_SLOT"}
func TestLoadConfigFileMainnet(t *testing.T) {
func TestLoadConfigFile(t *testing.T) {
// See https://media.githubusercontent.com/media/ethereum/consensus-spec-tests/master/tests/minimal/config/phase0.yaml
assertVals := func(name string, fields []string, c1, c2 *params.BeaconChainConfig) {
assertVals := func(name string, fields []string, expected, actual *params.BeaconChainConfig) {
// Misc params.
assert.Equal(t, c1.MaxCommitteesPerSlot, c2.MaxCommitteesPerSlot, "%s: MaxCommitteesPerSlot", name)
assert.Equal(t, c1.TargetCommitteeSize, c2.TargetCommitteeSize, "%s: TargetCommitteeSize", name)
assert.Equal(t, c1.MaxValidatorsPerCommittee, c2.MaxValidatorsPerCommittee, "%s: MaxValidatorsPerCommittee", name)
assert.Equal(t, c1.MinPerEpochChurnLimit, c2.MinPerEpochChurnLimit, "%s: MinPerEpochChurnLimit", name)
assert.Equal(t, c1.ChurnLimitQuotient, c2.ChurnLimitQuotient, "%s: ChurnLimitQuotient", name)
assert.Equal(t, c1.ShuffleRoundCount, c2.ShuffleRoundCount, "%s: ShuffleRoundCount", name)
assert.Equal(t, c1.MinGenesisActiveValidatorCount, c2.MinGenesisActiveValidatorCount, "%s: MinGenesisActiveValidatorCount", name)
assert.Equal(t, c1.MinGenesisTime, c2.MinGenesisTime, "%s: MinGenesisTime", name)
assert.Equal(t, c1.HysteresisQuotient, c2.HysteresisQuotient, "%s: HysteresisQuotient", name)
assert.Equal(t, c1.HysteresisDownwardMultiplier, c2.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
assert.Equal(t, c1.HysteresisUpwardMultiplier, c2.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
assert.Equal(t, expected.MaxCommitteesPerSlot, actual.MaxCommitteesPerSlot, "%s: MaxCommitteesPerSlot", name)
assert.Equal(t, expected.TargetCommitteeSize, actual.TargetCommitteeSize, "%s: TargetCommitteeSize", name)
assert.Equal(t, expected.MaxValidatorsPerCommittee, actual.MaxValidatorsPerCommittee, "%s: MaxValidatorsPerCommittee", name)
assert.Equal(t, expected.MinPerEpochChurnLimit, actual.MinPerEpochChurnLimit, "%s: MinPerEpochChurnLimit", name)
assert.Equal(t, expected.ChurnLimitQuotient, actual.ChurnLimitQuotient, "%s: ChurnLimitQuotient", name)
assert.Equal(t, expected.ShuffleRoundCount, actual.ShuffleRoundCount, "%s: ShuffleRoundCount", name)
assert.Equal(t, expected.MinGenesisActiveValidatorCount, actual.MinGenesisActiveValidatorCount, "%s: MinGenesisActiveValidatorCount", name)
assert.Equal(t, expected.MinGenesisTime, actual.MinGenesisTime, "%s: MinGenesisTime", name)
assert.Equal(t, expected.HysteresisQuotient, actual.HysteresisQuotient, "%s: HysteresisQuotient", name)
assert.Equal(t, expected.HysteresisDownwardMultiplier, actual.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
assert.Equal(t, expected.HysteresisUpwardMultiplier, actual.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
// Fork Choice params.
assert.Equal(t, c1.SafeSlotsToUpdateJustified, c2.SafeSlotsToUpdateJustified, "%s: SafeSlotsToUpdateJustified", name)
assert.Equal(t, expected.SafeSlotsToUpdateJustified, actual.SafeSlotsToUpdateJustified, "%s: SafeSlotsToUpdateJustified", name)
// Validator params.
assert.Equal(t, c1.Eth1FollowDistance, c2.Eth1FollowDistance, "%s: Eth1FollowDistance", name)
assert.Equal(t, c1.TargetAggregatorsPerCommittee, c2.TargetAggregatorsPerCommittee, "%s: TargetAggregatorsPerCommittee", name)
assert.Equal(t, c1.RandomSubnetsPerValidator, c2.RandomSubnetsPerValidator, "%s: RandomSubnetsPerValidator", name)
assert.Equal(t, c1.EpochsPerRandomSubnetSubscription, c2.EpochsPerRandomSubnetSubscription, "%s: EpochsPerRandomSubnetSubscription", name)
assert.Equal(t, c1.SecondsPerETH1Block, c2.SecondsPerETH1Block, "%s: SecondsPerETH1Block", name)
assert.Equal(t, expected.Eth1FollowDistance, actual.Eth1FollowDistance, "%s: Eth1FollowDistance", name)
assert.Equal(t, expected.TargetAggregatorsPerCommittee, actual.TargetAggregatorsPerCommittee, "%s: TargetAggregatorsPerCommittee", name)
assert.Equal(t, expected.RandomSubnetsPerValidator, actual.RandomSubnetsPerValidator, "%s: RandomSubnetsPerValidator", name)
assert.Equal(t, expected.EpochsPerRandomSubnetSubscription, actual.EpochsPerRandomSubnetSubscription, "%s: EpochsPerRandomSubnetSubscription", name)
assert.Equal(t, expected.SecondsPerETH1Block, actual.SecondsPerETH1Block, "%s: SecondsPerETH1Block", name)
// Deposit contract.
assert.Equal(t, c1.DepositChainID, c2.DepositChainID, "%s: DepositChainID", name)
assert.Equal(t, c1.DepositNetworkID, c2.DepositNetworkID, "%s: DepositNetworkID", name)
assert.Equal(t, c1.DepositContractAddress, c2.DepositContractAddress, "%s: DepositContractAddress", name)
assert.Equal(t, expected.DepositChainID, actual.DepositChainID, "%s: DepositChainID", name)
assert.Equal(t, expected.DepositNetworkID, actual.DepositNetworkID, "%s: DepositNetworkID", name)
assert.Equal(t, expected.DepositContractAddress, actual.DepositContractAddress, "%s: DepositContractAddress", name)
// Gwei values.
assert.Equal(t, c1.MinDepositAmount, c2.MinDepositAmount, "%s: MinDepositAmount", name)
assert.Equal(t, c1.MaxEffectiveBalance, c2.MaxEffectiveBalance, "%s: MaxEffectiveBalance", name)
assert.Equal(t, c1.EjectionBalance, c2.EjectionBalance, "%s: EjectionBalance", name)
assert.Equal(t, c1.EffectiveBalanceIncrement, c2.EffectiveBalanceIncrement, "%s: EffectiveBalanceIncrement", name)
assert.Equal(t, expected.MinDepositAmount, actual.MinDepositAmount, "%s: MinDepositAmount", name)
assert.Equal(t, expected.MaxEffectiveBalance, actual.MaxEffectiveBalance, "%s: MaxEffectiveBalance", name)
assert.Equal(t, expected.EjectionBalance, actual.EjectionBalance, "%s: EjectionBalance", name)
assert.Equal(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement, "%s: EffectiveBalanceIncrement", name)
// Initial values.
assert.DeepEqual(t, c1.GenesisForkVersion, c2.GenesisForkVersion, "%s: GenesisForkVersion", name)
assert.DeepEqual(t, c1.BLSWithdrawalPrefixByte, c2.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
assert.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
// Time parameters.
assert.Equal(t, c1.GenesisDelay, c2.GenesisDelay, "%s: GenesisDelay", name)
assert.Equal(t, c1.SecondsPerSlot, c2.SecondsPerSlot, "%s: SecondsPerSlot", name)
assert.Equal(t, c1.MinAttestationInclusionDelay, c2.MinAttestationInclusionDelay, "%s: MinAttestationInclusionDelay", name)
assert.Equal(t, c1.SlotsPerEpoch, c2.SlotsPerEpoch, "%s: SlotsPerEpoch", name)
assert.Equal(t, c1.MinSeedLookahead, c2.MinSeedLookahead, "%s: MinSeedLookahead", name)
assert.Equal(t, c1.MaxSeedLookahead, c2.MaxSeedLookahead, "%s: MaxSeedLookahead", name)
assert.Equal(t, c1.EpochsPerEth1VotingPeriod, c2.EpochsPerEth1VotingPeriod, "%s: EpochsPerEth1VotingPeriod", name)
assert.Equal(t, c1.SlotsPerHistoricalRoot, c2.SlotsPerHistoricalRoot, "%s: SlotsPerHistoricalRoot", name)
assert.Equal(t, c1.MinValidatorWithdrawabilityDelay, c2.MinValidatorWithdrawabilityDelay, "%s: MinValidatorWithdrawabilityDelay", name)
assert.Equal(t, c1.ShardCommitteePeriod, c2.ShardCommitteePeriod, "%s: ShardCommitteePeriod", name)
assert.Equal(t, c1.MinEpochsToInactivityPenalty, c2.MinEpochsToInactivityPenalty, "%s: MinEpochsToInactivityPenalty", name)
assert.Equal(t, expected.GenesisDelay, actual.GenesisDelay, "%s: GenesisDelay", name)
assert.Equal(t, expected.SecondsPerSlot, actual.SecondsPerSlot, "%s: SecondsPerSlot", name)
assert.Equal(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay, "%s: MinAttestationInclusionDelay", name)
assert.Equal(t, expected.SlotsPerEpoch, actual.SlotsPerEpoch, "%s: SlotsPerEpoch", name)
assert.Equal(t, expected.MinSeedLookahead, actual.MinSeedLookahead, "%s: MinSeedLookahead", name)
assert.Equal(t, expected.MaxSeedLookahead, actual.MaxSeedLookahead, "%s: MaxSeedLookahead", name)
assert.Equal(t, expected.EpochsPerEth1VotingPeriod, actual.EpochsPerEth1VotingPeriod, "%s: EpochsPerEth1VotingPeriod", name)
assert.Equal(t, expected.SlotsPerHistoricalRoot, actual.SlotsPerHistoricalRoot, "%s: SlotsPerHistoricalRoot", name)
assert.Equal(t, expected.MinValidatorWithdrawabilityDelay, actual.MinValidatorWithdrawabilityDelay, "%s: MinValidatorWithdrawabilityDelay", name)
assert.Equal(t, expected.ShardCommitteePeriod, actual.ShardCommitteePeriod, "%s: ShardCommitteePeriod", name)
assert.Equal(t, expected.MinEpochsToInactivityPenalty, actual.MinEpochsToInactivityPenalty, "%s: MinEpochsToInactivityPenalty", name)
// State vector lengths.
assert.Equal(t, c1.EpochsPerHistoricalVector, c2.EpochsPerHistoricalVector, "%s: EpochsPerHistoricalVector", name)
assert.Equal(t, c1.EpochsPerSlashingsVector, c2.EpochsPerSlashingsVector, "%s: EpochsPerSlashingsVector", name)
assert.Equal(t, c1.HistoricalRootsLimit, c2.HistoricalRootsLimit, "%s: HistoricalRootsLimit", name)
assert.Equal(t, c1.ValidatorRegistryLimit, c2.ValidatorRegistryLimit, "%s: ValidatorRegistryLimit", name)
assert.Equal(t, expected.EpochsPerHistoricalVector, actual.EpochsPerHistoricalVector, "%s: EpochsPerHistoricalVector", name)
assert.Equal(t, expected.EpochsPerSlashingsVector, actual.EpochsPerSlashingsVector, "%s: EpochsPerSlashingsVector", name)
assert.Equal(t, expected.HistoricalRootsLimit, actual.HistoricalRootsLimit, "%s: HistoricalRootsLimit", name)
assert.Equal(t, expected.ValidatorRegistryLimit, actual.ValidatorRegistryLimit, "%s: ValidatorRegistryLimit", name)
// Reward and penalty quotients.
assert.Equal(t, c1.BaseRewardFactor, c2.BaseRewardFactor, "%s: BaseRewardFactor", name)
assert.Equal(t, c1.WhistleBlowerRewardQuotient, c2.WhistleBlowerRewardQuotient, "%s: WhistleBlowerRewardQuotient", name)
assert.Equal(t, c1.ProposerRewardQuotient, c2.ProposerRewardQuotient, "%s: ProposerRewardQuotient", name)
assert.Equal(t, c1.InactivityPenaltyQuotient, c2.InactivityPenaltyQuotient, "%s: InactivityPenaltyQuotient", name)
assert.Equal(t, c1.InactivityPenaltyQuotientAltair, c2.InactivityPenaltyQuotientAltair, "%s: InactivityPenaltyQuotientAltair", name)
assert.Equal(t, c1.MinSlashingPenaltyQuotient, c2.MinSlashingPenaltyQuotient, "%s: MinSlashingPenaltyQuotient", name)
assert.Equal(t, c1.MinSlashingPenaltyQuotientAltair, c2.MinSlashingPenaltyQuotientAltair, "%s: MinSlashingPenaltyQuotientAltair", name)
assert.Equal(t, c1.ProportionalSlashingMultiplier, c2.ProportionalSlashingMultiplier, "%s: ProportionalSlashingMultiplier", name)
assert.Equal(t, c1.ProportionalSlashingMultiplierAltair, c2.ProportionalSlashingMultiplierAltair, "%s: ProportionalSlashingMultiplierAltair", name)
assert.Equal(t, expected.BaseRewardFactor, actual.BaseRewardFactor, "%s: BaseRewardFactor", name)
assert.Equal(t, expected.WhistleBlowerRewardQuotient, actual.WhistleBlowerRewardQuotient, "%s: WhistleBlowerRewardQuotient", name)
assert.Equal(t, expected.ProposerRewardQuotient, actual.ProposerRewardQuotient, "%s: ProposerRewardQuotient", name)
assert.Equal(t, expected.InactivityPenaltyQuotient, actual.InactivityPenaltyQuotient, "%s: InactivityPenaltyQuotient", name)
assert.Equal(t, expected.InactivityPenaltyQuotientAltair, actual.InactivityPenaltyQuotientAltair, "%s: InactivityPenaltyQuotientAltair", name)
assert.Equal(t, expected.MinSlashingPenaltyQuotient, actual.MinSlashingPenaltyQuotient, "%s: MinSlashingPenaltyQuotient", name)
assert.Equal(t, expected.MinSlashingPenaltyQuotientAltair, actual.MinSlashingPenaltyQuotientAltair, "%s: MinSlashingPenaltyQuotientAltair", name)
assert.Equal(t, expected.ProportionalSlashingMultiplier, actual.ProportionalSlashingMultiplier, "%s: ProportionalSlashingMultiplier", name)
assert.Equal(t, expected.ProportionalSlashingMultiplierAltair, actual.ProportionalSlashingMultiplierAltair, "%s: ProportionalSlashingMultiplierAltair", name)
// Max operations per block.
assert.Equal(t, c1.MaxProposerSlashings, c2.MaxProposerSlashings, "%s: MaxProposerSlashings", name)
assert.Equal(t, c1.MaxAttesterSlashings, c2.MaxAttesterSlashings, "%s: MaxAttesterSlashings", name)
assert.Equal(t, c1.MaxAttestations, c2.MaxAttestations, "%s: MaxAttestations", name)
assert.Equal(t, c1.MaxDeposits, c2.MaxDeposits, "%s: MaxDeposits", name)
assert.Equal(t, c1.MaxVoluntaryExits, c2.MaxVoluntaryExits, "%s: MaxVoluntaryExits", name)
assert.Equal(t, expected.MaxProposerSlashings, actual.MaxProposerSlashings, "%s: MaxProposerSlashings", name)
assert.Equal(t, expected.MaxAttesterSlashings, actual.MaxAttesterSlashings, "%s: MaxAttesterSlashings", name)
assert.Equal(t, expected.MaxAttestations, actual.MaxAttestations, "%s: MaxAttestations", name)
assert.Equal(t, expected.MaxDeposits, actual.MaxDeposits, "%s: MaxDeposits", name)
assert.Equal(t, expected.MaxVoluntaryExits, actual.MaxVoluntaryExits, "%s: MaxVoluntaryExits", name)
// Signature domains.
assert.Equal(t, c1.DomainBeaconProposer, c2.DomainBeaconProposer, "%s: DomainBeaconProposer", name)
assert.Equal(t, c1.DomainBeaconAttester, c2.DomainBeaconAttester, "%s: DomainBeaconAttester", name)
assert.Equal(t, c1.DomainRandao, c2.DomainRandao, "%s: DomainRandao", name)
assert.Equal(t, c1.DomainDeposit, c2.DomainDeposit, "%s: DomainDeposit", name)
assert.Equal(t, c1.DomainVoluntaryExit, c2.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
assert.Equal(t, c1.DomainSelectionProof, c2.DomainSelectionProof, "%s: DomainSelectionProof", name)
assert.Equal(t, c1.DomainAggregateAndProof, c2.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
assert.Equal(t, expected.DomainBeaconProposer, actual.DomainBeaconProposer, "%s: DomainBeaconProposer", name)
assert.Equal(t, expected.DomainBeaconAttester, actual.DomainBeaconAttester, "%s: DomainBeaconAttester", name)
assert.Equal(t, expected.DomainRandao, actual.DomainRandao, "%s: DomainRandao", name)
assert.Equal(t, expected.DomainDeposit, actual.DomainDeposit, "%s: DomainDeposit", name)
assert.Equal(t, expected.DomainVoluntaryExit, actual.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
assert.Equal(t, expected.DomainSelectionProof, actual.DomainSelectionProof, "%s: DomainSelectionProof", name)
assert.Equal(t, expected.DomainAggregateAndProof, actual.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
assertYamlFieldsMatch(t, name, fields, c1, c2)
assertYamlFieldsMatch(t, name, fields, expected, actual)
}
t.Run("mainnet", func(t *testing.T) {
@@ -129,6 +129,17 @@ func TestLoadConfigFileMainnet(t *testing.T) {
fields := fieldsFromYamls(t, append(minimalPresetsFiles, minimalConfigFile))
assertVals("minimal", fields, params.MinimalSpecConfig(), params.BeaconConfig())
})
t.Run("e2e", func(t *testing.T) {
minimalPresetsFiles := presetsFilePath(t, "minimal")
for _, fp := range minimalPresetsFiles {
params.LoadChainConfigFile(fp, nil)
}
configFile := "testdata/e2e_config.yaml"
params.LoadChainConfigFile(configFile, nil)
fields := fieldsFromYamls(t, append(minimalPresetsFiles, configFile))
assertVals("e2e", fields, params.E2ETestConfig(), params.BeaconConfig())
})
}
func TestLoadConfigFile_OverwriteCorrectly(t *testing.T) {

config/params/testdata/e2e_config.yaml
View File

@@ -0,0 +1,118 @@
# e2e config
# Extends the minimal preset
PRESET_BASE: 'minimal'
# Transition
# ---------------------------------------------------------------
# TBD; 2**256-2**10 is the spec placeholder, the e2e value is 600
TERMINAL_TOTAL_DIFFICULTY: 600
# By default, don't use these params
#TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
#TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
# Genesis
# ---------------------------------------------------------------
# [customized]
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 256 # Override for e2e tests
# Jan 3, 2020
MIN_GENESIS_TIME: 1578009600
# Highest byte set to 0xfd to avoid collisions with mainnet versioning
GENESIS_FORK_VERSION: 0x000000fd
# [customized] Faster to spin up testnets, but does not give validator reasonable warning time for genesis
GENESIS_DELAY: 10 # Override for e2e tests
# Forking
# ---------------------------------------------------------------
# Values provided for illustrative purposes.
# Individual tests/testnets may set different values.
# Altair
ALTAIR_FORK_VERSION: 0x010000fd
ALTAIR_FORK_EPOCH: 6 # Override for e2e
# Bellatrix
BELLATRIX_FORK_VERSION: 0x020000fd
BELLATRIX_FORK_EPOCH: 8
# Sharding
SHARDING_FORK_VERSION: 0x030000fd
SHARDING_FORK_EPOCH: 18446744073709551615
# Time parameters
# ---------------------------------------------------------------
# [customized] Faster for testing purposes
SECONDS_PER_SLOT: 10 # Override for e2e tests
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 2 # Override for e2e tests
# 2**8 (= 256) epochs
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
SHARD_COMMITTEE_PERIOD: 4 # Override for e2e tests
# [customized] process deposits more quickly, but insecure
ETH1_FOLLOW_DISTANCE: 4 # Override for e2e tests
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# [customized] scale queue churn at much lower validator counts for testing
CHURN_LIMIT_QUOTIENT: 65536
# Fork choice
# ---------------------------------------------------------------
# 70%
PROPOSER_SCORE_BOOST: 70
# Deposit contract
# ---------------------------------------------------------------
# Local eth1 dev net used in e2e tests (overrides the Goerli defaults)
DEPOSIT_CHAIN_ID: 1337 # Override for e2e tests
DEPOSIT_NETWORK_ID: 1337 # Override for e2e tests
# Configured on a per testnet basis
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
# Updated penalty values
# ---------------------------------------------------------------
# 3 * 2**24 (= 50,331,648)
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648
# 2**6 (= 64)
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
# 2
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
# Sync committee
# ---------------------------------------------------------------
# [customized]
SYNC_COMMITTEE_SIZE: 32
# [customized]
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
# Sync protocol
# ---------------------------------------------------------------
# 1
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
# Other e2e overrides
# ---------------------------------------------------------------
CONFIG_NAME: "end-to-end"
SLOTS_PER_EPOCH: 6
EPOCHS_PER_ETH1_VOTING_PERIOD: 2
MAX_SEED_LOOKAHEAD: 1
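For orientation, the overrides above compress the fork schedule considerably. Below is a minimal illustrative sketch (not part of this change) that derives the wall-clock fork offsets from the values in this file, assuming they are loaded unchanged:

package main

import "fmt"

func main() {
	const secondsPerSlot = 10 // SECONDS_PER_SLOT above
	const slotsPerEpoch = 6   // SLOTS_PER_EPOCH above
	// With a 60-second epoch, Altair (epoch 6) activates ~6 minutes after
	// genesis and Bellatrix (epoch 8) ~8 minutes after genesis.
	for _, f := range []struct {
		name  string
		epoch int
	}{{"altair", 6}, {"bellatrix", 8}} {
		fmt.Printf("%s fork ~%d seconds after genesis\n", f.name, f.epoch*slotsPerEpoch*secondsPerSlot)
	}
}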

View File

@@ -45,7 +45,7 @@ func E2ETestConfig() *BeaconChainConfig {
e2eConfig.DepositChainID = 1337 // Chain ID of eth1 dev net.
e2eConfig.DepositNetworkID = 1337 // Network ID of eth1 dev net.
// Altair Fork Parameters.
// Fork Parameters.
e2eConfig.AltairForkEpoch = altairE2EForkEpoch
e2eConfig.BellatrixForkEpoch = bellatrixE2EForkEpoch

View File

@@ -3,24 +3,32 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"auth.go",
"endpoint.go",
"external_ip.go",
],
importpath = "github.com/prysmaticlabs/prysm/network",
visibility = ["//visibility:public"],
deps = ["//network/authorization:go_default_library"],
deps = [
"//network/authorization:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"auth_test.go",
"endpoint_test.go",
"external_ip_test.go",
],
embed = [":go_default_library"],
deps = [
"//encoding/bytesutil:go_default_library",
"//network/authorization:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
],
)

View File

@@ -1,4 +1,4 @@
package powchain
package network
import (
"net/http"
@@ -8,6 +8,9 @@ import (
"github.com/pkg/errors"
)
// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
const DefaultRPCHTTPTimeout = time.Second * 6
// This creates a custom HTTP transport which we can attach to our HTTP client
// in order to inject JWT auth strings into our HTTP request headers. Authentication
// is required when interacting with an Ethereum engine API server via HTTP, and JWT
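The comment above is the core of this move: HTTP requests to an execution node must carry a JWT signed with the shared secret. As a rough illustration only (the type, field, and claim names here are assumptions, not necessarily identical to Prysm's jwtTransport), an HS256-signing RoundTripper built on the github.com/golang-jwt/jwt/v4 dependency added in the BUILD file above could look like this:

package jwtexample

import (
	"net/http"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// exampleJWTTransport is a sketch of a transport that signs a short-lived
// bearer token for every outgoing request.
type exampleJWTTransport struct {
	underlying http.RoundTripper
	secret     []byte
}

func (t *exampleJWTTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Sign a token carrying an issued-at claim and attach it as a bearer token.
	token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"iat": time.Now().Unix(),
	}).SignedString(t.secret)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	return t.underlying.RoundTrip(req)
}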

View File

@@ -1,4 +1,4 @@
package powchain
package network
import (
"net/http"

View File

@@ -2,6 +2,7 @@ package network
import (
"errors"
"net/http"
"strings"
"github.com/prysmaticlabs/prysm/network/authorization"
@@ -24,6 +25,22 @@ func (e Endpoint) Equals(other Endpoint) bool {
return e.Url == other.Url && e.Auth.Equals(other.Auth)
}
// HttpClient creates an HTTP client object dependent
// on the properties of the network endpoint.
func (e Endpoint) HttpClient() *http.Client {
if e.Auth.Method != authorization.Bearer {
return http.DefaultClient
}
authTransport := &jwtTransport{
underlyingTransport: http.DefaultTransport,
jwtSecret: []byte(e.Auth.Value),
}
return &http.Client{
Timeout: DefaultRPCHTTPTimeout,
Transport: authTransport,
}
}
// Equals compares two authorization data objects for equality.
func (d AuthorizationData) Equals(other AuthorizationData) bool {
return d.Method == other.Method && d.Value == other.Value
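A short usage sketch for the new helper, assuming only the exported fields shown above (Endpoint.Url, Endpoint.Auth, AuthorizationData.Method/Value); the engine URL is a placeholder:

package example

import (
	"net/http"

	"github.com/prysmaticlabs/prysm/network"
	"github.com/prysmaticlabs/prysm/network/authorization"
)

// engineClient returns an HTTP client that injects JWT bearer tokens when the
// endpoint is configured for bearer auth, and http.DefaultClient otherwise.
func engineClient(jwtSecret string) *http.Client {
	ep := network.Endpoint{
		Url: "http://127.0.0.1:8551", // placeholder engine API endpoint
		Auth: network.AuthorizationData{
			Method: authorization.Bearer,
			Value:  jwtSecret,
		},
	}
	return ep.HttpClient()
}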

File diff suppressed because it is too large

View File

@@ -123,6 +123,92 @@ func local_request_KeyManagement_DeleteKeystores_0(ctx context.Context, marshale
}
func request_KeyManagement_ListRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := client.ListRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_KeyManagement_ListRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := server.ListRemoteKeys(ctx, &protoReq)
return msg, metadata, err
}
func request_KeyManagement_ImportRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ImportRemoteKeysRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ImportRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_KeyManagement_ImportRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ImportRemoteKeysRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ImportRemoteKeys(ctx, &protoReq)
return msg, metadata, err
}
func request_KeyManagement_DeleteRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteRemoteKeysRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.DeleteRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_KeyManagement_DeleteRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteRemoteKeysRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.DeleteRemoteKeys(ctx, &protoReq)
return msg, metadata, err
}
// RegisterKeyManagementHandlerServer registers the http handlers for service KeyManagement to "mux".
// UnaryRPC :call KeyManagementServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -198,6 +284,75 @@ func RegisterKeyManagementHandlerServer(ctx context.Context, mux *runtime.ServeM
})
mux.Handle("GET", pattern_KeyManagement_ListRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_ListRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_ListRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_KeyManagement_ImportRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ImportRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_ImportRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_ImportRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_KeyManagement_DeleteRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_DeleteRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_DeleteRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -299,6 +454,66 @@ func RegisterKeyManagementHandlerClient(ctx context.Context, mux *runtime.ServeM
})
mux.Handle("GET", pattern_KeyManagement_ListRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_ListRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_ListRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_KeyManagement_ImportRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ImportRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_ImportRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_ImportRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_KeyManagement_DeleteRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_DeleteRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_DeleteRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -308,6 +523,12 @@ var (
pattern_KeyManagement_ImportKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "keystores"}, ""))
pattern_KeyManagement_DeleteKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "keystores"}, ""))
pattern_KeyManagement_ListRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
pattern_KeyManagement_ImportRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
pattern_KeyManagement_DeleteRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
)
var (
@@ -316,4 +537,10 @@ var (
forward_KeyManagement_ImportKeystores_0 = runtime.ForwardResponseMessage
forward_KeyManagement_DeleteKeystores_0 = runtime.ForwardResponseMessage
forward_KeyManagement_ListRemoteKeys_0 = runtime.ForwardResponseMessage
forward_KeyManagement_ImportRemoteKeys_0 = runtime.ForwardResponseMessage
forward_KeyManagement_DeleteRemoteKeys_0 = runtime.ForwardResponseMessage
)

View File

@@ -84,6 +84,27 @@ service KeyManagement {
body: "*"
};
}
rpc ListRemoteKeys(google.protobuf.Empty) returns (ListRemoteKeysResponse) {
option (google.api.http) = {
get: "/internal/eth/v1/remotekeys"
};
}
rpc ImportRemoteKeys(ImportRemoteKeysRequest) returns (ImportRemoteKeysResponse) {
option (google.api.http) = {
post: "/internal/eth/v1/remotekeys",
body: "*"
};
}
rpc DeleteRemoteKeys(DeleteRemoteKeysRequest) returns (DeleteRemoteKeysResponse) {
option (google.api.http) = {
delete: "/internal/eth/v1/remotekeys",
body: "*"
};
}
}
message ListKeystoresResponse {
@@ -133,3 +154,55 @@ message DeletedKeystoreStatus {
Status status = 1;
string message = 2;
}
message ListRemoteKeysResponse {
message Keystore {
bytes pubkey = 1;
string url = 2;
bool readonly = 3;
}
repeated Keystore data = 1;
}
message ImportRemoteKeysRequest {
message Keystore {
bytes pubkey = 1;
string url = 2;
}
repeated Keystore remote_keys = 1;
}
message ImportRemoteKeysResponse {
repeated ImportedRemoteKeysStatus data = 1;
}
message DeleteRemoteKeysRequest {
repeated bytes pubkeys = 1;
}
message DeleteRemoteKeysResponse {
repeated DeletedRemoteKeysStatus data = 1;
}
message ImportedRemoteKeysStatus {
enum Status {
UNKNOWN = 0;
IMPORTED = 1;
DUPLICATE = 2;
ERROR = 3;
}
Status status = 1;
string message = 2;
}
message DeletedRemoteKeysStatus {
enum Status {
NOT_FOUND = 0;
DELETED = 1;
ERROR = 3; // Skips 2 to match the DeleteKeystores status enum, which has ERROR = 3.
}
Status status = 1;
string message = 2;
}
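The messages above back three new keymanager API endpoints on /internal/eth/v1/remotekeys (GET, POST, DELETE). A hedged sketch of calling the list RPC through the generated gRPC client follows; the connection setup and the import alias for the generated package are assumptions:

package example

import (
	"context"
	"fmt"

	service "github.com/prysmaticlabs/prysm/proto/eth/service"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

// printRemoteKeys lists the remote keys registered with the validator client.
func printRemoteKeys(ctx context.Context, conn *grpc.ClientConn) error {
	client := service.NewKeyManagementClient(conn) // generated client for ethereum.eth.service.KeyManagement
	resp, err := client.ListRemoteKeys(ctx, &emptypb.Empty{})
	if err != nil {
		return err
	}
	for _, k := range resp.Data {
		fmt.Printf("pubkey=%#x url=%s readonly=%v\n", k.Pubkey, k.Url, k.Readonly)
	}
	return nil
}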

File diff suppressed because it is too large

View File

@@ -360,6 +360,9 @@ message StreamBlocksResponse {
// Representing an altair block.
SignedBeaconBlockAltair altair_block = 2;
// Representing a bellatrix block.
SignedBeaconBlockBellatrix bellatrix_block = 3;
}
}

View File

@@ -17,6 +17,7 @@ go_test(
"//:prysm_sh",
"//cmd/beacon-chain",
"//cmd/validator",
"//config/params:custom_configs",
"//tools/bootnode",
"@com_github_ethereum_go_ethereum//cmd/geth",
"@web3signer",
@@ -35,6 +36,7 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/operations/slashings/mock:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//build/bazel:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -71,6 +73,7 @@ go_test(
"//:prysm_sh",
"//cmd/beacon-chain",
"//cmd/validator",
"//config/params:custom_configs",
"//tools/bootnode",
"@com_github_ethereum_go_ethereum//cmd/geth",
"@web3signer",
@@ -89,6 +92,7 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//build/bazel:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -54,11 +54,15 @@ go_test(
name = "go_default_test",
size = "small",
srcs = ["web3remotesigner_test.go"],
data = ["@web3signer"],
data = [
"//config/params:custom_configs",
"@web3signer",
],
deps = [
":go_default_library",
"//config/params:go_default_library",
"//testing/endtoend/params:go_default_library",
"//testing/require:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)

View File

@@ -126,7 +126,7 @@ func (node *BeaconNode) Start(ctx context.Context) error {
fmt.Sprintf("--%s=%s", cmdshared.LogFileName.Name, stdOutFile.Name()),
fmt.Sprintf("--%s=%s", flags.DepositContractFlag.Name, e2e.TestParams.ContractAddress.Hex()),
fmt.Sprintf("--%s=%d", flags.RPCPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeRPCPort+index),
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, e2e.TestParams.Ports.Eth1RPCPort),
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, e2e.TestParams.Ports.Eth1AuthRPCPort+index),
fmt.Sprintf("--%s=%s", flags.ExecutionJWTSecretFlag.Name, jwtPath),
fmt.Sprintf("--%s=%d", flags.MinSyncPeers.Name, 1),
fmt.Sprintf("--%s=%d", cmdshared.P2PUDPPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeUDPPort+index),

View File

@@ -89,7 +89,11 @@ func (node *Node) Start(ctx context.Context) error {
"--ipcdisable",
"--verbosity=4",
}
// If we are testing sync, geth needs to be run via full sync as snap sync does not
// work in our setup.
if node.index == e2e.TestParams.BeaconNodeCount+e2e.TestParams.LighthouseBeaconNodeCount {
args = append(args, []string{"--syncmode=full"}...)
}
runCmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Safe
file, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, "eth1_"+strconv.Itoa(node.index)+".log")
if err != nil {

View File

@@ -151,7 +151,7 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
args = append(args, features.E2EValidatorFlags...)
}
if v.config.UseWeb3RemoteSigner {
args = append(args, fmt.Sprintf("--%s=localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
args = append(args, fmt.Sprintf("--%s=http://localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
// Write the pubkeys as comma-separated hex strings with a 0x prefix.
// See: https://docs.teku.consensys.net/en/latest/HowTo/External-Signer/Use-External-Signer/
_, pubs, err := interop.DeterministicallyGenerateKeys(uint64(offset), uint64(validatorNum))

View File

@@ -37,13 +37,16 @@ type rawKeyFile struct {
}
type Web3RemoteSigner struct {
ctx context.Context
started chan struct{}
ctx context.Context
started chan struct{}
configFilePath string
cmd *exec.Cmd
}
func NewWeb3RemoteSigner() *Web3RemoteSigner {
func NewWeb3RemoteSigner(configFilePath string) *Web3RemoteSigner {
return &Web3RemoteSigner{
started: make(chan struct{}, 1),
started: make(chan struct{}, 1),
configFilePath: configFilePath,
}
}
@@ -66,6 +69,12 @@ func (w *Web3RemoteSigner) Start(ctx context.Context) error {
return err
}
network := "minimal"
if len(w.configFilePath) > 0 {
// A file path to a YAML config file is an acceptable network argument.
network = w.configFilePath
}
args := []string{
// Global flags
fmt.Sprintf("--key-store-path=%s", keystorePath),
@@ -75,13 +84,13 @@ func (w *Web3RemoteSigner) Start(ctx context.Context) error {
// Command
"eth2",
// Command flags
"--network=minimal",
"--network=" + network,
"--slashing-protection-enabled=false", // Otherwise, a postgres DB is required.
"--enable-key-manager-api=true",
"--key-manager-api-enabled=true",
}
cmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Test code is safe to do this.
w.cmd = cmd
// Write stdout and stderr to log files.
stdout, err := os.Create(path.Join(e2e.TestParams.LogPath, "web3signer.stdout.log"))
if err != nil {

View File

@@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/components"
e2eparams "github.com/prysmaticlabs/prysm/testing/endtoend/params"
@@ -13,7 +14,11 @@ import (
func TestWeb3RemoteSigner_StartsAndReturnsPublicKeys(t *testing.T) {
require.NoError(t, e2eparams.Init(0))
wsc := components.NewWeb3RemoteSigner()
fp, err := bazel.Runfile("config/params/testdata/e2e_config.yaml")
if err != nil {
t.Fatal(err)
}
wsc := components.NewWeb3RemoteSigner(fp)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

View File

@@ -6,10 +6,11 @@ lighthouse_archive_name = "lighthouse-%s-x86_64-unknown-linux-gnu-portable.tar.g
def e2e_deps():
http_archive(
name = "web3signer",
urls = ["https://artifacts.consensys.net/public/web3signer/raw/names/web3signer.tar.gz/versions/21.10.5/web3signer-21.10.5.tar.gz"],
sha256 = "d122429f6a310bc555d1281e0b3f4e3ac43a7beec5e5dcf0a0d2416a5984f461",
# Built from commit 17d253b which has important unreleased changes.
urls = ["https://prysmaticlabs.com/uploads/web3signer-17d253b.tar.gz"],
sha256 = "bf450a59a0845c1ce8100b3192c7fec021b565efe8b1ab46bed9f71cb994a6d7",
build_file = "@prysm//testing/endtoend:web3signer.BUILD",
strip_prefix = "web3signer-21.10.5",
strip_prefix = "web3signer-develop",
)
http_archive(

View File

@@ -17,6 +17,7 @@ import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/build/bazel"
"github.com/prysmaticlabs/prysm/config/params"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -89,6 +90,21 @@ func (r *testRunner) run() {
})
}
var web3RemoteSigner *components.Web3RemoteSigner
if config.UseWeb3RemoteSigner {
cfg, err := bazel.Runfile("config/params/testdata/e2e_config.yaml")
if err != nil {
t.Fatal(err)
}
web3RemoteSigner = components.NewWeb3RemoteSigner(cfg)
g.Go(func() error {
if err := web3RemoteSigner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start web3 remote signer")
}
return nil
})
}
// Boot node.
bootNode := components.NewBootNode()
g.Go(func() error {
@@ -146,18 +162,6 @@ func (r *testRunner) run() {
return nil
})
// Web3 remote signer.
var web3RemoteSigner *components.Web3RemoteSigner
if config.UseWeb3RemoteSigner {
web3RemoteSigner = components.NewWeb3RemoteSigner()
g.Go(func() error {
if err := web3RemoteSigner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start web3 remote signer")
}
return nil
})
}
if multiClientActive {
lighthouseNodes = components.NewLighthouseBeaconNodes(config)
g.Go(func() error {
@@ -332,7 +336,6 @@ func (r *testRunner) runEvaluators(conns []*grpc.ClientConn, tickingStartTime ti
func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
keystorePath string, requiredNodes []e2etypes.ComponentRunner) {
minGenesisActiveCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
depositCheckValidator := components.NewValidatorNode(r.config, int(e2e.DepositCount), e2e.TestParams.BeaconNodeCount, minGenesisActiveCount)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, requiredNodes); err != nil {
@@ -369,7 +372,7 @@ func (r *testRunner) testTxGeneration(ctx context.Context, g *errgroup.Group, ke
func (r *testRunner) testBeaconChainSync(ctx context.Context, g *errgroup.Group,
conns []*grpc.ClientConn, tickingStartTime time.Time, bootnodeEnr, minerEnr string) error {
t, config := r.t, r.config
index := e2e.TestParams.BeaconNodeCount
index := e2e.TestParams.BeaconNodeCount + e2e.TestParams.LighthouseBeaconNodeCount
ethNode := eth1.NewNode(index, minerEnr)
g.Go(func() error {
return ethNode.Start(ctx)

View File

@@ -7,6 +7,7 @@ go_library(
"api_gateway_v1alpha1.go",
"api_middleware.go",
"data.go",
"execution_engine.go",
"finality.go",
"fork.go",
"metrics.go",

View File

@@ -0,0 +1,54 @@
package evaluators
import (
"context"
"math"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/policies"
"github.com/prysmaticlabs/prysm/testing/endtoend/types"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
)
// TransactionsPresent is an evaluator to make sure transactions sent to the execution engine
// appear in consensus client blocks' execution payloads.
var TransactionsPresent = types.Evaluator{
Name: "transactions_present_at_epoch_%d",
Policy: policies.AfterNthEpoch(helpers.BellatrixE2EForkEpoch),
Evaluation: transactionsPresent,
}
func transactionsPresent(conns ...*grpc.ClientConn) error {
conn := conns[0]
client := ethpb.NewBeaconChainClient(conn)
chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
if err != nil {
return errors.Wrap(err, "failed to get chain head")
}
req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch.Sub(1)}}
blks, err := client.ListBeaconBlocks(context.Background(), req)
if err != nil {
return errors.Wrap(err, "failed to get blocks from beacon-chain")
}
expectedTxNum := int(math.Round(float64(params.E2ETestConfig().SlotsPerEpoch) * float64(e2e.NumOfExecEngineTxs) * e2e.ExpectedExecEngineTxsThreshold))
var numberOfTxs int
for _, ctr := range blks.BlockContainers {
switch ctr.Block.(type) {
case *ethpb.BeaconBlockContainer_BellatrixBlock:
numberOfTxs += len(ctr.GetBellatrixBlock().Block.Body.ExecutionPayload.Transactions)
}
}
if numberOfTxs < expectedTxNum {
return errors.Errorf(
"not enough transactions in execution payload, expected=%d vs actual=%d",
expectedTxNum,
numberOfTxs,
)
}
return nil
}
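Worked example of the pass bar above, assuming E2ETestConfig keeps the SLOTS_PER_EPOCH of 6 shown in the e2e yaml and the NumOfExecEngineTxs and ExpectedExecEngineTxsThreshold values defined in the params diff below:

expectedTxNum = round(SlotsPerEpoch × NumOfExecEngineTxs × ExpectedExecEngineTxsThreshold)
             = round(6 × 200 × 0.7) = 840 payload transactions in the inspected epoch.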

View File

@@ -14,14 +14,21 @@ import (
"google.golang.org/grpc"
)
// ForkTransition ensures that the hard fork has occurred successfully.
var ForkTransition = types.Evaluator{
Name: "fork_transition_%d",
// AltairForkTransition ensures that the Altair hard fork has occurred successfully.
var AltairForkTransition = types.Evaluator{
Name: "altair_fork_transition_%d",
Policy: policies.OnEpoch(helpers.AltairE2EForkEpoch),
Evaluation: forkOccurs,
Evaluation: altairForkOccurs,
}
func forkOccurs(conns ...*grpc.ClientConn) error {
// BellatrixForkTransition ensures that the Bellatrix hard fork has occurred successfully.
var BellatrixForkTransition = types.Evaluator{
Name: "bellatrix_fork_transition_%d",
Policy: policies.OnEpoch(helpers.BellatrixE2EForkEpoch),
Evaluation: bellatrixForkOccurs,
}
func altairForkOccurs(conns ...*grpc.ClientConn) error {
conn := conns[0]
client := ethpb.NewBeaconNodeValidatorClient(conn)
ctx, cancel := context.WithCancel(context.Background())
@@ -62,3 +69,48 @@ func forkOccurs(conns ...*grpc.ClientConn) error {
}
return nil
}
func bellatrixForkOccurs(conns ...*grpc.ClientConn) error {
conn := conns[0]
client := ethpb.NewBeaconNodeValidatorClient(conn)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.StreamBlocksAltair(ctx, &ethpb.StreamBlocksRequest{VerifiedOnly: true})
if err != nil {
return errors.Wrap(err, "failed to get stream")
}
fSlot, err := slots.EpochStart(helpers.BellatrixE2EForkEpoch)
if err != nil {
return err
}
if ctx.Err() == context.Canceled {
return errors.New("context canceled prematurely")
}
res, err := stream.Recv()
if err != nil {
return err
}
if res == nil || res.Block == nil {
return errors.New("nil block returned by beacon node")
}
if res.GetPhase0Block() == nil && res.GetAltairBlock() == nil && res.GetBellatrixBlock() == nil {
return errors.New("nil block returned by beacon node")
}
if res.GetPhase0Block() != nil {
return errors.New("phase 0 block returned after bellatrix fork has occurred")
}
if res.GetAltairBlock() != nil {
return errors.New("altair block returned after bellatrix fork has occurred")
}
blk, err := wrapperv2.WrappedSignedBeaconBlock(res.GetBellatrixBlock())
if err != nil {
return err
}
if err := coreHelper.BeaconBlockIsNil(blk); err != nil {
return err
}
if blk.Block().Slot() < fSlot {
return errors.Errorf("wanted a block >= %d but received %d", fSlot, blk.Block().Slot())
}
return nil
}
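For reference, assuming helpers.BellatrixE2EForkEpoch matches the BELLATRIX_FORK_EPOCH of 8 in the e2e config above and 6 slots per epoch, the slot this evaluator compares against works out to fSlot = EpochStart(8) = 8 × 6 = 48, so any verified block streamed at or after slot 48 must be a Bellatrix block.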

View File

@@ -419,5 +419,8 @@ func convertToBlockInterface(obj *ethpb.BeaconBlockContainer) (block.SignedBeaco
if obj.GetBellatrixBlock() != nil {
return wrapper.WrappedSignedBeaconBlock(obj.GetBellatrixBlock())
}
if obj.GetBellatrixBlock() != nil {
return wrapper.WrappedSignedBeaconBlock(obj.GetBellatrixBlock())
}
return nil, errors.New("container has no block")
}

View File

@@ -54,11 +54,13 @@ func e2eMainnet(t *testing.T, usePrysmSh bool) {
ev.ProposeVoluntaryExit,
ev.ValidatorHasExited,
ev.ColdStateCheckpoint,
ev.ForkTransition,
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.TransactionsPresent,
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{

View File

@@ -27,7 +27,6 @@ func TestEndToEnd_MinimalConfig(t *testing.T) {
}
func TestEndToEnd_MinimalConfig_Web3Signer(t *testing.T) {
t.Skip("TODO(9994): Complete web3signer client implementation, currently blocked by https://github.com/ConsenSys/web3signer/issues/494")
e2eMinimal(t, &testArgs{
usePrysmSh: false,
useWeb3RemoteSigner: true,
@@ -54,6 +53,10 @@ func e2eMinimal(t *testing.T, args *testArgs) {
epochsToRun, err = strconv.Atoi(epochStr)
require.NoError(t, err)
}
// TODO(#10053): Web3signer does not support bellatrix yet.
if args.useWeb3RemoteSigner {
epochsToRun = helpers.BellatrixE2EForkEpoch - 1
}
if args.usePrysmSh {
// If using prysm.sh, run for only 6 epochs.
// TODO(#9166): remove this block once v2 changes are live.
@@ -83,12 +86,14 @@ func e2eMinimal(t *testing.T, args *testArgs) {
ev.ValidatorHasExited,
ev.ValidatorsVoteWithTheMajority,
ev.ColdStateCheckpoint,
ev.ForkTransition,
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.ValidatorSyncParticipation,
ev.TransactionsPresent,
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{

View File

@@ -69,6 +69,12 @@ var StandardLighthouseNodeCount = 2
// DepositCount is the amount of deposits E2E makes on a separate validator client.
var DepositCount = uint64(64)
// NumOfExecEngineTxs is the number of transactions sent to the execution engine.
var NumOfExecEngineTxs = uint64(200)
// ExpectedExecEngineTxsThreshold is the portion of execution engine transactions we expect to find in blocks.
var ExpectedExecEngineTxsThreshold = 0.7
// Base port values.
const (
portSpan = 50

View File

@@ -102,3 +102,7 @@ func (m *engineMock) ExecutionBlockByHash(_ context.Context, hash common.Hash) (
Hash: b.BlockHash,
}, nil
}
func (m *engineMock) GetTerminalBlockHash(context.Context) ([]byte, bool, error) {
return nil, false, nil
}

View File

@@ -69,7 +69,7 @@ type ValidatorService struct {
db db.Database
grpcHeaders []string
graffiti []byte
web3SignerConfig *remote_web3signer.SetupConfig
Web3SignerConfig *remote_web3signer.SetupConfig
feeRecipientConfig *validator_service_config.FeeRecipientConfig
}
@@ -122,7 +122,7 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e
interopKeysConfig: cfg.InteropKeysConfig,
graffitiStruct: cfg.GraffitiStruct,
logDutyCountDown: cfg.LogDutyCountDown,
web3SignerConfig: cfg.Web3SignerConfig,
Web3SignerConfig: cfg.Web3SignerConfig,
feeRecipientConfig: cfg.FeeRecipientConfig,
}, nil
}
@@ -204,7 +204,7 @@ func (v *ValidatorService) Start() {
graffitiOrderedIndex: graffitiOrderedIndex,
eipImportBlacklistedPublicKeys: slashablePublicKeys,
logDutyCountDown: v.logDutyCountDown,
Web3SignerConfig: v.web3SignerConfig,
Web3SignerConfig: v.Web3SignerConfig,
feeRecipientConfig: v.feeRecipientConfig,
walletIntializedChannel: make(chan *wallet.Wallet, 1),
}

View File

@@ -117,6 +117,7 @@ func (v *validator) WaitForKeymanagerInitialization(ctx context.Context) error {
}
if v.useWeb && v.wallet == nil {
log.Info("Waiting for keymanager to initialize validator client with web UI")
// If the wallet is not set, wait for it to be set through the UI.
km, err := waitForWebWalletInitialization(ctx, v.walletInitializedFeed, v.walletIntializedChannel)
if err != nil {

View File

@@ -32,5 +32,6 @@ go_test(
"//validator/keymanager/derived:go_default_library",
"//validator/keymanager/local:go_default_library",
"//validator/keymanager/remote:go_default_library",
"//validator/keymanager/remote-web3signer:go_default_library",
],
)

View File

@@ -28,6 +28,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
@@ -36,8 +37,10 @@ go_test(
srcs = ["keymanager_test.go"],
embed = [":go_default_library"],
deps = [
"//config/fieldparams:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/prysm/v1alpha1/validator-client:go_default_library",
"//testing/require:go_default_library",
"//validator/keymanager/remote-web3signer/internal:go_default_library",

View File

@@ -1,6 +1,7 @@
package remote_web3signer
import (
"bytes"
"context"
"encoding/json"
"fmt"
@@ -20,6 +21,7 @@ import (
remote_utils "github.com/prysmaticlabs/prysm/validator/keymanager/remote-utils"
"github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer/internal"
v1 "github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer/v1"
log "github.com/sirupsen/logrus"
)
// SetupConfig includes configuration values for initializing.
@@ -48,6 +50,7 @@ type Keymanager struct {
providedPublicKeys [][48]byte
accountsChangedFeed *event.Feed
validator *validator.Validate
publicKeysUrlCalled bool
}
// NewKeymanager instantiates a new web3signer key manager.
@@ -55,12 +58,6 @@ func NewKeymanager(_ context.Context, cfg *SetupConfig) (*Keymanager, error) {
if cfg.BaseEndpoint == "" || !bytesutil.IsValidRoot(cfg.GenesisValidatorsRoot) {
return nil, fmt.Errorf("invalid setup config, one or more configs are empty: BaseEndpoint: %v, GenesisValidatorsRoot: %#x", cfg.BaseEndpoint, cfg.GenesisValidatorsRoot)
}
if cfg.PublicKeysURL != "" && len(cfg.ProvidedPublicKeys) != 0 {
return nil, errors.New("Either a provided list of public keys or a URL to a list of public keys must be provided, but not both")
}
if cfg.PublicKeysURL == "" && len(cfg.ProvidedPublicKeys) == 0 {
return nil, errors.New("no valid public key options provided")
}
client, err := internal.NewApiClient(cfg.BaseEndpoint)
if err != nil {
return nil, errors.Wrap(err, "could not create apiClient")
@@ -72,6 +69,7 @@ func NewKeymanager(_ context.Context, cfg *SetupConfig) (*Keymanager, error) {
publicKeysURL: cfg.PublicKeysURL,
providedPublicKeys: cfg.ProvidedPublicKeys,
validator: validator.New(),
publicKeysUrlCalled: false,
}, nil
}
@@ -79,12 +77,14 @@ func NewKeymanager(_ context.Context, cfg *SetupConfig) (*Keymanager, error) {
// from the remote server or from the provided keys if there are no existing public keys set
// or provides the existing keys in the keymanager.
func (km *Keymanager) FetchValidatingPublicKeys(ctx context.Context) ([][fieldparams.BLSPubkeyLength]byte, error) {
if km.publicKeysURL != "" && len(km.providedPublicKeys) == 0 {
if km.publicKeysURL != "" && !km.publicKeysUrlCalled {
providedPublicKeys, err := km.client.GetPublicKeys(ctx, km.publicKeysURL)
if err != nil {
erroredResponsesTotal.Inc()
return nil, errors.Wrap(err, fmt.Sprintf("could not get public keys from remote server url: %v", km.publicKeysURL))
}
// Ensures that if the public keys are deleted, the validator does not call the URL again.
km.publicKeysUrlCalled = true
km.providedPublicKeys = providedPublicKeys
}
return km.providedPublicKeys, nil
@@ -231,13 +231,8 @@ func getSignRequestJson(ctx context.Context, validator *validator.Validate, requ
}
// SubscribeAccountChanges returns the event subscription for changes to public keys.
func (*Keymanager) SubscribeAccountChanges(_ chan [][48]byte) event.Subscription {
// Not used right now.
// Returns a stub for the time being as there is a danger of being slashed if the apiClient reloads keys dynamically.
// Because there is no way to dynamically reload keys, add or remove remote keys we are returning a stub without any event updates for the time being.
return event.NewSubscription(func(i <-chan struct{}) error {
return nil
})
func (km *Keymanager) SubscribeAccountChanges(pubKeysChan chan [][fieldparams.BLSPubkeyLength]byte) event.Subscription {
return km.accountsChangedFeed.Subscribe(pubKeysChan)
}
// ExtractKeystores is not supported for the remote-web3signer keymanager type.
@@ -278,3 +273,73 @@ func (km *Keymanager) ListKeymanagerAccounts(ctx context.Context, cfg keymanager
remote_utils.DisplayRemotePublicKeys(validatingPubKeys)
return nil
}
// AddPublicKeys imports a list of public keys into the keymanager for web3signer use. Returns status with message.
func (km *Keymanager) AddPublicKeys(ctx context.Context, pubKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.ImportedRemoteKeysStatus, error) {
if ctx == nil {
return nil, errors.New("context is nil")
}
importedRemoteKeysStatuses := make([]*ethpbservice.ImportedRemoteKeysStatus, len(pubKeys))
for i, pubKey := range pubKeys {
found := false
for _, key := range km.providedPublicKeys {
if bytes.Equal(key[:], pubKey[:]) {
found = true
break
}
}
if found {
importedRemoteKeysStatuses[i] = &ethpbservice.ImportedRemoteKeysStatus{
Status: ethpbservice.ImportedRemoteKeysStatus_DUPLICATE,
Message: fmt.Sprintf("Duplicate pubkey: %v, already in use", hexutil.Encode(pubKey[:])),
}
continue
}
km.providedPublicKeys = append(km.providedPublicKeys, pubKey)
importedRemoteKeysStatuses[i] = &ethpbservice.ImportedRemoteKeysStatus{
Status: ethpbservice.ImportedRemoteKeysStatus_IMPORTED,
Message: fmt.Sprintf("Successfully added pubkey: %v", hexutil.Encode(pubKey[:])),
}
log.Debug("Added pubkey to keymanager for web3signer", "pubkey", hexutil.Encode(pubKey[:]))
}
km.accountsChangedFeed.Send(km.providedPublicKeys)
return importedRemoteKeysStatuses, nil
}
// DeletePublicKeys removes a list of public keys from the keymanager for web3signer use. Returns status with message.
func (km *Keymanager) DeletePublicKeys(ctx context.Context, pubKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.DeletedRemoteKeysStatus, error) {
if ctx == nil {
return nil, errors.New("context is nil")
}
deletedRemoteKeysStatuses := make([]*ethpbservice.DeletedRemoteKeysStatus, len(pubKeys))
if len(km.providedPublicKeys) == 0 {
for i := range deletedRemoteKeysStatuses {
deletedRemoteKeysStatuses[i] = &ethpbservice.DeletedRemoteKeysStatus{
Status: ethpbservice.DeletedRemoteKeysStatus_NOT_FOUND,
Message: "No pubkeys are set in validator",
}
}
return deletedRemoteKeysStatuses, nil
}
for i, pubkey := range pubKeys {
for in, key := range km.providedPublicKeys {
if bytes.Equal(key[:], pubkey[:]) {
km.providedPublicKeys = append(km.providedPublicKeys[:in], km.providedPublicKeys[in+1:]...)
deletedRemoteKeysStatuses[i] = &ethpbservice.DeletedRemoteKeysStatus{
Status: ethpbservice.DeletedRemoteKeysStatus_DELETED,
Message: fmt.Sprintf("Successfully deleted pubkey: %v", hexutil.Encode(pubkey[:])),
}
log.Debug("Deleted pubkey from keymanager for web3signer", "pubkey", hexutil.Encode(pubkey[:]))
break
}
}
if deletedRemoteKeysStatuses[i] == nil {
deletedRemoteKeysStatuses[i] = &ethpbservice.DeletedRemoteKeysStatus{
Status: ethpbservice.DeletedRemoteKeysStatus_NOT_FOUND,
Message: fmt.Sprintf("Pubkey: %v not found", hexutil.Encode(pubkey[:])),
}
}
}
km.accountsChangedFeed.Send(km.providedPublicKeys)
return deletedRemoteKeysStatuses, nil
}

View File

@@ -8,8 +8,10 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service"
validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer/internal"
@@ -266,3 +268,72 @@ func TestKeymanager_FetchValidatingPublicKeys_WithExternalURL_ThrowsError(t *tes
assert.Nil(t, resp)
assert.Equal(t, "could not get public keys from remote server url: http://example2.com/api/v1/eth2/publicKeys: mock error", fmt.Sprintf("%v", err))
}
func TestKeymanager_AddPublicKeys(t *testing.T) {
ctx := context.Background()
root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69")
if err != nil {
fmt.Printf("error: %v", err)
}
config := &SetupConfig{
BaseEndpoint: "http://example.com",
GenesisValidatorsRoot: root,
}
km, err := NewKeymanager(ctx, config)
if err != nil {
fmt.Printf("error: %v", err)
}
pubkey, err := hexutil.Decode("0xa2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820")
require.NoError(t, err)
publicKeys := [][fieldparams.BLSPubkeyLength]byte{
bytesutil.ToBytes48(pubkey),
}
statuses, err := km.AddPublicKeys(ctx, publicKeys)
require.NoError(t, err)
for _, status := range statuses {
require.Equal(t, ethpbservice.ImportedRemoteKeysStatus_IMPORTED, status.Status)
}
statuses, err = km.AddPublicKeys(ctx, publicKeys)
require.NoError(t, err)
for _, status := range statuses {
require.Equal(t, ethpbservice.ImportedRemoteKeysStatus_DUPLICATE, status.Status)
}
}
func TestKeymanager_DeletePublicKeys(t *testing.T) {
ctx := context.Background()
root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69")
if err != nil {
fmt.Printf("error: %v", err)
}
config := &SetupConfig{
BaseEndpoint: "http://example.com",
GenesisValidatorsRoot: root,
}
km, err := NewKeymanager(ctx, config)
if err != nil {
fmt.Printf("error: %v", err)
}
pubkey, err := hexutil.Decode("0xa2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820")
require.NoError(t, err)
publicKeys := [][fieldparams.BLSPubkeyLength]byte{
bytesutil.ToBytes48(pubkey),
}
statuses, err := km.AddPublicKeys(ctx, publicKeys)
require.NoError(t, err)
for _, status := range statuses {
require.Equal(t, ethpbservice.ImportedRemoteKeysStatus_IMPORTED, status.Status)
}
s, err := km.DeletePublicKeys(ctx, publicKeys)
require.NoError(t, err)
for _, status := range s {
require.Equal(t, ethpbservice.DeletedRemoteKeysStatus_DELETED, status.Status)
}
s, err = km.DeletePublicKeys(ctx, publicKeys)
require.NoError(t, err)
for _, status := range s {
require.Equal(t, ethpbservice.DeletedRemoteKeysStatus_NOT_FOUND, status.Status)
}
}

View File

@@ -60,6 +60,16 @@ type KeyStoreExtractor interface {
ExtractKeystores(ctx context.Context, publicKeys []bls.PublicKey, password string) ([]*Keystore, error)
}
// PublicKeyAdder allows adding public keys to the keymanager.
type PublicKeyAdder interface {
AddPublicKeys(ctx context.Context, publicKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.ImportedRemoteKeysStatus, error)
}
// PublicKeyDeleter allows deleting public keys set in keymanager.
type PublicKeyDeleter interface {
DeletePublicKeys(ctx context.Context, publicKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.DeletedRemoteKeysStatus, error)
}
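// Illustrative sketch (not part of this diff): callers feature-detect remote-key
// support with a type assertion, mirroring what the keymanager API server does
// for the web3signer keymanager elsewhere in this change. km, ctx and pubKeys
// are assumed to be in scope:
//
//	adder, ok := km.(PublicKeyAdder)
//	if !ok {
//		return errors.New("keymanager does not support importing remote public keys")
//	}
//	statuses, err := adder.AddPublicKeys(ctx, pubKeys)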
type ListKeymanagerAccountConfig struct {
ShowDepositData bool
ShowPrivateKeys bool

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/validator/keymanager/derived"
"github.com/prysmaticlabs/prysm/validator/keymanager/local"
"github.com/prysmaticlabs/prysm/validator/keymanager/remote"
remote_web3signer "github.com/prysmaticlabs/prysm/validator/keymanager/remote-web3signer"
)
var (
@@ -25,6 +26,9 @@ var (
_ = keymanager.Importer(&derived.Keymanager{})
_ = keymanager.Deleter(&local.Keymanager{})
_ = keymanager.Deleter(&derived.Keymanager{})
_ = keymanager.PublicKeyAdder(&remote_web3signer.Keymanager{})
_ = keymanager.PublicKeyDeleter(&remote_web3signer.Keymanager{})
)
func TestKeystoreContainsPath(t *testing.T) {

View File

@@ -117,7 +117,7 @@ func NewValidatorClient(cliCtx *cli.Context) (*ValidatorClient, error) {
// client via a web portal, we start the validator client in a different way.
if cliCtx.IsSet(flags.EnableWebFlag.Name) {
if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) || cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
return nil, errors.New("web3signer cannot be used with --web")
log.Warn("Remote Keymanager API enabled. Prysm web does not properly support web3signer at this time")
}
log.Info("Enabling web portal to manage the validator client")
if err := validatorClient.initializeForWeb(cliCtx); err != nil {
@@ -191,12 +191,8 @@ func (c *ValidatorClient) initializeFromCLI(cliCtx *cli.Context) error {
dataDir := cliCtx.String(flags.WalletDirFlag.Name)
if !cliCtx.IsSet(flags.InteropNumValidators.Name) {
// Custom Check For Web3Signer
if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) || cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) && cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
c.wallet = wallet.NewWalletForWeb3Signer()
} else {
return errors.New("--validators-external-signer-url and --validators-external-signer-public-keys must be used together")
}
if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) {
c.wallet = wallet.NewWalletForWeb3Signer()
} else {
w, err := wallet.OpenWalletOrElseCli(cliCtx, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
return nil, wallet.ErrNoWalletFound
@@ -275,24 +271,28 @@ func (c *ValidatorClient) initializeFromCLI(cliCtx *cli.Context) error {
func (c *ValidatorClient) initializeForWeb(cliCtx *cli.Context) error {
var err error
// Read the wallet password file from the cli context.
if err = setWalletPasswordFilePath(cliCtx); err != nil {
return errors.Wrap(err, "could not read wallet password file")
}
// Read the wallet from the specified path.
w, err := wallet.OpenWalletOrElseCli(cliCtx, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
return nil, nil
})
if err != nil {
return errors.Wrap(err, "could not open wallet")
}
c.wallet = w
dataDir := cliCtx.String(flags.WalletDirFlag.Name)
if c.wallet != nil {
dataDir = c.wallet.AccountsDir()
if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) {
c.wallet = wallet.NewWalletForWeb3Signer()
} else {
// Read the wallet password file from the cli context.
if err = setWalletPasswordFilePath(cliCtx); err != nil {
return errors.Wrap(err, "could not read wallet password file")
}
// Read the wallet from the specified path.
w, err := wallet.OpenWalletOrElseCli(cliCtx, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
return nil, nil
})
if err != nil {
return errors.Wrap(err, "could not open wallet")
}
c.wallet = w
if c.wallet != nil {
dataDir = c.wallet.AccountsDir()
}
}
if cliCtx.String(cmd.DataDirFlag.Name) != cmd.DefaultDataDir() {
dataDir = cliCtx.String(cmd.DataDirFlag.Name)
}
@@ -439,9 +439,8 @@ func (c *ValidatorClient) registerValidatorService(cliCtx *cli.Context) error {
func web3SignerConfig(cliCtx *cli.Context) (*remote_web3signer.SetupConfig, error) {
var web3signerConfig *remote_web3signer.SetupConfig
if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) && cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) {
urlStr := cliCtx.String(flags.Web3SignerURLFlag.Name)
publicKeysStr := cliCtx.String(flags.Web3SignerPublicValidatorKeysFlag.Name)
u, err := url.ParseRequestURI(urlStr)
if err != nil {
return nil, errors.Wrapf(err, "web3signer url %s is invalid", urlStr)
@@ -453,19 +452,22 @@ func web3SignerConfig(cliCtx *cli.Context) (*remote_web3signer.SetupConfig, erro
BaseEndpoint: u.String(),
GenesisValidatorsRoot: nil,
}
pURL, err := url.ParseRequestURI(publicKeysStr)
if err == nil && pURL.Scheme != "" && pURL.Host != "" {
web3signerConfig.PublicKeysURL = publicKeysStr
} else {
var validatorKeys [][48]byte
for _, key := range strings.Split(publicKeysStr, ",") {
decodedKey, decodeErr := hexutil.Decode(key)
if decodeErr != nil {
return nil, errors.Wrapf(decodeErr, "could not decode public key for web3signer: %s", key)
if cliCtx.IsSet(flags.Web3SignerPublicValidatorKeysFlag.Name) {
publicKeysStr := cliCtx.String(flags.Web3SignerPublicValidatorKeysFlag.Name)
pURL, err := url.ParseRequestURI(publicKeysStr)
if err == nil && pURL.Scheme != "" && pURL.Host != "" {
web3signerConfig.PublicKeysURL = publicKeysStr
} else {
var validatorKeys [][48]byte
for _, key := range strings.Split(publicKeysStr, ",") {
decodedKey, decodeErr := hexutil.Decode(key)
if decodeErr != nil {
return nil, errors.Wrapf(decodeErr, "could not decode public key for web3signer: %s", key)
}
validatorKeys = append(validatorKeys, bytesutil.ToBytes48(decodedKey))
}
validatorKeys = append(validatorKeys, bytesutil.ToBytes48(decodedKey))
web3signerConfig.ProvidedPublicKeys = validatorKeys
}
web3signerConfig.ProvidedPublicKeys = validatorKeys
}
}
return web3signerConfig, nil

View File

@@ -112,6 +112,7 @@ go_test(
"//validator/keymanager/remote-web3signer:go_default_library",
"//validator/slashing-protection-history/format:go_default_library",
"//validator/testing:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_google_uuid//:go_default_library",

View File

@@ -17,6 +17,7 @@ func (f *ValidatorEndpointFactory) IsNil() bool {
func (*ValidatorEndpointFactory) Paths() []string {
return []string{
"/eth/v1/keystores",
"/eth/v1/remotekeys",
}
}
@@ -30,6 +31,12 @@ func (*ValidatorEndpointFactory) Create(path string) (*apimiddleware.Endpoint, e
endpoint.PostResponse = &importKeystoresResponseJson{}
endpoint.DeleteRequest = &deleteKeystoresRequestJson{}
endpoint.DeleteResponse = &deleteKeystoresResponseJson{}
case "/eth/v1/remotekeys":
endpoint.GetResponse = &listRemoteKeysResponseJson{}
endpoint.PostRequest = &importRemoteKeysRequestJson{}
endpoint.PostResponse = &importRemoteKeysResponseJson{}
endpoint.DeleteRequest = &deleteRemoteKeysRequestJson{}
endpoint.DeleteResponse = &deleteRemoteKeysResponseJson{}
default:
return nil, errors.New("invalid path")
}

View File

@@ -32,3 +32,37 @@ type deleteKeystoresResponseJson struct {
Statuses []*statusJson `json:"data"`
SlashingProtection string `json:"slashing_protection"`
}
// Remote keymanager API JSON types.
type listRemoteKeysResponseJson struct {
Keystores []*remoteKeysListJson `json:"data"`
}
type remoteKeysListJson struct {
Pubkey string `json:"pubkey" hex:"true"`
Url string `json:"url"`
Readonly bool `json:"readonly"`
}
type remoteKeysJson struct {
Pubkey string `json:"pubkey" hex:"true"`
Url string `json:"url"`
Readonly bool `json:"readonly"`
}
type importRemoteKeysRequestJson struct {
Keystores []*remoteKeysJson `json:"remote_keys"`
}
type importRemoteKeysResponseJson struct {
Statuses []*statusJson `json:"data"`
}
type deleteRemoteKeysRequestJson struct {
PublicKeys []string `json:"pubkeys" hex:"true"`
}
type deleteRemoteKeysResponseJson struct {
Statuses []*statusJson `json:"data"`
}
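Taken together with the endpoint factory above, these structs define the JSON accepted and returned on /eth/v1/remotekeys; pubkeys travel as 0x-prefixed hex because of the hex:"true" tags. A sketch of the request body a client might POST follows; the validator API host/port and the web3signer URL are assumptions:

package example

import (
	"bytes"
	"encoding/json"
	"net/http"
)

type remoteKey struct {
	Pubkey string `json:"pubkey"`
	Url    string `json:"url"`
}

// importRemoteKeys posts one remote key definition to the keymanager API.
func importRemoteKeys(pubkeyHex, signerURL string) error {
	payload := struct {
		RemoteKeys []remoteKey `json:"remote_keys"`
	}{RemoteKeys: []remoteKey{{Pubkey: pubkeyHex, Url: signerURL}}}
	body, err := json.Marshal(&payload)
	if err != nil {
		return err
	}
	// Sends {"remote_keys":[{"pubkey":"0x...","url":"..."}]}
	resp, err := http.Post("http://127.0.0.1:7500/eth/v1/remotekeys", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}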

View File

@@ -118,6 +118,116 @@ func TestDeleteKeystores_JSONisEqual(t *testing.T) {
}
func TestListRemoteKeys_JSONisEqual(t *testing.T) {
middlewareResponse := &listRemoteKeysResponseJson{
Keystores: []*remoteKeysListJson{
&remoteKeysListJson{
Pubkey: "0x0",
Url: "http://localhost:8080",
Readonly: true,
},
},
}
protoResponse := &service.ListRemoteKeysResponse{
Data: []*service.ListRemoteKeysResponse_Keystore{
&service.ListRemoteKeysResponse_Keystore{
Pubkey: make([]byte, fieldparams.BLSPubkeyLength),
Url: "http://localhost:8080",
Readonly: true,
},
},
}
listResp, err := areJsonPropertyNamesEqual(middlewareResponse, protoResponse)
require.NoError(t, err)
require.Equal(t, listResp, true)
resp, err := areJsonPropertyNamesEqual(middlewareResponse.Keystores[0], protoResponse.Data[0])
require.NoError(t, err)
require.Equal(t, resp, true)
}
func TestImportRemoteKeys_JSONisEqual(t *testing.T) {
importKeystoresRequest := &importRemoteKeysRequestJson{}
protoImportRequest := &service.ImportRemoteKeysRequest{
RemoteKeys: []*service.ImportRemoteKeysRequest_Keystore{
&service.ImportRemoteKeysRequest_Keystore{
Pubkey: make([]byte, fieldparams.BLSPubkeyLength),
Url: "http://localhost:8080",
},
},
}
requestResp, err := areJsonPropertyNamesEqual(importKeystoresRequest, protoImportRequest)
require.NoError(t, err)
require.Equal(t, requestResp, true)
importKeystoresResponse := &importRemoteKeysResponseJson{
Statuses: []*statusJson{
&statusJson{
Status: "Error",
Message: "a",
},
},
}
protoImportKeystoresResponse := &service.ImportRemoteKeysResponse{
Data: []*service.ImportedRemoteKeysStatus{
&service.ImportedRemoteKeysStatus{
Status: service.ImportedRemoteKeysStatus_ERROR,
Message: "a",
},
},
}
ImportResp, err := areJsonPropertyNamesEqual(importKeystoresResponse, protoImportKeystoresResponse)
require.NoError(t, err)
require.Equal(t, ImportResp, true)
resp, err := areJsonPropertyNamesEqual(importKeystoresResponse.Statuses[0], protoImportKeystoresResponse.Data[0])
require.NoError(t, err)
require.Equal(t, resp, true)
}
func TestDeleteRemoteKeys_JSONisEqual(t *testing.T) {
deleteKeystoresRequest := &deleteRemoteKeysRequestJson{}
protoDeleteRequest := &service.DeleteRemoteKeysRequest{
Pubkeys: [][]byte{[]byte{}},
}
requestResp, err := areJsonPropertyNamesEqual(deleteKeystoresRequest, protoDeleteRequest)
require.NoError(t, err)
require.Equal(t, requestResp, true)
deleteKeystoresResponse := &deleteRemoteKeysResponseJson{
Statuses: []*statusJson{
&statusJson{
Status: "Error",
Message: "a",
},
},
}
protoDeleteResponse := &service.DeleteRemoteKeysResponse{
Data: []*service.DeletedRemoteKeysStatus{
&service.DeletedRemoteKeysStatus{
Status: service.DeletedRemoteKeysStatus_ERROR,
Message: "a",
},
},
}
deleteResp, err := areJsonPropertyNamesEqual(deleteKeystoresResponse, protoDeleteResponse)
require.NoError(t, err)
require.Equal(t, deleteResp, true)
resp, err := areJsonPropertyNamesEqual(deleteKeystoresResponse.Statuses[0], protoDeleteResponse.Data[0])
require.NoError(t, err)
require.Equal(t, resp, true)
}
// note: this does not do a deep comparison of the structs
func areJsonPropertyNamesEqual(internal, proto interface{}) (bool, error) {
internalJSON, err := json.Marshal(internal)

View File

@@ -30,7 +30,7 @@ func (s *Server) ListKeystores(
}
km, err := s.validatorService.Keymanager()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get Prysm keymanager: %v", err)
return nil, status.Errorf(codes.Internal, "Could not get Prysm keymanager (possibly due to beacon node unavailable): %v", err)
}
if s.wallet.KeymanagerKind() != keymanager.Derived && s.wallet.KeymanagerKind() != keymanager.Local {
return nil, status.Errorf(codes.FailedPrecondition, "Prysm validator keys are not stored locally with this keymanager type.")
@@ -67,7 +67,7 @@ func (s *Server) ImportKeystores(
}
km, err := s.validatorService.Keymanager()
if err != nil {
- return nil, status.Errorf(codes.Internal, "Could not get keymanager: %v", err)
+ return nil, status.Errorf(codes.Internal, "Could not get keymanager (possibly due to beacon node unavailable): %v", err)
}
importer, ok := km.(keymanager.Importer)
if !ok {
@@ -150,7 +150,7 @@ func (s *Server) DeleteKeystores(
}
km, err := s.validatorService.Keymanager()
if err != nil {
- return nil, status.Errorf(codes.Internal, "Could not get keymanager: %v", err)
+ return nil, status.Errorf(codes.Internal, "Could not get keymanager (possibly due to beacon node unavailable): %v", err)
}
if len(req.Pubkeys) == 0 {
return &ethpbservice.DeleteKeystoresResponse{Data: make([]*ethpbservice.DeletedKeystoreStatus, 0)}, nil
@@ -251,3 +251,134 @@ func (s *Server) slashingProtectionHistoryForDeletedKeys(
}
return slashingprotection.ExportStandardProtectionJSON(ctx, s.valDB, filteredKeys...)
}
// ListRemoteKeys returns a list of all public keys defined for web3signer keymanager type.
func (s *Server) ListRemoteKeys(ctx context.Context, _ *empty.Empty) (*ethpbservice.ListRemoteKeysResponse, error) {
if !s.walletInitialized {
return nil, status.Error(codes.FailedPrecondition, "Prysm Wallet not initialized. Please create a new wallet.")
}
if s.validatorService == nil {
return nil, status.Error(codes.FailedPrecondition, "Validator service not ready.")
}
km, err := s.validatorService.Keymanager()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get Prysm keymanager (possibly due to beacon node unavailable): %v", err)
}
if s.wallet.KeymanagerKind() != keymanager.Web3Signer {
return nil, status.Errorf(codes.FailedPrecondition, "Prysm Wallet is not of type Web3Signer. Please execute validator client with web3signer flags.")
}
pubKeys, err := km.FetchValidatingPublicKeys(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve keystores: %v", err)
}
keystoreResponse := make([]*ethpbservice.ListRemoteKeysResponse_Keystore, len(pubKeys))
for i := 0; i < len(pubKeys); i++ {
keystoreResponse[i] = &ethpbservice.ListRemoteKeysResponse_Keystore{
Pubkey: pubKeys[i][:],
Url: s.validatorService.Web3SignerConfig.BaseEndpoint,
Readonly: true,
}
}
return &ethpbservice.ListRemoteKeysResponse{
Data: keystoreResponse,
}, nil
}
// ImportRemoteKeys imports a list of public keys defined for web3signer keymanager type.
func (s *Server) ImportRemoteKeys(ctx context.Context, req *ethpbservice.ImportRemoteKeysRequest) (*ethpbservice.ImportRemoteKeysResponse, error) {
if !s.walletInitialized {
return nil, status.Error(codes.FailedPrecondition, "Prysm Wallet not initialized. Please create a new wallet.")
}
if s.validatorService == nil {
return nil, status.Error(codes.FailedPrecondition, "Validator service not ready.")
}
km, err := s.validatorService.Keymanager()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get Prysm keymanager (possibly due to beacon node unavailable): %v", err)
}
if s.wallet.KeymanagerKind() != keymanager.Web3Signer {
return nil, status.Errorf(codes.FailedPrecondition, "Prysm Wallet is not of type Web3Signer. Please execute validator client with web3signer flags.")
}
adder, ok := km.(keymanager.PublicKeyAdder)
if !ok {
statuses := groupImportRemoteKeysErrors(req, "Keymanager kind cannot import public keys for web3signer keymanager type.")
return &ethpbservice.ImportRemoteKeysResponse{Data: statuses}, nil
}
remoteKeys := make([][fieldparams.BLSPubkeyLength]byte, len(req.RemoteKeys))
isUrlUsed := false
for i, obj := range req.RemoteKeys {
remoteKeys[i] = bytesutil.ToBytes48(obj.Pubkey)
if obj.Url != "" {
isUrlUsed = true
}
}
if isUrlUsed {
log.Warn("Setting a web3signer base URL for imported keys is not supported. Prysm only uses the URL from the --validators-external-signer-url flag for web3signer.")
}
statuses, err := adder.AddPublicKeys(ctx, remoteKeys)
if err != nil {
sts := groupImportRemoteKeysErrors(req, fmt.Sprintf("Could not add keys; error: %v", err))
return &ethpbservice.ImportRemoteKeysResponse{Data: sts}, nil
}
return &ethpbservice.ImportRemoteKeysResponse{
Data: statuses,
}, nil
}
func groupImportRemoteKeysErrors(req *ethpbservice.ImportRemoteKeysRequest, errorMessage string) []*ethpbservice.ImportedRemoteKeysStatus {
statuses := make([]*ethpbservice.ImportedRemoteKeysStatus, len(req.RemoteKeys))
for i := 0; i < len(req.RemoteKeys); i++ {
statuses[i] = &ethpbservice.ImportedRemoteKeysStatus{
Status: ethpbservice.ImportedRemoteKeysStatus_ERROR,
Message: errorMessage,
}
}
return statuses
}
// DeleteRemoteKeys deletes a list of public keys defined for web3signer keymanager type.
func (s *Server) DeleteRemoteKeys(ctx context.Context, req *ethpbservice.DeleteRemoteKeysRequest) (*ethpbservice.DeleteRemoteKeysResponse, error) {
if !s.walletInitialized {
return nil, status.Error(codes.FailedPrecondition, "Prysm Wallet not initialized. Please create a new wallet.")
}
if s.validatorService == nil {
return nil, status.Error(codes.FailedPrecondition, "Validator service not ready.")
}
km, err := s.validatorService.Keymanager()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get Prysm keymanager (possibly due to beacon node unavailable): %v", err)
}
if s.wallet.KeymanagerKind() != keymanager.Web3Signer {
return nil, status.Errorf(codes.FailedPrecondition, "Prysm Wallet is not of type Web3Signer. Please execute validator client with web3signer flags.")
}
deleter, ok := km.(keymanager.PublicKeyDeleter)
if !ok {
statuses := groupDeleteRemoteKeysErrors(req, "Keymanager kind cannot delete public keys for web3signer keymanager type.")
return &ethpbservice.DeleteRemoteKeysResponse{Data: statuses}, nil
}
remoteKeys := make([][fieldparams.BLSPubkeyLength]byte, len(req.Pubkeys))
for i, key := range req.Pubkeys {
remoteKeys[i] = bytesutil.ToBytes48(key)
}
statuses, err := deleter.DeletePublicKeys(ctx, remoteKeys)
if err != nil {
sts := groupDeleteRemoteKeysErrors(req, fmt.Sprintf("Could not delete keys; error: %v", err))
return &ethpbservice.DeleteRemoteKeysResponse{Data: sts}, nil
}
return &ethpbservice.DeleteRemoteKeysResponse{
Data: statuses,
}, nil
}
func groupDeleteRemoteKeysErrors(req *ethpbservice.DeleteRemoteKeysRequest, errorMessage string) []*ethpbservice.DeletedRemoteKeysStatus {
statuses := make([]*ethpbservice.DeletedRemoteKeysStatus, len(req.Pubkeys))
for i := 0; i < len(req.Pubkeys); i++ {
statuses[i] = &ethpbservice.DeletedRemoteKeysStatus{
Status: ethpbservice.DeletedRemoteKeysStatus_ERROR,
Message: errorMessage,
}
}
return statuses
}
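Note that ImportRemoteKeys and DeleteRemoteKeys only require the keymanager to satisfy keymanager.PublicKeyAdder and keymanager.PublicKeyDeleter. Judging purely from the call sites above, the two interfaces presumably have roughly the following shape; parameter names and exact definitions are assumptions, and the authoritative versions live in the keymanager package.

// Sketch inferred from the AddPublicKeys / DeletePublicKeys call sites above,
// assuming the same context, fieldparams, and ethpbservice imports as this file.
type PublicKeyAdder interface {
	AddPublicKeys(ctx context.Context, pubKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.ImportedRemoteKeysStatus, error)
}

type PublicKeyDeleter interface {
	DeletePublicKeys(ctx context.Context, pubKeys [][fieldparams.BLSPubkeyLength]byte) ([]*ethpbservice.DeletedRemoteKeysStatus, error)
}

Any keymanager that implements these two methods, in practice the web3signer keymanager, can back the handlers above.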

View File

@@ -7,6 +7,7 @@ import (
"fmt"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/ptypes/empty"
"github.com/google/uuid"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
@@ -524,3 +525,158 @@ func createRandomKeystore(t testing.TB, password string) *keymanager.Keystore {
Name: encryptor.Name(),
}
}
func TestServer_ListRemoteKeys(t *testing.T) {
t.Run("wallet not ready", func(t *testing.T) {
s := Server{}
_, err := s.ListRemoteKeys(context.Background(), &empty.Empty{})
require.ErrorContains(t, "Prysm Wallet not initialized. Please create a new wallet.", err)
})
ctx := context.Background()
w := wallet.NewWalletForWeb3Signer()
root := make([]byte, fieldparams.RootLength)
root[0] = 1
bytevalue, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
pubkeys := [][fieldparams.BLSPubkeyLength]byte{bytesutil.ToBytes48(bytevalue)}
config := &remote_web3signer.SetupConfig{
BaseEndpoint: "http://example.com",
GenesisValidatorsRoot: root,
ProvidedPublicKeys: pubkeys,
}
km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false, Web3SignerConfig: config})
require.NoError(t, err)
vs, err := client.NewValidatorService(ctx, &client.Config{
Wallet: w,
Validator: &mock.MockValidator{
Km: km,
},
Web3SignerConfig: config,
})
require.NoError(t, err)
s := &Server{
walletInitialized: true,
wallet: w,
validatorService: vs,
}
expectedKeys, err := km.FetchValidatingPublicKeys(ctx)
require.NoError(t, err)
t.Run("returns proper data with existing pub keystores", func(t *testing.T) {
resp, err := s.ListRemoteKeys(context.Background(), &empty.Empty{})
require.NoError(t, err)
for i := 0; i < len(resp.Data); i++ {
require.DeepEqual(t, expectedKeys[i][:], resp.Data[i].Pubkey)
}
})
}
func TestServer_ImportRemoteKeys(t *testing.T) {
t.Run("wallet not ready", func(t *testing.T) {
s := Server{}
_, err := s.ImportRemoteKeys(context.Background(), &ethpbservice.ImportRemoteKeysRequest{})
require.ErrorContains(t, "Prysm Wallet not initialized. Please create a new wallet.", err)
})
ctx := context.Background()
w := wallet.NewWalletForWeb3Signer()
root := make([]byte, fieldparams.RootLength)
root[0] = 1
config := &remote_web3signer.SetupConfig{
BaseEndpoint: "http://example.com",
GenesisValidatorsRoot: root,
ProvidedPublicKeys: nil,
}
km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false, Web3SignerConfig: config})
require.NoError(t, err)
vs, err := client.NewValidatorService(ctx, &client.Config{
Wallet: w,
Validator: &mock.MockValidator{
Km: km,
},
Web3SignerConfig: config,
})
require.NoError(t, err)
s := &Server{
walletInitialized: true,
wallet: w,
validatorService: vs,
}
bytevalue, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
remoteKeys := []*ethpbservice.ImportRemoteKeysRequest_Keystore{
{
Pubkey: bytevalue,
},
}
t.Run("returns proper data with existing pub keystores", func(t *testing.T) {
resp, err := s.ImportRemoteKeys(context.Background(), &ethpbservice.ImportRemoteKeysRequest{
RemoteKeys: remoteKeys,
})
expectedStatuses := []*ethpbservice.ImportedRemoteKeysStatus{
&ethpbservice.ImportedRemoteKeysStatus{
Status: ethpbservice.ImportedRemoteKeysStatus_IMPORTED,
Message: fmt.Sprintf("Successfully added pubkey: %v", hexutil.Encode(bytevalue)),
},
}
require.NoError(t, err)
for i := 0; i < len(resp.Data); i++ {
require.DeepEqual(t, expectedStatuses[i], resp.Data[i])
}
})
}
func TestServer_DeleteRemoteKeys(t *testing.T) {
t.Run("wallet not ready", func(t *testing.T) {
s := Server{}
_, err := s.DeleteRemoteKeys(context.Background(), &ethpbservice.DeleteRemoteKeysRequest{})
require.ErrorContains(t, "Prysm Wallet not initialized. Please create a new wallet.", err)
})
ctx := context.Background()
w := wallet.NewWalletForWeb3Signer()
root := make([]byte, fieldparams.RootLength)
root[0] = 1
bytevalue, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
pubkeys := [][fieldparams.BLSPubkeyLength]byte{bytesutil.ToBytes48(bytevalue)}
config := &remote_web3signer.SetupConfig{
BaseEndpoint: "http://example.com",
GenesisValidatorsRoot: root,
ProvidedPublicKeys: pubkeys,
}
km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false, Web3SignerConfig: config})
require.NoError(t, err)
vs, err := client.NewValidatorService(ctx, &client.Config{
Wallet: w,
Validator: &mock.MockValidator{
Km: km,
},
Web3SignerConfig: config,
})
require.NoError(t, err)
s := &Server{
walletInitialized: true,
wallet: w,
validatorService: vs,
}
t.Run("returns proper data with existing pub keystores", func(t *testing.T) {
resp, err := s.DeleteRemoteKeys(context.Background(), &ethpbservice.DeleteRemoteKeysRequest{
Pubkeys: [][]byte{bytevalue},
})
expectedStatuses := []*ethpbservice.DeletedRemoteKeysStatus{
&ethpbservice.DeletedRemoteKeysStatus{
Status: ethpbservice.DeletedRemoteKeysStatus_DELETED,
Message: fmt.Sprintf("Successfully deleted pubkey: %v", hexutil.Encode(bytevalue)),
},
}
require.NoError(t, err)
for i := 0; i < len(resp.Data); i++ {
require.DeepEqual(t, expectedStatuses[i], resp.Data[i])
}
expectedKeys, err := km.FetchValidatingPublicKeys(ctx)
require.NoError(t, err)
require.Equal(t, 0, len(expectedKeys))
})
}
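Beyond unit tests, a quick manual check of the new listing endpoint is possible over the validator's HTTP gateway once the client runs with the web3signer flags. The sketch below is hypothetical: the route follows the standard keymanager API (/eth/v1/remotekeys), the port assumes Prysm's default gateway port 7500, the JSON field names mirror the keystore fields above, and any required auth token is omitted for brevity.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

// remoteKey mirrors the pubkey/url/readonly fields returned per keystore.
type remoteKey struct {
	Pubkey   string `json:"pubkey"`
	Url      string `json:"url"`
	Readonly bool   `json:"readonly"`
}

type listRemoteKeysResponse struct {
	Data []remoteKey `json:"data"`
}

func main() {
	// Assumed route and port; adjust to your validator client configuration.
	resp, err := http.Get("http://localhost:7500/eth/v1/remotekeys")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	var keys listRemoteKeysResponse
	if err := json.Unmarshal(body, &keys); err != nil {
		log.Fatal(err)
	}
	for _, k := range keys.Data {
		fmt.Printf("pubkey=%s url=%s readonly=%t\n", k.Pubkey, k.Url, k.Readonly)
	}
}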