Compare commits


17 Commits

Author SHA1 Message Date
nisdas
82e7c0ed24 Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into trie-debug 2022-04-14 07:32:44 +08:00
Nishant Das
1b6adca3ca Handle Finalized Deposit Insertion Better (#10517) 2022-04-13 10:08:19 +02:00
Nishant Das
1651649e5a Update to 1.17.9 (#10518) 2022-04-13 05:25:13 +00:00
Potuz
56187edb98 Use forkchoice first when checking canonical status (#10516) 2022-04-12 21:34:13 -03:00
Preston Van Loon
ecad5bbffc e2e: Provide e2e config yaml to web3signer (#10123)
* e2e: Provide e2e config yaml to web3signer

* fix build for //testing/endtoend:go_default_test

* Update with web3signer with teku fixes

* buildifier

* Add slasher case for web3signer

* Update testing/endtoend/minimal_e2e_test.go

* Update web3signer to 21.10.6

* Revert "Update web3signer to 21.10.6"

This reverts commit bdf3c408f2.

* Remove slasher part of web3signer e2e tests

* Revert "Remove slasher part of web3signer e2e tests"

This reverts commit 24249802ae.

* fix slasher web3signer test

* fixing build

* updating yaml to match testnet_e2e_config.go

* trying a different order to the e2e test and adding a log

* trying different way to kill process

* handling unhandled error

* testing changes to config WIP

* fixing bazel WIP

* fixing build

* ignoring test for now to test

* fixing bazel

* Test only web3signer e2e

* rolling back some commits to test

* fixing bazel

* trying an updated web3signer version

* changing flag to match version

* trying current version of develop for web3signer

* testing not using the --network property

* addressing build error

* testing config change

* reverting to go back to using the network file

* testing adding epochs per sync committee period

* rolling back configs

* removing check to test

* adding log to get sync committee duties and changing bellatrix fork epoch to something large and altair to epoch 1

* fixing bazel

* updating epoch in config file

* fixing more discrepancies between the configurations

* removing unused yaml

* removing GoLand-added duplicates

* reverting using network minimal

* fixing bug

* rolling back some changes

* rolling back some changes

* rolling back changes

* making sure web3signer test doesn't make it to bellatrix fork yet

* reverting changes I did not touch

* undo comment

* Update testing/endtoend/deps.bzl

* Apply suggestions from code review

* rm nl

* fix //testing/endtoend:go_mainnet_test

* Remove unnecessary dep

* fix //testing/endtoend:go_mainnet_test

* addressing review comments

* fixing build and internal conflict

* removing web3signer slasher test as it's unneeded due to the interface nature of key signing, the regular slashing test is enough

* fix: the validator we fetch from the binary can only run before Altair; if you add that and the web3signer, the two can never run together, as the web3signer sets it to before Bellatrix

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: James He <james@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-04-12 16:57:46 +00:00
Nishant Das
407182387b fix it all (#10511) 2022-04-12 19:50:29 +08:00
terence tsao
ad0b0b503d Move GetTerminalBlockHash to powchain engine (#10500)
* Move GetTerminalBlockHash to powchain engine

* Update service.go

* Update service.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-12 10:19:07 +00:00
terence tsao
58f4ba758c Metrics tracking EE VALID/SYNCING/INVALID response counter (#10504)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-04-12 09:51:13 +00:00
nisdas
df3e23665b debug 2022-04-12 16:59:09 +08:00
james-prysm
64f64f06bf Remote Key Manager API(web3signer) (#10302)
* removing flag requirement, can run web3signer without predefined public keys

* placeholders for remote-keymanager-api

* adding proto and accountschangedfeed

* updating generated code

* fix imports

* fixing interface

* adding work in progress apimiddleware code

* started implementing functions for remote keymanager api

* fixing generated code from proto

* fixing protos

* fixing import format

* fixing proto generation again, didn't fix the first time

* fixing imports again

* continuing on implementing functions

* implementing add function

* implementing delete API function

* handling errors for API

* removing unusedcode and fixing format

* fixing bazel

* wip enable --web when running web3signer

* fixing wallet check for web3signer

* fixing apis

* adding list remote keys unit test

* import remote keys test

* delete pubkeys tests

* moving location of tests

* adding unit tests

* adding placeholder functions

* adding more unit tests

* fixing bazel

* fixing build

* fixing already slice issue with unit test

* fixing linting

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/keymanager/types.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/node/node.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/keymanager/types.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* adding comment on proto based on review

* Update validator/keymanager/remote-web3signer/keymanager.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/keymanager/remote-web3signer/keymanager.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* adding generated code based on review

* updating based on feedback

* fixing imports

* fixing formatting

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fixing event call

* fixing dependency

* updating bazel

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing comment from review

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-04-11 16:05:40 -04:00
terence tsao
e70055733f Save state to DB after proposer boost (#10509) 2022-04-11 16:51:49 +00:00
Radosław Kapka
36e4f49af0 Bellatrix evaluators (#10506)
* defensive nil check

* separate ExecutionPayload/Header from codegen

* tell bazel about this new file

* Merge: support terminal difficulty override (#9769)

* Fix finding terminal block hash calculation

* Update mainnet_config.go

* Update beacon_block.pb.go

* Various fixes to pass all spec tests for Merge (#9777)

* Proper upgrade altair to merge state

* Use uint64 for ttd

* Correctly upgrade to merge state + object mapping fixes

* Use proper receive block path for initial syncing

* Disable contract lookback

* Disable deposit contract lookback

* Go fmt

* Merge: switch from go bindings to raw rpc calls (#9803)

* Disable genesis ETH1.0 chain header logging

* Update htrutils.go

* all gossip tests passing

* Remove gas validations

* Update penalty params for Merge

* Fix gossip and tx size limits for the merge part 1

* Remove extraneous p2p condition

* Add and use

* Add and use TBH_ACTIVATION_EPOCH

* Update WORKSPACE

* Update Kintsugi engine API (#9865)

* Kintsugi ssz (#9867)

* All spec tests pass

* Update spec test shas

* Update Kintsugi consensus implementations (#9872)

* Remove secp256k1

* Remove unused merge genesis state gen tool

* Manually override nil transaction field. M2 works

* Fix bad hex conversion

* Change Gossip message size and Chunk Size from 1 MB to 10 MB (#9860)

* change gossip size and chunk size after merge

* change ssz to accommodate both changes

* gofmt config file

* add testcase for merge MsgId

* Update beacon-chain/p2p/message_id.go

Change MB to Mib in comment

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* change function name from altairMsgID to postAltairMsgID

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* Sync with develop

* Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi

* Update state_trie.go

* Clean up conflicts

* Fix build

* Update config to devnet1

* Fix state merge

* Handle merge test case for update balance

* Fix build

* State pkg cleanup

* Fix a bug with loading mainnet state

* Fix transactions root

* Add v2 endpoint for merge blocks (#9802)

* Add V2 blocks endpoint for merge blocks

* Update beacon-chain/rpc/apimiddleware/structs.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* go mod

* fix transactions

* Terence's comments

* add missing file

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Sync

* Go mod tidy

* change EP field names

* latest kintusgi execution api

* fix conflicts

* converting base fee to big endian format (#10018)

* ReverseByteOrder function does not mess the input

* sync with develop

* use merge gossip sizes

* correct gossip sizes this time

* visibility

* clean ups

* Sync with develop, fix payload nil check bug

* Speed up syncing, hide cosmetic errors

* Sync with develop

* Clean up after sync

* Update generate_keys.go

* sync with develop

* Update mainnet_config.go

* Clean ups

* Sync optimistically candidate blocks (#10193)

* Revert "Sync optimistically candidate blocks (#10193)"

This reverts commit f99a0419ef.

* Sync optimistically candidate blocks (#10193)

* allow optimistic sync

* Fix merge transition block validation

* Update proposer.go

* Sync with develop

* delete deprecated client, update testnet flag

* Change optimistic logic (#10194)

* Logs and err handling

* Fix build

* Clean ups

* Add back get payload

* c

* Done

* Rm uncommented

* Optimistic sync: prysm validator rpcs (#10200)

* Logs to reproduce

* Use pointers

* Use pointers

* Use pointers

* Update json_marshal_unmarshal.go

* Fix marshal

* Update json_marshal_unmarshal.go

* Log

* string total diff

* str

* marshal un

* set string

* json

* gaz

* Comment out optimistic status

* remove kiln flag here (#10269)

* Sync with develop

* Sync with develop

* clean ups

* refactor engine calls

* Update process_block.go

* Fix deadlock, uncomment duty opt sync

* Update proposer_execution_payload.go

* Sync with develop

* Rm post state check

* Bypass eth1 data checks

* Update proposer_execution_payload.go

* Return early if ttd is not reached

* Sync with develop

* Update process_block.go

* Update receive_block.go

* Update bzl

* Revert "Update receive_block.go"

This reverts commit 5b4a87c512.

* Fix run time

* add in all the fixes

* fix evaluator bugs

* latest fixes

* sum

* fix to be configurable

* Update go.mod

* Fix AltairCompatible to account for future state version

* Update proposer_execution_payload.go

* fix broken conditional checks

* fix all issues

* Handle pre state Altair with valid payload

* Handle pre state Altair with valid payload

* Log bellatrix fields

* Update log.go

* Revert "fix broken conditional checks"

This reverts commit e118db6c20.

* LH multiclient working

* Friendly fee recipient log

* Remove extra SetOptimisticToValid

* fix race

* fix test

* Fix base fee per gas

* Fix notifypayload headroot

* tx fuzzer

* clean up with develop branch

* save progress

* 200tx/block

* add LH flags

* Sync with develop

* cleanup

* cleanup

* hash

* fix build

* fix test

* fix go check

* fmt

* gosec

* Blocked stream

(cherry picked from commit f362af9862db680b6352692217ad5c08d44a1e86)

# Conflicts:
#	proto/prysm/v1alpha1/validator.pb.go

* remove duplicate param

* test

* revert some test changes

* Initial version of EE tx count

* evaluate all txs in epoch

* remove logs

* uncomment tests

* remove unwanted change

* parameterize ExpectedExecEngineTxsThreshold

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@zahoor.in>
Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-11 13:45:22 +00:00
terence tsao
d98428dec4 Can prune nodes from canonical and payload maps (#10496)
* Can prune nodes from canonical and payload maps

* Update store_test.go

* prune payload hashes, canonical nodes and better testing

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-04-11 11:08:50 +00:00
Nishant Das
00b92e01d3 Fetch Non Finalized Deposits Better (#10505) 2022-04-11 09:59:22 +02:00
Potuz
ca5adbf7e4 Fix optimistic logging (#10503) 2022-04-09 23:09:04 +00:00
Nishant Das
a083b7a0a5 Set Auth Differently In Our Powchain Constructor (#10501) 2022-04-09 11:03:05 +02:00
Raul Jordan
dd5995b665 Proper Connection Management for ETH JSON-RPC Client for Startup and Runtime (#10498)
* begin connection management revamp

* kiln runs

* retry

* reconnect

* add

* rpc connect fix

* remove logging

* logs

* retry

* default value for web3flag

* test pass

* comments

* ensure auth works
2022-04-09 09:28:40 +08:00
115 changed files with 4262 additions and 2708 deletions

View File

@@ -176,7 +176,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.17.6",
go_version = "1.17.9",
nogo = "@//:nogo",
)

View File

@@ -40,15 +40,15 @@ func (od *OriginData) CheckpointString() string {
// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", od.cf, od.b.Block().Slot(), od.wsd.BlockRoot))
return blockPath, file.WriteFile(blockPath, od.BlockBytes())
blockPath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.BlockRoot))
return blockPath, file.WriteFile(blockPath, od.sb)
}
// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.StateRoot))
return statePath, file.WriteFile(statePath, od.StateBytes())
return statePath, file.WriteFile(statePath, od.sb)
}
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.

View File

@@ -66,6 +66,7 @@ go_library(
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",

View File

@@ -273,13 +273,13 @@ func (s *Service) CurrentFork() *ethpb.Fork {
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
// If the block has been finalized, the block will always be part of the canonical chain.
if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
return true, nil
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
}
// If the block has not been finalized, check fork choice store to see if the block is canonical
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
// If the block has been finalized, the block will always be part of the canonical chain.
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
}
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
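
Reassembled from the interleaved removed/added lines above, the reordered method (per #10516) reads roughly as follows; this is a sketch using only names that appear in the hunk:

// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
	// Ask fork choice first: if it still tracks the block, its answer is authoritative.
	if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
		return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
	}
	// Fork choice has pruned past the block; a finalized block is always canonical.
	return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
}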

View File

@@ -138,6 +138,26 @@ var (
Name: "state_balance_cache_miss",
Help: "Count the number of state balance cache hits.",
})
newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_valid_node_count",
Help: "Count the number of valid nodes after newPayload EE call",
})
newPayloadOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_optimistic_node_count",
Help: "Count the number of optimistic nodes after newPayload EE call",
})
newPayloadInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_invalid_node_count",
Help: "Count the number of invalid nodes after newPayload EE call",
})
forkchoiceUpdatedValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_valid_node_count",
Help: "Count the number of valid nodes after forkchoiceUpdated EE call",
})
forkchoiceUpdatedOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_optimistic_node_count",
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
})
)
// reportSlotMetrics reports slot related metrics.
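
These counters are incremented in the engine-API call paths shown later in this compare (forkchoiceUpdated and newPayload). A minimal sketch of the pattern, with the status switch simplified for illustration; only the counter and error names come from this diff:

switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
	// EE answered SYNCING/ACCEPTED: the node proceeds optimistically.
	forkchoiceUpdatedOptimisticNodeCount.Inc()
case nil:
	// EE answered VALID.
	forkchoiceUpdatedValidNodeCount.Inc()
}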

View File

@@ -46,15 +46,31 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedHash, err := s.getFinalizedPayloadHash(ctx, finalizedRoot)
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return nil, errors.Wrap(err, "could not get finalized payload hash")
return nil, errors.Wrap(err, "could not get finalized block")
}
if finalizedBlock == nil || finalizedBlock.IsNil() {
finalizedBlock = s.getInitSyncBlock(s.ensureRootNotZeros(finalizedRoot))
if finalizedBlock == nil || finalizedBlock.IsNil() {
return nil, errors.Errorf("finalized block with root %#x does not exist in the db or our cache", s.ensureRootNotZeros(finalizedRoot))
}
}
var finalizedHash []byte
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
finalizedHash = params.BeaconConfig().ZeroHash[:]
} else {
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block execution payload")
}
finalizedHash = payload.BlockHash
}
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: headPayload.BlockHash,
FinalizedBlockHash: finalizedHash[:],
FinalizedBlockHash: finalizedHash,
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
@@ -67,16 +83,18 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
default:
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
}
}
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
}
@@ -88,55 +106,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
return payloadID, nil
}
// getFinalizedPayloadHash returns the finalized payload hash for the given finalized block root.
// It checks the following in order:
// 1. The finalized block exists in db
// 2. The finalized block exists in initial sync block cache
// 3. The finalized block is the weak subjectivity block and exists in db
// Error is returned if the finalized block is not found from above.
func (s *Service) getFinalizedPayloadHash(ctx context.Context, finalizedRoot [32]byte) ([32]byte, error) {
b, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
if b != nil {
return getPayloadHash(b.Block())
}
b = s.getInitSyncBlock(finalizedRoot)
if b != nil {
return getPayloadHash(b.Block())
}
r, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
b, err = s.cfg.BeaconDB.Block(ctx, r)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
if b != nil {
return getPayloadHash(b.Block())
}
return [32]byte{}, errors.Errorf("finalized block with root %#x does not exist in the db or our cache", s.ensureRootNotZeros(finalizedRoot))
}
// getPayloadHash returns the payload hash for the input given block.
// zeros are returned if the block is older than bellatrix.
func getPayloadHash(b block.BeaconBlock) ([32]byte, error) {
if blocks.IsPreBellatrixVersion(b.Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := b.Body().ExecutionPayload()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block execution payload")
}
return bytesutil.ToBytes32(payload.BlockHash), nil
}
// notifyForkchoiceUpdate signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postStateVersion int,
@@ -168,12 +137,14 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return false, nil
case powchain.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
@@ -190,7 +161,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
return false, errors.Wrap(err, "could not validate execution payload from execution engine")
}
}
newPayloadValidNodeCount.Inc()
// During the transition event, the transition block should be verified for sanity.
if blocks.IsPreBellatrixVersion(preStateVersion) {
// Handle case where pre-state is Altair but block contains payload.
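
Because removed and added lines are interleaved above, the new finalized-hash resolution in notifyForkchoiceUpdate is easier to read reassembled (a sketch built only from the added lines):

finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
	return nil, errors.Wrap(err, "could not get finalized block")
}
if finalizedBlock == nil || finalizedBlock.IsNil() {
	// Fall back to the initial-sync block cache before giving up.
	finalizedBlock = s.getInitSyncBlock(s.ensureRootNotZeros(finalizedRoot))
	if finalizedBlock == nil || finalizedBlock.IsNil() {
		return nil, errors.Errorf("finalized block with root %#x does not exist in the db or our cache", s.ensureRootNotZeros(finalizedRoot))
	}
}
var finalizedHash []byte
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
	// Pre-Bellatrix blocks carry no execution payload; use the zero hash.
	finalizedHash = params.BeaconConfig().ZeroHash[:]
} else {
	payload, err := finalizedBlock.Block().Body().ExecutionPayload()
	if err != nil {
		return nil, errors.Wrap(err, "could not get finalized block execution payload")
	}
	finalizedHash = payload.BlockHash
}

Note that, compared with the deleted getFinalizedPayloadHash helper, the weak-subjectivity origin-checkpoint fallback is gone and the hash is now a plain byte slice rather than [32]byte.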

View File

@@ -792,74 +792,3 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
require.NoError(t, err)
require.Equal(t, false, has)
}
func TestService_getFinalizedPayloadHash(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Use the block in DB
b := util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
h, err := service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// Use the block in init sync cache
b = util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hello"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
service.initSyncBlocks[r] = blk
h, err = service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// Use the weak subjectivity sync block
b = util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("howdy"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, r))
h, err = service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// None of the above should error
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, [32]byte{'a'}))
_, err = service.getFinalizedPayloadHash(ctx, [32]byte{'a'})
require.ErrorContains(t, "does not exist in the db or our cache", err)
}
func TestService_getPayloadHash(t *testing.T) {
// Pre-bellatrix
blk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
h, err := getPayloadHash(blk.Block())
require.NoError(t, err)
require.Equal(t, [32]byte{}, h)
// Post bellatrix
b := util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
h, err = getPayloadHash(blk.Block())
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(bytesutil.PadTo([]byte("hi"), 32)), h)
}

View File

@@ -127,8 +127,8 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -148,6 +148,9 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return err
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -506,7 +509,6 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if postState.Slot()+1 == s.nextEpochBoundarySlot {
// Update caches for the next epoch at epoch boundary slot - 1.
log.Infof("UpdateCommitteeCache from handleEpochBoundary (postState.Slot()+1 == s.nextEpochBoundarySlot), slot=%d, epoch=%d", postState.Slot(), coreTime.CurrentEpoch(postState))
if err := helpers.UpdateCommitteeCache(postState, coreTime.NextEpoch(postState)); err != nil {
return err
}
@@ -515,7 +517,6 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if err != nil {
return err
}
log.Info("UpdateProposerIndicesInCache from handleEpochBoundary (postState.Slot()+1 == s.nextEpochBoundarySlot)")
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
return err
}
@@ -531,12 +532,9 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// Update caches at epoch boundary slot.
// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
log.Info("UpdateCommitteeCache from handleEpochBoundary (postState.Slot() >= s.nextEpochBoundarySlot)")
if err := helpers.UpdateCommitteeCache(postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
log.Info("UpdateProposerIndicesInCache from handleEpochBoundary (postState.Slot() >= s.nextEpochBoundarySlot)")
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
return err
}
@@ -614,9 +612,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/math"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
@@ -402,10 +403,13 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
return errors.Wrap(err, "could not cast eth1 deposit index")
}
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
return nil
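
The finalized index now comes from the state's Eth1DepositIndex (deposits actually processed into the state) rather than Eth1Data().DepositCount - 1 (deposits the eth1 data merely knows about), with an overflow-checked cast via the new math dependency. Reassembled from the added lines:

eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
	return errors.Wrap(err, "could not cast eth1 deposit index")
}
// Both finalized-trie insertion and proof pruning use the processed index.
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
	return errors.Wrap(err, "could not prune deposit proofs")
}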

View File

@@ -1516,6 +1516,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(7))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
@@ -1529,8 +1530,64 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 9, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(109))
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(5))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
gs2 := gs.Copy()
assert.NoError(t, gs2.SetEth1Data(&ethpb.Eth1Data{DepositCount: 15}))
assert.NoError(t, gs2.SetEth1DepositIndex(12))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))
assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
// Insert 3 deposits beforehand.
depositCache.InsertFinalizedDeposits(ctx, 2)
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(105))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
// Insert New Finalized State with higher deposit count.
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
fDeposits = depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}

View File

@@ -83,7 +83,8 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies that the root has an ancestor that aligns with the finalized checkpoint.
// In this case, we could exit early to save on additional computation.
if s.cfg.ForkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
blockRoot := bytesutil.ToBytes32(root)
if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
return nil
}
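
This is the same guard pattern as the IsCanonical change earlier in this compare: only trust the canonical short-circuit for roots that fork choice still tracks. Reassembled:

blockRoot := bytesutil.ToBytes32(root)
// If fork choice no longer has the node, fall through to the full
// finalized-consistency check instead of returning early.
if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
	return nil
}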

View File

@@ -440,11 +440,9 @@ func (s *Service) initializeBeaconChain(
s.cfg.ChainStartFetcher.ClearPreGenesisData()
// Update committee shuffled indices for genesis epoch.
log.Infof("UpdateCommitteeCache from initializeBeaconChain, slot=%d", genesisState.Slot())
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
return nil, err
}
log.Info("UpdateProposerIndicesInCache from initializeBeaconChain")
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
return nil, err
}

View File

@@ -36,7 +36,7 @@ type DepositFetcher interface {
DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
FinalizedDeposits(ctx context.Context) *FinalizedDeposits
NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
}
// FinalizedDeposits stores the trie of deposits that have been included
@@ -137,6 +137,22 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
depositTrie := dc.finalizedDeposits.Deposits
insertIndex := int(dc.finalizedDeposits.MerkleTrieIndex + 1)
// Don't insert into finalized trie if there is no deposit to
// insert.
if len(dc.deposits) == 0 {
return
}
// In the event we have fewer deposits than we need to
// finalize, we finalize up to the highest index we do have.
if len(dc.deposits) <= int(eth1DepositIndex) {
eth1DepositIndex = int64(len(dc.deposits)) - 1
}
// If we finalize to some lower deposit index, we
// ignore it.
if int(eth1DepositIndex) < insertIndex {
return
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
continue
@@ -246,7 +262,7 @@ func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposit
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
@@ -256,10 +272,9 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
return dc.allDeposits(untilBlk)
}
lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
var deposits []*ethpb.Deposit
for _, d := range dc.deposits {
if (d.Index > lastFinalizedDepositIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
deposits = append(deposits, d.Deposit)
}
}
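
Taken together, the added guards make InsertFinalizedDeposits safe to call with an index the cache cannot satisfy; a consolidated sketch of the new checks (names from the hunks above):

if len(dc.deposits) == 0 {
	return // nothing to finalize
}
if len(dc.deposits) <= int(eth1DepositIndex) {
	// Fewer deposits cached than requested: finalize up to the last one we have.
	eth1DepositIndex = int64(len(dc.deposits)) - 1
}
if int(eth1DepositIndex) < insertIndex {
	return // finalizing to a lower index than before is ignored
}

NonFinalizedDeposits likewise stops reading dc.finalizedDeposits.MerkleTrieIndex itself and takes lastFinalizedIndex from the caller, e.g. dc.NonFinalizedDeposits(ctx, 1, big.NewInt(10)) in the updated tests.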

View File

@@ -459,7 +459,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
Index: 1,
},
}
newFinalizedDeposit := ethpb.DepositContainer{
newFinalizedDeposit := &ethpb.DepositContainer{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
@@ -471,17 +471,17 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
}
dc.deposits = oldFinalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 1)
// Artificially exclude old deposits so that they can only be retrieved from previously finalized deposits.
dc.deposits = []*ethpb.DepositContainer{&newFinalizedDeposit}
dc.InsertFinalizedDeposits(context.Background(), 2)
dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
assert.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex)
var deps [][]byte
for _, d := range append(oldFinalizedDeposits, &newFinalizedDeposit) {
for _, d := range oldFinalizedDeposits {
hash, err := d.Deposit.Data.HashTreeRoot()
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
@@ -491,6 +491,140 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}
func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)
finalizedDeposits := []*ethpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{0}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 1,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 2,
},
}
dc.deposits = finalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 5)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
dc, err := New()
require.NoError(t, err)
finalizedDeposits := []*ethpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{0}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 1,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 2,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{3}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 3,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{4}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 4,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{5}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 5,
},
}
dc.deposits = finalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 5)
// Reinsert finalized deposits with a lower index.
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
dc, err := New()
require.NoError(t, err)
@@ -554,7 +688,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
})
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), nil)
deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
assert.Equal(t, 2, len(deps))
}
@@ -611,7 +745,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
})
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
assert.Equal(t, 1, len(deps))
}

View File

@@ -6,7 +6,6 @@ import (
"bytes"
"context"
"fmt"
log "github.com/sirupsen/logrus"
"sort"
"github.com/pkg/errors"
@@ -287,44 +286,41 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.Va
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for current epoch and next epoch.
func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) error {
//for _, e := range []types.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
log.Infof("computed seed=%#x for slot=%d", seed, state.Slot())
if committeeCache.HasEntry(string(seed[:])) {
log.Infof("UpdateCommitteeCache: seed=%#x already in cache at slot=%d", seed, state.Slot())
return nil
}
for _, e := range []types.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, epoch)
if err != nil {
return err
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as shuffled indices. In current spec,
// sorted indices is required to retrieve proposer index. This is also
// used for failing verify signature fallback.
sortedIndices := make([]types.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as shuffled indices. In current spec,
// sorted indices is required to retrieve proposer index. This is also
// used for failing verify signature fallback.
sortedIndices := make([]types.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
log.Infof("UpdateCommitteeCache: epoch=%d, state.slot=%d, indices=%v, seed=%#x", epoch, state.Slot(), sortedIndices, seed)
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
//}
return nil
}
@@ -367,7 +363,6 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
if err != nil {
return err
}
log.Infof("UpdateProposerIndicesInCache: state.slot=%d, slot=%d, root=%#x, indices=%v", state.Slot(), s, r, indices)
return proposerIndicesCache.AddProposerIndices(&cache.ProposerIndices{
BlockRoot: bytesutil.ToBytes32(r),
ProposerIndices: proposerIndices,
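
With the debug logging stripped, the loop-based UpdateCommitteeCache in the hunk above reassembles to roughly the following sketch (all identifiers as shown in the diff):

func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) error {
	// Cache shuffled indices for both the current and the next epoch.
	for _, e := range []types.Epoch{epoch, epoch + 1} {
		seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
		if err != nil {
			return err
		}
		if committeeCache.HasEntry(string(seed[:])) {
			return nil
		}
		shuffledIndices, err := ShuffledIndices(state, e)
		if err != nil {
			return err
		}
		count := SlotCommitteeCount(uint64(len(shuffledIndices)))
		// Sorted indices are stored alongside shuffled ones: the spec needs them
		// to retrieve the proposer index and as a signature-verification fallback.
		sortedIndices := make([]types.ValidatorIndex, len(shuffledIndices))
		copy(sortedIndices, shuffledIndices)
		sort.Slice(sortedIndices, func(i, j int) bool {
			return sortedIndices[i] < sortedIndices[j]
		})
		if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
			ShuffledIndices: shuffledIndices,
			CommitteeCount:  uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
			Seed:            seed,
			SortedIndices:   sortedIndices,
		}); err != nil {
			return err
		}
	}
	return nil
}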

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
log "github.com/sirupsen/logrus"
)
// Seed returns the randao seed used for shuffling of a given epoch.
@@ -34,8 +33,6 @@ func Seed(state state.ReadOnlyBeaconState, epoch types.Epoch, domain [bls.Domain
seed32 := hash.Hash(seed)
log.Infof("seed computation params for slot=%d: domain=%#x, epoch=%d, randaoMix=%#x", state.Slot(), domain, epoch, randaoMix)
return seed32, nil
}

View File

@@ -3,8 +3,6 @@ package helpers
import (
"bytes"
"context"
"fmt"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -17,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
log "github.com/sirupsen/logrus"
)
@@ -89,27 +88,11 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
if err != nil {
return nil, errors.Wrap(err, "could not get seed")
}
var ci []types.ValidatorIndex
if s.Slot() == 78 {
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
ci = append(ci, types.ValidatorIndex(idx))
}
return nil
}); err != nil {
log.Errorf("got error doing double-check validator index computation=%v", err)
return nil, err
}
}
activeIndices, err := committeeCache.ActiveIndices(ctx, seed)
if err != nil {
return nil, errors.Wrap(err, "could not interface with committee cache")
}
if activeIndices != nil {
if s.Slot() == 78 {
log.Infof("double check indices for 78, len=%d, low=%d, high=%d", len(ci), ci[0], ci[len(ci)-1])
}
log.Infof("found indices in cache for slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(activeIndices), activeIndices[0], activeIndices[len(activeIndices)-1])
return activeIndices, nil
}
@@ -123,7 +106,6 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
return nil, errors.New("nil active indices")
}
CommitteeCacheInProgressHit.Inc()
log.Infof("found indices in in-progress cache for slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(activeIndices), activeIndices[0], activeIndices[len(activeIndices)-1])
return activeIndices, nil
}
return nil, errors.Wrap(err, "could not mark committee cache as in progress")
@@ -144,16 +126,9 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
return nil, err
}
log.Infof("computed indices slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(indices), indices[0], indices[len(indices)-1])
log.Infof("UpdateCommitteeCache from ActiveValidatorIndices, slot=%d", s.Slot())
if err := UpdateCommitteeCache(s, epoch); err != nil {
return nil, errors.Wrap(err, "could not update committee cache")
}
/*
if err := UpdateProposerIndicesInCache(ctx, s); err != nil {
return nil, errors.Wrap(err, "failed to update proposer indices cache in ActiveValidatorIndices")
}
*/
return indices, nil
}
@@ -200,7 +175,6 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
return 0, err
}
log.Infof("UpdateCommitteeCache from ActiveValidatorCount, slot=%d", s.Slot())
if err := UpdateCommitteeCache(s, epoch); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
@@ -275,7 +249,6 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
log.Info("UpdateProposerIndicesInCache from BeaconProposerIndex")
if err := UpdateProposerIndicesInCache(ctx, state); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
@@ -286,19 +259,15 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
if err != nil {
return 0, errors.Wrap(err, "could not generate seed")
}
fmt.Printf("BeaconProposerIndex:seed=%#x", seed)
seedWithSlot := append(seed[:], bytesutil.Bytes8(uint64(state.Slot()))...)
fmt.Printf("BeaconProposerIndex:seedWithSlot=%#x", seed)
seedWithSlotHash := hash.Hash(seedWithSlot)
fmt.Printf("BeaconProposerIndex:seedWithSlotHash=%#x", seed)
indices, err := ActiveValidatorIndices(ctx, state, e)
if err != nil {
return 0, errors.Wrap(err, "could not get active indices")
}
log.Infof("validator index length=%d, low=%d, high=%d", len(indices), indices[0], indices[len(indices)-1])
return ComputeProposerIndex(state, indices, seedWithSlotHash)
}

View File

@@ -138,7 +138,6 @@ func ProcessSlotsUsingNextSlotCache(
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlotsUsingNextSlotCache")
defer span.End()
/*
// Check whether the parent state has been advanced by 1 slot in next slot cache.
nextSlotState, err := NextSlotState(ctx, parentRoot)
if err != nil {
@@ -149,11 +148,6 @@ func ProcessSlotsUsingNextSlotCache(
// We replace next slot state with parent state.
if cachedStateExists {
parentState = nextSlotState
root, err := parentState.HashTreeRoot(ctx)
if err != nil {
log.Errorf("weird, got an error calling HTR for the state=%v where root should be=%#x", err, parentRoot)
}
log.Infof("found state in NextSlotCache at slot=%d with root=%#x (parentRoot=%#x)", parentState.Slot(), root, parentRoot)
}
// In the event our cached state has advanced our
@@ -161,12 +155,9 @@ func ProcessSlotsUsingNextSlotCache(
if cachedStateExists && parentState.Slot() == slot {
return parentState, nil
}
*/
log.Infof("process_slots being called up to slot=%d where state.slot=%d", slot, parentState.Slot())
// Since next slot cache only advances state by 1 slot,
// we check if there's more slots that need to process.
parentState, err := ProcessSlots(ctx, parentState, slot)
parentState, err = ProcessSlots(ctx, parentState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}

View File

@@ -282,11 +282,7 @@ func ProcessBlockForStateRoot(
state, err = b.ProcessBlockHeaderNoVerify(ctx, state, blk.Slot(), blk.ProposerIndex(), blk.ParentRoot(), bodyRoot[:])
if err != nil {
tracing.AnnotateError(span, err)
r, err := signed.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not process block header, also failed to compute its htr")
}
return nil, errors.Wrapf(err, "could not process block header, state slot=%d, root=%#x", state.Slot(), r)
return nil, errors.Wrap(err, "could not process block header")
}
enabled, err := b.IsExecutionEnabled(state, blk.Body())

View File

@@ -105,7 +105,6 @@ type HeadAccessDatabase interface {
// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
}

View File

@@ -23,7 +23,7 @@ var previousFinalizedCheckpointKey = []byte("previous-finalized-checkpoint")
var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to determine canonical")
// The finalized block roots index tracks beacon blocks which are finalized in the canonical chain.
// The finalized checkpoint contains the epoch which was finalized and the highest beacon block
// The finalized checkpoint contains the the epoch which was finalized and the highest beacon block
// root where block.slot <= start_slot(epoch). As a result, we cannot index the finalized canonical
// beacon block chain using the finalized root alone as this would exclude all other blocks in the
// finalized epoch from being indexed as "final and canonical".
@@ -75,7 +75,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
// Walk up the ancestry chain until we reach a block root present in the finalized block roots
// index bucket or genesis block root.
for {
if bytes.Equal(root, genesisRoot) {
if bytes.Equal(root, genesisRoot) || bytes.Equal(root, initCheckpointRoot) {
break
}
@@ -105,12 +105,6 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
return err
}
// breaking here allows the initial checkpoint root to be correctly inserted,
// but stops the loop from trying to search for its parent.
if bytes.Equal(root, initCheckpointRoot) {
break
}
// Found parent, loop exit condition.
if parentBytes := bkt.Get(block.ParentRoot()); parentBytes != nil {
parent := &ethpb.FinalizedBlockRootContainer{}

View File

@@ -42,8 +42,4 @@ func TestSaveOrigin(t *testing.T) {
cbb, err := scb.MarshalSSZ()
require.NoError(t, err)
require.NoError(t, db.SaveOrigin(ctx, csb, cbb))
broot, err := scb.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, true, db.IsFinalizedBlock(ctx, broot))
}

View File

@@ -155,7 +155,7 @@ func (_ *Service) FinalizedDeposits(_ context.Context) *depositcache.FinalizedDe
}
// NonFinalizedDeposits mocks out the deposit cache functionality for interop.
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ int64, _ *big.Int) []*ethpb.Deposit {
return []*ethpb.Deposit{}
}

View File

@@ -51,10 +51,4 @@ var (
Help: "The number of times pruning happened.",
},
)
validatedCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_validated_count",
Help: "The number of blocks that have been fully validated.",
},
)
)

View File

@@ -120,7 +120,6 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
}
n.optimistic = false
validatedCount.Inc()
return n.parent.setNodeAndParentValidated(ctx)
}

View File

@@ -51,10 +51,4 @@ var (
Help: "The number of times pruning happened.",
},
)
validatedNodesCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proto_array_validated_nodes_count",
Help: "The number of nodes that have been fully validated.",
},
)
)

View File

@@ -43,7 +43,6 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) er
if index == NonExistentNode {
break
}
validatedNodesCount.Inc()
}
return nil
}

View File

@@ -614,16 +614,22 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
node := copyNode(s.nodes[idx])
parentIdx, ok := canonicalNodesMap[node.parent]
if ok {
s.nodesIndices[node.root] = uint64(len(canonicalNodes))
canonicalNodesMap[idx] = uint64(len(canonicalNodes))
currentIndex := uint64(len(canonicalNodes))
s.nodesIndices[node.root] = currentIndex
s.payloadIndices[node.payloadHash] = currentIndex
canonicalNodesMap[idx] = currentIndex
node.parent = parentIdx
canonicalNodes = append(canonicalNodes, node)
} else {
// Remove node and synced tip that is not part of finalized branch.
// Remove node that is not part of finalized branch.
delete(s.nodesIndices, node.root)
delete(s.canonicalNodes, node.root)
delete(s.payloadIndices, node.payloadHash)
}
}
s.nodesIndices[finalizedRoot] = uint64(0)
s.canonicalNodes[finalizedRoot] = true
s.payloadIndices[finalizedNode.payloadHash] = uint64(0)
// Recompute the best child and descendant for each canonical nodes.
for _, node := range canonicalNodes {

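The hunk above keeps three lookup structures consistent while pruning: surviving nodes are appended to a fresh slice and every map is repointed at the new position. A compact sketch of that invariant with simplified assumed types (not the actual Store fields):

type prunedNode struct {
	root    [32]byte
	payload [32]byte
}

// compact rebuilds the node slice and both lookup maps so each surviving
// node is found at its new, compacted index; dropped nodes vanish from
// every map, matching the delete(...) calls in the hunk above.
func compact(nodes []prunedNode, keep func(prunedNode) bool) ([]prunedNode, map[[32]byte]uint64, map[[32]byte]uint64) {
	kept := make([]prunedNode, 0, len(nodes))
	rootIndices := make(map[[32]byte]uint64)
	payloadIndices := make(map[[32]byte]uint64)
	for _, n := range nodes {
		if !keep(n) {
			continue
		}
		idx := uint64(len(kept))
		rootIndices[n.root] = idx
		payloadIndices[n.payload] = idx
		kept = append(kept, n)
	}
	return kept, rootIndices, payloadIndices
}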
View File

@@ -375,7 +375,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
parent: uint64(numOfNodes - 2),
})
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
s := &Store{nodes: nodes, nodesIndices: indices}
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
@@ -413,7 +413,7 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
parent: uint64(numOfNodes - 2),
})
s := &Store{nodes: nodes, nodesIndices: indices}
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
// Finalized root is at index 11 so everything before 11 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
@@ -441,6 +441,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
bestDescendant: 1,
root: indexToHash(uint64(0)),
parent: NonExistentNode,
payloadHash: [32]byte{'A'},
},
{
slot: 101,
@@ -448,6 +449,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
parent: 0,
payloadHash: [32]byte{'B'},
},
{
slot: 101,
@@ -455,6 +457,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
parent: 0,
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
payloadHash: [32]byte{'C'},
},
}
s := &Store{
@@ -465,9 +468,22 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
indexToHash(uint64(1)): 1,
indexToHash(uint64(2)): 2,
},
canonicalNodes: map[[32]byte]bool{
indexToHash(uint64(0)): true,
indexToHash(uint64(1)): true,
indexToHash(uint64(2)): true,
},
payloadIndices: map[[32]byte]uint64{
[32]byte{'A'}: 0,
[32]byte{'B'}: 1,
[32]byte{'C'}: 2,
},
}
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
require.Equal(t, len(s.nodes), 1)
require.Equal(t, 1, len(s.nodes))
require.Equal(t, 1, len(s.nodesIndices))
require.Equal(t, 1, len(s.canonicalNodes))
require.Equal(t, 1, len(s.payloadIndices))
}
// This test starts with the following branching diagram
@@ -482,25 +498,74 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
// J -- K -- L
//
//
func TestStore_PruneSyncedTips(t *testing.T) {
func TestStore_PruneBranched(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
f.store.pruneThreshold = 0
require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
require.Equal(t, 1, f.NodeCount())
tests := []struct {
finalizedRoot [32]byte
wantedCanonical [32]byte
wantedNonCanonical [32]byte
canonicalCount int
payloadHash [32]byte
payloadIndex uint64
nonExistentPayload [32]byte
}{
{
[32]byte{'f'},
[32]byte{'f'},
[32]byte{'a'},
1,
[32]byte{'F'},
0,
[32]byte{'H'},
},
{
[32]byte{'d'},
[32]byte{'e'},
[32]byte{'i'},
3,
[32]byte{'E'},
1,
[32]byte{'C'},
},
{
[32]byte{'b'},
[32]byte{'f'},
[32]byte{'h'},
5,
[32]byte{'D'},
3,
[32]byte{'A'},
},
}
for _, tc := range tests {
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
f.store.pruneThreshold = 0
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'f'}))
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
require.Equal(t, true, f.IsCanonical([32]byte{'f'}))
require.NoError(t, f.Prune(ctx, tc.finalizedRoot))
require.Equal(t, tc.canonicalCount, len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical(tc.wantedCanonical))
require.Equal(t, false, f.IsCanonical(tc.wantedNonCanonical))
require.Equal(t, tc.payloadIndex, f.store.payloadIndices[tc.payloadHash])
_, ok := f.store.payloadIndices[tc.nonExistentPayload]
require.Equal(t, false, ok)
}
}
func TestStore_LeadsToViableHead(t *testing.T) {

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"auth.go",
"block_cache.go",
"block_reader.go",
"check_transition_config.go",
@@ -16,6 +15,7 @@ go_library(
"options.go",
"prometheus.go",
"provider.go",
"rpc_connection.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain",
@@ -59,7 +59,6 @@ go_library(
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -74,7 +73,6 @@ go_test(
name = "go_default_test",
size = "medium",
srcs = [
"auth_test.go",
"block_cache_test.go",
"block_reader_test.go",
"check_transition_config_test.go",
@@ -125,7 +123,6 @@ go_test(
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_ethereum_go_ethereum//trie:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/network"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/sirupsen/logrus"
)
@@ -81,7 +82,7 @@ func (s *Service) checkTransitionConfiguration(
return
}
case tm := <-ticker.C:
ctx, cancel := context.WithDeadline(ctx, tm.Add(DefaultRPCHTTPTimeout))
ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
err = s.ExchangeTransitionConfiguration(ctx, cfg)
s.handleExchangeConfigurationError(err)
if !hasTtdReached {
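
The new code bounds each exchange attempt by a deadline derived from the tick time, so one slow endpoint cannot stall the loop past a single interval. A standalone sketch of the ticker-plus-deadline pattern, with placeholder durations and a stand-in call rather than the Prysm API:

package sketch

import (
	"context"
	"time"
)

// pollWithDeadline invokes call once per tick, giving each attempt its own
// deadline relative to the tick time and cancelling promptly afterwards.
func pollWithDeadline(parent context.Context, call func(context.Context) error) {
	ticker := time.NewTicker(10 * time.Second) // placeholder interval
	defer ticker.Stop()
	for {
		select {
		case <-parent.Done():
			return
		case tm := <-ticker.C:
			ctx, cancel := context.WithDeadline(parent, tm.Add(6*time.Second))
			_ = call(ctx) // error handling elided in this sketch
			cancel()
		}
	}
}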

View File

@@ -10,9 +10,12 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -49,8 +52,8 @@ type EngineCaller interface {
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error)
}
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
@@ -174,6 +177,78 @@ func (s *Service) ExchangeTransitionConfiguration(
return nil
}
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
//
// Spec code:
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
// # `pow_chain` abstractly represents all blocks in the PoW chain
// for block in pow_chain:
// parent = pow_chain[block.parent_hash]
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// if block_reached_ttd and not parent_reached_ttd:
// return block
//
// return None
func (s *Service) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := s.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
if ctx.Err() != nil {
return nil, false, ctx.Err()
}
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if parentBlk == nil {
return nil, false, errors.New("parent execution block is nil")
}
if blockReachedTTD {
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
log.WithFields(logrus.Fields{
"number": blk.Number,
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
"td": blk.TotalDifficulty,
"parentTd": parentBlk.TotalDifficulty,
"ttd": terminalTotalDifficulty,
}).Info("Retrieved terminal block hash")
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
}
// LatestExecutionBlock fetches the latest execution engine block by calling
// eth_blockByNumber via JSON-RPC.
func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
@@ -251,3 +326,15 @@ func isTimeout(e error) bool {
t, ok := e.(httpTimeoutError)
return ok && t.Timeout()
}
func tDStringToUint256(td string) (*uint256.Int, error) {
b, err := hexutil.DecodeBig(td)
if err != nil {
return nil, err
}
i, overflows := uint256.FromBig(b)
if overflows {
return nil, errors.New("total difficulty overflowed")
}
return i, nil
}

View File

@@ -418,6 +418,155 @@ func TestClient_HTTP(t *testing.T) {
})
}
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
tests := []struct {
name string
paramsTd string
currentPowBlock *pb.ExecutionBlock
parentPowBlock *pb.ExecutionBlock
errLatestExecutionBlk error
wantTerminalBlockHash []byte
wantExists bool
errString string
}{
{
name: "config td overflows",
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
errString: "could not convert terminal total difficulty to uint256",
},
{
name: "could not get latest execution block",
paramsTd: "1",
errLatestExecutionBlk: errors.New("blah"),
errString: "could not get latest execution block",
},
{
name: "nil latest execution block",
paramsTd: "1",
errString: "latest execution block is nil",
},
{
name: "current execution block invalid TD",
paramsTd: "1",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "current execution block has zero hash parent",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: params.BeaconConfig().ZeroHash[:],
TotalDifficulty: "0x3",
},
},
{
name: "could not get parent block",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
errString: "could not get parent execution block",
},
{
name: "parent execution block invalid TD",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "1",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "happy case",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
wantExists: true,
wantTerminalBlockHash: []byte{'a'},
},
{
name: "ttd not reached",
paramsTd: "3",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x2",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = tt.paramsTd
params.OverrideBeaconConfig(cfg)
var m map[[32]byte]*pb.ExecutionBlock
if tt.parentPowBlock != nil {
m = map[[32]byte]*pb.ExecutionBlock{
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
}
}
client := mocks.EngineClient{
ErrLatestExecBlock: tt.errLatestExecutionBlk,
ExecutionBlock: tt.currentPowBlock,
BlockByHashMap: m,
}
b, e, err := client.GetTerminalBlockHash(context.Background())
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tt.wantExists, e)
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
}
})
}
}
func Test_tDStringToUint256(t *testing.T) {
i, err := tDStringToUint256("0x0")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(0), i)
i, err = tDStringToUint256("0x10000")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(65536), i)
_, err = tDStringToUint256("100")
require.ErrorContains(t, "hex string without 0x prefix", err)
_, err = tDStringToUint256("0xzzzzzz")
require.ErrorContains(t, "invalid hex string", err)
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
require.ErrorContains(t, "hex number > 256 bits", err)
}
func TestExchangeTransitionConfiguration(t *testing.T) {
fix := fixtures()
ctx := context.Background()

View File

@@ -1,9 +1,6 @@
package powchain
import (
"net/http"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
@@ -11,11 +8,9 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
)
// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
const DefaultRPCHTTPTimeout = time.Second * 6
type Option func(s *Service) error
// WithHttpEndpoints deduplicates and parses http endpoints for the powchain service to use,
@@ -38,20 +33,29 @@ func WithHttpEndpoints(endpointStrings []string) Option {
}
}
// WithJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithJWTSecret(secret []byte) Option {
return func(c *Service) error {
// WithHttpEndpointsAndJWTSecret sets the HTTP endpoints and the JWT secret used for authenticating the execution node JSON-RPC endpoint.
func WithHttpEndpointsAndJWTSecret(endpointStrings []string, secret []byte) Option {
return func(s *Service) error {
if len(secret) == 0 {
return nil
}
authTransport := &jwtTransport{
underlyingTransport: http.DefaultTransport,
jwtSecret: secret,
stringEndpoints := dedupEndpoints(endpointStrings)
endpoints := make([]network.Endpoint, len(stringEndpoints))
// Overwrite the authorization type for all endpoints to use bearer
// authentication.
for i, e := range stringEndpoints {
hEndpoint := HttpEndpoint(e)
hEndpoint.Auth.Method = authorization.Bearer
hEndpoint.Auth.Value = string(secret)
endpoints[i] = hEndpoint
}
c.cfg.httpRPCClient = &http.Client{
Timeout: DefaultRPCHTTPTimeout,
Transport: authTransport,
// Select the first HTTP endpoint in the provided list.
var currEndpoint network.Endpoint
if len(endpointStrings) > 0 {
currEndpoint = endpoints[0]
}
s.cfg.httpEndpoints = endpoints
s.cfg.currHttpEndpoint = currEndpoint
return nil
}
}
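
A hedged usage sketch for the new option: the endpoint URL and secret below are illustrative placeholders, a real node wires this up through FlagOptions (see the flags hunk further down this diff), and a production Service needs more options than shown here.

package main

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
)

func main() {
	ctx := context.Background()
	secret := []byte("0123456789abcdef0123456789abcdef") // placeholder secret
	svc, err := powchain.NewService(ctx,
		powchain.WithHttpEndpointsAndJWTSecret([]string{"http://localhost:8551"}, secret),
	)
	if err != nil {
		log.Fatal(err)
	}
	svc.Start()
}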

View File

@@ -0,0 +1,176 @@
package powchain
import (
"context"
"fmt"
"net/url"
"time"
"github.com/ethereum/go-ethereum/ethclient"
gethRPC "github.com/ethereum/go-ethereum/rpc"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
"github.com/prysmaticlabs/prysm/io/logs"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
)
func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
client, err := s.newRPCClientWithAuth(ctx, currEndpoint)
if err != nil {
return errors.Wrap(err, "could not dial execution node")
}
// Attach the clients to the service struct.
fetcher := ethclient.NewClient(client)
s.rpcClient = client
s.httpLogger = fetcher
s.eth1DataFetcher = fetcher
depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, fetcher)
if err != nil {
client.Close()
return errors.Wrap(err, "could not initialize deposit contract caller")
}
s.depositContractCaller = depositContractCaller
// Ensure we have the correct chain and deposit IDs.
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
client.Close()
return errors.Wrap(err, "could not make initial request to verify execution chain ID")
}
s.updateConnectedETH1(true)
s.runError = nil
return nil
}
// Every backOffPeriod seconds, this attempts to re-establish the execution client
// connection; if that fails, we fall back to the next endpoint, if one is defined.
func (s *Service) pollConnectionStatus(ctx context.Context) {
// Use a custom logger to only log errors
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
ticker := time.NewTicker(backOffPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
errorLogger(err, "Could not connect to execution client endpoint")
s.runError = err
s.fallbackToNextEndpoint()
}
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")
return
}
}
}
// Forces a retry of the execution client connection.
func (s *Service) retryExecutionClientConnection(ctx context.Context, err error) {
s.runError = err
s.updateConnectedETH1(false)
// Back off for a while before redialing.
time.Sleep(backOffPeriod)
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
s.runError = err
return
}
// Reset run error in the event of a successful connection.
s.runError = nil
}
// This performs a health check on our primary endpoint, and if it
// is ready to serve, we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint(ctx context.Context) {
primaryEndpoint := s.cfg.httpEndpoints[0]
// Return early if we are running on our primary
// endpoint.
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
return
}
if err := s.setupExecutionClientConnections(ctx, primaryEndpoint); err != nil {
log.Debugf("Primary endpoint not ready: %v", err)
return
}
s.updateCurrHttpEndpoint(primaryEndpoint)
}
// This is an inefficient way to search for the next endpoint, but given N is
// expected to be small, it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
currEndpoint := s.cfg.currHttpEndpoint
currIndex := 0
totalEndpoints := len(s.cfg.httpEndpoints)
for i, endpoint := range s.cfg.httpEndpoints {
if endpoint.Equals(currEndpoint) {
currIndex = i
break
}
}
nextIndex := currIndex + 1
if nextIndex >= totalEndpoints {
nextIndex = 0
}
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
if nextIndex != currIndex {
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
}
}
// Initializes an RPC connection with authentication headers.
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
// We need to handle both IPC and HTTP endpoints.
var client *gethRPC.Client
u, err := url.Parse(endpoint.Url)
if err != nil {
return nil, err
}
switch u.Scheme {
case "http", "https":
client, err = gethRPC.DialHTTPWithClient(endpoint.Url, endpoint.HttpClient())
if err != nil {
return nil, err
}
case "":
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {
return nil, err
}
client.SetHeader("Authorization", header)
}
return client, nil
}
// Checks the chain ID of the execution client to ensure
// it matches the chain ID Prysm expects from its local parameters.
func ensureCorrectExecutionChain(ctx context.Context, client *ethclient.Client) error {
cID, err := client.ChainID(ctx)
if err != nil {
return err
}
wantChainID := params.BeaconConfig().DepositChainID
if cID.Uint64() != wantChainID {
return fmt.Errorf("wanted chain ID %d, got %d", wantChainID, cID.Uint64())
}
return nil
}
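
Stripped of logging and the endpoint comparison, fallbackToNextEndpoint reduces to a wrap-around rotation. A pure-function sketch of just that step:

// nextEndpointIndex returns the index to try after curr, wrapping back to
// the first endpoint once the end of the list is reached.
func nextEndpointIndex(curr, total int) int {
	next := curr + 1
	if next >= total {
		return 0
	}
	return next
}

With a single configured endpoint the next index equals the current one, which is why the fallback log above only fires when the index actually changes.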

View File

@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"math/big"
"net/http"
"reflect"
"runtime/debug"
"sort"
@@ -39,10 +38,8 @@ import (
"github.com/prysmaticlabs/prysm/container/trie"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/io/logs"
"github.com/prysmaticlabs/prysm/monitoring/clientstats"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
@@ -114,6 +111,7 @@ type Chain interface {
// RPCDataFetcher defines a subset of methods conformed to by ETH1.0 RPC clients for
// fetching eth1 data from the clients.
type RPCDataFetcher interface {
Close()
HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
@@ -121,6 +119,7 @@ type RPCDataFetcher interface {
// RPCClient defines the rpc methods required to interact with the eth1 node.
type RPCClient interface {
Close()
BatchCall(b []gethRPC.BatchElem) error
CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
}
@@ -135,7 +134,6 @@ type config struct {
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
httpEndpoints []network.Endpoint
httpRPCClient *http.Client
currHttpEndpoint network.Endpoint
finalizedStateAtStartup state.BeaconState
}
@@ -228,14 +226,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Start a web3 service's main event loop.
func (s *Service) Start() {
if err := s.connectToPowChain(); err != nil {
log.WithError(err).Fatal("Could not connect to execution endpoint")
if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
log.WithError(err).Error("Could not connect to execution endpoint")
}
log.WithFields(logrus.Fields{
"endpoint": logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url),
}).Info("Connected to Ethereum execution client RPC")
// If the chain has not started already and we don't have access to eth1 nodes, we will not be
// able to generate the genesis state.
if !s.chainStartData.Chainstarted && s.cfg.currHttpEndpoint.Url == "" {
@@ -253,7 +246,7 @@ func (s *Service) Start() {
s.isRunning = true
// Poll the execution client connection and fallback if errors occur.
go s.pollConnectionStatus()
go s.pollConnectionStatus(s.ctx)
// Check transition configuration for the engine API client in the background.
go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))
@@ -266,7 +259,12 @@ func (s *Service) Stop() error {
if s.cancel != nil {
defer s.cancel()
}
s.closeClients()
if s.rpcClient != nil {
s.rpcClient.Close()
}
if s.eth1DataFetcher != nil {
s.eth1DataFetcher.Close()
}
return nil
}
@@ -338,10 +336,7 @@ func (s *Service) CurrentETH1Endpoint() string {
// CurrentETH1ConnectionError returns the error (if any) of the current connection.
func (s *Service) CurrentETH1ConnectionError() error {
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
httpClient.Close()
rpcClient.Close()
return err
return s.runError
}
// ETH1Endpoints returns the slice of HTTP endpoint URLs (default is 0th element).
@@ -358,10 +353,17 @@ func (s *Service) ETH1Endpoints() []string {
func (s *Service) ETH1ConnectionErrors() []error {
var errs []error
for _, ep := range s.cfg.httpEndpoints {
httpClient, rpcClient, err := s.dialETH1Nodes(ep)
httpClient.Close()
rpcClient.Close()
errs = append(errs, err)
client, err := s.newRPCClientWithAuth(s.ctx, ep)
if err != nil {
errs = append(errs, err)
continue
}
if err := ensureCorrectExecutionChain(s.ctx, ethclient.NewClient(client)); err != nil {
client.Close()
errs = append(errs, err)
continue
}
client.Close()
}
return errs
}
@@ -376,146 +378,6 @@ func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
return latestValidBlock, nil
}
func (s *Service) connectToPowChain() error {
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
if err != nil {
return errors.Wrap(err, "could not dial execution node")
}
depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, httpClient)
if err != nil {
return errors.Wrap(err, "could not initialize deposit contract caller")
}
if httpClient == nil || rpcClient == nil || depositContractCaller == nil {
return errors.New("execution client RPC is nil")
}
s.httpLogger = httpClient
s.eth1DataFetcher = httpClient
s.depositContractCaller = depositContractCaller
s.rpcClient = rpcClient
s.updateConnectedETH1(true)
s.runError = nil
return nil
}
func (s *Service) dialETH1Nodes(endpoint network.Endpoint) (*ethclient.Client, *gethRPC.Client, error) {
httpRPCClient, err := gethRPC.Dial(endpoint.Url)
if err != nil {
return nil, nil, err
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {
return nil, nil, err
}
httpRPCClient.SetHeader("Authorization", header)
}
httpClient := ethclient.NewClient(httpRPCClient)
// Add a method to clean-up and close clients in the event
// of any connection failure.
closeClients := func() {
httpRPCClient.Close()
httpClient.Close()
}
// Make a simple call to ensure we are actually connected to a working node.
cID, err := httpClient.ChainID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
nID, err := httpClient.NetworkID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
if cID.Uint64() != params.BeaconConfig().DepositChainID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect chain id, %d != %d", cID.Uint64(), params.BeaconConfig().DepositChainID)
}
if nID.Uint64() != params.BeaconConfig().DepositNetworkID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect network id, %d != %d", nID.Uint64(), params.BeaconConfig().DepositNetworkID)
}
return httpClient, httpRPCClient, nil
}
// closes down our active eth1 clients.
func (s *Service) closeClients() {
gethClient, ok := s.rpcClient.(*gethRPC.Client)
if ok {
gethClient.Close()
}
httpClient, ok := s.eth1DataFetcher.(*ethclient.Client)
if ok {
httpClient.Close()
}
}
func (s *Service) pollConnectionStatus() {
// Use a custom logger to only log errors
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
ticker := time.NewTicker(backOffPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
errConnect := s.connectToPowChain()
if errConnect != nil {
errorLogger(errConnect, "Could not connect to powchain endpoint")
s.runError = errConnect
s.fallbackToNextEndpoint()
continue
}
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")
return
}
}
}
// checks if the eth1 node is healthy and ready to serve before
// fetching data from it.
func (s *Service) isEth1NodeSynced() (bool, error) {
syncProg, err := s.eth1DataFetcher.SyncProgress(s.ctx)
if err != nil {
return false, err
}
if syncProg != nil {
return false, nil
}
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
return false, err
}
return !eth1HeadIsBehind(head.Time), nil
}
// Reconnect to eth1 node in case of any failure.
func (s *Service) retryETH1Node(err error) {
s.runError = err
s.updateConnectedETH1(false)
// Back off for a while before
// resuming dialing the eth1 node.
time.Sleep(backOffPeriod)
if err := s.connectToPowChain(); err != nil {
s.runError = err
return
}
// Reset run error in the event of a successful connection.
s.runError = nil
}
func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositContainer) error {
if len(ctrs) == 0 {
return nil
@@ -650,7 +512,7 @@ func (s *Service) handleETH1FollowDistance() {
fiveMinutesTimeout := prysmTime.Now().Add(-5 * time.Minute)
// check that web3 client is syncing
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) {
log.Warn("eth1 client is not syncing")
log.Warn("Execution client is not syncing")
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
@@ -680,6 +542,15 @@ func (s *Service) handleETH1FollowDistance() {
}
func (s *Service) initPOWService() {
// Use a custom logger to only log errors
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
// Run in a select loop to retry in the event of any failures.
for {
@@ -690,8 +561,8 @@ func (s *Service) initPOWService() {
ctx := s.ctx
header, err := s.eth1DataFetcher.HeaderByNumber(ctx, nil)
if err != nil {
log.Errorf("Unable to retrieve latest ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to retrieve latest execution client header")
continue
}
@@ -700,14 +571,14 @@ func (s *Service) initPOWService() {
s.latestEth1Data.BlockTime = header.Time
if err := s.processPastLogs(ctx); err != nil {
log.Errorf("Unable to process past logs %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to process past deposit contract logs")
continue
}
// Cache eth1 headers from our voting period.
if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
log.Errorf("Unable to process past headers %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to cache headers for execution client votes")
continue
}
// Handle edge case with embedded genesis state by fetching genesis header to determine
@@ -720,15 +591,15 @@ func (s *Service) initPOWService() {
if genHash != [32]byte{} {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
if err != nil {
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to retrieve proof-of-stake genesis block data")
continue
}
genBlock = genHeader.Number.Uint64()
}
s.chainStartData.GenesisBlock = genBlock
if err := s.savePowchainData(ctx); err != nil {
log.Errorf("Unable to save powchain data: %v", err)
errorLogger(err, "Unable to save execution client data")
}
}
return
@@ -757,17 +628,16 @@ func (s *Service) run(done <-chan struct{}) {
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
log.WithError(err).Debug("Could not fetch latest eth1 header")
s.retryETH1Node(err)
continue
}
if eth1HeadIsBehind(head.Time) {
s.retryExecutionClientConnection(s.ctx, err)
log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
s.retryETH1Node(errFarBehind)
continue
}
s.processBlockHeader(head)
s.handleETH1FollowDistance()
s.checkDefaultEndpoint()
s.checkDefaultEndpoint(s.ctx)
case <-chainstartTicker.C:
if s.chainStartData.Chainstarted {
chainstartTicker.Stop()
@@ -854,59 +724,6 @@ func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock
return hdr.Number.Uint64(), nil
}
// This performs a health check on our primary endpoint, and if it
// is ready to serve we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint() {
primaryEndpoint := s.cfg.httpEndpoints[0]
// Return early if we are running on our primary
// endpoint.
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
return
}
httpClient, rpcClient, err := s.dialETH1Nodes(primaryEndpoint)
if err != nil {
log.Debugf("Primary endpoint not ready: %v", err)
return
}
log.Info("Primary endpoint ready again, switching back to it")
// Close the clients and let our main connection routine
// properly connect with it.
httpClient.Close()
rpcClient.Close()
// Close current active clients.
s.closeClients()
// Switch back to primary endpoint and try connecting
// to it again.
s.updateCurrHttpEndpoint(primaryEndpoint)
s.retryETH1Node(nil)
}
// This is an inefficient way to search for the next endpoint, but given N is expected to be
// small ( < 25), it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
currEndpoint := s.cfg.currHttpEndpoint
currIndex := 0
totalEndpoints := len(s.cfg.httpEndpoints)
for i, endpoint := range s.cfg.httpEndpoints {
if endpoint.Equals(currEndpoint) {
currIndex = i
break
}
}
nextIndex := currIndex + 1
if nextIndex >= totalEndpoints {
nextIndex = 0
}
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
if nextIndex != currIndex {
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
}
}
// initializes our service from the provided eth1data object by initializing all the relevant
// fields and data.
func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ETH1ChainData) error {

View File

@@ -42,6 +42,8 @@ type goodLogger struct {
backend *backends.SimulatedBackend
}
func (_ *goodLogger) Close() {}
func (g *goodLogger) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
if g.backend == nil {
return new(event.Feed).Subscribe(ch), nil
@@ -80,6 +82,8 @@ type goodFetcher struct {
backend *backends.SimulatedBackend
}
func (_ *goodFetcher) Close() {}
func (g *goodFetcher) HeaderByHash(_ context.Context, hash common.Hash) (*gethTypes.Header, error) {
if bytes.Equal(hash.Bytes(), common.BytesToHash([]byte{0}).Bytes()) {
return nil, fmt.Errorf("expected block hash to be nonzero %v", hash)
@@ -225,10 +229,6 @@ func TestService_Eth1Synced(t *testing.T) {
now := time.Now()
assert.NoError(t, testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0))))
testAcc.Backend.Commit()
synced, err := web3Service.isEth1NodeSynced()
require.NoError(t, err)
assert.Equal(t, true, synced, "Expected eth1 nodes to be synced")
}
func TestFollowBlock_OK(t *testing.T) {
@@ -480,8 +480,8 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
s.chainStartData.Chainstarted = true
require.NoError(t, s.initDepositCaches(context.Background(), ctrs))
deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), nil)
fDeposits := s.cfg.depositCache.FinalizedDeposits(ctx)
deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), fDeposits.MerkleTrieIndex, nil)
assert.Equal(t, 0, len(deps))
}
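
Because Close() joined the RPCDataFetcher and RPCClient interfaces in the service.go hunk above, test doubles such as goodFetcher must implement it as well. Assuming goodFetcher implements the interface's remaining methods elsewhere in this file, a compile-time assertion catches any future gap at build time:

// Fails to compile if goodFetcher ever falls behind RPCDataFetcher again.
var _ RPCDataFetcher = (*goodFetcher)(nil)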

View File

@@ -26,6 +26,7 @@ go_library(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -2,25 +2,32 @@ package testing
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
// EngineClient --
type EngineClient struct {
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
ErrExecBlockByHash error
ErrForkchoiceUpdated error
ErrNewPayload error
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
ErrExecBlockByHash error
ErrForkchoiceUpdated error
ErrNewPayload error
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
TerminalBlockHash []byte
TerminalBlockHashExists bool
}
// NewPayload --
@@ -58,3 +65,52 @@ func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash) (*
}
return b, e.ErrExecBlockByHash
}
// GetTerminalBlockHash --
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := e.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
b, err := hexutil.DecodeBig(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
currentTotalDifficulty, _ := uint256.FromBig(b)
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if blockReachedTTD {
b, err := hexutil.DecodeBig(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentTotalDifficulty, _ := uint256.FromBig(b)
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
}

View File

@@ -144,6 +144,8 @@ type RPCClient struct {
Backend *backends.SimulatedBackend
}
func (_ *RPCClient) Close() {}
func (*RPCClient) CallContext(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
return nil
}

View File

@@ -77,7 +77,6 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -162,7 +161,6 @@ go_test(
"@com_github_d4l3k_messagediff//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -37,17 +37,24 @@ func (vs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethp
case version.Phase0:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlock")
continue
}
b.Block = &ethpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
case version.Altair:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *v2.SignedBeaconBlockAltair")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockAltair")
continue
}
b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
case version.Bellatrix:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockBellatrix)
if !ok {
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockBellatrix")
continue
}
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: phBlk}
}
if err := stream.Send(b); err != nil {

View File

@@ -159,9 +159,12 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1
var depositTrie *trie.SparseMerkleTrie
log.Infof("eth1 data with hash %#x and count %d at height %d", canonicalEth1Data.BlockHash, canonicalEth1Data.DepositCount, canonicalEth1DataHeight.Uint64())
finalizedDeposits := vs.DepositFetcher.FinalizedDeposits(ctx)
depositTrie = finalizedDeposits.Deposits
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, canonicalEth1DataHeight)
log.Infof("deposit trie has number of items of %d at index current index %d", depositTrie.NumOfItems(), finalizedDeposits.MerkleTrieIndex)
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, finalizedDeposits.MerkleTrieIndex, canonicalEth1DataHeight)
log.Infof("received %d non finalized deposits", len(upToEth1DataDeposits))
insertIndex := finalizedDeposits.MerkleTrieIndex + 1
for _, dep := range upToEth1DataDeposits {
@@ -178,7 +181,20 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1
// Log a warning here, as the cached trie is invalid.
if !valid {
log.Warnf("Cached deposit trie is invalid, rebuilding it now: %v", err)
return vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
rebuiltTrie, err := vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
if err != nil {
return nil, err
}
oldItems := depositTrie.Items()
newItems := rebuiltTrie.Items()
for i, item := range newItems {
if i >= len(oldItems) {
continue
}
if !bytes.Equal(item, oldItems[i]) {
log.Warnf("Index %d has a different result: wanted %#x but got %#x", i, item, oldItems[i])
}
}
}
return depositTrie, nil
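
The mismatch scan added above only logs divergent leaves. The same rule isolated as a self-contained helper (a sketch, not code from this diff) makes the comparison explicit:

package sketch

import "bytes"

// mismatchedIndices reports each index where the cached trie's leaves
// disagree with the rebuilt trie's; rebuilt leaves beyond the cached
// length are skipped, matching the loop above.
func mismatchedIndices(cached, rebuilt [][]byte) []int {
	var out []int
	for i, leaf := range rebuilt {
		if i >= len(cached) {
			continue
		}
		if !bytes.Equal(leaf, cached[i]) {
			out = append(out, i)
		}
	}
	return out
}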

View File

@@ -3,11 +3,7 @@ package validator
import (
"bytes"
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -181,79 +177,7 @@ func (vs *Server) getTerminalBlockHashIfExists(ctx context.Context) ([]byte, boo
return terminalBlockHash.Bytes(), true, nil
}
return vs.getPowBlockHashAtTerminalTotalDifficulty(ctx)
}
// This returns the valid terminal block hash based on total difficulty.
//
// Spec code:
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
// # `pow_chain` abstractly represents all blocks in the PoW chain
// for block in pow_chain:
// parent = pow_chain[block.parent_hash]
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// if block_reached_ttd and not parent_reached_ttd:
// return block
//
// return None
func (vs *Server) getPowBlockHashAtTerminalTotalDifficulty(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := vs.ExecutionEngineCaller.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
if ctx.Err() != nil {
return nil, false, ctx.Err()
}
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := vs.ExecutionEngineCaller.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if parentBlk == nil {
return nil, false, errors.New("parent execution block is nil")
}
if blockReachedTTD {
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
log.WithFields(logrus.Fields{
"number": blk.Number,
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
"td": blk.TotalDifficulty,
"parentTd": parentBlk.TotalDifficulty,
"ttd": terminalTotalDifficulty,
}).Info("Retrieved terminal block hash")
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
return vs.ExecutionEngineCaller.GetTerminalBlockHash(ctx)
}
// activationEpochNotReached returns true if the activation epoch has not been reached.
@@ -270,18 +194,6 @@ func activationEpochNotReached(slot types.Slot) bool {
return false
}
func tDStringToUint256(td string) (*uint256.Int, error) {
b, err := hexutil.DecodeBig(td)
if err != nil {
return nil, err
}
i, overflows := uint256.FromBig(b)
if overflows {
return nil, errors.New("total difficulty overflowed")
}
return i, nil
}
func emptyPayload() *enginev1.ExecutionPayload {
return &enginev1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
types "github.com/prysmaticlabs/eth2-types"
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
@@ -22,26 +21,6 @@ import (
"github.com/prysmaticlabs/prysm/testing/util"
)
func Test_tDStringToUint256(t *testing.T) {
i, err := tDStringToUint256("0x0")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(0), i)
i, err = tDStringToUint256("0x10000")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(65536), i)
_, err = tDStringToUint256("100")
require.ErrorContains(t, "hex string without 0x prefix", err)
_, err = tDStringToUint256("0xzzzzzz")
require.ErrorContains(t, "invalid hex string", err)
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
require.ErrorContains(t, "hex number > 256 bits", err)
}
func TestServer_activationEpochNotReached(t *testing.T) {
require.Equal(t, false, activationEpochNotReached(0))
@@ -154,137 +133,6 @@ func TestServer_getExecutionPayload(t *testing.T) {
}
}
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
tests := []struct {
name string
paramsTd string
currentPowBlock *pb.ExecutionBlock
parentPowBlock *pb.ExecutionBlock
errLatestExecutionBlk error
wantTerminalBlockHash []byte
wantExists bool
errString string
}{
{
name: "config td overflows",
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
errString: "could not convert terminal total difficulty to uint256",
},
{
name: "could not get latest execution block",
paramsTd: "1",
errLatestExecutionBlk: errors.New("blah"),
errString: "could not get latest execution block",
},
{
name: "nil latest execution block",
paramsTd: "1",
errString: "latest execution block is nil",
},
{
name: "current execution block invalid TD",
paramsTd: "1",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "current execution block has zero hash parent",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: params.BeaconConfig().ZeroHash[:],
TotalDifficulty: "0x3",
},
},
{
name: "could not get parent block",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
errString: "could not get parent execution block",
},
{
name: "parent execution block invalid TD",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "1",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "happy case",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
wantExists: true,
wantTerminalBlockHash: []byte{'a'},
},
{
name: "ttd not reached",
paramsTd: "3",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x2",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = tt.paramsTd
params.OverrideBeaconConfig(cfg)
var m map[[32]byte]*pb.ExecutionBlock
if tt.parentPowBlock != nil {
m = map[[32]byte]*pb.ExecutionBlock{
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
}
}
vs := &Server{
ExecutionEngineCaller: &powtesting.EngineClient{
ErrLatestExecBlock: tt.errLatestExecutionBlk,
ExecutionBlock: tt.currentPowBlock,
BlockByHashMap: m,
},
}
b, e, err := vs.getPowBlockHashAtTerminalTotalDifficulty(context.Background())
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tt.wantExists, e)
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
}
})
}
}
func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
tests := []struct {
name string

View File

@@ -45,7 +45,7 @@ func (f *blocksFetcher) nonSkippedSlotAfter(ctx context.Context, slot types.Slot
// Exit early if no peers with epoch higher than our known head are found.
if targetEpoch <= headEpoch {
return 0, errors.Wrapf(errSlotIsTooHigh, "no peers with epoch higher than our known head, peer epoch=%d, head=%d", targetEpoch, headEpoch)
return 0, errSlotIsTooHigh
}
// Transform peer list to avoid eclipsing (filter, shuffle, trim).

View File

@@ -2,7 +2,7 @@ package initialsync
import (
"context"
"github.com/pkg/errors"
"errors"
"time"
"github.com/libp2p/go-libp2p-core/peer"
@@ -285,7 +285,7 @@ func (q *blocksQueue) onScheduleEvent(ctx context.Context) eventHandlerFn {
}
if m.start > q.highestExpectedSlot {
m.setState(stateSkipped)
return m.state, errors.Wrapf(errSlotIsTooHigh, "slot=%d", m.start)
return m.state, errSlotIsTooHigh
}
blocksPerRequest := q.blocksFetcher.blocksPerSecond
if err := q.blocksFetcher.scheduleRequest(ctx, m.start, blocksPerRequest); err != nil {

View File

@@ -3,7 +3,6 @@ package sync
import (
"bytes"
"context"
"fmt"
"sync"
"time"
@@ -308,7 +307,7 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err
return nil
}
if !s.cfg.beaconDB.IsFinalizedBlock(ctx, bytesutil.ToBytes32(msg.FinalizedRoot)) {
return errors.Wrap(p2ptypes.ErrInvalidFinalizedRoot, fmt.Sprintf("root=%#x", msg.FinalizedRoot))
return p2ptypes.ErrInvalidFinalizedRoot
}
blk, err := s.cfg.beaconDB.Block(ctx, bytesutil.ToBytes32(msg.FinalizedRoot))
if err != nil {

View File

@@ -97,15 +97,10 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
return pubsub.ValidationIgnore, nil
}
// Check that the block being voted on isn't invalid.
errBadBlockRef := errors.New("bad block referenced in attestation data")
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) {
return pubsub.ValidationReject, errors.Wrapf(errBadBlockRef, "block=BeaconBlockRoot, root=%#x", m.Message.Aggregate.Data.BeaconBlockRoot)
}
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) {
return pubsub.ValidationReject, errors.Wrapf(errBadBlockRef, "block=Target, root=%#x", m.Message.Aggregate.Data.Target.Root)
}
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
return pubsub.ValidationReject, errors.Wrapf(errBadBlockRef, "block=Source, root=%#x", m.Message.Aggregate.Data.Source.Root)
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
}
// Verify aggregate attestation has not already been seen via aggregate gossip, within a block, or through the creation locally.

View File

@@ -189,8 +189,6 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
return pubsub.ValidationAccept, nil
}
var errIncorrectProposerIndex = errors.New("incorrect proposer index")
func (s *Service) validateBeaconBlock(ctx context.Context, blk block.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "sync.validateBeaconBlock")
defer span.End()
@@ -222,19 +220,13 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk block.SignedBeaco
if err != nil {
return err
}
sRoot, err := parentState.HashTreeRoot(ctx)
if err != nil {
log.Errorf("that's weird, htr fail")
}
log.Infof("validating block with slot=%d, state.slot=%d, block_root=%#x, state_root=%#x", blk.Block().Slot(), parentState.Slot(), blockRoot, sRoot)
idx, err := helpers.BeaconProposerIndex(ctx, parentState)
if err != nil {
return err
}
log.Infof("got BeaconProposerIndex=%d, block proposer index=%d", idx, blk.Block().ProposerIndex())
if blk.Block().ProposerIndex() != idx {
s.setBadBlock(ctx, blockRoot)
return errors.Wrapf(errIncorrectProposerIndex, "state slot=%d, root=%#x, block_root=%#x", parentState.Slot(), sRoot, blockRoot)
return errors.New("incorrect proposer index")
}
if err = s.validateBellatrixBeaconBlock(ctx, parentState, blk.Block()); err != nil {

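Stripped of the debug logging, the check above reduces to: derive the expected proposer from the parent state, compare it with the block's claimed index, and cache the root as bad on mismatch so later gossip lookups can reject cheaply. A standalone sketch of that shape, with stand-ins for Prysm's state access and setBadBlock:

package main

import (
	"errors"
	"fmt"
)

// checkProposer rejects the block and remembers its root when the claimed
// proposer index differs from the one derived from the parent state.
func checkProposer(expectedIdx, blockIdx uint64, markBad func()) error {
	if blockIdx != expectedIdx {
		markBad() // stands in for s.setBadBlock(ctx, blockRoot)
		return errors.New("incorrect proposer index")
	}
	return nil
}

func main() {
	badBlocks := map[string]bool{}
	err := checkProposer(12, 34, func() { badBlocks["0xabc"] = true })
	fmt.Println(err, badBlocks["0xabc"]) // incorrect proposer index true
}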

@@ -63,10 +63,7 @@ container_image(
container_bundle(
name = "image_bundle",
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image_with_creation_time",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image_with_creation_time",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
"gcr.io/prysmaticlabs/prysm/beacon-chain:trie-debug": ":image_with_creation_time",
},
tags = ["manual"],
visibility = ["//beacon-chain:__pkg__"],


@@ -15,7 +15,7 @@ var (
HTTPWeb3ProviderFlag = &cli.StringFlag{
Name: "http-web3provider",
Usage: "A mainchain web3 provider string http endpoint. Can contain auth header as well in the format --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Basic xxx\" for project secret (base64 encoded) and --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Bearer xxx\" for jwt use",
Value: "",
Value: "http://localhost:8545",
}
// ExecutionJWTSecretFlag provides a path to a file containing a hex-encoded string representing a 32 byte secret
// used to authenticate with an execution node via HTTP. This is required if using an HTTP connection, otherwise all requests

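The default for --http-web3provider above changes from empty to http://localhost:8545. Its usage string also documents an "endpoint,auth header" form; a hedged sketch of splitting that form (the helper name is hypothetical, not the flag's actual parser):

package main

import (
	"fmt"
	"strings"
)

// splitEndpoint separates the URL from an optional auth header value,
// following the "<url>,<auth>" convention in the usage string above.
func splitEndpoint(raw string) (url, auth string) {
	parts := strings.SplitN(raw, ",", 2)
	url = strings.TrimSpace(parts[0])
	if len(parts) == 2 {
		auth = strings.TrimSpace(parts[1])
	}
	return url, auth
}

func main() {
	u, a := splitEndpoint("https://goerli.infura.io/v3/xxxx,Bearer xxx")
	fmt.Println(u) // https://goerli.infura.io/v3/xxxx
	fmt.Println(a) // Bearer xxx
}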

@@ -27,7 +27,7 @@ func FlagOptions(c *cli.Context) ([]powchain.Option, error) {
powchain.WithEth1HeaderRequestLimit(c.Uint64(flags.Eth1HeaderReqLimit.Name)),
}
if len(jwtSecret) > 0 {
opts = append(opts, powchain.WithJWTSecret(jwtSecret))
opts = append(opts, powchain.WithHttpEndpointsAndJWTSecret(endpoints, jwtSecret))
}
return opts, nil
}

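The hunk above replaces WithJWTSecret with WithHttpEndpointsAndJWTSecret, so the endpoints and the secret travel through one functional option and are always applied together. A self-contained sketch of the pattern, using stand-in types rather than the real powchain service:

package main

import "fmt"

// Service and Option are stand-ins for the powchain types.
type Service struct {
	endpoints []string
	jwtSecret []byte
}

type Option func(*Service)

// WithHttpEndpointsAndJWTSecret bundles both values into a single option so
// callers cannot set one without the other.
func WithHttpEndpointsAndJWTSecret(endpoints []string, secret []byte) Option {
	return func(s *Service) {
		s.endpoints = endpoints
		s.jwtSecret = secret
	}
}

func NewService(opts ...Option) *Service {
	s := &Service{}
	for _, o := range opts {
		o(s) // options mutate the service in the order they are passed
	}
	return s
}

func main() {
	s := NewService(WithHttpEndpointsAndJWTSecret([]string{"http://localhost:8551"}, []byte("secret")))
	fmt.Println(s.endpoints, len(s.jwtSecret))
}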

@@ -48,6 +48,7 @@ go_test(
"@consensus_spec_tests_mainnet//:test_data",
"@consensus_spec_tests_minimal//:test_data",
"@eth2_networks//:configs",
"testdata/e2e_config.yaml",
],
gotags = ["develop"],
race = "on",
@@ -61,3 +62,9 @@ go_test(
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)
filegroup(
name = "custom_configs",
srcs = glob(["testdata/*.yaml"]),
visibility = ["//testing:__subpackages__"],
)


@@ -26,15 +26,6 @@ func BeaconConfig() *BeaconChainConfig {
func OverrideBeaconConfig(c *BeaconChainConfig) {
beaconConfigLock.Lock()
defer beaconConfigLock.Unlock()
c.InitializeForkSchedule()
name, ok := reverseConfigNames[c.ConfigName]
// if name collides with an existing config name, override it, because the fork versions probably conflict
if !ok {
// otherwise define it as the special "Dynamic" name, ie for a config loaded from a file at runtime
name = Dynamic
}
KnownConfigs[name] = func() *BeaconChainConfig { return c }
rebuildKnownForkVersions()
beaconConfig = c
}

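With the dynamic-name registration removed, OverrideBeaconConfig reduces to: take the lock, initialize derived fields, swap the package-level pointer. A standalone sketch of that shape, where Config and init stand in for BeaconChainConfig and InitializeForkSchedule:

package main

import "sync"

// Config stands in for BeaconChainConfig; init for InitializeForkSchedule.
type Config struct{ name string }

func (c *Config) init() {}

var (
	beaconConfigLock sync.Mutex
	beaconConfig     *Config
)

// Override mirrors the slimmed-down OverrideBeaconConfig above.
func Override(c *Config) {
	beaconConfigLock.Lock()
	defer beaconConfigLock.Unlock()
	c.init() // compute derived fields before the config becomes visible
	beaconConfig = c
}

func main() { Override(&Config{name: "e2e"}) }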

@@ -19,15 +19,6 @@ func BeaconConfig() *BeaconChainConfig {
// OverrideBeaconConfig(c). Any subsequent calls to params.BeaconConfig() will
// return this new configuration.
func OverrideBeaconConfig(c *BeaconChainConfig) {
c.InitializeForkSchedule()
name, ok := reverseConfigNames[c.ConfigName]
// if name collides with an existing config name, override it, because the fork versions probably conflict
if !ok {
// otherwise define it as the special "Dynamic" name, ie for a config loaded from a file at runtime
name = Dynamic
}
KnownConfigs[name] = func() *BeaconChainConfig { return c }
rebuildKnownForkVersions()
beaconConfig = c
}


@@ -70,6 +70,7 @@ func LoadChainConfigFile(chainConfigFileName string, conf *BeaconChainConfig) {
// recompute SqrRootSlotsPerEpoch constant to handle non-standard values of SlotsPerEpoch
conf.SqrRootSlotsPerEpoch = types.Slot(math.IntegerSquareRoot(uint64(conf.SlotsPerEpoch)))
log.Debugf("Config file values: %+v", conf)
conf.InitializeForkSchedule()
OverrideBeaconConfig(conf)
}


@@ -18,94 +18,94 @@ import (
var placeholderFields = []string{"UPDATE_TIMEOUT", "INTERVALS_PER_SLOT"}
func TestLoadConfigFileMainnet(t *testing.T) {
func TestLoadConfigFile(t *testing.T) {
// See https://media.githubusercontent.com/media/ethereum/consensus-spec-tests/master/tests/minimal/config/phase0.yaml
assertVals := func(name string, fields []string, c1, c2 *params.BeaconChainConfig) {
assertVals := func(name string, fields []string, expected, actual *params.BeaconChainConfig) {
// Misc params.
assert.Equal(t, c1.MaxCommitteesPerSlot, c2.MaxCommitteesPerSlot, "%s: MaxCommitteesPerSlot", name)
assert.Equal(t, c1.TargetCommitteeSize, c2.TargetCommitteeSize, "%s: TargetCommitteeSize", name)
assert.Equal(t, c1.MaxValidatorsPerCommittee, c2.MaxValidatorsPerCommittee, "%s: MaxValidatorsPerCommittee", name)
assert.Equal(t, c1.MinPerEpochChurnLimit, c2.MinPerEpochChurnLimit, "%s: MinPerEpochChurnLimit", name)
assert.Equal(t, c1.ChurnLimitQuotient, c2.ChurnLimitQuotient, "%s: ChurnLimitQuotient", name)
assert.Equal(t, c1.ShuffleRoundCount, c2.ShuffleRoundCount, "%s: ShuffleRoundCount", name)
assert.Equal(t, c1.MinGenesisActiveValidatorCount, c2.MinGenesisActiveValidatorCount, "%s: MinGenesisActiveValidatorCount", name)
assert.Equal(t, c1.MinGenesisTime, c2.MinGenesisTime, "%s: MinGenesisTime", name)
assert.Equal(t, c1.HysteresisQuotient, c2.HysteresisQuotient, "%s: HysteresisQuotient", name)
assert.Equal(t, c1.HysteresisDownwardMultiplier, c2.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
assert.Equal(t, c1.HysteresisUpwardMultiplier, c2.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
assert.Equal(t, expected.MaxCommitteesPerSlot, actual.MaxCommitteesPerSlot, "%s: MaxCommitteesPerSlot", name)
assert.Equal(t, expected.TargetCommitteeSize, actual.TargetCommitteeSize, "%s: TargetCommitteeSize", name)
assert.Equal(t, expected.MaxValidatorsPerCommittee, actual.MaxValidatorsPerCommittee, "%s: MaxValidatorsPerCommittee", name)
assert.Equal(t, expected.MinPerEpochChurnLimit, actual.MinPerEpochChurnLimit, "%s: MinPerEpochChurnLimit", name)
assert.Equal(t, expected.ChurnLimitQuotient, actual.ChurnLimitQuotient, "%s: ChurnLimitQuotient", name)
assert.Equal(t, expected.ShuffleRoundCount, actual.ShuffleRoundCount, "%s: ShuffleRoundCount", name)
assert.Equal(t, expected.MinGenesisActiveValidatorCount, actual.MinGenesisActiveValidatorCount, "%s: MinGenesisActiveValidatorCount", name)
assert.Equal(t, expected.MinGenesisTime, actual.MinGenesisTime, "%s: MinGenesisTime", name)
assert.Equal(t, expected.HysteresisQuotient, actual.HysteresisQuotient, "%s: HysteresisQuotient", name)
assert.Equal(t, expected.HysteresisDownwardMultiplier, actual.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
assert.Equal(t, expected.HysteresisUpwardMultiplier, actual.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
// Fork Choice params.
assert.Equal(t, c1.SafeSlotsToUpdateJustified, c2.SafeSlotsToUpdateJustified, "%s: SafeSlotsToUpdateJustified", name)
assert.Equal(t, expected.SafeSlotsToUpdateJustified, actual.SafeSlotsToUpdateJustified, "%s: SafeSlotsToUpdateJustified", name)
// Validator params.
assert.Equal(t, c1.Eth1FollowDistance, c2.Eth1FollowDistance, "%s: Eth1FollowDistance", name)
assert.Equal(t, c1.TargetAggregatorsPerCommittee, c2.TargetAggregatorsPerCommittee, "%s: TargetAggregatorsPerCommittee", name)
assert.Equal(t, c1.RandomSubnetsPerValidator, c2.RandomSubnetsPerValidator, "%s: RandomSubnetsPerValidator", name)
assert.Equal(t, c1.EpochsPerRandomSubnetSubscription, c2.EpochsPerRandomSubnetSubscription, "%s: EpochsPerRandomSubnetSubscription", name)
assert.Equal(t, c1.SecondsPerETH1Block, c2.SecondsPerETH1Block, "%s: SecondsPerETH1Block", name)
assert.Equal(t, expected.Eth1FollowDistance, actual.Eth1FollowDistance, "%s: Eth1FollowDistance", name)
assert.Equal(t, expected.TargetAggregatorsPerCommittee, actual.TargetAggregatorsPerCommittee, "%s: TargetAggregatorsPerCommittee", name)
assert.Equal(t, expected.RandomSubnetsPerValidator, actual.RandomSubnetsPerValidator, "%s: RandomSubnetsPerValidator", name)
assert.Equal(t, expected.EpochsPerRandomSubnetSubscription, actual.EpochsPerRandomSubnetSubscription, "%s: EpochsPerRandomSubnetSubscription", name)
assert.Equal(t, expected.SecondsPerETH1Block, actual.SecondsPerETH1Block, "%s: SecondsPerETH1Block", name)
// Deposit contract.
assert.Equal(t, c1.DepositChainID, c2.DepositChainID, "%s: DepositChainID", name)
assert.Equal(t, c1.DepositNetworkID, c2.DepositNetworkID, "%s: DepositNetworkID", name)
assert.Equal(t, c1.DepositContractAddress, c2.DepositContractAddress, "%s: DepositContractAddress", name)
assert.Equal(t, expected.DepositChainID, actual.DepositChainID, "%s: DepositChainID", name)
assert.Equal(t, expected.DepositNetworkID, actual.DepositNetworkID, "%s: DepositNetworkID", name)
assert.Equal(t, expected.DepositContractAddress, actual.DepositContractAddress, "%s: DepositContractAddress", name)
// Gwei values.
assert.Equal(t, c1.MinDepositAmount, c2.MinDepositAmount, "%s: MinDepositAmount", name)
assert.Equal(t, c1.MaxEffectiveBalance, c2.MaxEffectiveBalance, "%s: MaxEffectiveBalance", name)
assert.Equal(t, c1.EjectionBalance, c2.EjectionBalance, "%s: EjectionBalance", name)
assert.Equal(t, c1.EffectiveBalanceIncrement, c2.EffectiveBalanceIncrement, "%s: EffectiveBalanceIncrement", name)
assert.Equal(t, expected.MinDepositAmount, actual.MinDepositAmount, "%s: MinDepositAmount", name)
assert.Equal(t, expected.MaxEffectiveBalance, actual.MaxEffectiveBalance, "%s: MaxEffectiveBalance", name)
assert.Equal(t, expected.EjectionBalance, actual.EjectionBalance, "%s: EjectionBalance", name)
assert.Equal(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement, "%s: EffectiveBalanceIncrement", name)
// Initial values.
assert.DeepEqual(t, c1.GenesisForkVersion, c2.GenesisForkVersion, "%s: GenesisForkVersion", name)
assert.DeepEqual(t, c1.BLSWithdrawalPrefixByte, c2.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
assert.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
// Time parameters.
assert.Equal(t, c1.GenesisDelay, c2.GenesisDelay, "%s: GenesisDelay", name)
assert.Equal(t, c1.SecondsPerSlot, c2.SecondsPerSlot, "%s: SecondsPerSlot", name)
assert.Equal(t, c1.MinAttestationInclusionDelay, c2.MinAttestationInclusionDelay, "%s: MinAttestationInclusionDelay", name)
assert.Equal(t, c1.SlotsPerEpoch, c2.SlotsPerEpoch, "%s: SlotsPerEpoch", name)
assert.Equal(t, c1.MinSeedLookahead, c2.MinSeedLookahead, "%s: MinSeedLookahead", name)
assert.Equal(t, c1.MaxSeedLookahead, c2.MaxSeedLookahead, "%s: MaxSeedLookahead", name)
assert.Equal(t, c1.EpochsPerEth1VotingPeriod, c2.EpochsPerEth1VotingPeriod, "%s: EpochsPerEth1VotingPeriod", name)
assert.Equal(t, c1.SlotsPerHistoricalRoot, c2.SlotsPerHistoricalRoot, "%s: SlotsPerHistoricalRoot", name)
assert.Equal(t, c1.MinValidatorWithdrawabilityDelay, c2.MinValidatorWithdrawabilityDelay, "%s: MinValidatorWithdrawabilityDelay", name)
assert.Equal(t, c1.ShardCommitteePeriod, c2.ShardCommitteePeriod, "%s: ShardCommitteePeriod", name)
assert.Equal(t, c1.MinEpochsToInactivityPenalty, c2.MinEpochsToInactivityPenalty, "%s: MinEpochsToInactivityPenalty", name)
assert.Equal(t, expected.GenesisDelay, actual.GenesisDelay, "%s: GenesisDelay", name)
assert.Equal(t, expected.SecondsPerSlot, actual.SecondsPerSlot, "%s: SecondsPerSlot", name)
assert.Equal(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay, "%s: MinAttestationInclusionDelay", name)
assert.Equal(t, expected.SlotsPerEpoch, actual.SlotsPerEpoch, "%s: SlotsPerEpoch", name)
assert.Equal(t, expected.MinSeedLookahead, actual.MinSeedLookahead, "%s: MinSeedLookahead", name)
assert.Equal(t, expected.MaxSeedLookahead, actual.MaxSeedLookahead, "%s: MaxSeedLookahead", name)
assert.Equal(t, expected.EpochsPerEth1VotingPeriod, actual.EpochsPerEth1VotingPeriod, "%s: EpochsPerEth1VotingPeriod", name)
assert.Equal(t, expected.SlotsPerHistoricalRoot, actual.SlotsPerHistoricalRoot, "%s: SlotsPerHistoricalRoot", name)
assert.Equal(t, expected.MinValidatorWithdrawabilityDelay, actual.MinValidatorWithdrawabilityDelay, "%s: MinValidatorWithdrawabilityDelay", name)
assert.Equal(t, expected.ShardCommitteePeriod, actual.ShardCommitteePeriod, "%s: ShardCommitteePeriod", name)
assert.Equal(t, expected.MinEpochsToInactivityPenalty, actual.MinEpochsToInactivityPenalty, "%s: MinEpochsToInactivityPenalty", name)
// State vector lengths.
assert.Equal(t, c1.EpochsPerHistoricalVector, c2.EpochsPerHistoricalVector, "%s: EpochsPerHistoricalVector", name)
assert.Equal(t, c1.EpochsPerSlashingsVector, c2.EpochsPerSlashingsVector, "%s: EpochsPerSlashingsVector", name)
assert.Equal(t, c1.HistoricalRootsLimit, c2.HistoricalRootsLimit, "%s: HistoricalRootsLimit", name)
assert.Equal(t, c1.ValidatorRegistryLimit, c2.ValidatorRegistryLimit, "%s: ValidatorRegistryLimit", name)
assert.Equal(t, expected.EpochsPerHistoricalVector, actual.EpochsPerHistoricalVector, "%s: EpochsPerHistoricalVector", name)
assert.Equal(t, expected.EpochsPerSlashingsVector, actual.EpochsPerSlashingsVector, "%s: EpochsPerSlashingsVector", name)
assert.Equal(t, expected.HistoricalRootsLimit, actual.HistoricalRootsLimit, "%s: HistoricalRootsLimit", name)
assert.Equal(t, expected.ValidatorRegistryLimit, actual.ValidatorRegistryLimit, "%s: ValidatorRegistryLimit", name)
// Reward and penalty quotients.
assert.Equal(t, c1.BaseRewardFactor, c2.BaseRewardFactor, "%s: BaseRewardFactor", name)
assert.Equal(t, c1.WhistleBlowerRewardQuotient, c2.WhistleBlowerRewardQuotient, "%s: WhistleBlowerRewardQuotient", name)
assert.Equal(t, c1.ProposerRewardQuotient, c2.ProposerRewardQuotient, "%s: ProposerRewardQuotient", name)
assert.Equal(t, c1.InactivityPenaltyQuotient, c2.InactivityPenaltyQuotient, "%s: InactivityPenaltyQuotient", name)
assert.Equal(t, c1.InactivityPenaltyQuotientAltair, c2.InactivityPenaltyQuotientAltair, "%s: InactivityPenaltyQuotientAltair", name)
assert.Equal(t, c1.MinSlashingPenaltyQuotient, c2.MinSlashingPenaltyQuotient, "%s: MinSlashingPenaltyQuotient", name)
assert.Equal(t, c1.MinSlashingPenaltyQuotientAltair, c2.MinSlashingPenaltyQuotientAltair, "%s: MinSlashingPenaltyQuotientAltair", name)
assert.Equal(t, c1.ProportionalSlashingMultiplier, c2.ProportionalSlashingMultiplier, "%s: ProportionalSlashingMultiplier", name)
assert.Equal(t, c1.ProportionalSlashingMultiplierAltair, c2.ProportionalSlashingMultiplierAltair, "%s: ProportionalSlashingMultiplierAltair", name)
assert.Equal(t, expected.BaseRewardFactor, actual.BaseRewardFactor, "%s: BaseRewardFactor", name)
assert.Equal(t, expected.WhistleBlowerRewardQuotient, actual.WhistleBlowerRewardQuotient, "%s: WhistleBlowerRewardQuotient", name)
assert.Equal(t, expected.ProposerRewardQuotient, actual.ProposerRewardQuotient, "%s: ProposerRewardQuotient", name)
assert.Equal(t, expected.InactivityPenaltyQuotient, actual.InactivityPenaltyQuotient, "%s: InactivityPenaltyQuotient", name)
assert.Equal(t, expected.InactivityPenaltyQuotientAltair, actual.InactivityPenaltyQuotientAltair, "%s: InactivityPenaltyQuotientAltair", name)
assert.Equal(t, expected.MinSlashingPenaltyQuotient, actual.MinSlashingPenaltyQuotient, "%s: MinSlashingPenaltyQuotient", name)
assert.Equal(t, expected.MinSlashingPenaltyQuotientAltair, actual.MinSlashingPenaltyQuotientAltair, "%s: MinSlashingPenaltyQuotientAltair", name)
assert.Equal(t, expected.ProportionalSlashingMultiplier, actual.ProportionalSlashingMultiplier, "%s: ProportionalSlashingMultiplier", name)
assert.Equal(t, expected.ProportionalSlashingMultiplierAltair, actual.ProportionalSlashingMultiplierAltair, "%s: ProportionalSlashingMultiplierAltair", name)
// Max operations per block.
assert.Equal(t, c1.MaxProposerSlashings, c2.MaxProposerSlashings, "%s: MaxProposerSlashings", name)
assert.Equal(t, c1.MaxAttesterSlashings, c2.MaxAttesterSlashings, "%s: MaxAttesterSlashings", name)
assert.Equal(t, c1.MaxAttestations, c2.MaxAttestations, "%s: MaxAttestations", name)
assert.Equal(t, c1.MaxDeposits, c2.MaxDeposits, "%s: MaxDeposits", name)
assert.Equal(t, c1.MaxVoluntaryExits, c2.MaxVoluntaryExits, "%s: MaxVoluntaryExits", name)
assert.Equal(t, expected.MaxProposerSlashings, actual.MaxProposerSlashings, "%s: MaxProposerSlashings", name)
assert.Equal(t, expected.MaxAttesterSlashings, actual.MaxAttesterSlashings, "%s: MaxAttesterSlashings", name)
assert.Equal(t, expected.MaxAttestations, actual.MaxAttestations, "%s: MaxAttestations", name)
assert.Equal(t, expected.MaxDeposits, actual.MaxDeposits, "%s: MaxDeposits", name)
assert.Equal(t, expected.MaxVoluntaryExits, actual.MaxVoluntaryExits, "%s: MaxVoluntaryExits", name)
// Signature domains.
assert.Equal(t, c1.DomainBeaconProposer, c2.DomainBeaconProposer, "%s: DomainBeaconProposer", name)
assert.Equal(t, c1.DomainBeaconAttester, c2.DomainBeaconAttester, "%s: DomainBeaconAttester", name)
assert.Equal(t, c1.DomainRandao, c2.DomainRandao, "%s: DomainRandao", name)
assert.Equal(t, c1.DomainDeposit, c2.DomainDeposit, "%s: DomainDeposit", name)
assert.Equal(t, c1.DomainVoluntaryExit, c2.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
assert.Equal(t, c1.DomainSelectionProof, c2.DomainSelectionProof, "%s: DomainSelectionProof", name)
assert.Equal(t, c1.DomainAggregateAndProof, c2.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
assert.Equal(t, expected.DomainBeaconProposer, actual.DomainBeaconProposer, "%s: DomainBeaconProposer", name)
assert.Equal(t, expected.DomainBeaconAttester, actual.DomainBeaconAttester, "%s: DomainBeaconAttester", name)
assert.Equal(t, expected.DomainRandao, actual.DomainRandao, "%s: DomainRandao", name)
assert.Equal(t, expected.DomainDeposit, actual.DomainDeposit, "%s: DomainDeposit", name)
assert.Equal(t, expected.DomainVoluntaryExit, actual.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
assert.Equal(t, expected.DomainSelectionProof, actual.DomainSelectionProof, "%s: DomainSelectionProof", name)
assert.Equal(t, expected.DomainAggregateAndProof, actual.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
assertYamlFieldsMatch(t, name, fields, c1, c2)
assertYamlFieldsMatch(t, name, fields, expected, actual)
}
t.Run("mainnet", func(t *testing.T) {
@@ -129,6 +129,17 @@ func TestLoadConfigFileMainnet(t *testing.T) {
fields := fieldsFromYamls(t, append(minimalPresetsFiles, minimalConfigFile))
assertVals("minimal", fields, params.MinimalSpecConfig(), params.BeaconConfig())
})
t.Run("e2e", func(t *testing.T) {
minimalPresetsFiles := presetsFilePath(t, "minimal")
for _, fp := range minimalPresetsFiles {
params.LoadChainConfigFile(fp, nil)
}
configFile := "testdata/e2e_config.yaml"
params.LoadChainConfigFile(configFile, nil)
fields := fieldsFromYamls(t, append(minimalPresetsFiles, configFile))
assertVals("e2e", fields, params.E2ETestConfig(), params.BeaconConfig())
})
}
func TestLoadConfigFile_OverwriteCorrectly(t *testing.T) {

config/params/testdata/e2e_config.yaml (vendored, new file)

@@ -0,0 +1,118 @@
# e2e config
# Extends the minimal preset
PRESET_BASE: 'minimal'
# Transition
# ---------------------------------------------------------------
# TBD: 2**256-2**10 is the spec placeholder; e2e uses 600
TERMINAL_TOTAL_DIFFICULTY: 600
# By default, don't use these params
#TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
#TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
# Genesis
# ---------------------------------------------------------------
# [customized]
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 256 # Override for e2e tests
# Jan 3, 2020
MIN_GENESIS_TIME: 1578009600
# Highest byte set to 0xfd to avoid collisions with mainnet versioning
GENESIS_FORK_VERSION: 0x000000fd
# [customized] Faster to spin up testnets, but does not give validators reasonable warning time before genesis
GENESIS_DELAY: 10 # Override for e2e tests
# Forking
# ---------------------------------------------------------------
# Values provided for illustrative purposes.
# Individual tests/testnets may set different values.
# Altair
ALTAIR_FORK_VERSION: 0x010000fd
ALTAIR_FORK_EPOCH: 6 # Override for e2e
# Bellatrix
BELLATRIX_FORK_VERSION: 0x020000fd
BELLATRIX_FORK_EPOCH: 8
# Sharding
SHARDING_FORK_VERSION: 0x030000fd
SHARDING_FORK_EPOCH: 18446744073709551615
# Time parameters
# ---------------------------------------------------------------
# [customized] Faster for testing purposes
SECONDS_PER_SLOT: 10 # Override for e2e tests
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 2 # Override for e2e tests
# 2**8 (= 256) epochs
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
SHARD_COMMITTEE_PERIOD: 4 # Override for e2e tests
# [customized] process deposits more quickly, but insecure
ETH1_FOLLOW_DISTANCE: 4 # Override for e2e tests
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# [customized] scale queue churn at much lower validator counts for testing
CHURN_LIMIT_QUOTIENT: 65536
# Fork choice
# ---------------------------------------------------------------
# 70%
PROPOSER_SCORE_BOOST: 70
# Deposit contract
# ---------------------------------------------------------------
# Local eth1 dev chain rather than the Goerli testnet
DEPOSIT_CHAIN_ID: 1337 # Override for e2e tests
DEPOSIT_NETWORK_ID: 1337 # Override for e2e tests
# Configured on a per testnet basis
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
# Updated penalty values
# ---------------------------------------------------------------
# 3 * 2**24 (= 50,331,648)
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648
# 2**6 (= 64)
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
# 2
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
# Sync committee
# ---------------------------------------------------------------
# [customized]
SYNC_COMMITTEE_SIZE: 32
# [customized]
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
# Sync protocol
# ---------------------------------------------------------------
# 1
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
# Other e2e overrides
# ---------------------------------------------------------------
CONFIG_NAME: "end-to-end"
SLOTS_PER_EPOCH: 6
EPOCHS_PER_ETH1_VOTING_PERIOD: 2
MAX_SEED_LOOKAHEAD: 1

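For orientation, the overrides above make the e2e chain very fast: with 6 slots per epoch and 10 seconds per slot, Altair at epoch 6 activates about six minutes after genesis and Bellatrix at epoch 8 about eight. A quick arithmetic check, with the constants copied from the YAML:

package main

import "fmt"

const (
	slotsPerEpoch  = 6  // SLOTS_PER_EPOCH
	secondsPerSlot = 10 // SECONDS_PER_SLOT
)

// activation returns the first slot of an epoch and its offset from genesis.
func activation(epoch int) (slot, seconds int) {
	slot = epoch * slotsPerEpoch
	return slot, slot * secondsPerSlot
}

func main() {
	s, t := activation(6) // ALTAIR_FORK_EPOCH
	fmt.Printf("Altair: slot %d, %ds after genesis\n", s, t) // slot 36, 360s
	s, t = activation(8) // BELLATRIX_FORK_EPOCH
	fmt.Printf("Bellatrix: slot %d, %ds after genesis\n", s, t) // slot 48, 480s
}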

@@ -45,7 +45,7 @@ func E2ETestConfig() *BeaconChainConfig {
e2eConfig.DepositChainID = 1337 // Chain ID of eth1 dev net.
e2eConfig.DepositNetworkID = 1337 // Network ID of eth1 dev net.
// Altair Fork Parameters.
// Fork Parameters.
e2eConfig.AltairForkEpoch = altairE2EForkEpoch
e2eConfig.BellatrixForkEpoch = bellatrixE2EForkEpoch


@@ -15,7 +15,6 @@ const (
Pyrmont
Prater
EndToEndMainnet
Dynamic
)
// ConfigName enum describes the type of known network in use.
@@ -37,9 +36,7 @@ var ConfigNames = map[ConfigName]string{
Pyrmont: "pyrmont",
Prater: "prater",
EndToEndMainnet: "end-to-end-mainnet",
Dynamic: "dynamic",
}
var reverseConfigNames map[string]ConfigName
// KnownConfigs provides an index of all known BeaconChainConfig values.
var KnownConfigs = map[ConfigName]func() *BeaconChainConfig{
@@ -66,18 +63,6 @@ func ConfigForVersion(version [fieldparams.VersionLength]byte) (*BeaconChainConf
}
func init() {
rebuildKnownForkVersions()
buildReverseConfigName()
}
func buildReverseConfigName() {
reverseConfigNames = make(map[string]ConfigName)
for cn, s := range ConfigNames {
reverseConfigNames[s] = cn
}
}
func rebuildKnownForkVersions() {
knownForkVersions = make(map[[fieldparams.VersionLength]byte]ConfigName)
for n, cfunc := range KnownConfigs {
cfg := cfunc()


@@ -1274,12 +1274,6 @@ def prysm_deps():
sum = "h1:utua3L2IbQJmauC5IXdEA547bcoU5dozgQAfc8Onsg4=",
version = "v0.0.0-20181222135242-d2cdd8c08219",
)
go_repository(
name = "com_github_gomarkdown_markdown",
importpath = "github.com/gomarkdown/markdown",
sum = "h1:YVvt637ygnOO9qjLBVmPOvrUmCz/i8YECSu/8UlOQW0=",
version = "v0.0.0-20220310201231-552c6011c0b8",
)
go_repository(
name = "com_github_google_btree",


@@ -81,40 +81,40 @@ func FromForkVersion(cv [fieldparams.VersionLength]byte) (*VersionedUnmarshaler,
// UnmarshalBeaconState uses internal knowledge in the VersionedUnmarshaler to pick the right concrete BeaconState type,
// then Unmarshal()s the type and returns an instance of state.BeaconState if successful.
func (cf *VersionedUnmarshaler) UnmarshalBeaconState(marshaled []byte) (s state.BeaconState, err error) {
info := fmt.Sprintf("fork=%s, config=%s", version.String(cf.Fork), cf.Config.ConfigName)
forkName := version.String(cf.Fork)
switch fork := cf.Fork; fork {
case version.Phase0:
st := &ethpb.BeaconState{}
err = st.UnmarshalSSZ(marshaled)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal state, detected info=%s", info)
return nil, errors.Wrapf(err, "failed to unmarshal state, detected fork=%s", forkName)
}
s, err = v1.InitializeFromProtoUnsafe(st)
if err != nil {
return nil, errors.Wrapf(err, "failed to init state trie from state, detected info=%s", info)
return nil, errors.Wrapf(err, "failed to init state trie from state, detected fork=%s", forkName)
}
case version.Altair:
st := &ethpb.BeaconStateAltair{}
err = st.UnmarshalSSZ(marshaled)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal state, detected info=%s", info)
return nil, errors.Wrapf(err, "failed to unmarshal state, detected fork=%s", forkName)
}
s, err = v2.InitializeFromProtoUnsafe(st)
if err != nil {
return nil, errors.Wrapf(err, "failed to init state trie from state, detected info=%s", info)
return nil, errors.Wrapf(err, "failed to init state trie from state, detected fork=%s", forkName)
}
case version.Bellatrix:
st := &ethpb.BeaconStateBellatrix{}
err = st.UnmarshalSSZ(marshaled)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal state, detected info=%s", info)
return nil, errors.Wrapf(err, "failed to unmarshal state, detected fork=%s", forkName)
}
s, err = v3.InitializeFromProtoUnsafe(st)
if err != nil {
return nil, errors.Wrapf(err, "failed to init state trie from state, detected info=%s", info)
return nil, errors.Wrapf(err, "failed to init state trie from state, detected fork=%s", forkName)
}
default:
return nil, fmt.Errorf("unable to initialize BeaconState for info=%s", info)
return nil, fmt.Errorf("unable to initialize BeaconState for fork version=%s", forkName)
}
return s, nil

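The rewrite above replaces the full config dump in error messages with just the human-readable fork name. A standalone sketch of the same dispatch shape (the decoders and messages are illustrative; the real code switches on version constants and SSZ-unmarshals concrete state types):

package main

import "fmt"

// decoder stands in for per-fork SSZ unmarshaling.
type decoder func([]byte) (interface{}, error)

var decoders = map[string]decoder{
	"phase0":    func([]byte) (interface{}, error) { return "phase0 state", nil },
	"altair":    func([]byte) (interface{}, error) { return "altair state", nil },
	"bellatrix": func([]byte) (interface{}, error) { return "bellatrix state", nil },
}

// unmarshalState surfaces the fork name in every error, as the diff does.
func unmarshalState(forkName string, marshaled []byte) (interface{}, error) {
	dec, ok := decoders[forkName]
	if !ok {
		return nil, fmt.Errorf("unable to initialize BeaconState for fork version=%s", forkName)
	}
	st, err := dec(marshaled)
	if err != nil {
		return nil, fmt.Errorf("failed to unmarshal state, detected fork=%s: %w", forkName, err)
	}
	return st, nil
}

func main() {
	st, _ := unmarshalState("altair", nil)
	fmt.Println(st)
	_, err := unmarshalState("sharding", nil)
	fmt.Println(err)
}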
go.mod

@@ -255,7 +255,6 @@ require (
github.com/go-logr/logr v0.2.1 // indirect
github.com/go-ole/go-ole v1.2.5 // indirect
github.com/go-playground/validator/v10 v10.10.0
github.com/gomarkdown/markdown v0.0.0-20220310201231-552c6011c0b8
github.com/peterh/liner v1.2.0 // indirect
github.com/prometheus/tsdb v0.10.0 // indirect
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220303211031-f753e083138c

go.sum

@@ -447,8 +447,6 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/gomarkdown/markdown v0.0.0-20220310201231-552c6011c0b8 h1:YVvt637ygnOO9qjLBVmPOvrUmCz/i8YECSu/8UlOQW0=
github.com/gomarkdown/markdown v0.0.0-20220310201231-552c6011c0b8/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=


@@ -3,24 +3,32 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"auth.go",
"endpoint.go",
"external_ip.go",
],
importpath = "github.com/prysmaticlabs/prysm/network",
visibility = ["//visibility:public"],
deps = ["//network/authorization:go_default_library"],
deps = [
"//network/authorization:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"auth_test.go",
"endpoint_test.go",
"external_ip_test.go",
],
embed = [":go_default_library"],
deps = [
"//encoding/bytesutil:go_default_library",
"//network/authorization:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
],
)


@@ -1,4 +1,4 @@
package powchain
package network
import (
"net/http"
@@ -8,6 +8,9 @@ import (
"github.com/pkg/errors"
)
// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
const DefaultRPCHTTPTimeout = time.Second * 6
// This creates a custom HTTP transport which we can attach to our HTTP client
// in order to inject JWT auth strings into our HTTP request headers. Authentication
// is required when interacting with an Ethereum engine API server via HTTP, and JWT


@@ -1,4 +1,4 @@
package powchain
package network
import (
"net/http"


@@ -2,6 +2,7 @@ package network
import (
"errors"
"net/http"
"strings"
"github.com/prysmaticlabs/prysm/network/authorization"
@@ -24,6 +25,22 @@ func (e Endpoint) Equals(other Endpoint) bool {
return e.Url == other.Url && e.Auth.Equals(other.Auth)
}
// HttpClient creates an HTTP client whose configuration depends
// on the properties of the network endpoint.
func (e Endpoint) HttpClient() *http.Client {
if e.Auth.Method != authorization.Bearer {
return http.DefaultClient
}
authTransport := &jwtTransport{
underlyingTransport: http.DefaultTransport,
jwtSecret: []byte(e.Auth.Value),
}
return &http.Client{
Timeout: DefaultRPCHTTPTimeout,
Transport: authTransport,
}
}
// Equals compares two authorization data objects for equality.
func (d AuthorizationData) Equals(other AuthorizationData) bool {
return d.Method == other.Method && d.Value == other.Value

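HttpClient above installs the JWT transport only for bearer-auth endpoints and otherwise falls back to http.DefaultClient. A self-contained sketch of the same round-tripper idea, with a static token standing in for the signed JWT the real jwtTransport produces:

package main

import (
	"net/http"
	"time"
)

// jwtRoundTripper injects an Authorization header into every outgoing
// request, like the jwtTransport attached above.
type jwtRoundTripper struct {
	base  http.RoundTripper
	token string // a freshly signed JWT in the real code; static here
}

func (t *jwtRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	req = req.Clone(req.Context()) // never mutate the caller's request
	req.Header.Set("Authorization", "Bearer "+t.token)
	return t.base.RoundTrip(req)
}

func newAuthedClient(token string) *http.Client {
	return &http.Client{
		Timeout:   6 * time.Second, // mirrors DefaultRPCHTTPTimeout above
		Transport: &jwtRoundTripper{base: http.DefaultTransport, token: token},
	}
}

func main() {
	client := newAuthedClient("stub-token")
	_ = client // point it at an engine API endpoint to make authenticated calls
}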
File diff suppressed because it is too large.


@@ -123,6 +123,92 @@ func local_request_KeyManagement_DeleteKeystores_0(ctx context.Context, marshale
}
func request_KeyManagement_ListRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := client.ListRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_KeyManagement_ListRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := server.ListRemoteKeys(ctx, &protoReq)
return msg, metadata, err
}
func request_KeyManagement_ImportRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ImportRemoteKeysRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ImportRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_KeyManagement_ImportRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ImportRemoteKeysRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ImportRemoteKeys(ctx, &protoReq)
return msg, metadata, err
}
func request_KeyManagement_DeleteRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteRemoteKeysRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.DeleteRemoteKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_KeyManagement_DeleteRemoteKeys_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteRemoteKeysRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.DeleteRemoteKeys(ctx, &protoReq)
return msg, metadata, err
}
// RegisterKeyManagementHandlerServer registers the http handlers for service KeyManagement to "mux".
// UnaryRPC :call KeyManagementServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -198,6 +284,75 @@ func RegisterKeyManagementHandlerServer(ctx context.Context, mux *runtime.ServeM
})
mux.Handle("GET", pattern_KeyManagement_ListRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_ListRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_ListRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_KeyManagement_ImportRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ImportRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_ImportRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_ImportRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_KeyManagement_DeleteRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_KeyManagement_DeleteRemoteKeys_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_DeleteRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -299,6 +454,66 @@ func RegisterKeyManagementHandlerClient(ctx context.Context, mux *runtime.ServeM
})
mux.Handle("GET", pattern_KeyManagement_ListRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_ListRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_ListRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_KeyManagement_ImportRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ImportRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_ImportRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_ImportRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_KeyManagement_DeleteRemoteKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteRemoteKeys")
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KeyManagement_DeleteRemoteKeys_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KeyManagement_DeleteRemoteKeys_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -308,6 +523,12 @@ var (
pattern_KeyManagement_ImportKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "keystores"}, ""))
pattern_KeyManagement_DeleteKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "keystores"}, ""))
pattern_KeyManagement_ListRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
pattern_KeyManagement_ImportRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
pattern_KeyManagement_DeleteRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))
)
var (
@@ -316,4 +537,10 @@ var (
forward_KeyManagement_ImportKeystores_0 = runtime.ForwardResponseMessage
forward_KeyManagement_DeleteKeystores_0 = runtime.ForwardResponseMessage
forward_KeyManagement_ListRemoteKeys_0 = runtime.ForwardResponseMessage
forward_KeyManagement_ImportRemoteKeys_0 = runtime.ForwardResponseMessage
forward_KeyManagement_DeleteRemoteKeys_0 = runtime.ForwardResponseMessage
)


@@ -84,6 +84,27 @@ service KeyManagement {
body: "*"
};
}
rpc ListRemoteKeys(google.protobuf.Empty) returns (ListRemoteKeysResponse) {
option (google.api.http) = {
get: "/internal/eth/v1/remotekeys"
};
}
rpc ImportRemoteKeys(ImportRemoteKeysRequest) returns (ImportRemoteKeysResponse) {
option (google.api.http) = {
post: "/internal/eth/v1/remotekeys",
body: "*"
};
}
rpc DeleteRemoteKeys(DeleteRemoteKeysRequest) returns (DeleteRemoteKeysResponse) {
option (google.api.http) = {
delete: "/internal/eth/v1/remotekeys",
body: "*"
};
}
}
message ListKeystoresResponse {
@@ -133,3 +154,55 @@ message DeletedKeystoreStatus {
Status status = 1;
string message = 2;
}
message ListRemoteKeysResponse {
message Keystore {
bytes pubkey = 1;
string url = 2;
bool readonly = 3;
}
repeated Keystore data = 1;
}
message ImportRemoteKeysRequest {
message Keystore {
bytes pubkey = 1;
string url = 2;
}
repeated Keystore remote_keys = 1;
}
message ImportRemoteKeysResponse {
repeated ImportedRemoteKeysStatus data = 1;
}
message DeleteRemoteKeysRequest {
repeated bytes pubkeys = 1;
}
message DeleteRemoteKeysResponse {
repeated DeletedRemoteKeysStatus data = 1;
}
message ImportedRemoteKeysStatus {
enum Status {
UNKNOWN = 0;
IMPORTED = 1;
DUPLICATE = 2;
ERROR = 3;
}
Status status = 1;
string message = 2;
}
message DeletedRemoteKeysStatus {
enum Status {
NOT_FOUND = 0;
DELETED = 1;
ERROR = 3; // skips 2 to match DeletedKeystoreStatus, which also uses ERROR = 3.
}
Status status = 1;
string message = 2;
}

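All three routes above share the /internal/eth/v1/remotekeys path and differ only by HTTP verb: GET lists, POST imports, DELETE removes. A hedged sketch of calling the list route through the gateway; the port is a placeholder, and the response in the comment merely mirrors the ListRemoteKeysResponse shape:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Port 7500 is a placeholder for wherever the gateway listens.
	resp, err := http.Get("http://localhost:7500/internal/eth/v1/remotekeys")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println(err)
		return
	}
	// e.g. {"data":[{"pubkey":"0x...","url":"https://...","readonly":true}]}
	fmt.Println(string(body))
}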
File diff suppressed because it is too large.


@@ -360,6 +360,9 @@ message StreamBlocksResponse {
// Representing an altair block.
SignedBeaconBlockAltair altair_block = 2;
// Representing a bellatrix block.
SignedBeaconBlockBellatrix bellatrix_block = 3;
}
}


@@ -7,7 +7,6 @@ go_test(
size = "large",
testonly = True,
srcs = [
"checkpoint_sync_test.go",
"endtoend_test.go",
"minimal_e2e_test.go",
"minimal_slashing_e2e_test.go",
@@ -18,6 +17,7 @@ go_test(
"//:prysm_sh",
"//cmd/beacon-chain",
"//cmd/validator",
"//config/params:custom_configs",
"//tools/bootnode",
"@com_github_ethereum_go_ethereum//cmd/geth",
"@web3signer",
@@ -31,23 +31,18 @@ go_test(
"requires-network",
],
deps = [
"//api/client/beacon:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/operations/slashings/mock:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//build/bazel:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/endtoend/components:go_default_library",
"//testing/endtoend/components/eth1:go_default_library",
"//testing/endtoend/e2ez:go_default_library",
"//testing/endtoend/evaluators:go_default_library",
"//testing/endtoend/helpers:go_default_library",
"//testing/endtoend/params:go_default_library",
@@ -60,8 +55,6 @@ go_test(
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
@@ -80,6 +73,7 @@ go_test(
"//:prysm_sh",
"//cmd/beacon-chain",
"//cmd/validator",
"//config/params:custom_configs",
"//tools/bootnode",
"@com_github_ethereum_go_ethereum//cmd/geth",
"@web3signer",
@@ -93,19 +87,14 @@ go_test(
"requires-network",
],
deps = [
"//api/client/beacon:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//build/bazel:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/endtoend/components:go_default_library",
@@ -117,14 +106,11 @@ go_test(
"//testing/require:go_default_library",
"//testing/slasher/simulator:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],


@@ -1,93 +0,0 @@
package endtoend
import (
"fmt"
"os"
"strconv"
"testing"
e2types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
ev "github.com/prysmaticlabs/prysm/testing/endtoend/evaluators"
e2eParams "github.com/prysmaticlabs/prysm/testing/endtoend/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/types"
"github.com/prysmaticlabs/prysm/testing/require"
)
// This test customizes the minimal config in order to artificially shorten the weak subjectivity period
// so that the state used will not be genesis despite there only being 10 epochs of history.
func TestCheckpointSync_CustomConfig(t *testing.T) {
// Run for 10 epochs if not in long-running to confirm long-running has no issues.
var err error
epochsToRun := 10
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
if longRunning {
epochsToRun, err = strconv.Atoi(epochStr)
require.NoError(t, err)
}
cfg := params.E2ETestConfig()
cfg.BellatrixForkEpoch = 10000
// setting this to 1 should change the weak subjectivity computation,
// so the computed weak subjectivity checkpoint will just be a few epochs before head
cfg.MinValidatorWithdrawabilityDelay = e2types.Epoch(epochsToRun / 2)
cfg.SlotsPerEpoch = 6
cfg.SecondsPerSlot = 6
params.OverrideBeaconConfig(cfg)
require.NoError(t, e2eParams.Init(e2eParams.StandardBeaconCount))
seed := 0
seedStr, isValid := os.LookupEnv("E2E_SEED")
if isValid {
seed, err = strconv.Atoi(seedStr)
require.NoError(t, err)
}
tracingPort := e2eParams.TestParams.Ports.JaegerTracingPort
tracingEndpoint := fmt.Sprintf("127.0.0.1:%d", tracingPort)
evals := []types.Evaluator{
ev.PeersConnect,
ev.HealthzCheck,
ev.MetricsCheck,
ev.ValidatorsAreActive,
ev.ValidatorsParticipatingAtEpoch(2),
ev.FinalizationOccurs(3),
ev.PeersCheck,
ev.ProcessesDepositsInBlocks,
ev.VerifyBlockGraffiti,
ev.ActivatesDepositedValidators,
ev.DepositedValidatorsAreActive,
ev.ProposeVoluntaryExit,
ev.ValidatorHasExited,
ev.ValidatorsVoteWithTheMajority,
ev.ColdStateCheckpoint,
ev.ForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.ValidatorSyncParticipation,
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{
fmt.Sprintf("--slots-per-archive-point=%d", params.BeaconConfig().SlotsPerEpoch*16),
fmt.Sprintf("--tracing-endpoint=http://%s", tracingEndpoint),
"--enable-tracing",
"--trace-sample-fraction=1.0",
},
ValidatorFlags: []string{},
EpochsToRun: uint64(epochsToRun),
TestSync: true,
TestFeature: true,
TestDeposits: true,
UseFixedPeerIDs: true,
UsePrysmShValidator: false,
UsePprof: !longRunning,
TracingSinkEndpoint: tracingEndpoint,
Evaluators: evals,
Seed: int64(seed),
BeaconChainConfig: cfg,
LeaveRunning: false,
}
newTestRunner(t, testConfig).run()
}


@@ -31,7 +31,6 @@ go_library(
"//io/file:go_default_library",
"//runtime/interop:go_default_library",
"//testing/endtoend/components/eth1:go_default_library",
"//testing/endtoend/e2ez:go_default_library",
"//testing/endtoend/helpers:go_default_library",
"//testing/endtoend/params:go_default_library",
"//testing/endtoend/types:go_default_library",
@@ -55,11 +54,15 @@ go_test(
name = "go_default_test",
size = "small",
srcs = ["web3remotesigner_test.go"],
data = ["@web3signer"],
data = [
"//config/params:custom_configs",
"@web3signer",
],
deps = [
":go_default_library",
"//config/params:go_default_library",
"//testing/endtoend/params:go_default_library",
"//testing/require:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)


@@ -3,24 +3,20 @@
package components
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"text/template"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/pkg/errors"
cmdshared "github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/e2ez"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
e2etypes "github.com/prysmaticlabs/prysm/testing/endtoend/types"
@@ -28,6 +24,7 @@ import (
var _ e2etypes.ComponentRunner = (*BeaconNode)(nil)
var _ e2etypes.ComponentRunner = (*BeaconNodeSet)(nil)
var _ e2etypes.BeaconNodeSet = (*BeaconNodeSet)(nil)
// BeaconNodeSet represents a set of beacon nodes.
type BeaconNodeSet struct {
@@ -36,37 +33,19 @@ type BeaconNodeSet struct {
enr string
ids []string
started chan struct{}
nodes []*BeaconNode
flags []string
zp *e2ez.Server
}
// SetENR assigns ENR to the set of beacon nodes.
func (s *BeaconNodeSet) SetENR(enr string) {
s.enr = enr
}
// NewBeaconNodes creates and returns a set of beacon nodes.
func NewBeaconNodes(config *e2etypes.E2EConfig, enr string, flags []string, zp *e2ez.Server) *BeaconNodeSet {
// Create beacon nodes.
nodes := make([]*BeaconNode, e2e.TestParams.BeaconNodeCount)
for i := 0; i < e2e.TestParams.BeaconNodeCount; i++ {
nodes[i] = NewBeaconNode(i, enr, flags, config)
zp.HandleZPages(nodes[i])
}
bns := &BeaconNodeSet{
func NewBeaconNodes(config *e2etypes.E2EConfig) *BeaconNodeSet {
return &BeaconNodeSet{
config: config,
started: make(chan struct{}, 1),
nodes: nodes,
enr: enr,
flags: flags,
zp: zp,
}
zp.HandleZPages(bns)
return bns
}
func (s *BeaconNodeSet) AddBeaconNode(index int, flags []string) *BeaconNode {
bn := NewBeaconNode(index, s.enr, flags, s.config)
s.nodes = append(s.nodes, bn)
s.zp.HandleZPages(bn)
return bn
}
// Start starts all the beacon nodes in set.
@@ -75,9 +54,10 @@ func (s *BeaconNodeSet) Start(ctx context.Context) error {
return errors.New("empty ENR")
}
nodes := make([]e2etypes.ComponentRunner, len(s.nodes))
for i, n := range s.nodes {
nodes[i] = n
// Create beacon nodes.
nodes := make([]e2etypes.ComponentRunner, e2e.TestParams.BeaconNodeCount)
for i := 0; i < e2e.TestParams.BeaconNodeCount; i++ {
nodes[i] = NewBeaconNode(s.config, i, s.enr)
}
// Wait for all nodes to finish their job (blocking).
@@ -94,23 +74,6 @@ func (s *BeaconNodeSet) Start(ctx context.Context) error {
})
}
func (s *BeaconNodeSet) ZPath() string {
return "/beacon-nodes"
}
func (s *BeaconNodeSet) ZMarkdown() (string, error) {
tmpl := `
%d beacon nodes
---------------
%s`
nodeList := ""
for _, node := range s.nodes {
nodeList = nodeList + fmt.Sprintf("\n - [beacon node #%d](%s)", node.index, node.ZPath())
}
return fmt.Sprintf(tmpl, len(s.nodes), nodeList), nil
}
// Started checks whether beacon node set is started and all nodes are ready to be queried.
func (s *BeaconNodeSet) Started() <-chan struct{} {
return s.started
@@ -122,97 +85,48 @@ type BeaconNode struct {
config *e2etypes.E2EConfig
started chan struct{}
index int
flags []string
enr string
peerID string
}
func (node *BeaconNode) ZPath() string {
return fmt.Sprintf("/beacon-node/%d", node.index)
}
var bnzm = template.Must(template.New("BeaconNode.ZMarkdown").Parse("" +
"beacon node {{.Index}}\n" +
"--------------\n\n" +
"```\n" +
"{{.StartCmd}}" +
"```\n\n" +
"http addr={{.HTTPAddr}}\n\n" +
"grpc addr={{.GRPCAddr}}\n\n" +
"db path={{.DBPath}}\n\n" +
"log path={{.LogPath}}\n\n" +
"stdout path={{.StdoutPath}}\n\n" +
"stderr path={{.StderrPath}}\n\n"))
func (node *BeaconNode) ZMarkdown() (string, error) {
bin, args, err := node.startCommand()
if err != nil {
return "", err
}
cmd := path.Join(bin, args[0])
for _, a := range args {
cmd += fmt.Sprintf("\n%s \\", a)
}
buf := bytes.NewBuffer(nil)
err = bnzm.Execute(buf, struct {
Index int
StartCmd string
DBPath string
LogPath string
StdoutPath string
StderrPath string
HTTPAddr string
GRPCAddr string
}{
Index: node.index,
StartCmd: cmd,
DBPath: node.dbPath(),
LogPath: node.logPath(),
StdoutPath: node.stdoutPath(),
StderrPath: node.stderrPath(),
HTTPAddr: node.httpAddr(),
GRPCAddr: node.grpcAddr(),
})
return buf.String(), err
}
var _ e2ez.ZPage = &BeaconNode{}
// NewBeaconNode creates and returns a beacon node.
func NewBeaconNode(index int, enr string, flags []string, config *e2etypes.E2EConfig) *BeaconNode {
func NewBeaconNode(config *e2etypes.E2EConfig, index int, enr string) *BeaconNode {
return &BeaconNode{
config: config,
index: index,
enr: enr,
started: make(chan struct{}, 1),
flags: flags,
}
}
func (node *BeaconNode) startCommand() (string, []string, error) {
// Start starts a fresh beacon node, connecting to all passed in beacon nodes.
func (node *BeaconNode) Start(ctx context.Context) error {
binaryPath, found := bazel.FindBinary("cmd/beacon-chain", "beacon-chain")
if !found {
log.Info(binaryPath)
return "", []string{}, errors.New("beacon chain binary not found")
return errors.New("beacon chain binary not found")
}
config, index, enr := node.config, node.index, node.enr
expectedNumOfPeers := e2e.TestParams.BeaconNodeCount + e2e.TestParams.LighthouseBeaconNodeCount
stdOutFile, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, fmt.Sprintf(e2e.BeaconNodeLogFileName, index))
if err != nil {
return err
}
expectedNumOfPeers := e2e.TestParams.BeaconNodeCount + e2e.TestParams.LighthouseBeaconNodeCount - 1
if node.config.TestSync {
expectedNumOfPeers += 1
}
expectedNumOfPeers += 10
jwtPath := path.Join(e2e.TestParams.TestPath, "eth1data/"+strconv.Itoa(node.index)+"/")
if index == 0 {
jwtPath = path.Join(e2e.TestParams.TestPath, "eth1data/miner/")
}
jwtPath = path.Join(jwtPath, "geth/jwtsecret")
args := []string{
fmt.Sprintf("--%s=%s", cmdshared.DataDirFlag.Name, node.dbPath()),
fmt.Sprintf("--%s=%s", cmdshared.LogFileName.Name, node.logPath()),
fmt.Sprintf("--%s=%s/eth2-beacon-node-%d", cmdshared.DataDirFlag.Name, e2e.TestParams.TestPath, index),
fmt.Sprintf("--%s=%s", cmdshared.LogFileName.Name, stdOutFile.Name()),
fmt.Sprintf("--%s=%s", flags.DepositContractFlag.Name, e2e.TestParams.ContractAddress.Hex()),
fmt.Sprintf("--%s=%d", flags.RPCPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeRPCPort+index),
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, e2e.TestParams.Ports.Eth1RPCPort+index),
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, e2e.TestParams.Ports.Eth1AuthRPCPort+index),
fmt.Sprintf("--%s=%s", flags.ExecutionJWTSecretFlag.Name, jwtPath),
fmt.Sprintf("--%s=%d", flags.MinSyncPeers.Name, 1),
fmt.Sprintf("--%s=%d", cmdshared.P2PUDPPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeUDPPort+index),
@@ -225,9 +139,8 @@ func (node *BeaconNode) startCommand() (string, []string, error) {
fmt.Sprintf("--%s=%d", cmdshared.RPCMaxPageSizeFlag.Name, params.BeaconConfig().MinGenesisActiveValidatorCount),
fmt.Sprintf("--%s=%s", cmdshared.BootstrapNode.Name, enr),
fmt.Sprintf("--%s=%s", cmdshared.VerbosityFlag.Name, "debug"),
fmt.Sprintf("--%s=%s", cmdshared.ChainConfigFileFlag.Name, node.config.BeaconChainConfigPath()),
"--slots-per-archive-point=1",
"--" + cmdshared.ForceClearDB.Name,
"--" + cmdshared.E2EConfigFlag.Name,
"--" + cmdshared.AcceptTosFlag.Name,
"--" + flags.EnableDebugRPCEndpoints.Name,
}
@@ -240,55 +153,14 @@ func (node *BeaconNode) startCommand() (string, []string, error) {
args = append(args, features.E2EBeaconChainFlags...)
}
args = append(args, config.BeaconFlags...)
args = append(args, node.flags...)
return binaryPath, args, nil
}
func (node *BeaconNode) dbPath() string {
return fmt.Sprintf("%s/eth2-beacon-node-%d", e2e.TestParams.TestPath, node.index)
}
func (node *BeaconNode) logPath() string {
return filepath.Clean(path.Join(e2e.TestParams.LogPath, fmt.Sprintf(e2e.BeaconNodeLogFileName, node.index)))
}
func (node *BeaconNode) stdoutPath() string {
return path.Join(e2e.TestParams.LogPath, fmt.Sprintf("beacon_node_%d_stdout.log", node.index))
}
func (node *BeaconNode) stderrPath() string {
return path.Join(e2e.TestParams.LogPath, fmt.Sprintf("beacon_node_%d_stderr.log", node.index))
}
func (node *BeaconNode) httpAddr() string {
port := e2e.TestParams.Ports.PrysmBeaconNodeGatewayPort + node.index
return fmt.Sprintf("http://localhost:%d", port)
}
func (node *BeaconNode) grpcAddr() string {
port := e2e.TestParams.Ports.PrysmBeaconNodeRPCPort + node.index
return fmt.Sprintf("localhost:%d", port)
}
// Start starts a fresh beacon node, connecting to all passed-in beacon nodes.
func (node *BeaconNode) Start(ctx context.Context) error {
stdOutFile, err := helpers.DeleteAndCreateFile(node.logPath(), "")
if err != nil {
return err
}
bin, args, err := node.startCommand()
if err != nil {
return errors.Wrap(err, "filed to generate start command")
}
cmd := exec.CommandContext(ctx, bin, args...) // #nosec G204 -- Safe
cmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Safe
// Write stdout and stderr to log files.
stdout, err := os.Create(node.stdoutPath())
stdout, err := os.Create(path.Join(e2e.TestParams.LogPath, fmt.Sprintf("beacon_node_%d_stdout.log", index)))
if err != nil {
return err
}
stderr, err := os.Create(node.stderrPath())
stderr, err := os.Create(path.Join(e2e.TestParams.LogPath, fmt.Sprintf("beacon_node_%d_stderr.log", index)))
if err != nil {
return err
}
@@ -302,16 +174,16 @@ func (node *BeaconNode) Start(ctx context.Context) error {
}()
cmd.Stdout = stdout
cmd.Stderr = stderr
log.Infof("Starting beacon chain %d with flags: %s", node.index, strings.Join(args[2:], " "))
log.Infof("Starting beacon chain %d with flags: %s", index, strings.Join(args[2:], " "))
if err = cmd.Start(); err != nil {
return fmt.Errorf("failed to start beacon node: %w", err)
}
if err = helpers.WaitForTextInFile(stdOutFile, "gRPC server listening on port"); err != nil {
return fmt.Errorf("could not find multiaddr for node %d, this means the node had issues starting: %w", node.index, err)
return fmt.Errorf("could not find multiaddr for node %d, this means the node had issues starting: %w", index, err)
}
if node.config.UseFixedPeerIDs {
if config.UseFixedPeerIDs {
peerId, err := helpers.FindFollowingTextInFile(stdOutFile, "Running node with peer id of ")
if err != nil {
return fmt.Errorf("could not find peer id: %w", err)
@@ -329,7 +201,3 @@ func (node *BeaconNode) Start(ctx context.Context) error {
func (node *BeaconNode) Started() <-chan struct{} {
return node.started
}
func (node *BeaconNode) Index() int {
return node.index
}

View File

@@ -9,7 +9,6 @@ import (
"strings"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
e2etypes "github.com/prysmaticlabs/prysm/testing/endtoend/types"
@@ -49,12 +48,10 @@ func (node *BootNode) Start(ctx context.Context) error {
return err
}
cfg := params.BeaconConfig()
args := []string{
fmt.Sprintf("--log-file=%s", stdOutFile.Name()),
fmt.Sprintf("--discv5-port=%d", e2e.TestParams.Ports.BootNodePort),
fmt.Sprintf("--metrics-port=%d", e2e.TestParams.Ports.BootNodeMetricsPort),
fmt.Sprintf("--fork-version=%#x", cfg.GenesisForkVersion),
"--debug",
}

View File

@@ -1,7 +1,6 @@
package components
import (
"bytes"
"context"
"errors"
"fmt"
@@ -11,12 +10,10 @@ import (
"path/filepath"
"strconv"
"strings"
"text/template"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/io/file"
"github.com/prysmaticlabs/prysm/testing/endtoend/e2ez"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
e2etypes "github.com/prysmaticlabs/prysm/testing/endtoend/types"
@@ -24,6 +21,7 @@ import (
var _ e2etypes.ComponentRunner = (*LighthouseBeaconNode)(nil)
var _ e2etypes.ComponentRunner = (*LighthouseBeaconNodeSet)(nil)
var _ e2etypes.BeaconNodeSet = (*LighthouseBeaconNodeSet)(nil)
// LighthouseBeaconNodeSet represents a set of lighthouse beacon nodes.
type LighthouseBeaconNodeSet struct {
@@ -31,26 +29,19 @@ type LighthouseBeaconNodeSet struct {
config *e2etypes.E2EConfig
enr string
started chan struct{}
nodes []*LighthouseBeaconNode
zp *e2ez.Server
}
// SetENR assigns ENR to the set of beacon nodes.
func (s *LighthouseBeaconNodeSet) SetENR(enr string) {
s.enr = enr
}
// NewLighthouseBeaconNodes creates and returns a set of lighthouse beacon nodes.
func NewLighthouseBeaconNodes(config *e2etypes.E2EConfig, enr string, zp *e2ez.Server) *LighthouseBeaconNodeSet {
nodes := make([]*LighthouseBeaconNode, e2e.TestParams.LighthouseBeaconNodeCount)
for i := 0; i < e2e.TestParams.LighthouseBeaconNodeCount; i++ {
nodes[i] = NewLighthouseBeaconNode(config, i, enr)
zp.HandleZPages(nodes[i])
}
bns := &LighthouseBeaconNodeSet{
func NewLighthouseBeaconNodes(config *e2etypes.E2EConfig) *LighthouseBeaconNodeSet {
return &LighthouseBeaconNodeSet{
config: config,
started: make(chan struct{}, 1),
enr: enr,
nodes: nodes,
zp: zp,
}
zp.HandleZPages(bns)
return bns
}
// Start starts all the beacon nodes in set.
@@ -60,9 +51,9 @@ func (s *LighthouseBeaconNodeSet) Start(ctx context.Context) error {
}
// Create beacon nodes.
nodes := make([]e2etypes.ComponentRunner, len(s.nodes))
nodes := make([]e2etypes.ComponentRunner, e2e.TestParams.LighthouseBeaconNodeCount)
for i := 0; i < e2e.TestParams.LighthouseBeaconNodeCount; i++ {
nodes[i] = s.nodes[i]
nodes[i] = NewLighthouseBeaconNode(s.config, i, s.enr)
}
// Wait for all nodes to finish their job (blocking).
@@ -78,23 +69,6 @@ func (s *LighthouseBeaconNodeSet) Started() <-chan struct{} {
return s.started
}
func (s *LighthouseBeaconNodeSet) ZPath() string {
return "/lh-beacon-nodes"
}
func (s *LighthouseBeaconNodeSet) ZMarkdown() (string, error) {
tmpl := `
%d beacon nodes
---------------
%s`
nodeList := ""
for _, node := range s.nodes {
nodeList = nodeList + fmt.Sprintf("\n - [beacon node #%d](%s)", node.index, node.ZPath())
}
return fmt.Sprintf(tmpl, len(s.nodes), nodeList), nil
}
// LighthouseBeaconNode represents a lighthouse beacon node.
type LighthouseBeaconNode struct {
e2etypes.ComponentRunner
@@ -114,51 +88,39 @@ func NewLighthouseBeaconNode(config *e2etypes.E2EConfig, index int, enr string)
}
}
func (node *LighthouseBeaconNode) dbPath() string {
return fmt.Sprintf("%s/lighthouse-beacon-node-%d", e2e.TestParams.TestPath, node.index)
}
func (node *LighthouseBeaconNode) stdoutPath() string {
return path.Join(e2e.TestParams.LogPath, fmt.Sprintf("lighthouse_beacon_node_%d_stdout.log", node.index))
}
func (node *LighthouseBeaconNode) stderrPath() string {
return path.Join(e2e.TestParams.LogPath, fmt.Sprintf("lighthouse_beacon_node_%d_stderr.log", node.index))
}
func (node *LighthouseBeaconNode) httpPort() int {
return e2e.TestParams.Ports.LighthouseBeaconNodeHTTPPort + node.index
}
func (node *LighthouseBeaconNode) startCommand() (string, []string, error) {
// Start starts a fresh beacon node, connecting to all passed-in beacon nodes.
func (node *LighthouseBeaconNode) Start(ctx context.Context) error {
binaryPath, found := bazel.FindBinary("external/lighthouse", "lighthouse")
if !found {
log.Info(binaryPath)
log.Error("beacon chain binary not found")
}
testDir, err := node.createTestnetDir(node.index)
_, index, _ := node.config, node.index, node.enr
testDir, err := node.createTestnetDir(index)
if err != nil {
return "", []string{}, err
return err
}
prysmNodeCount := e2e.TestParams.BeaconNodeCount
jwtPath := path.Join(e2e.TestParams.TestPath, "eth1data/"+strconv.Itoa(node.index+prysmNodeCount)+"/")
jwtPath = path.Join(jwtPath, "geth/jwtsecret")
args := []string{
"beacon_node",
fmt.Sprintf("--datadir=%s", node.dbPath()),
fmt.Sprintf("--datadir=%s/lighthouse-beacon-node-%d", e2e.TestParams.TestPath, index),
fmt.Sprintf("--testnet-dir=%s", testDir),
"--staking",
"--enr-address=127.0.0.1",
fmt.Sprintf("--enr-udp-port=%d", e2e.TestParams.Ports.LighthouseBeaconNodeP2PPort+node.index),
fmt.Sprintf("--enr-tcp-port=%d", e2e.TestParams.Ports.LighthouseBeaconNodeP2PPort+node.index),
fmt.Sprintf("--port=%d", e2e.TestParams.Ports.LighthouseBeaconNodeP2PPort+node.index),
fmt.Sprintf("--http-port=%d", node.httpPort()),
fmt.Sprintf("--enr-udp-port=%d", e2e.TestParams.Ports.LighthouseBeaconNodeP2PPort+index),
fmt.Sprintf("--enr-tcp-port=%d", e2e.TestParams.Ports.LighthouseBeaconNodeP2PPort+index),
fmt.Sprintf("--port=%d", e2e.TestParams.Ports.LighthouseBeaconNodeP2PPort+index),
fmt.Sprintf("--http-port=%d", e2e.TestParams.Ports.LighthouseBeaconNodeHTTPPort+index),
fmt.Sprintf("--target-peers=%d", 10),
fmt.Sprintf("--eth1-endpoints=http://127.0.0.1:%d", e2e.TestParams.Ports.Eth1RPCPort+prysmNodeCount+node.index),
fmt.Sprintf("--execution-endpoints=http://127.0.0.1:%d", e2e.TestParams.Ports.Eth1AuthRPCPort+prysmNodeCount+node.index),
fmt.Sprintf("--eth1-endpoints=http://127.0.0.1:%d", e2e.TestParams.Ports.Eth1RPCPort+prysmNodeCount+index),
fmt.Sprintf("--execution-endpoints=http://127.0.0.1:%d", e2e.TestParams.Ports.Eth1AuthRPCPort+prysmNodeCount+index),
fmt.Sprintf("--jwt-secrets=%s", jwtPath),
fmt.Sprintf("--boot-nodes=%s", node.enr),
fmt.Sprintf("--metrics-port=%d", e2e.TestParams.Ports.LighthouseBeaconNodeMetricsPort+node.index),
fmt.Sprintf("--metrics-port=%d", e2e.TestParams.Ports.LighthouseBeaconNodeMetricsPort+index),
"--metrics",
"--http",
"--http-allow-sync-stalled",
@@ -171,22 +133,13 @@ func (node *LighthouseBeaconNode) startCommand() (string, []string, error) {
args = append(args,
fmt.Sprintf("--trusted-peers=%s", flagVal))
}
return binaryPath, args, nil
}
// Start starts a fresh beacon node, connecting to all passed-in beacon nodes.
func (node *LighthouseBeaconNode) Start(ctx context.Context) error {
binaryPath, args, err := node.startCommand()
if err != nil {
return err
}
cmd := exec.CommandContext(ctx, binaryPath, args...) /* #nosec G204 */
// Write stdout and stderr to log files.
stdout, err := os.Create(node.stdoutPath())
stdout, err := os.Create(path.Join(e2e.TestParams.LogPath, fmt.Sprintf("lighthouse_beacon_node_%d_stdout.log", index)))
if err != nil {
return err
}
stderr, err := os.Create(node.stderrPath())
stderr, err := os.Create(path.Join(e2e.TestParams.LogPath, fmt.Sprintf("lighthouse_beacon_node_%d_stderr.log", index)))
if err != nil {
return err
}
@@ -200,13 +153,13 @@ func (node *LighthouseBeaconNode) Start(ctx context.Context) error {
}()
cmd.Stdout = stdout
cmd.Stderr = stderr
log.Infof("Starting lighthouse beacon chain %d with flags: %s", node.index, strings.Join(args[2:], " "))
log.Infof("Starting lighthouse beacon chain %d with flags: %s", index, strings.Join(args[2:], " "))
if err = cmd.Start(); err != nil {
return fmt.Errorf("failed to start beacon node: %w", err)
}
if err = helpers.WaitForTextInFile(stderr, "Configured for network"); err != nil {
return fmt.Errorf("could not find initialization for node %d, this means the node had issues starting: %w", node.index, err)
return fmt.Errorf("could not find initialization for node %d, this means the node had issues starting: %w", index, err)
}
// Mark node as ready.
@@ -223,11 +176,11 @@ func (node *LighthouseBeaconNode) Started() <-chan struct{} {
func (node *LighthouseBeaconNode) createTestnetDir(index int) (string, error) {
testNetDir := e2e.TestParams.TestPath + fmt.Sprintf("/lighthouse-testnet-%d", index)
configPath := filepath.Join(testNetDir, "config.yaml")
rawYaml := params.ConfigToYaml(node.config.BeaconChainConfig)
rawYaml := params.E2EMainnetConfigYaml()
// Add the deposit contract address to the yaml config.
depContractStr := fmt.Sprintf("\nDEPOSIT_CONTRACT_ADDRESS: %#x", e2e.TestParams.ContractAddress)
rawYaml = append(rawYaml, []byte(depContractStr)...)
if err := file.MkdirAll(testNetDir); err != nil {
return "", err
}
@@ -243,49 +196,3 @@ func (node *LighthouseBeaconNode) createTestnetDir(index int) (string, error) {
deployYaml := []byte("0")
return testNetDir, file.WriteFile(deployPath, deployYaml)
}
func (node *LighthouseBeaconNode) ZPath() string {
return fmt.Sprintf("/lh-beacon-node/%d", node.index)
}
var lbnzm = template.Must(template.New("BeaconNode.ZMarkdown").Parse("" +
"beacon node {{.Index}}\n" +
"--------------\n\n" +
"http addr={{.HTTPAddr}}\n\n" +
"db path={{.DBPath}}\n\n" +
"stdout path={{.StdoutPath}}\n\n" +
"stderr path={{.StderrPath}}\n\n" +
"```\n" +
"{{.StartCmd}}" +
"```\n\n"))
func (node *LighthouseBeaconNode) ZMarkdown() (string, error) {
bin, args, err := node.startCommand()
if err != nil {
return "", err
}
cmd := path.Join(bin, args[0])
for _, a := range args {
cmd += fmt.Sprintf("\n%s \\", a)
}
buf := bytes.NewBuffer(nil)
err = lbnzm.Execute(buf, struct {
Index int
StartCmd string
DBPath string
StdoutPath string
StderrPath string
HTTPAddr string
}{
Index: node.index,
StartCmd: cmd,
DBPath: node.dbPath(),
StdoutPath: node.stdoutPath(),
StderrPath: node.stderrPath(),
HTTPAddr: fmt.Sprintf("http://localhost:%d", node.httpPort()),
})
return buf.String(), err
}
var _ e2ez.ZPage = &LighthouseBeaconNode{}

View File

@@ -142,17 +142,16 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
fmt.Sprintf("--%s=localhost:%d", flags.BeaconRPCProviderFlag.Name, beaconRPCPort),
fmt.Sprintf("--%s=%s", flags.GrpcHeadersFlag.Name, "dummy=value,foo=bar"), // Sending random headers shouldn't break anything.
fmt.Sprintf("--%s=%s", cmdshared.VerbosityFlag.Name, "debug"),
fmt.Sprintf("--%s=%s", cmdshared.ChainConfigFileFlag.Name, v.config.BeaconChainConfigPath()),
"--" + cmdshared.ForceClearDB.Name,
"--" + cmdshared.E2EConfigFlag.Name,
"--" + cmdshared.AcceptTosFlag.Name,
}
// Only apply e2e flags to the current branch. New flags may not exist in previous release.
if !v.config.UsePrysmShValidator {
args = append(args, features.E2EValidatorFlags...)
}
if v.config.UseWeb3RemoteSigner {
args = append(args, fmt.Sprintf("--%s=localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
args = append(args, fmt.Sprintf("--%s=http://localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
// Write the pubkeys as comma-separated hex strings with 0x prefix.
// See: https://docs.teku.consensys.net/en/latest/HowTo/External-Signer/Use-External-Signer/
_, pubs, err := interop.DeterministicallyGenerateKeys(uint64(offset), uint64(validatorNum))

View File

@@ -37,13 +37,16 @@ type rawKeyFile struct {
}
type Web3RemoteSigner struct {
ctx context.Context
started chan struct{}
ctx context.Context
started chan struct{}
configFilePath string
cmd *exec.Cmd
}
func NewWeb3RemoteSigner() *Web3RemoteSigner {
func NewWeb3RemoteSigner(configFilePath string) *Web3RemoteSigner {
return &Web3RemoteSigner{
started: make(chan struct{}, 1),
started: make(chan struct{}, 1),
configFilePath: configFilePath,
}
}
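// Editor's sketch (illustrative): the new constructor threads a yaml chain-config
// path through to web3signer's --network flag (an empty path keeps the built-in
// "minimal" network). The path below is hypothetical; the e2e runner resolves the
// real one via bazel.Runfile.
func startSigner(ctx context.Context, g *errgroup.Group) *Web3RemoteSigner {
	signer := NewWeb3RemoteSigner("/tmp/e2e_config.yaml")
	g.Go(func() error { return signer.Start(ctx) })
	return signer
}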
@@ -66,6 +69,12 @@ func (w *Web3RemoteSigner) Start(ctx context.Context) error {
return err
}
network := "minimal"
if len(w.configFilePath) > 0 {
// A file path to a yaml config file is an acceptable network argument.
network = w.configFilePath
}
args := []string{
// Global flags
fmt.Sprintf("--key-store-path=%s", keystorePath),
@@ -75,13 +84,13 @@ func (w *Web3RemoteSigner) Start(ctx context.Context) error {
// Command
"eth2",
// Command flags
"--network=minimal",
"--network=" + network,
"--slashing-protection-enabled=false", // Otherwise, a postgres DB is required.
"--enable-key-manager-api=true",
"--key-manager-api-enabled=true",
}
cmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Test code is safe to do this.
w.cmd = cmd
// Write stdout and stderr to log files.
stdout, err := os.Create(path.Join(e2e.TestParams.LogPath, "web3signer.stdout.log"))
if err != nil {

View File

@@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/components"
e2eparams "github.com/prysmaticlabs/prysm/testing/endtoend/params"
@@ -13,7 +14,11 @@ import (
func TestWeb3RemoteSigner_StartsAndReturnsPublicKeys(t *testing.T) {
require.NoError(t, e2eparams.Init(0))
wsc := components.NewWeb3RemoteSigner()
fp, err := bazel.Runfile("config/params/testdata/e2e_config.yaml")
if err != nil {
t.Fatal(err)
}
wsc := components.NewWeb3RemoteSigner(fp)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

View File

@@ -6,10 +6,11 @@ lighthouse_archive_name = "lighthouse-%s-x86_64-unknown-linux-gnu-portable.tar.g
def e2e_deps():
http_archive(
name = "web3signer",
urls = ["https://artifacts.consensys.net/public/web3signer/raw/names/web3signer.tar.gz/versions/21.10.5/web3signer-21.10.5.tar.gz"],
sha256 = "d122429f6a310bc555d1281e0b3f4e3ac43a7beec5e5dcf0a0d2416a5984f461",
# Built from commit 17d253b which has important unreleased changes.
urls = ["https://prysmaticlabs.com/uploads/web3signer-17d253b.tar.gz"],
sha256 = "bf450a59a0845c1ce8100b3192c7fec021b565efe8b1ab46bed9f71cb994a6d7",
build_file = "@prysm//testing/endtoend:web3signer.BUILD",
strip_prefix = "web3signer-21.10.5",
strip_prefix = "web3signer-develop",
)
http_archive(

View File

@@ -1,12 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["server.go"],
importpath = "github.com/prysmaticlabs/prysm/testing/endtoend/e2ez",
visibility = ["//visibility:public"],
deps = [
"@com_github_gomarkdown_markdown//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,113 +0,0 @@
package e2ez
import (
"bytes"
"context"
"net/http"
"github.com/gomarkdown/markdown"
log "github.com/sirupsen/logrus"
)
// Server is an http server that serves all zpages.
// If zpages register using the HandleMarkdown method, their responses will be transformed from markdown
// to html before being streamed back to the client.
type Server struct {
handler *http.ServeMux
ec chan error
}
// NewServer should be used to instantiate a Server, so that it can set up the internal http.Handler
// and http.Server values.
func NewServer() *Server {
return &Server{
handler: http.NewServeMux(),
ec: make(chan error),
}
}
// ListenAndServe starts the underlying http.Server using the provided addr.
// This method does not use a goroutine and will block; call it in a goroutine
// if you do not want the caller to block.
func (s *Server) ListenAndServe(ctx context.Context, addr string) {
srv := &http.Server{
Addr: addr,
Handler: s.handler,
}
go func() {
if err := srv.ListenAndServe(); err != nil {
s.ec <- err
}
}()
for {
select {
case err := <-s.ec:
log.Error(err)
case <-ctx.Done():
err := srv.Shutdown(ctx)
if err != nil {
log.Error(err)
}
}
}
}
// HandleMarkdown mirrors http.HandleFunc. It wraps the given handler in a "middleware" enclosure that assumes
// a successful response body is a markdown document, translating the markdown to an html page.
func (s *Server) HandleMarkdown(pattern string, handler func(http.ResponseWriter, *http.Request)) {
s.handler.HandleFunc(pattern, handleMarkdown(handler, s.ec))
}
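// Editor's sketch (illustrative): any handler that writes markdown can be mounted
// through HandleMarkdown; the wrapper buffers the markdown body and converts it to
// HTML before replying. The path and body are hypothetical.
func mountNotes(s *Server) {
	s.HandleMarkdown("/notes", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("notes\n-----\n\n- all good"))
	})
}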
// HandleZPages allows any type that implements the minimal ZPage interface to
// handle requests for information about itself.
func (s *Server) HandleZPages(zps ...ZPage) {
for i := 0; i < len(zps); i++ {
z := zps[i]
f := func(rw http.ResponseWriter, req *http.Request) {
md, err := z.ZMarkdown()
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
s.ec <- err
return
}
rw.Header().Add("Content-Type", "text/html")
rw.WriteHeader(http.StatusOK)
_, err = rw.Write(markdown.ToHTML([]byte(md), nil, nil))
if err != nil {
s.ec <- err
return
}
}
s.handler.HandleFunc(z.ZPath(), f)
}
}
type markdownResponseWriter struct {
http.ResponseWriter
buf *bytes.Buffer
}
func (w *markdownResponseWriter) Write(i []byte) (int, error) {
return w.buf.Write(i)
}
func handleMarkdown(wrapped http.HandlerFunc, ec chan error) http.HandlerFunc {
return func(rw http.ResponseWriter, req *http.Request) {
w := &markdownResponseWriter{ResponseWriter: rw, buf: bytes.NewBuffer(nil)}
wrapped(w, req)
hb := markdown.ToHTML(w.buf.Bytes(), nil, nil)
_, err := rw.Write(hb)
if err != nil {
ec <- err
return
}
}
}
// ZPage allows a type to generate a markdown zpage without consideration for http server semantics.
// ZPath() is used to claim a path for itself in the zpage namespace; ZMarkdown returns the markdown
// value to translate to HTML.
type ZPage interface {
ZPath() string
ZMarkdown() (string, error)
}
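// Editor's sketch (illustrative): the smallest useful ZPage and its registration.
// The type, path, and content are hypothetical; compare BeaconNode's ZPath and
// ZMarkdown earlier in this diff.
type versionPage struct{}

func (*versionPage) ZPath() string { return "/version" }

func (*versionPage) ZMarkdown() (string, error) {
	return "version\n-------\n\ne2e harness zpage demo", nil
}

// Registration: srv := NewServer(); srv.HandleZPages(&versionPage{})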

View File

@@ -8,28 +8,21 @@ import (
"context"
"fmt"
"os"
"os/signal"
"path"
"strings"
"sync"
"syscall"
"testing"
"time"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/api/client/beacon"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/build/bazel"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/io/file"
"github.com/prysmaticlabs/prysm/proto/eth/service"
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/endtoend/components"
"github.com/prysmaticlabs/prysm/testing/endtoend/components/eth1"
"github.com/prysmaticlabs/prysm/testing/endtoend/e2ez"
ev "github.com/prysmaticlabs/prysm/testing/endtoend/evaluators"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
@@ -38,8 +31,6 @@ import (
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
@@ -58,84 +49,37 @@ func init() {
// testRunner abstracts E2E test configuration and running.
type testRunner struct {
t *testing.T
config *e2etypes.E2EConfig
z *e2ez.Server
ctx context.Context
doneChan context.CancelFunc
group *errgroup.Group
t *testing.T
config *e2etypes.E2EConfig
}
// newTestRunner creates E2E test runner.
func newTestRunner(t *testing.T, config *e2etypes.E2EConfig) *testRunner {
ctx, done := context.WithCancel(context.Background())
g, ctx := errgroup.WithContext(ctx)
return &testRunner{
t: t,
config: config,
z: e2ez.NewServer(),
ctx: ctx,
doneChan: done,
group: g,
t: t,
config: config,
}
}
type zPageMenu struct{}
func (z *zPageMenu) ZPath() string {
return "/"
}
func (z *zPageMenu) ZMarkdown() (string, error) {
return `
e2e admin
===========
- [prysm beacon nodes](/beacon-nodes)
- [lh beacon nodes](/lh-beacon-nodes)
`, nil
}
func (z *zPageMenu) ZChildren() []e2ez.ZPage {
return []e2ez.ZPage{}
}
// run executes configured E2E test.
func (r *testRunner) run() {
t, config := r.t, r.config
err := config.WriteBeaconChainConfig()
if err != nil {
t.Fatalf("failed to write BeaconChainConfig to bazel sandbox")
}
t.Logf("Shard index: %d\n", e2e.TestParams.TestShardIndex)
t.Logf("Starting time: %s\n", time.Now().String())
t.Logf("Log Path: %s\n", e2e.TestParams.LogPath)
if e2e.TestParams.ZPageAddr == "" {
e2e.TestParams.ZPageAddr = ":8080"
}
// We need debug endpoints turned on and the max ssz payload size bumped up when running checkpoint sync tests.
if config.TestSync {
config.BeaconFlags = appendDebugEndpoints(config.BeaconFlags)
}
minGenesisActiveCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
multiClientActive := e2e.TestParams.LighthouseBeaconNodeCount > 0
var keyGen, lighthouseValidatorNodes e2etypes.ComponentRunner
var lighthouseNodes *components.LighthouseBeaconNodeSet
ctx := r.ctx
g := r.group
done := r.doneChan
ctx, done := context.WithCancel(context.Background())
g, ctx := errgroup.WithContext(ctx)
tracingSink := components.NewTracingSink(config.TracingSinkEndpoint)
g.Go(func() error {
return tracingSink.Start(ctx)
})
zp := e2ez.NewServer()
zp.HandleZPages(&zPageMenu{})
go zp.ListenAndServe(ctx, e2e.TestParams.ZPageAddr)
if multiClientActive {
keyGen = components.NewKeystoreGenerator()
@@ -145,25 +89,21 @@ func (r *testRunner) run() {
return keyGen.Start(ctx)
})
}
// wait on this channel if LeaveRunning is specified
lrChan := make(chan struct{})
g.Go(func() error {
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(sigc)
if !r.config.LeaveRunning {
close(lrChan)
return nil
var web3RemoteSigner *components.Web3RemoteSigner
if config.UseWeb3RemoteSigner {
cfg, err := bazel.Runfile("config/params/testdata/e2e_config.yaml")
if err != nil {
t.Fatal(err)
}
// if the LeaveRunning flag has been set, cause this goroutine to block until
// the context is canceled or sigint/sigterm is received.
select {
case <-sigc:
close(lrChan)
log.Info("got sigint/term in LeaveRunning keepalive routine")
web3RemoteSigner = components.NewWeb3RemoteSigner(cfg)
g.Go(func() error {
if err := web3RemoteSigner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start web3 remote signer")
}
return nil
}
})
})
}
// Boot node.
bootNode := components.NewBootNode()
@@ -209,27 +149,13 @@ func (r *testRunner) run() {
return nil
})
// Web3 remote signer.
var web3RemoteSigner *components.Web3RemoteSigner
if config.UseWeb3RemoteSigner {
web3RemoteSigner = components.NewWeb3RemoteSigner()
g.Go(func() error {
if err := web3RemoteSigner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start web3 remote signer")
}
return nil
})
}
// Beacon nodes.
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{bootNode}); err != nil {
t.Fatal(err, errors.Wrap(err, "beacon nodes require ETH1 and boot node to run"))
}
beaconNodes := components.NewBeaconNodes(config, bootNode.ENR(), config.BeaconFlags, zp)
beaconNodes := components.NewBeaconNodes(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes, bootNode}); err != nil {
t.Fatal(err, errors.Wrap(err, "beacon nodes require ETH1 and boot node to run"))
return errors.Wrap(err, "beacon nodes require ETH1 and boot node to run")
}
beaconNodes.SetENR(bootNode.ENR())
if err := beaconNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start beacon nodes")
}
@@ -237,11 +163,12 @@ func (r *testRunner) run() {
})
if multiClientActive {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes, bootNode, beaconNodes}); err != nil {
t.Fatal(errors.Wrap(err, "lighthouse beacon nodes require ETH1 and boot node to run"))
}
lighthouseNodes = components.NewLighthouseBeaconNodes(config, bootNode.ENR(), zp)
lighthouseNodes = components.NewLighthouseBeaconNodes(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes, bootNode, beaconNodes}); err != nil {
return errors.Wrap(err, "lighthouse beacon nodes require ETH1 and boot node to run")
}
lighthouseNodes.SetENR(bootNode.ENR())
if err := lighthouseNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start lighthouse beacon nodes")
}
@@ -282,12 +209,7 @@ func (r *testRunner) run() {
g.Go(func() error {
// When everything is done, cancel parent context (will stop all spawned nodes).
defer func() {
log.Info("All E2E evaluations are finished.")
if config.LeaveRunning {
log.Info("LeaveRunning flag set, services won't shut down until ctrl+c received.")
return
}
log.Info("Canceling context to clean up.")
log.Info("All E2E evaluations are finished, cleaning up")
done()
}()
@@ -336,29 +258,21 @@ func (r *testRunner) run() {
require.NoError(t, err)
tickingStartTime := helpers.EpochTickerStartTime(genesis)
index := e2e.TestParams.BeaconNodeCount + e2e.TestParams.LighthouseBeaconNodeCount
// Run assigned evaluators.
if err := r.runEvaluators(conns, tickingStartTime); err != nil {
return errors.Wrap(err, "one or more evaluators failed")
}
if err := r.testBeaconChainSync(ctx, g, index, conns, tickingStartTime, beaconNodes, eth1Miner.ENR()); err != nil {
// If requested, run sync test.
if !config.TestSync {
return nil
}
if err := r.testBeaconChainSync(ctx, g, conns, tickingStartTime, bootNode.ENR(), eth1Miner.ENR()); err != nil {
return errors.Wrap(err, "beacon chain sync test failed")
}
index += 1
if err := r.testDoppelGangerProtection(ctx); err != nil {
return errors.Wrap(err, "doppel ganger protection check failed")
}
// If requested, run sync test.
if config.TestSync {
httpEndpoints := helpers.BeaconAPIHostnames(e2e.TestParams.BeaconNodeCount)
menr := eth1Miner.ENR()
if err := r.testCheckpointSync(index, conns, beaconNodes, httpEndpoints[0], menr); err != nil {
return errors.Wrap(err, "checkpoint sync test failed")
}
index += 1
}
return nil
})
@@ -367,21 +281,10 @@ func (r *testRunner) run() {
if strings.Contains(err.Error(), "signal: killed") {
return
}
if config.LeaveRunning {
<-lrChan
}
t.Fatalf("E2E test ended in error: %v", err)
}
}
func appendDebugEndpoints(flags []string) []string {
debugFlags := []string{
"--enable-debug-rpc-endpoints",
"--grpc-max-msg-size=65568081",
}
return append(flags, debugFlags...)
}
// waitForChainStart allows to wait up until beacon nodes are started.
func (r *testRunner) waitForChainStart() {
// Sleep depending on the count of validators, as generating the genesis state could take some time.
@@ -433,7 +336,6 @@ func (r *testRunner) runEvaluators(conns []*grpc.ClientConn, tickingStartTime ti
func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
keystorePath string, requiredNodes []e2etypes.ComponentRunner) {
minGenesisActiveCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
depositCheckValidator := components.NewValidatorNode(r.config, int(e2e.DepositCount), e2e.TestParams.BeaconNodeCount, minGenesisActiveCount)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, requiredNodes); err != nil {
@@ -466,115 +368,11 @@ func (r *testRunner) testTxGeneration(ctx context.Context, g *errgroup.Group, ke
})
}
func (r *testRunner) waitForMatchingHead(ctx context.Context, check, ref *grpc.ClientConn) error {
// sleep hack copied from testBeaconChainSync
// Sleep a second for every 4 blocks that need to be synced for the newly started node.
secondsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
extraSecondsToSync := (r.config.EpochsToRun)*secondsPerEpoch + uint64(params.BeaconConfig().SlotsPerEpoch.Div(4).Mul(r.config.EpochsToRun))
deadline := time.Now().Add(time.Second*time.Duration(extraSecondsToSync))
ctx, cancel := context.WithDeadline(ctx, deadline)
defer cancel()
checkClient := service.NewBeaconChainClient(check)
refClient := service.NewBeaconChainClient(ref)
for {
select {
case <-ctx.Done():
// deadline ensures that the test eventually exits when the beacon node fails to sync in a reasonable timeframe
return fmt.Errorf("deadline exceeded waiting for known good block to appear in checkpoint-synced node")
default:
cResp, err := checkClient.GetBlockRoot(ctx, &v1.BlockRequest{BlockId: []byte("head")})
if err != nil {
errStatus, ok := status.FromError(err)
// in the happy path we expect NotFound results until the node has synced
if ok && errStatus.Code() == codes.NotFound {
continue
}
return fmt.Errorf("error requesting head from 'check' beacon node")
}
rResp, err := refClient.GetBlockRoot(ctx, &v1.BlockRequest{BlockId: []byte("head")})
if err != nil {
return errors.Wrap(err, "unexpected error requesting head block root from 'ref' beacon node")
}
if bytesutil.ToBytes32(cResp.Data.Root) == bytesutil.ToBytes32(rResp.Data.Root) {
return nil
}
}
}
}
func (r *testRunner) testCheckpointSync(i int, conns []*grpc.ClientConn, nodes *components.BeaconNodeSet, bnAPI, minerEnr string) error {
ethNode := eth1.NewNode(i, minerEnr)
r.group.Go(func() error {
return ethNode.Start(r.ctx)
})
if err := helpers.ComponentsStarted(r.ctx, []e2etypes.ComponentRunner{ethNode}); err != nil {
return fmt.Errorf("sync beacon node not ready: %w", err)
}
client, err := beacon.NewClient(bnAPI)
if err != nil {
return err
}
od, err := beacon.DownloadOriginData(r.ctx, client)
if err != nil {
return err
}
blockPath, err := od.SaveBlock(e2e.TestParams.TestPath)
if err != nil {
return err
}
statePath, err := od.SaveState(e2e.TestParams.TestPath)
if err != nil {
return err
}
gb, err := client.GetState(r.ctx, beacon.IdGenesis)
if err != nil {
return err
}
genPath := path.Join(e2e.TestParams.TestPath, "genesis.ssz")
err = file.WriteFile(genPath, gb)
if err != nil {
return err
}
flags := append([]string{}, r.config.BeaconFlags...)
flags = append(flags, fmt.Sprintf("--weak-subjectivity-checkpoint=%s", od.CheckpointString()))
flags = append(flags, fmt.Sprintf("--checkpoint-state=%s", statePath))
flags = append(flags, fmt.Sprintf("--checkpoint-block=%s", blockPath))
flags = append(flags, fmt.Sprintf("--genesis-state=%s", genPath))
// Node indices are zero-based, so the next free index equals the current list length.
cpsyncer := nodes.AddBeaconNode(i, flags)
r.group.Go(func() error {
return cpsyncer.Start(r.ctx)
})
if err := helpers.ComponentsStarted(r.ctx, []e2etypes.ComponentRunner{cpsyncer}); err != nil {
return fmt.Errorf("checkpoint sync beacon node not ready: %w", err)
}
c, err := grpc.Dial(fmt.Sprintf("127.0.0.1:%d", e2e.TestParams.Ports.PrysmBeaconNodeRPCPort+i), grpc.WithInsecure())
require.NoError(r.t, err, "Failed to dial")
// This is so that the syncEvaluators checks can run on the checkpoint-synced node.
conns = append(conns, c)
err = r.waitForMatchingHead(r.ctx, c, conns[0])
if err != nil {
return err
}
syncEvaluators := []e2etypes.Evaluator{ev.FinishedSyncing, ev.AllNodesHaveSameHead}
for _, evaluator := range syncEvaluators {
r.t.Run(evaluator.Name, func(t *testing.T) {
assert.NoError(t, evaluator.Evaluation(conns...), "Evaluation failed for sync node")
})
}
return nil
}
// testBeaconChainSync creates another beacon node, and tests whether it can sync to head using previous nodes.
func (r *testRunner) testBeaconChainSync(ctx context.Context, g *errgroup.Group, index int,
conns []*grpc.ClientConn, tickingStartTime time.Time, beaconNodes *components.BeaconNodeSet, minerEnr string) error {
t := r.t
func (r *testRunner) testBeaconChainSync(ctx context.Context, g *errgroup.Group,
conns []*grpc.ClientConn, tickingStartTime time.Time, bootnodeEnr, minerEnr string) error {
t, config := r.t, r.config
index := e2e.TestParams.BeaconNodeCount + e2e.TestParams.LighthouseBeaconNodeCount
ethNode := eth1.NewNode(index, minerEnr)
g.Go(func() error {
return ethNode.Start(ctx)
@@ -582,9 +380,7 @@ func (r *testRunner) testBeaconChainSync(ctx context.Context, g *errgroup.Group,
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{ethNode}); err != nil {
return fmt.Errorf("sync beacon node not ready: %w", err)
}
//syncBeaconNode := components.NewBeaconNode(index, bootnodeEnr, r.config.BeaconFlags, config)
syncBeaconNode := beaconNodes.AddBeaconNode(index, r.config.BeaconFlags)
syncBeaconNode := components.NewBeaconNode(config, index, bootnodeEnr)
g.Go(func() error {
return syncBeaconNode.Start(ctx)
})
@@ -597,17 +393,17 @@ func (r *testRunner) testBeaconChainSync(ctx context.Context, g *errgroup.Group,
// Sleep a second for every 4 blocks that need to be synced for the newly started node.
secondsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
extraSecondsToSync := (r.config.EpochsToRun)*secondsPerEpoch + uint64(params.BeaconConfig().SlotsPerEpoch.Div(4).Mul(r.config.EpochsToRun))
extraSecondsToSync := (config.EpochsToRun)*secondsPerEpoch + uint64(params.BeaconConfig().SlotsPerEpoch.Div(4).Mul(config.EpochsToRun))
waitForSync := tickingStartTime.Add(time.Duration(extraSecondsToSync) * time.Second)
time.Sleep(time.Until(waitForSync))
syncLogFile, err := os.Open(path.Join(e2e.TestParams.LogPath, fmt.Sprintf(e2e.BeaconNodeLogFileName, index)))
require.NoError(r.t, err)
defer helpers.LogErrorOutput(r.t, syncLogFile, "beacon chain node", index)
r.t.Run("sync completed", func(t *testing.T) {
require.NoError(t, err)
defer helpers.LogErrorOutput(t, syncLogFile, "beacon chain node", index)
t.Run("sync completed", func(t *testing.T) {
assert.NoError(t, helpers.WaitForTextInFile(syncLogFile, "Synced up to"), "Failed to sync")
})
if r.t.Failed() {
if t.Failed() {
return errors.New("cannot sync beacon node")
}
@@ -615,7 +411,7 @@ func (r *testRunner) testBeaconChainSync(ctx context.Context, g *errgroup.Group,
time.Sleep(time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
syncEvaluators := []e2etypes.Evaluator{ev.FinishedSyncing, ev.AllNodesHaveSameHead}
for _, evaluator := range syncEvaluators {
r.t.Run(evaluator.Name, func(t *testing.T) {
t.Run(evaluator.Name, func(t *testing.T) {
assert.NoError(t, evaluator.Evaluation(conns...), "Evaluation failed for sync node")
})
}

View File

@@ -7,6 +7,7 @@ go_library(
"api_gateway_v1alpha1.go",
"api_middleware.go",
"data.go",
"execution_engine.go",
"finality.go",
"fork.go",
"metrics.go",

View File

@@ -0,0 +1,54 @@
package evaluators
import (
"context"
"math"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/policies"
"github.com/prysmaticlabs/prysm/testing/endtoend/types"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
)
// TransactionsPresent is an evaluator to make sure transactions sent to the execution engine
// appear in consensus client blocks' execution payloads.
var TransactionsPresent = types.Evaluator{
Name: "transactions_present_at_epoch_%d",
Policy: policies.AfterNthEpoch(helpers.BellatrixE2EForkEpoch),
Evaluation: transactionsPresent,
}
func transactionsPresent(conns ...*grpc.ClientConn) error {
conn := conns[0]
client := ethpb.NewBeaconChainClient(conn)
chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
if err != nil {
return errors.Wrap(err, "failed to get chain head")
}
req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch.Sub(1)}}
blks, err := client.ListBeaconBlocks(context.Background(), req)
if err != nil {
return errors.Wrap(err, "failed to get blocks from beacon-chain")
}
expectedTxNum := int(math.Round(float64(params.E2ETestConfig().SlotsPerEpoch) * float64(e2e.NumOfExecEngineTxs) * e2e.ExpectedExecEngineTxsThreshold))
var numberOfTxs int
for _, ctr := range blks.BlockContainers {
switch ctr.Block.(type) {
case *ethpb.BeaconBlockContainer_BellatrixBlock:
numberOfTxs += len(ctr.GetBellatrixBlock().Block.Body.ExecutionPayload.Transactions)
}
}
if numberOfTxs < expectedTxNum {
return errors.Errorf(
"not enough transactions in execution payload, expected=%d vs actual=%d",
expectedTxNum,
numberOfTxs,
)
}
return nil
}
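// Editor's note (worked example, one value assumed): with NumOfExecEngineTxs = 200 and
// ExpectedExecEngineTxsThreshold = 0.7 from the params package in this diff, and
// assuming mainnet's 32 slots per epoch purely for illustration (the E2E config may
// differ), the bar works out to round(32 * 200 * 0.7) = 4480 payload transactions in
// the inspected epoch.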

View File

@@ -14,14 +14,21 @@ import (
"google.golang.org/grpc"
)
// ForkTransition ensures that the hard fork has occurred successfully.
var ForkTransition = types.Evaluator{
Name: "fork_transition_%d",
// AltairForkTransition ensures that the Altair hard fork has occurred successfully.
var AltairForkTransition = types.Evaluator{
Name: "altair_fork_transition_%d",
Policy: policies.OnEpoch(helpers.AltairE2EForkEpoch),
Evaluation: forkOccurs,
Evaluation: altairForkOccurs,
}
func forkOccurs(conns ...*grpc.ClientConn) error {
// BellatrixForkTransition ensures that the Bellatrix hard fork has occurred successfully.
var BellatrixForkTransition = types.Evaluator{
Name: "bellatrix_fork_transition_%d",
Policy: policies.OnEpoch(helpers.BellatrixE2EForkEpoch),
Evaluation: bellatrixForkOccurs,
}
func altairForkOccurs(conns ...*grpc.ClientConn) error {
conn := conns[0]
client := ethpb.NewBeaconNodeValidatorClient(conn)
ctx, cancel := context.WithCancel(context.Background())
@@ -62,3 +69,48 @@ func forkOccurs(conns ...*grpc.ClientConn) error {
}
return nil
}
func bellatrixForkOccurs(conns ...*grpc.ClientConn) error {
conn := conns[0]
client := ethpb.NewBeaconNodeValidatorClient(conn)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.StreamBlocksAltair(ctx, &ethpb.StreamBlocksRequest{VerifiedOnly: true})
if err != nil {
return errors.Wrap(err, "failed to get stream")
}
fSlot, err := slots.EpochStart(helpers.BellatrixE2EForkEpoch)
if err != nil {
return err
}
if ctx.Err() == context.Canceled {
return errors.New("context canceled prematurely")
}
res, err := stream.Recv()
if err != nil {
return err
}
if res == nil || res.Block == nil {
return errors.New("nil block returned by beacon node")
}
if res.GetPhase0Block() == nil && res.GetAltairBlock() == nil && res.GetBellatrixBlock() == nil {
return errors.New("nil block returned by beacon node")
}
if res.GetPhase0Block() != nil {
return errors.New("phase 0 block returned after bellatrix fork has occurred")
}
if res.GetAltairBlock() != nil {
return errors.New("altair block returned after bellatrix fork has occurred")
}
blk, err := wrapperv2.WrappedSignedBeaconBlock(res.GetBellatrixBlock())
if err != nil {
return err
}
if err := coreHelper.BeaconBlockIsNil(blk); err != nil {
return err
}
if blk.Block().Slot() < fSlot {
return errors.Errorf("wanted a block >= %d but received %d", fSlot, blk.Block().Slot())
}
return nil
}

View File

@@ -419,5 +419,8 @@ func convertToBlockInterface(obj *ethpb.BeaconBlockContainer) (block.SignedBeaco
if obj.GetBellatrixBlock() != nil {
return wrapper.WrappedSignedBeaconBlock(obj.GetBellatrixBlock())
}
if obj.GetBellatrixBlock() != nil {
return wrapper.WrappedSignedBeaconBlock(obj.GetBellatrixBlock())
}
return nil, errors.New("container has no block")
}

View File

@@ -270,15 +270,6 @@ func NewLocalConnections(ctx context.Context, numConns int) ([]*grpc.ClientConn,
}, nil
}
func BeaconAPIHostnames(numConns int) []string {
hostnames := make([]string, 0)
for i := 0; i < numConns; i++ {
port := e2e.TestParams.Ports.PrysmBeaconNodeGatewayPort + i
hostnames = append(hostnames, fmt.Sprintf("127.0.0.1:%d", port))
}
return hostnames
}
// ComponentsStarted checks, sequentially, each provided component, blocks until all of the components are ready.
func ComponentsStarted(ctx context.Context, comps []e2etypes.ComponentRunner) error {
for _, comp := range comps {

View File

@@ -19,8 +19,7 @@ func TestEndToEnd_MainnetConfig(t *testing.T) {
}
func e2eMainnet(t *testing.T, usePrysmSh bool) {
cfg := params.E2EMainnetTestConfig()
params.OverrideBeaconConfig(cfg)
params.UseE2EMainnetConfig()
require.NoError(t, e2eParams.InitMultiClient(e2eParams.StandardBeaconCount, e2eParams.StandardLighthouseNodeCount))
// Run for 10 epochs if not in long-running to confirm long-running has no issues.
@@ -55,11 +54,13 @@ func e2eMainnet(t *testing.T, usePrysmSh bool) {
ev.ProposeVoluntaryExit,
ev.ValidatorHasExited,
ev.ColdStateCheckpoint,
ev.ForkTransition,
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.TransactionsPresent,
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{
@@ -70,7 +71,7 @@ func e2eMainnet(t *testing.T, usePrysmSh bool) {
},
ValidatorFlags: []string{},
EpochsToRun: uint64(epochsToRun),
TestSync: false,
TestSync: true,
TestFeature: true,
TestDeposits: true,
UseFixedPeerIDs: true,
@@ -80,7 +81,6 @@ func e2eMainnet(t *testing.T, usePrysmSh bool) {
TracingSinkEndpoint: tracingEndpoint,
Evaluators: evals,
Seed: int64(seed),
BeaconChainConfig: cfg,
}
newTestRunner(t, testConfig).run()

View File

@@ -27,7 +27,6 @@ func TestEndToEnd_MinimalConfig(t *testing.T) {
}
func TestEndToEnd_MinimalConfig_Web3Signer(t *testing.T) {
t.Skip("TODO(9994): Complete web3signer client implementation, currently blocked by https://github.com/ConsenSys/web3signer/issues/494")
e2eMinimal(t, &testArgs{
usePrysmSh: false,
useWeb3RemoteSigner: true,
@@ -43,8 +42,7 @@ func TestEndToEnd_MinimalConfig_ValidatorAtCurrentRelease(t *testing.T) {
}
func e2eMinimal(t *testing.T, args *testArgs) {
cfg := params.E2ETestConfig()
params.OverrideBeaconConfig(cfg)
params.UseE2EConfig()
require.NoError(t, e2eParams.Init(e2eParams.StandardBeaconCount))
// Run for 10 epochs if not in long-running to confirm long-running has no issues.
@@ -55,6 +53,10 @@ func e2eMinimal(t *testing.T, args *testArgs) {
epochsToRun, err = strconv.Atoi(epochStr)
require.NoError(t, err)
}
// TODO(#10053): Web3signer does not support bellatrix yet.
if args.useWeb3RemoteSigner {
epochsToRun = helpers.BellatrixE2EForkEpoch - 1
}
if args.usePrysmSh {
// If using prysm.sh, run for only 6 epochs.
// TODO(#9166): remove this block once v2 changes are live.
@@ -84,12 +86,14 @@ func e2eMinimal(t *testing.T, args *testArgs) {
ev.ValidatorHasExited,
ev.ValidatorsVoteWithTheMajority,
ev.ColdStateCheckpoint,
ev.ForkTransition,
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.ValidatorSyncParticipation,
ev.TransactionsPresent,
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{
@@ -100,7 +104,7 @@ func e2eMinimal(t *testing.T, args *testArgs) {
},
ValidatorFlags: []string{},
EpochsToRun: uint64(epochsToRun),
TestSync: false,
TestSync: true,
TestFeature: true,
TestDeposits: true,
UsePrysmShValidator: args.usePrysmSh,
@@ -109,7 +113,6 @@ func e2eMinimal(t *testing.T, args *testArgs) {
TracingSinkEndpoint: tracingEndpoint,
Evaluators: evals,
Seed: int64(seed),
BeaconChainConfig: cfg,
}
newTestRunner(t, testConfig).run()

View File

@@ -12,8 +12,7 @@ import (
)
func TestEndToEnd_Slasher_MinimalConfig(t *testing.T) {
cfg := params.E2ETestConfig()
params.OverrideBeaconConfig(cfg)
params.UseE2EConfig()
require.NoError(t, e2eParams.Init(e2eParams.StandardBeaconCount))
tracingPort := e2eParams.TestParams.Ports.JaegerTracingPort
@@ -37,7 +36,6 @@ func TestEndToEnd_Slasher_MinimalConfig(t *testing.T) {
ev.InjectDoubleBlockOnEpoch(2),
},
TracingSinkEndpoint: tracingEndpoint,
BeaconChainConfig: cfg,
}
newTestRunner(t, testConfig).run()

View File

@@ -22,7 +22,6 @@ type params struct {
LighthouseBeaconNodeCount int
ContractAddress common.Address
Ports *ports
ZPageAddr string
}
type ports struct {
@@ -70,6 +69,12 @@ var StandardLighthouseNodeCount = 2
// DepositCount is the number of deposits E2E makes on a separate validator client.
var DepositCount = uint64(64)
// NumOfExecEngineTxs is the number of transactions sent to the execution engine.
var NumOfExecEngineTxs = uint64(200)
// ExpectedExecEngineTxsThreshold is the portion of execution engine transactions we expect to find in blocks.
var ExpectedExecEngineTxsThreshold = 0.7
// Base port values.
const (
portSpan = 50

View File

@@ -7,9 +7,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/testing/endtoend/types",
visibility = ["//testing/endtoend:__subpackages__"],
deps = [
"//config/params:go_default_library",
"//io/file:go_default_library",
"//testing/endtoend/params:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
],

View File

@@ -4,13 +4,8 @@ package types
import (
"context"
"fmt"
"path"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/io/file"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
"google.golang.org/grpc"
)
@@ -31,21 +26,6 @@ type E2EConfig struct {
BeaconFlags []string
ValidatorFlags []string
PeerIDs []string
BeaconChainConfig *params.BeaconChainConfig
LeaveRunning bool
}
// BeaconChainConfigPath determines the canonical path to the yaml-encoded BeaconChainConfig
// written by WriteBeaconChainConfig. Used by components to load a non-standard config in tests.
func (cfg *E2EConfig) BeaconChainConfigPath() string {
fname := fmt.Sprintf("beacon-chain-config_%s.yaml", cfg.BeaconChainConfig.ConfigName)
return path.Join(e2e.TestParams.LogPath, fname)
}
// WriteBeaconChainConfig writes the yaml encoding of the BeaconChainConfig struct member
// to a file at the path specified by BeaconChainConfigPath.
func (cfg *E2EConfig) WriteBeaconChainConfig() error {
return file.WriteFile(cfg.BeaconChainConfigPath(), params.ConfigToYaml(cfg.BeaconChainConfig))
}
// Evaluator defines the structure of the evaluators used to
@@ -63,3 +43,11 @@ type ComponentRunner interface {
// Started checks whether an underlying component is started and ready to be queried.
Started() <-chan struct{}
}
// BeaconNodeSet defines an interface for an object that fulfills the duties
// of a group of beacon nodes.
type BeaconNodeSet interface {
ComponentRunner
// SetENR provides the relevant bootnode's enr to the beacon nodes.
SetENR(enr string)
}
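// Editor's sketch (illustrative): the smallest shape that satisfies BeaconNodeSet,
// mirroring how LighthouseBeaconNodeSet embeds ComponentRunner and records the ENR.
// The type name is hypothetical.
type staticNodeSet struct {
	ComponentRunner
	enr string
}

// SetENR records the bootnode's ENR for later node construction.
func (s *staticNodeSet) SetENR(enr string) { s.enr = enr }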

View File

@@ -102,3 +102,7 @@ func (m *engineMock) ExecutionBlockByHash(_ context.Context, hash common.Hash) (
Hash: b.BlockHash,
}, nil
}
func (m *engineMock) GetTerminalBlockHash(context.Context) ([]byte, bool, error) {
return nil, false, nil
}

View File

@@ -192,12 +192,8 @@ func createLocalNode(privKey *ecdsa.PrivateKey, ipAddr net.IP, port int) (*enode
external = ipAddr
}
fVersion := params.BeaconConfig().GenesisForkVersion
fv := *forkVersion
if fv != "" {
if fv[0:2] == "0x" {
fv = fv[2:]
}
fVersion, err = hex.DecodeString(fv)
if *forkVersion != "" {
fVersion, err = hex.DecodeString(*forkVersion)
if err != nil {
return nil, errors.Wrap(err, "Could not retrieve fork version")
}

Some files were not shown because too many files have changed in this diff.