Compare commits

...

16 Commits

Author SHA1 Message Date
prestonvanloon
568cc9a9df temp commit with some commentary. revert this commit later 2022-03-03 11:38:39 -06:00
prestonvanloon
0d08d4f2ca temp commit with some commentary. revert this commit later 2022-03-02 18:32:31 -06:00
prestonvanloon
cb4964b575 temp commit with some commentary. revert this commit later 2022-03-02 18:21:32 -06:00
prestonvanloon
7ed71b27f1 temp commit with some commentary. revert this commit later 2022-03-02 18:18:01 -06:00
prestonvanloon
7710db4ec2 temp commit with some commentary. revert this commit later 2022-03-02 18:05:11 -06:00
prestonvanloon
ff1190a387 temp commit with some commentary. revert this commit later 2022-03-02 17:55:30 -06:00
prestonvanloon
ad849dbac4 temp commit with some commentary. revert this commit later 2022-03-02 17:24:59 -06:00
prestonvanloon
abe667a262 temp commit with some commentary. revert this commit later 2022-03-02 17:21:02 -06:00
prestonvanloon
c51ea298a2 temp commit with some commentary. revert this commit later 2022-03-02 17:16:55 -06:00
prestonvanloon
72b3114f37 temp commit with some commentary. revert this commit later 2022-03-02 17:13:52 -06:00
prestonvanloon
1930550675 temp commit with some commentary. revert this commit later 2022-03-02 15:14:49 -06:00
prestonvanloon
ea68d4b10e temp commit to test e2e 2022-03-02 14:29:06 -06:00
Raul Jordan
958dd9d783 Check Engine API Transition Configuration in Background (#10250)
* transition proto

* gen pb

* builds

* impl transition config

* begin tests

* transition config messed up

* amend proto

* use str

* passing

* gaz

* config

* client test

* pb

* set to 0

* rem log

* gaz

* check transition config

* check config differences

* check transition config in background

* gaz

* pass

* redundant

* fix up error handling and healthz

* simplify status

* gazelle

* build

* err config check

* test

* gaz

* Fix run time

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-01 21:38:51 +00:00
Preston Van Loon
a3f8ccd924 validator: Fix flaky test TestServer_RefreshJWTSecretOnFileChange (#10296) 2022-03-01 20:34:05 +00:00
terence tsao
d64f6cb7a8 Add engine methods to block processing (#10285)
* Add notify newPayload and forkchoiceUpdate

* Tests

* Raul's feedback

* Update optimistic_sync.go

* Simplify

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-01 16:43:06 +00:00
terence tsao
a9a75e0004 Release lock before return validatedTips (#10289)
* Release lock before return

* add test

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-03-01 15:36:47 +00:00
25 changed files with 882 additions and 134 deletions

View File

@@ -9,15 +9,17 @@ import (
)
type mockEngineService struct {
blks map[[32]byte]*enginev1.ExecutionBlock
newPayloadError error
forkchoiceError error
blks map[[32]byte]*enginev1.ExecutionBlock
}
func (*mockEngineService) NewPayload(context.Context, *enginev1.ExecutionPayload) ([]byte, error) {
return nil, nil
func (m *mockEngineService) NewPayload(context.Context, *enginev1.ExecutionPayload) ([]byte, error) {
return nil, m.newPayloadError
}
func (*mockEngineService) ForkchoiceUpdated(context.Context, *enginev1.ForkchoiceState, *enginev1.PayloadAttributes) (*enginev1.PayloadIDBytes, []byte, error) {
return nil, nil, nil
func (m *mockEngineService) ForkchoiceUpdated(context.Context, *enginev1.ForkchoiceState, *enginev1.PayloadAttributes) (*enginev1.PayloadIDBytes, []byte, error) {
return nil, nil, m.forkchoiceError
}
func (*mockEngineService) GetPayloadV1(
@@ -30,8 +32,8 @@ func (*mockEngineService) GetPayload(context.Context, [8]byte) (*enginev1.Execut
return nil, nil
}
func (*mockEngineService) ExchangeTransitionConfiguration(context.Context, *enginev1.TransitionConfiguration) (*enginev1.TransitionConfiguration, error) {
return nil, nil
func (*mockEngineService) ExchangeTransitionConfiguration(context.Context, *enginev1.TransitionConfiguration) error {
return nil
}
func (*mockEngineService) LatestExecutionBlock(context.Context) (*enginev1.ExecutionBlock, error) {

View File

@@ -2,15 +2,140 @@ package blockchain
import (
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/sirupsen/logrus"
)
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
return nil, errors.New("nil head block")
}
// Must not call fork choice updated until the transition conditions are met on the Pow network.
if isPreBellatrix(headBlk.Version()) {
return nil, nil
}
isExecutionBlk, err := blocks.ExecutionBlock(headBlk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not determine if block is execution block")
}
if !isExecutionBlk {
return nil, nil
}
headPayload, err := headBlk.Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block")
}
var finalizedHash []byte
if isPreBellatrix(finalizedBlock.Block().Version()) {
finalizedHash = params.BeaconConfig().ZeroHash[:]
} else {
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block execution payload")
}
finalizedHash = payload.BlockHash
}
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: headPayload.BlockHash,
FinalizedBlockHash: finalizedHash,
}
// payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
default:
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
}
}
return payloadID, nil
}
// notifyNewPayload signals execution engine on a new payload
func (s *Service) notifyNewPayload(ctx context.Context, preState, postState state.BeaconState, blk block.SignedBeaconBlock) error {
if preState == nil || postState == nil {
return errors.New("pre and post states must not be nil")
}
// Execution payload is only supported in Bellatrix and beyond.
if isPreBellatrix(postState.Version()) {
return nil
}
if err := helpers.BeaconBlockIsNil(blk); err != nil {
return err
}
body := blk.Block().Body()
enabled, err := blocks.ExecutionEnabled(postState, blk.Block().Body())
if err != nil {
return errors.Wrap(err, "could not determine if execution is enabled")
}
if !enabled {
return nil
}
payload, err := body.ExecutionPayload()
if err != nil {
return errors.Wrap(err, "could not get execution payload")
}
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"slot": postState.Slot(),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return nil
default:
return errors.Wrap(err, "could not validate execution payload from execution engine")
}
}
// During the transition event, the transition block should be verified for sanity.
if isPreBellatrix(preState.Version()) {
return nil
}
atTransition, err := blocks.MergeTransitionBlock(preState, body)
if err != nil {
return errors.Wrap(err, "could not check if merge block is terminal")
}
if !atTransition {
return nil
}
return s.validateMergeBlock(ctx, blk)
}
// isPreBellatrix returns true if input version is before bellatrix fork.
func isPreBellatrix(v int) bool {
return v == version.Phase0 || v == version.Altair
}
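A minimal, self-contained sketch of how a caller might drive these two notifications in order during block processing follows; the stand-in types and the processPayload wrapper are illustrative, while the method shapes and the nil payload attributes mirror the functions above.
package enginesketch
import "context"
// Local stand-ins for the enginev1 proto types referenced above.
type (
	ExecutionPayload  struct{ BlockHash []byte }
	ForkchoiceState   struct{ HeadBlockHash, SafeBlockHash, FinalizedBlockHash []byte }
	PayloadAttributes struct{}
	PayloadIDBytes    [8]byte
)
// engineCaller mirrors the subset of the engine API used by the functions above.
type engineCaller interface {
	NewPayload(ctx context.Context, payload *ExecutionPayload) ([]byte, error)
	ForkchoiceUpdated(ctx context.Context, state *ForkchoiceState, attrs *PayloadAttributes) (*PayloadIDBytes, []byte, error)
}
// processPayload sketches the ordering used during block processing: the new
// payload is handed to the execution engine for validation first, then fork
// choice is updated so head, safe, and finalized hashes follow the beacon
// chain's view.
func processPayload(ctx context.Context, e engineCaller, payload *ExecutionPayload, finalizedHash []byte) error {
	if _, err := e.NewPayload(ctx, payload); err != nil {
		return err
	}
	fcs := &ForkchoiceState{
		HeadBlockHash:      payload.BlockHash,
		SafeBlockHash:      payload.BlockHash,
		FinalizedBlockHash: finalizedHash,
	}
	// Payload attributes stay nil: we are only updating fork choice here,
	// not asking the engine to build a payload.
	_, _, err := e.ForkchoiceUpdated(ctx, fcs, nil)
	return err
}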
// optimisticCandidateBlock returns true if this block can be optimistically synced.
//
// Spec pseudocode definition:

View File

@@ -7,10 +7,13 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
@@ -19,6 +22,305 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
func Test_NotifyForkchoiceUpdate(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockAltair())
require.NoError(t, err)
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
tests := []struct {
name string
blk block.BeaconBlock
finalizedRoot [32]byte
newForkchoiceErr error
errString string
}{
{
name: "nil block",
errString: "nil head block",
},
{
name: "phase0 block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
require.NoError(t, err)
return b
}(),
},
{
name: "altair block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}})
require.NoError(t, err)
return b
}(),
},
{
name: "not execution block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
},
},
})
require.NoError(t, err)
return b
}(),
},
{
name: "happy case: finalized root is altair block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
finalizedRoot: altairBlkRoot,
},
{
name: "happy case: finalized root is bellatrix block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
finalizedRoot: bellatrixBlkRoot,
},
{
name: "forkchoice updated with optimistic block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrAcceptedSyncingPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
},
{
name: "forkchoice updated with invalid block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrInvalidPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
errString: "could not notify forkchoice update from execution engine: payload status is INVALID",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
engine := &mockEngineService{forkchoiceError: tt.newForkchoiceErr}
service.cfg.ExecutionEngineCaller = engine
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, tt.finalizedRoot)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
})
}
}
func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
},
}
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
tests := []struct {
name string
preState state.BeaconState
postState state.BeaconState
blk block.SignedBeaconBlock
newPayloadErr error
errString string
}{
{
name: "phase 0 post state",
postState: phase0State,
preState: phase0State,
},
{
name: "altair post state",
postState: altairState,
preState: altairState,
},
{
name: "nil states",
errString: "pre and post states must not be nil",
},
{
name: "nil beacon block",
postState: bellatrixState,
preState: bellatrixState,
errString: "signed beacon block can't be nil",
},
{
name: "new payload with optimistic block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: engine.ErrAcceptedSyncingPayloadStatus,
},
{
name: "new payload with invalid block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: engine.ErrInvalidPayloadStatus,
errString: "could not validate execution payload from execution engine: payload status is INVALID",
},
{
name: "altair pre state",
postState: bellatrixState,
preState: altairState,
blk: bellatrixBlk,
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
},
{
name: "not at merge transition",
postState: bellatrixState,
preState: bellatrixState,
blk: func() block.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
},
{
name: "happy case",
postState: bellatrixState,
preState: bellatrixState,
blk: func() block.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
},
}
for _, tt := range tests {
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = engine
err := service.notifyNewPayload(ctx, tt.preState, tt.postState, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
}
}
func Test_IsOptimisticCandidateBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())

View File

@@ -84,7 +84,7 @@ type config struct {
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller enginev1.EngineCaller
ExecutionEngineCaller enginev1.Caller
}
// NewService instantiates a new block service instance that will

View File

@@ -47,6 +47,7 @@ func (f *ForkChoice) Optimistic(ctx context.Context, root [32]byte, slot types.S
f.syncedTips.RLock()
_, ok := f.syncedTips.validatedTips[root]
if ok {
f.syncedTips.RUnlock()
return false, nil
}
f.syncedTips.RUnlock()

View File

@@ -200,6 +200,10 @@ func TestOptimistic(t *testing.T) {
op, err = f.Optimistic(ctx, nodeK.root, nodeK.slot)
require.NoError(t, err)
require.Equal(t, op, true)
// request a write lock on synced tips; regression test for #10289
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
}
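The deadlock guarded against by this regression test is an early return taken while the read lock in Optimistic is still held; a minimal standalone illustration of the corrected pattern, with illustrative names, is:
package lockingsketch
import "sync"
type syncedTips struct {
	sync.RWMutex
	validatedTips map[[32]byte]bool
}
// hasTip releases the read lock on every return path. The bug fixed in #10289
// was an early return that skipped RUnlock, which would block the next writer
// (such as the Lock taken in the test above) forever.
func (s *syncedTips) hasTip(root [32]byte) bool {
	s.RLock()
	if _, ok := s.validatedTips[root]; ok {
		s.RUnlock()
		return true
	}
	s.RUnlock()
	return false
}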
// This tests the algorithm to update syncedTips

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"block_cache.go",
"block_reader.go",
"check_transition_config.go",
"deposit.go",
"log.go",
"log_processing.go",
@@ -44,6 +45,7 @@ go_library(
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
@@ -54,6 +56,7 @@ go_library(
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -69,6 +72,7 @@ go_test(
srcs = [
"block_cache_test.go",
"block_reader_test.go",
"check_transition_config_test.go",
"deposit_test.go",
"init_test.go",
"log_processing_test.go",
@@ -87,6 +91,8 @@ go_test(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1/mocks:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state/stategen:go_default_library",

View File

@@ -0,0 +1,82 @@
package powchain
import (
"context"
"errors"
"math/big"
"time"
"github.com/holiman/uint256"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/config/params"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
var (
checkTransitionPollingInterval = time.Second * 10
configMismatchLog = "Configuration mismatch between your execution client and Prysm. " +
"Please check your execution client and restart it with the proper configuration. If this is not done, " +
"your node will not be able to complete the proof-of-stake transition"
)
// Checks the transition configuration between Prysm and the connected execution node to ensure
// there are no differences in terminal total difficulty and terminal block hash.
// If there are any discrepancies, we must log errors to ensure users can resolve
// the problem and be ready for the merge transition.
func (s *Service) checkTransitionConfiguration(ctx context.Context) {
if s.engineAPIClient == nil {
return
}
i := new(big.Int)
i.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
ttd := new(uint256.Int)
ttd.SetFromBig(i)
cfg := &pb.TransitionConfiguration{
TerminalTotalDifficulty: ttd.Hex(),
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
TerminalBlockNumber: big.NewInt(0).Bytes(), // A value of 0 is recommended in the request.
}
err := s.engineAPIClient.ExchangeTransitionConfiguration(ctx, cfg)
if err != nil {
if errors.Is(err, v1.ErrConfigMismatch) {
log.WithError(err).Fatal(configMismatchLog)
}
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}
// We poll the execution client to see if the transition configuration has changed.
// This serves as a heartbeat to ensure the execution client and Prysm are ready for the
// Bellatrix hard-fork transition.
ticker := time.NewTicker(checkTransitionPollingInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
err = s.engineAPIClient.ExchangeTransitionConfiguration(ctx, cfg)
s.handleExchangeConfigurationError(err)
}
}
}
// We check if there is a configuration mismatch error between the execution client
// and the Prysm beacon node. If so, we need to log errors in the node as it cannot successfully
// complete the merge transition for the Bellatrix hard fork.
func (s *Service) handleExchangeConfigurationError(err error) {
if err == nil {
// If the exchange configuration call returned no error, we clear
// the run error of the service if we had previously set it to ErrConfigMismatch.
if errors.Is(s.runError, v1.ErrConfigMismatch) {
s.runError = nil
}
return
}
// If the error is a configuration mismatch, we set a runtime error in the service.
if errors.Is(err, v1.ErrConfigMismatch) {
s.runError = err
log.WithError(err).Error(configMismatchLog)
return
}
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}
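The terminal total difficulty is configured in Prysm as a decimal string but sent over the engine API as a 0x-prefixed hex quantity; a small standalone sketch of the conversion performed above (package and function names are illustrative) is:
package ttdsketch
import (
	"fmt"
	"math/big"
	"github.com/holiman/uint256"
)
// ttdToHex converts a decimal terminal total difficulty string, e.g. "2", into
// the 0x-prefixed hex form expected by engine_exchangeTransitionConfigurationV1,
// e.g. "0x2".
func ttdToHex(decimal string) (string, error) {
	i, ok := new(big.Int).SetString(decimal, 10)
	if !ok {
		return "", fmt.Errorf("invalid terminal total difficulty %q", decimal)
	}
	ttd, overflow := uint256.FromBig(i)
	if overflow {
		return "", fmt.Errorf("terminal total difficulty %q overflows 256 bits", decimal)
	}
	return ttd.Hex(), nil
}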

View File

@@ -0,0 +1,59 @@
package powchain
import (
"context"
"errors"
"testing"
"time"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1/mocks"
"github.com/prysmaticlabs/prysm/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func Test_checkTransitionConfiguration(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
m := &mocks.EngineClient{}
m.Err = errors.New("something went wrong")
srv := &Service{}
srv.engineAPIClient = m
checkTransitionPollingInterval = time.Millisecond
ctx, cancel := context.WithCancel(ctx)
go srv.checkTransitionConfiguration(ctx)
<-time.After(100 * time.Millisecond)
cancel()
require.LogsContain(t, hook, "Could not check configuration values")
}
func TestService_handleExchangeConfigurationError(t *testing.T) {
hook := logTest.NewGlobal()
t.Run("clears existing service error", func(t *testing.T) {
srv := &Service{isRunning: true}
srv.runError = v1.ErrConfigMismatch
srv.handleExchangeConfigurationError(nil)
require.Equal(t, true, srv.Status() == nil)
})
t.Run("does not clear existing service error if wrong kind", func(t *testing.T) {
srv := &Service{isRunning: true}
err := errors.New("something else went wrong")
srv.runError = err
srv.handleExchangeConfigurationError(nil)
require.ErrorIs(t, err, srv.Status())
})
t.Run("sets service error on config mismatch", func(t *testing.T) {
srv := &Service{isRunning: true}
srv.handleExchangeConfigurationError(v1.ErrConfigMismatch)
require.Equal(t, v1.ErrConfigMismatch, srv.Status())
require.LogsContain(t, hook, configMismatchLog)
})
t.Run("does not set service error if unrelated problem", func(t *testing.T) {
srv := &Service{isRunning: true}
srv.handleExchangeConfigurationError(errors.New("foo"))
require.Equal(t, true, srv.Status() == nil)
require.LogsContain(t, hook, "Could not check configuration values")
})
}

View File

@@ -14,6 +14,7 @@ go_library(
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -28,6 +29,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/powchain/engine-api-client/v1/mocks:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -36,6 +38,7 @@ go_test(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

View File

@@ -12,6 +12,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rpc"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
@@ -42,9 +43,9 @@ type ForkchoiceUpdatedResponse struct {
PayloadId *pb.PayloadIDBytes `json:"payloadId"`
}
// EngineCaller defines a client that can interact with an Ethereum
// Caller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
type Caller interface {
NewPayload(ctx context.Context, payload *pb.ExecutionPayload) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
@@ -52,7 +53,7 @@ type EngineCaller interface {
GetPayload(ctx context.Context, payloadId [8]byte) (*pb.ExecutionPayload, error)
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) (*pb.TransitionConfiguration, error)
) error
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
}
@@ -155,35 +156,39 @@ func (c *Client) GetPayload(ctx context.Context, payloadId [8]byte) (*pb.Executi
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
func (c *Client) ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) (*pb.TransitionConfiguration, error) {
) error {
// We set terminal block number to 0 as the parameter is not set on the consensus layer.
zeroBigNum := big.NewInt(0)
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
result := &pb.TransitionConfiguration{}
if err := c.rpc.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
return nil, handleRPCError(err)
return handleRPCError(err)
}
// We surface an error to the user if local configuration settings mismatch
// according to the response from the execution node.
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
if !bytes.Equal(cfgTerminalHash, result.TerminalBlockHash) {
return nil, errors.Wrapf(
ErrMismatchTerminalBlockHash,
return errors.Wrapf(
ErrConfigMismatch,
"got %#x from execution node, wanted %#x",
result.TerminalBlockHash,
cfgTerminalHash,
)
}
ttdCfg := params.BeaconConfig().TerminalTotalDifficulty
if ttdCfg != result.TerminalTotalDifficulty {
return nil, errors.Wrapf(
ErrMismatchTerminalTotalDiff,
ttdResult, err := hexutil.DecodeBig(result.TerminalTotalDifficulty)
if err != nil {
return errors.Wrap(err, "could not decode received terminal total difficulty")
}
if ttdResult.String() != ttdCfg {
return errors.Wrapf(
ErrConfigMismatch,
"got %s from execution node, wanted %s",
result.TerminalTotalDifficulty,
ttdResult.String(),
ttdCfg,
)
}
return result, nil
return nil
}
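With the returned configuration dropped, callers distinguish a genuine mismatch from transport problems purely through the error; a short usage sketch against the Caller interface and ErrConfigMismatch sentinel from this change set (the checkOnce wrapper itself is illustrative) is:
package enginecallersketch
import (
	"context"
	"errors"
	"log"
	v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
	pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
// checkOnce runs a single exchange and reacts only to the sentinel
// ErrConfigMismatch; any other error is treated as a transient RPC failure.
func checkOnce(ctx context.Context, c v1.Caller, cfg *pb.TransitionConfiguration) {
	switch err := c.ExchangeTransitionConfiguration(ctx, cfg); {
	case err == nil:
		// Configurations agree; nothing to do.
	case errors.Is(err, v1.ErrConfigMismatch):
		log.Printf("execution client transition configuration mismatch: %v", err)
	default:
		log.Printf("could not exchange transition configuration: %v", err)
	}
}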
// LatestExecutionBlock fetches the latest execution engine block by calling

View File

@@ -13,7 +13,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1/mocks"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -22,7 +24,10 @@ import (
"google.golang.org/protobuf/proto"
)
var _ = EngineCaller(&Client{})
var (
_ = Caller(&Client{})
_ = Caller(&mocks.EngineClient{})
)
func TestClient_IPC(t *testing.T) {
server := newTestIPCServer(t)
@@ -62,9 +67,8 @@ func TestClient_IPC(t *testing.T) {
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, err := client.ExchangeTransitionConfiguration(ctx, want)
err := client.ExchangeTransitionConfiguration(ctx, want)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
@@ -371,9 +375,8 @@ func TestClient_HTTP(t *testing.T) {
client.rpc = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.ExchangeTransitionConfiguration(ctx, want)
err = client.ExchangeTransitionConfiguration(ctx, want)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
arg := common.BytesToHash([]byte("foo"))
@@ -449,8 +452,8 @@ func TestExchangeTransitionConfiguration(t *testing.T) {
client := &Client{}
client.rpc = rpcClient
_, err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrMismatchTerminalBlockHash))
err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
})
t.Run("wrong terminal total difficulty", func(t *testing.T) {
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
@@ -465,7 +468,7 @@ func TestExchangeTransitionConfiguration(t *testing.T) {
}()
// Change the terminal block hash.
resp.TerminalTotalDifficulty = "bar"
resp.TerminalTotalDifficulty = "0x1"
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
@@ -482,8 +485,8 @@ func TestExchangeTransitionConfiguration(t *testing.T) {
client := &Client{}
client.rpc = rpcClient
_, err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrMismatchTerminalTotalDiff))
err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
})
}
@@ -678,9 +681,11 @@ func fixtures() map[string]interface{} {
},
PayloadId: &id,
}
b, _ := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
ttd, _ := uint256.FromBig(b)
transitionCfg := &pb.TransitionConfiguration{
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
TerminalTotalDifficulty: params.BeaconConfig().TerminalTotalDifficulty,
TerminalTotalDifficulty: ttd.Hex(),
TerminalBlockNumber: big.NewInt(0).Bytes(),
}
validStatus := &pb.PayloadStatus{

View File

@@ -21,6 +21,9 @@ var (
ErrUnknownPayloadStatus = errors.New("unknown payload status")
// ErrUnsupportedScheme for unsupported URL schemes.
ErrUnsupportedScheme = errors.New("unsupported url scheme, only http(s) and ipc are supported")
// ErrConfigMismatch when the execution node's terminal total difficulty or
// terminal block hash received via the API mismatches Prysm's configuration value.
ErrConfigMismatch = errors.New("execution client configuration mismatch")
// ErrMismatchTerminalBlockHash when the terminal block hash value received via
// the API mismatches Prysm's configuration value.
ErrMismatchTerminalBlockHash = errors.New("terminal block hash mismatch")

View File

@@ -0,0 +1,12 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["client.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1/mocks",
visibility = ["//visibility:public"],
deps = [
"//proto/engine/v1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
],
)

View File

@@ -0,0 +1,50 @@
package mocks
import (
"context"
"github.com/ethereum/go-ethereum/common"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
// EngineClient --
type EngineClient struct {
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
Err error
ExecutionBlock *pb.ExecutionBlock
}
// NewPayload --
func (e *EngineClient) NewPayload(_ context.Context, _ *pb.ExecutionPayload) ([]byte, error) {
return e.NewPayloadResp, nil
}
// ForkchoiceUpdated --
func (e *EngineClient) ForkchoiceUpdated(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error) {
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, nil
}
// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte) (*pb.ExecutionPayload, error) {
return e.ExecutionPayload, nil
}
// ExchangeTransitionConfiguration --
func (e *EngineClient) ExchangeTransitionConfiguration(_ context.Context, _ *pb.TransitionConfiguration) error {
return e.Err
}
// LatestExecutionBlock --
func (e *EngineClient) LatestExecutionBlock(_ context.Context) (*pb.ExecutionBlock, error) {
return e.ExecutionBlock, nil
}
// ExecutionBlockByHash --
func (e *EngineClient) ExecutionBlockByHash(_ context.Context, _ common.Hash) (*pb.ExecutionBlock, error) {
return e.ExecutionBlock, nil
}

View File

@@ -157,7 +157,7 @@ type Service struct {
headTicker *time.Ticker
httpLogger bind.ContractFilterer
eth1DataFetcher RPCDataFetcher
engineAPIClient *engine.Client
engineAPIClient engine.Caller
rpcClient RPCClient
headerCache *headerCache // cache to store block hash/block height.
latestEth1Data *ethpb.LatestETH1Data
@@ -217,6 +217,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
return nil, errors.Wrap(err, "unable to initialize engine API client")
}
// Check transition configuration for the engine API client in the background.
go s.checkTransitionConfiguration(ctx)
if err := s.ensureValidPowchainData(ctx); err != nil {
return nil, errors.Wrap(err, "unable to validate powchain data")
}
@@ -299,15 +302,12 @@ func (s *Service) Status() error {
return nil
}
// get error from run function
if s.runError != nil {
return s.runError
}
return nil
return s.runError
}
// EngineAPIClient returns the associated engine API client to interact
// with an execution node via JSON-RPC.
func (s *Service) EngineAPIClient() *engine.Client {
func (s *Service) EngineAPIClient() engine.Caller {
return s.engineAPIClient
}
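Storing the client behind the engine.Caller interface rather than the concrete *engine.Client is what lets the tests above inject mocks.EngineClient; a compile-time sketch of that substitution, using the packages from this diff, is:
package powchainsketch
import (
	engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
	"github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1/mocks"
)
// Both the real JSON-RPC client and the test mock satisfy engine.Caller, so
// either can be assigned to the Service's engineAPIClient field.
var (
	_ engine.Caller = (*engine.Client)(nil)
	_ engine.Caller = (*mocks.EngineClient)(nil)
)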

View File

@@ -107,5 +107,7 @@ go_test(
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
"//cmd:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
],
)

View File

@@ -49,9 +49,9 @@ func NewBeaconNodes(config *e2etypes.E2EConfig) *BeaconNodeSet {
// Start starts all the beacon nodes in set.
func (s *BeaconNodeSet) Start(ctx context.Context) error {
if s.enr == "" {
return errors.New("empty ENR")
}
// if s.enr == "" {
// return errors.New("empty ENR")
// }
// Create beacon nodes.
nodes := make([]e2etypes.ComponentRunner, e2e.TestParams.BeaconNodeCount)
@@ -141,6 +141,12 @@ func (node *BeaconNode) Start(ctx context.Context) error {
args = append(args, features.E2EBeaconChainFlags...)
args = append(args, config.BeaconFlags...)
if config.NoModifyBeaconFlags {
log.Warn("Using config.BeaconFlags with no modifications")
args = config.BeaconFlags
args = append(args, fmt.Sprintf("--%s=%s", cmdshared.LogFileName.Name, stdOutFile.Name()))
}
cmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Safe
// Write stdout and stderr to log files.
stdout, err := os.Create(path.Join(e2e.TestParams.LogPath, fmt.Sprintf("beacon_node_%d_stdout.log", index)))
@@ -161,14 +167,15 @@ func (node *BeaconNode) Start(ctx context.Context) error {
}()
cmd.Stdout = stdout
cmd.Stderr = stderr
log.Infof("Starting beacon chain %d with flags: %s", index, strings.Join(args[2:], " "))
log.Infof("Starting beacon chain %d with flags: %s", index, strings.Join(args, " "))
if err = cmd.Start(); err != nil {
return fmt.Errorf("failed to start beacon node: %w", err)
}
log.Info("Beacon chain node started, waiting for gRPC server log message")
if err = helpers.WaitForTextInFile(stdOutFile, "gRPC server listening on port"); err != nil {
return fmt.Errorf("could not find multiaddr for node %d, this means the node had issues starting: %w", index, err)
}
log.Info("Found gRPC server log message, beacon chain node is ready")
if config.UseFixedPeerIDs {
peerId, err := helpers.FindFollowingTextInFile(stdOutFile, "Running node with peer id of ")

View File

@@ -11,7 +11,7 @@ import (
"strings"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/prysmaticlabs/prysm/config/params"
// "github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/io/file"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
@@ -167,7 +167,8 @@ func (node *LighthouseBeaconNode) Started() <-chan struct{} {
func (node *LighthouseBeaconNode) createTestnetDir(index int) (string, error) {
testNetDir := e2e.TestParams.TestPath + fmt.Sprintf("/lighthouse-testnet-%d", index)
configPath := filepath.Join(testNetDir, "config.yaml")
rawYaml := params.E2EMainnetConfigYaml()
//rawYaml := params.E2EMainnetConfigYaml()
rawYaml := []byte{}
// Add in deposit contract in yaml
depContractStr := fmt.Sprintf("\nDEPOSIT_CONTRACT_ADDRESS: %#x", e2e.TestParams.ContractAddress)
rawYaml = append(rawYaml, []byte(depContractStr)...)

View File

@@ -150,7 +150,7 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
args = append(args, features.E2EValidatorFlags...)
}
if v.config.UseWeb3RemoteSigner {
args = append(args, fmt.Sprintf("--%s=localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
// args = append(args, fmt.Sprintf("--%s=localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
// Write the pubkeys as comma separated hex strings with 0x prefix.
// See: https://docs.teku.consensys.net/en/latest/HowTo/External-Signer/Use-External-Signer/
_, pubs, err := interop.DeterministicallyGenerateKeys(uint64(offset), uint64(validatorNum))
@@ -161,7 +161,7 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
for _, pub := range pubs {
hexPubs = append(hexPubs, "0x"+hex.EncodeToString(pub.Marshal()))
}
args = append(args, fmt.Sprintf("--%s=%s", flags.Web3SignerPublicValidatorKeysFlag.Name, strings.Join(hexPubs, ",")))
// args = append(args, fmt.Sprintf("--%s=%s", flags.Web3SignerPublicValidatorKeysFlag.Name, strings.Join(hexPubs, ",")))
} else {
// When not using remote key signer, use interop keys.
args = append(args,

View File

@@ -66,7 +66,7 @@ func (r *testRunner) run() {
t.Logf("Starting time: %s\n", time.Now().String())
t.Logf("Log Path: %s\n", e2e.TestParams.LogPath)
minGenesisActiveCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
// minGenesisActiveCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
multiClientActive := e2e.TestParams.LighthouseBeaconNodeCount > 0
var keyGen, lighthouseValidatorNodes e2etypes.ComponentRunner
var lighthouseNodes *components.LighthouseBeaconNodeSet
@@ -74,10 +74,10 @@ func (r *testRunner) run() {
ctx, done := context.WithCancel(context.Background())
g, ctx := errgroup.WithContext(ctx)
tracingSink := components.NewTracingSink(config.TracingSinkEndpoint)
g.Go(func() error {
return tracingSink.Start(ctx)
})
// tracingSink := components.NewTracingSink(config.TracingSinkEndpoint)
// g.Go(func() error {
// return tracingSink.Start(ctx)
// })
if multiClientActive {
keyGen = components.NewKeystoreGenerator()
@@ -89,38 +89,38 @@ func (r *testRunner) run() {
}
// ETH1 node.
eth1Node := components.NewEth1Node()
g.Go(func() error {
if err := eth1Node.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start eth1node")
}
return nil
})
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Node}); err != nil {
return errors.Wrap(err, "sending and mining deposits require ETH1 node to run")
}
if err := components.SendAndMineDeposits(eth1Node.KeystorePath(), minGenesisActiveCount, 0, true /* partial */); err != nil {
return errors.Wrap(err, "failed to send and mine deposits")
}
return nil
})
// eth1Node := components.NewEth1Node()
// g.Go(func() error {
// if err := eth1Node.Start(ctx); err != nil {
// return errors.Wrap(err, "failed to start eth1node")
// }
// return nil
// })
// g.Go(func() error {
// if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Node}); err != nil {
// return errors.Wrap(err, "sending and mining deposits require ETH1 node to run")
// }
// if err := components.SendAndMineDeposits(eth1Node.KeystorePath(), minGenesisActiveCount, 0, true /* partial */); err != nil {
// return errors.Wrap(err, "failed to send and mine deposits")
// }
// return nil
// })
// Boot node.
bootNode := components.NewBootNode()
g.Go(func() error {
if err := bootNode.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start bootnode")
}
return nil
})
// bootNode := components.NewBootNode()
// g.Go(func() error {
// if err := bootNode.Start(ctx); err != nil {
// return errors.Wrap(err, "failed to start bootnode")
// }
// return nil
// })
// Beacon nodes.
beaconNodes := components.NewBeaconNodes(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Node, bootNode}); err != nil {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{ /*eth1Node, bootNode*/ }); err != nil {
return errors.Wrap(err, "beacon nodes require ETH1 and boot node to run")
}
beaconNodes.SetENR(bootNode.ENR())
// beaconNodes.SetENR(bootNode.ENR())
if err := beaconNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start beacon nodes")
}
@@ -139,48 +139,48 @@ func (r *testRunner) run() {
})
}
if multiClientActive {
lighthouseNodes = components.NewLighthouseBeaconNodes(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Node, bootNode, beaconNodes}); err != nil {
return errors.Wrap(err, "lighthouse beacon nodes require ETH1 and boot node to run")
}
lighthouseNodes.SetENR(bootNode.ENR())
if err := lighthouseNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start lighthouse beacon nodes")
}
return nil
})
}
// if multiClientActive {
// lighthouseNodes = components.NewLighthouseBeaconNodes(config)
// g.Go(func() error {
// if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Node, bootNode, beaconNodes}); err != nil {
// return errors.Wrap(err, "lighthouse beacon nodes require ETH1 and boot node to run")
// }
// lighthouseNodes.SetENR(bootNode.ENR())
// if err := lighthouseNodes.Start(ctx); err != nil {
// return errors.Wrap(err, "failed to start lighthouse beacon nodes")
// }
// return nil
// })
// }
// Validator nodes.
validatorNodes := components.NewValidatorNodeSet(config)
g.Go(func() error {
comps := []e2etypes.ComponentRunner{beaconNodes}
if config.UseWeb3RemoteSigner {
comps = append(comps, web3RemoteSigner)
}
if err := helpers.ComponentsStarted(ctx, comps); err != nil {
return errors.Wrap(err, "validator nodes require beacon nodes to run")
}
if err := validatorNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start validator nodes")
}
return nil
})
// validatorNodes := components.NewValidatorNodeSet(config)
// g.Go(func() error {
// comps := []e2etypes.ComponentRunner{beaconNodes}
// if config.UseWeb3RemoteSigner {
// comps = append(comps, web3RemoteSigner)
// }
// if err := helpers.ComponentsStarted(ctx, comps); err != nil {
// return errors.Wrap(err, "validator nodes require beacon nodes to run")
// }
// if err := validatorNodes.Start(ctx); err != nil {
// return errors.Wrap(err, "failed to start validator nodes")
// }
// return nil
// })
if multiClientActive {
// Lighthouse Validator nodes.
lighthouseValidatorNodes = components.NewLighthouseValidatorNodeSet(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{keyGen, lighthouseNodes}); err != nil {
return errors.Wrap(err, "validator nodes require beacon nodes to run")
}
if err := lighthouseValidatorNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start validator nodes")
}
return nil
})
}
// if multiClientActive {
// // Lighthouse Validator nodes.
// lighthouseValidatorNodes = components.NewLighthouseValidatorNodeSet(config)
// g.Go(func() error {
// if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{keyGen, lighthouseNodes}); err != nil {
// return errors.Wrap(err, "validator nodes require beacon nodes to run")
// }
// if err := lighthouseValidatorNodes.Start(ctx); err != nil {
// return errors.Wrap(err, "failed to start validator nodes")
// }
// return nil
// })
// }
// Run E2E evaluators and tests.
g.Go(func() error {
@@ -192,7 +192,8 @@ func (r *testRunner) run() {
// Wait for all required nodes to start.
requiredComponents := []e2etypes.ComponentRunner{
tracingSink, eth1Node, bootNode, beaconNodes, validatorNodes,
beaconNodes,
// tracingSink, eth1Node, bootNode, beaconNodes, validatorNodes,
}
if multiClientActive {
requiredComponents = append(requiredComponents, []e2etypes.ComponentRunner{keyGen, lighthouseNodes, lighthouseValidatorNodes}...)
@@ -224,7 +225,8 @@ func (r *testRunner) run() {
if config.TestDeposits {
log.Info("Running deposit tests")
r.testDeposits(ctx, g, eth1Node, []e2etypes.ComponentRunner{beaconNodes})
panic("bad")
// r.testDeposits(ctx, g, eth1Node, []e2etypes.ComponentRunner{beaconNodes})
}
// Create GRPC connection to beacon nodes.
@@ -247,12 +249,12 @@ func (r *testRunner) run() {
if !config.TestSync {
return nil
}
if err := r.testBeaconChainSync(ctx, g, conns, tickingStartTime, bootNode.ENR()); err != nil {
return errors.Wrap(err, "beacon chain sync test failed")
}
if err := r.testDoppelGangerProtection(ctx); err != nil {
return errors.Wrap(err, "doppel ganger protection check failed")
}
// if err := r.testBeaconChainSync(ctx, g, conns, tickingStartTime, bootNode.ENR()); err != nil {
// return errors.Wrap(err, "beacon chain sync test failed")
// }
// if err := r.testDoppelGangerProtection(ctx); err != nil {
// return errors.Wrap(err, "doppel ganger protection check failed")
// }
return nil
})
@@ -268,12 +270,12 @@ func (r *testRunner) run() {
// waitForChainStart allows to wait up until beacon nodes are started.
func (r *testRunner) waitForChainStart() {
// Sleep depending on the count of validators, as generating the genesis state could take some time.
time.Sleep(time.Duration(params.BeaconConfig().GenesisDelay) * time.Second)
// time.Sleep(time.Duration(params.BeaconConfig().GenesisDelay) * time.Second)
beaconLogFile, err := os.Open(path.Join(e2e.TestParams.LogPath, fmt.Sprintf(e2e.BeaconNodeLogFileName, 0)))
require.NoError(r.t, err)
r.t.Run("chain started", func(t *testing.T) {
require.NoError(t, helpers.WaitForTextInFile(beaconLogFile, "Chain started in sync service"), "Chain did not start")
r.t.Run("chain synced", func(t *testing.T) {
require.NoError(t, helpers.WaitForTextInFile(beaconLogFile, "Synced up to slot "), "Chain did not sync")
})
}

View File

@@ -60,7 +60,7 @@ func DeleteAndCreateFile(tmpPath, fileName string) (*os.File, error) {
// WaitForTextInFile checks a file every polling interval for the text requested.
func WaitForTextInFile(file *os.File, text string) error {
d := time.Now().Add(maxPollingWaitTime)
d := time.Now().Add(30 * time.Minute)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
@@ -168,11 +168,11 @@ func LogOutput(t *testing.T) {
}
LogErrorOutput(t, beaconLogFile, "beacon chain node", i)
validatorLogFile, err := os.Open(path.Join(e2e.TestParams.LogPath, fmt.Sprintf(e2e.ValidatorLogFileName, i)))
if err != nil {
t.Fatal(err)
}
LogErrorOutput(t, validatorLogFile, "validator client", i)
// validatorLogFile, err := os.Open(path.Join(e2e.TestParams.LogPath, fmt.Sprintf(e2e.ValidatorLogFileName, i)))
// if err != nil {
// t.Fatal(err)
// }
// LogErrorOutput(t, validatorLogFile, "validator client", i)
}
t.Logf("Ending time: %s\n", time.Now().String())
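WaitForTextInFile, whose deadline is raised above, simply rescans a log file on an interval until the wanted text shows up or the deadline passes; a standalone sketch of that shape (names and intervals are illustrative, not the helper's exact implementation) is:
package e2esketch
import (
	"bufio"
	"context"
	"fmt"
	"io"
	"os"
	"strings"
	"time"
)
// waitForTextInFile rescans the file from the beginning on every tick until
// the wanted text appears or the deadline elapses.
func waitForTextInFile(f *os.File, text string, deadline time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), deadline)
	defer cancel()
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("timed out waiting for %q in %s", text, f.Name())
		case <-ticker.C:
			if _, err := f.Seek(0, io.SeekStart); err != nil {
				return err
			}
			scanner := bufio.NewScanner(f)
			for scanner.Scan() {
				if strings.Contains(scanner.Text(), text) {
					return nil
				}
			}
			if err := scanner.Err(); err != nil {
				return err
			}
		}
	}
}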

View File

@@ -6,9 +6,12 @@ import (
"strconv"
"testing"
cmdshared "github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
ev "github.com/prysmaticlabs/prysm/testing/endtoend/evaluators"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
e2eParams "github.com/prysmaticlabs/prysm/testing/endtoend/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/types"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -18,8 +21,78 @@ func TestEndToEnd_MainnetConfig(t *testing.T) {
e2eMainnet(t, false /*usePrysmSh*/)
}
func TestEndToEnd_MainnetConfig_Prater(t *testing.T) {
// params.UseE2EMainnetConfig()
require.NoError(t, e2eParams.Init(1))
// Run for 10 epochs if not in long-running to confirm long-running has no issues.
var err error
epochsToRun := 10
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
if longRunning {
epochsToRun, err = strconv.Atoi(epochStr)
require.NoError(t, err)
}
evals := []types.Evaluator{
// ev.PeersConnect,
// ev.HealthzCheck,
// ev.MetricsCheck,
// ev.ValidatorsAreActive,
// ev.ValidatorsParticipatingAtEpoch(2),
// ev.FinalizationOccurs(3),
// ev.ProposeVoluntaryExit,
// ev.ValidatorHasExited,
// ev.ColdStateCheckpoint,
// ev.ForkTransition,
// ev.APIMiddlewareVerifyIntegrity,
// ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
// ev.AllNodesHaveSameHead,
}
index := 0
testConfig := &types.E2EConfig{
BeaconFlags: []string{
"--" + cmdshared.DataDirFlag.Name + "=/tmp/beacon-chain-data",
// fmt.Sprintf("--%s=%s/eth2-beacon-node-%d", cmdshared.DataDirFlag.Name, e2e.TestParams.TestPath, index),
// fmt.Sprintf("--%s=%s", cmdshared.LogFileName.Name, stdOutFile.Name()),
// fmt.Sprintf("--%s=%s", flags.DepositContractFlag.Name, e2e.TestParams.ContractAddress.Hex()),
fmt.Sprintf("--%s=%d", flags.RPCPort.Name, e2e.TestParams.BeaconNodeRPCPort+index),
fmt.Sprintf("--%s=https://goerli.infura.io/v3/c0c32ccb156047038de5b28f343313bd", flags.HTTPWeb3ProviderFlag.Name),
fmt.Sprintf("--%s=%d", flags.MinSyncPeers.Name, e2e.TestParams.BeaconNodeCount-1),
fmt.Sprintf("--%s=%d", cmdshared.P2PUDPPort.Name, e2e.TestParams.BeaconNodeRPCPort+index+e2e.PrysmBeaconUDPOffset),
fmt.Sprintf("--%s=%d", cmdshared.P2PTCPPort.Name, e2e.TestParams.BeaconNodeRPCPort+index+e2e.PrysmBeaconTCPOffset),
// fmt.Sprintf("--%s=%d", cmdshared.P2PMaxPeers.Name, expectedNumOfPeers),
fmt.Sprintf("--%s=%d", flags.MonitoringPortFlag.Name, e2e.TestParams.BeaconNodeMetricsPort+index),
fmt.Sprintf("--%s=%d", flags.GRPCGatewayPort.Name, e2e.TestParams.BeaconNodeRPCPort+index+e2e.PrysmBeaconGatewayOffset),
// fmt.Sprintf("--%s=%d", flags.ContractDeploymentBlock.Name, 0),
// fmt.Sprintf("--%s=%d", flags.MinPeersPerSubnet.Name, 0),
fmt.Sprintf("--%s=%d", cmdshared.RPCMaxPageSizeFlag.Name, params.BeaconConfig().MinGenesisActiveValidatorCount),
// fmt.Sprintf("--%s=%s", cmdshared.BootstrapNode.Name, enr),
fmt.Sprintf("--%s=%s", cmdshared.VerbosityFlag.Name, "debug"),
// "--" + cmdshared.ForceClearDB.Name,
// "--" + cmdshared.E2EConfigFlag.Name,
"--" + cmdshared.AcceptTosFlag.Name,
"--" + flags.EnableDebugRPCEndpoints.Name,
"--prater",
"--dev",
},
ValidatorFlags: []string{},
EpochsToRun: uint64(epochsToRun),
TestSync: true,
TestDeposits: false,
UseFixedPeerIDs: false,
UseValidatorCrossClient: false,
UsePrysmShValidator: false,
UsePprof: false,
NoModifyBeaconFlags: true,
Evaluators: evals,
}
newTestRunner(t, testConfig).run()
}
func e2eMainnet(t *testing.T, usePrysmSh bool) {
params.UseE2EMainnetConfig()
// params.UseE2EMainnetConfig()
require.NoError(t, e2eParams.InitMultiClient(e2eParams.StandardBeaconCount, e2eParams.StandardLighthouseNodeCount))
// Run for 10 epochs if not in long-running to confirm long-running has no issues.

View File

@@ -14,6 +14,7 @@ type E2EConfig struct {
TestSync bool
UsePrysmShValidator bool
UsePprof bool
NoModifyBeaconFlags bool
UseWeb3RemoteSigner bool
TestDeposits bool
UseFixedPeerIDs bool

View File

@@ -71,6 +71,9 @@ func TestServer_RefreshJWTSecretOnFileChange(t *testing.T) {
defer cancel()
go srv.refreshAuthTokenFromFileChanges(ctx, authTokenPath)
// Wait for service to be ready.
time.Sleep(time.Millisecond * 250)
// Update the auth token file with a new secret.
require.NoError(t, CreateAuthToken(walletDir, "localhost:7500"))