Scenario End To End Testing (#10696)

* add new changes

* fix it all

* add nicer scenario

* some more cleanup

* restructure tests

* godoc

* skip one scenario

* space

* fix test

* clean up

* fix conflicts

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Author: Nishant Das
Date: 2022-05-20 17:34:10 +08:00
Committed by: GitHub
parent f28b47bd87
commit a984605064
23 changed files with 1248 additions and 406 deletions

View File

@@ -2,13 +2,16 @@ load("@prysm//tools/go:def.bzl", "go_test")
# gazelle:exclude geth_deps.go
# gazelle:exclude mainnet_e2e_test.go
# gazelle:exclude multiclient_e2e_test.go
# gazelle:exclude mainnet_scenario_e2e_test.go
# gazelle:exclude minimal_scenario_e2e_test.go
go_test(
name = "go_default_test",
size = "large",
testonly = True,
srcs = [
"component_handler_test.go",
"endtoend_setup_test.go",
"endtoend_test.go",
"minimal_e2e_test.go",
"minimal_slashing_e2e_test.go",
@@ -66,6 +69,8 @@ go_test(
size = "large",
testonly = True,
srcs = [
"component_handler_test.go",
"endtoend_setup_test.go",
"endtoend_test.go",
"mainnet_e2e_test.go",
],
@@ -119,13 +124,14 @@ go_test(
)
go_test(
name = "go_multiclient_test",
name = "go_mainnet_scenario_test",
size = "large",
testonly = True,
srcs = [
"component_handler_test.go",
"endtoend_setup_test.go",
"endtoend_test.go",
"mainnet_e2e_test.go",
"multiclient_e2e_test.go",
"mainnet_scenario_e2e_test.go",
],
args = ["-test.v"],
data = [
@@ -143,8 +149,67 @@ go_test(
"exclusive",
"mainnet",
"manual",
"multiclient",
"requires-network",
"scenario",
],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//build/bazel:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/endtoend/components:go_default_library",
"//testing/endtoend/components/eth1:go_default_library",
"//testing/endtoend/evaluators:go_default_library",
"//testing/endtoend/helpers:go_default_library",
"//testing/endtoend/params:go_default_library",
"//testing/endtoend/types:go_default_library",
"//testing/require:go_default_library",
"//testing/slasher/simulator:go_default_library",
"//testing/util:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)
go_test(
name = "go_minimal_scenario_test",
size = "large",
testonly = True,
srcs = [
"component_handler_test.go",
"endtoend_setup_test.go",
"endtoend_test.go",
"minimal_scenario_e2e_test.go",
],
args = ["-test.v"],
data = [
"//:prysm_sh",
"//cmd/beacon-chain",
"//cmd/validator",
"//config/params:custom_configs",
"//tools/bootnode",
"@com_github_ethereum_go_ethereum//cmd/geth",
"@web3signer",
],
eth_network = "minimal",
shard_count = 2,
tags = [
"exclusive",
"manual",
"minimal",
"requires-network",
"scenario",
],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",

View File

@@ -0,0 +1,203 @@
package endtoend
import (
"context"
"testing"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/components"
"github.com/prysmaticlabs/prysm/testing/endtoend/components/eth1"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
e2etypes "github.com/prysmaticlabs/prysm/testing/endtoend/types"
"golang.org/x/sync/errgroup"
)
type componentHandler struct {
t *testing.T
cfg *e2etypes.E2EConfig
ctx context.Context
done func()
group *errgroup.Group
keygen e2etypes.ComponentRunner
tracingSink e2etypes.ComponentRunner
web3Signer e2etypes.ComponentRunner
bootnode e2etypes.ComponentRunner
eth1Miner e2etypes.ComponentRunner
eth1Nodes e2etypes.MultipleComponentRunners
beaconNodes e2etypes.MultipleComponentRunners
validatorNodes e2etypes.MultipleComponentRunners
lighthouseBeaconNodes e2etypes.MultipleComponentRunners
lighthouseValidatorNodes e2etypes.MultipleComponentRunners
}
func NewComponentHandler(cfg *e2etypes.E2EConfig, t *testing.T) *componentHandler {
return &componentHandler{cfg: cfg, t: t}
}
func (c *componentHandler) setup() {
t, config := c.t, c.cfg
t.Logf("Shard index: %d\n", e2e.TestParams.TestShardIndex)
t.Logf("Starting time: %s\n", time.Now().String())
t.Logf("Log Path: %s\n", e2e.TestParams.LogPath)
minGenesisActiveCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
multiClientActive := e2e.TestParams.LighthouseBeaconNodeCount > 0
var keyGen e2etypes.ComponentRunner
var lighthouseValidatorNodes e2etypes.MultipleComponentRunners
var lighthouseNodes *components.LighthouseBeaconNodeSet
c.ctx, c.done = context.WithCancel(context.Background())
g, ctx := errgroup.WithContext(c.ctx)
tracingSink := components.NewTracingSink(config.TracingSinkEndpoint)
g.Go(func() error {
return tracingSink.Start(ctx)
})
c.tracingSink = tracingSink
if multiClientActive {
keyGen = components.NewKeystoreGenerator()
// Generate lighthouse keystores.
g.Go(func() error {
return keyGen.Start(ctx)
})
c.keygen = keyGen
}
var web3RemoteSigner *components.Web3RemoteSigner
if config.UseWeb3RemoteSigner {
web3RemoteSigner = components.NewWeb3RemoteSigner()
g.Go(func() error {
if err := web3RemoteSigner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start web3 remote signer")
}
return nil
})
c.web3Signer = web3RemoteSigner
}
// Boot node.
bootNode := components.NewBootNode()
g.Go(func() error {
if err := bootNode.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start bootnode")
}
return nil
})
c.bootnode = bootNode
// ETH1 miner.
eth1Miner := eth1.NewMiner()
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{bootNode}); err != nil {
return errors.Wrap(err, "sending and mining deposits require ETH1 nodes to run")
}
eth1Miner.SetBootstrapENR(bootNode.ENR())
if err := eth1Miner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start the ETH1 miner")
}
return nil
})
c.eth1Miner = eth1Miner
// ETH1 non-mining nodes.
eth1Nodes := eth1.NewNodeSet()
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Miner}); err != nil {
return errors.Wrap(err, "sending and mining deposits require ETH1 nodes to run")
}
eth1Nodes.SetMinerENR(eth1Miner.ENR())
if err := eth1Nodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start ETH1 nodes")
}
return nil
})
c.eth1Nodes = eth1Nodes
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes}); err != nil {
return errors.Wrap(err, "sending and mining deposits require ETH1 nodes to run")
}
if err := components.SendAndMineDeposits(eth1Miner.KeystorePath(), minGenesisActiveCount, 0, true /* partial */); err != nil {
return errors.Wrap(err, "failed to send and mine deposits")
}
return nil
})
// Beacon nodes.
beaconNodes := components.NewBeaconNodes(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes, bootNode}); err != nil {
return errors.Wrap(err, "beacon nodes require ETH1 and boot node to run")
}
beaconNodes.SetENR(bootNode.ENR())
if err := beaconNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start beacon nodes")
}
return nil
})
c.beaconNodes = beaconNodes
if multiClientActive {
lighthouseNodes = components.NewLighthouseBeaconNodes(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes, bootNode, beaconNodes}); err != nil {
return errors.Wrap(err, "lighthouse beacon nodes require ETH1 and boot node to run")
}
lighthouseNodes.SetENR(bootNode.ENR())
if err := lighthouseNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start lighthouse beacon nodes")
}
return nil
})
c.lighthouseBeaconNodes = lighthouseNodes
}
// Validator nodes.
validatorNodes := components.NewValidatorNodeSet(config)
g.Go(func() error {
comps := []e2etypes.ComponentRunner{beaconNodes}
if config.UseWeb3RemoteSigner {
comps = append(comps, web3RemoteSigner)
}
if err := helpers.ComponentsStarted(ctx, comps); err != nil {
return errors.Wrap(err, "validator nodes require components to run")
}
if err := validatorNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start validator nodes")
}
return nil
})
c.validatorNodes = validatorNodes
if multiClientActive {
// Lighthouse Validator nodes.
lighthouseValidatorNodes = components.NewLighthouseValidatorNodeSet(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{keyGen, lighthouseNodes}); err != nil {
return errors.Wrap(err, "validator nodes require beacon nodes to run")
}
if err := lighthouseValidatorNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start validator nodes")
}
return nil
})
c.lighthouseValidatorNodes = lighthouseValidatorNodes
}
c.group = g
}
func (c *componentHandler) required() []e2etypes.ComponentRunner {
multiClientActive := e2e.TestParams.LighthouseBeaconNodeCount > 0
requiredComponents := []e2etypes.ComponentRunner{
c.tracingSink, c.eth1Nodes, c.bootnode, c.beaconNodes, c.validatorNodes,
}
if multiClientActive {
requiredComponents = append(requiredComponents, []e2etypes.ComponentRunner{c.keygen, c.lighthouseBeaconNodes, c.lighthouseValidatorNodes}...)
}
return requiredComponents
}
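
The componentHandler above threads every E2E component through a single errgroup: each goroutine blocks on helpers.ComponentsStarted for its dependencies before launching its own process, and the test tears everything down by cancelling the shared context. The standalone sketch below (hypothetical names, standard library plus golang.org/x/sync/errgroup only, not Prysm's actual API) illustrates that start-when-dependencies-are-ready pattern.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// component mimics the e2etypes.ComponentRunner idea: Start blocks until the
// context is cancelled, and Started is closed once the component is usable.
type component struct {
	name    string
	started chan struct{}
}

func newComponent(name string) *component {
	return &component{name: name, started: make(chan struct{})}
}

func (c *component) Start(ctx context.Context) error {
	fmt.Println("starting", c.name)
	time.Sleep(100 * time.Millisecond) // pretend boot-up work
	close(c.started)                   // mark as ready, mirroring close(node.started)
	<-ctx.Done()                       // block until the test tears everything down
	return nil
}

func (c *component) Started() <-chan struct{} { return c.started }

// waitStarted is a tiny stand-in for helpers.ComponentsStarted.
func waitStarted(ctx context.Context, deps ...*component) error {
	for _, d := range deps {
		select {
		case <-d.Started():
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}

func main() {
	ctx, done := context.WithCancel(context.Background())
	g, ctx := errgroup.WithContext(ctx)

	bootnode := newComponent("bootnode")
	beacon := newComponent("beacon-node")

	g.Go(func() error { return bootnode.Start(ctx) })
	g.Go(func() error {
		// The beacon node refuses to start until the bootnode is ready.
		if err := waitStarted(ctx, bootnode); err != nil {
			return err
		}
		return beacon.Start(ctx)
	})
	g.Go(func() error {
		// "Evaluator" goroutine: once everything is up, finish and cancel the rest.
		if err := waitStarted(ctx, bootnode, beacon); err != nil {
			return err
		}
		fmt.Println("all components started, shutting down")
		done()
		return nil
	})
	_ = g.Wait() // a cancelled context is the expected outcome here
}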

View File

@@ -4,15 +4,16 @@ package components
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"path"
"strconv"
"strings"
"syscall"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/pkg/errors"
cmdshared "github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/features"
@@ -24,12 +25,14 @@ import (
var _ e2etypes.ComponentRunner = (*BeaconNode)(nil)
var _ e2etypes.ComponentRunner = (*BeaconNodeSet)(nil)
var _ e2etypes.MultipleComponentRunners = (*BeaconNodeSet)(nil)
var _ e2etypes.BeaconNodeSet = (*BeaconNodeSet)(nil)
// BeaconNodeSet represents set of beacon nodes.
type BeaconNodeSet struct {
e2etypes.ComponentRunner
config *e2etypes.E2EConfig
nodes []e2etypes.ComponentRunner
enr string
ids []string
started chan struct{}
@@ -59,6 +62,7 @@ func (s *BeaconNodeSet) Start(ctx context.Context) error {
for i := 0; i < e2e.TestParams.BeaconNodeCount; i++ {
nodes[i] = NewBeaconNode(s.config, i, s.enr)
}
s.nodes = nodes
// Wait for all nodes to finish their job (blocking).
// Once nodes are ready passed in handler function will be called.
@@ -79,6 +83,60 @@ func (s *BeaconNodeSet) Started() <-chan struct{} {
return s.started
}
// Pause pauses the component and its underlying process.
func (s *BeaconNodeSet) Pause() error {
for _, n := range s.nodes {
if err := n.Pause(); err != nil {
return err
}
}
return nil
}
// Resume resumes the component and its underlying process.
func (s *BeaconNodeSet) Resume() error {
for _, n := range s.nodes {
if err := n.Resume(); err != nil {
return err
}
}
return nil
}
// Stop stops the component and its underlying process.
func (s *BeaconNodeSet) Stop() error {
for _, n := range s.nodes {
if err := n.Stop(); err != nil {
return err
}
}
return nil
}
// PauseAtIndex pauses the component and its underlying process at the desired index.
func (s *BeaconNodeSet) PauseAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Pause()
}
// ResumeAtIndex resumes the component and its underlying process at the desired index.
func (s *BeaconNodeSet) ResumeAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Resume()
}
// StopAtIndex stops the component and its underlying process at the desired index.
func (s *BeaconNodeSet) StopAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Stop()
}
// BeaconNode represents beacon node.
type BeaconNode struct {
e2etypes.ComponentRunner
@@ -87,6 +145,7 @@ type BeaconNode struct {
index int
enr string
peerID string
cmd *exec.Cmd
}
// NewBeaconNode creates and returns a beacon node.
@@ -196,6 +255,7 @@ func (node *BeaconNode) Start(ctx context.Context) error {
// Mark node as ready.
close(node.started)
node.cmd = cmd
return cmd.Wait()
}
@@ -203,3 +263,18 @@ func (node *BeaconNode) Start(ctx context.Context) error {
func (node *BeaconNode) Started() <-chan struct{} {
return node.started
}
// Pause pauses the component and its underlying process.
func (node *BeaconNode) Pause() error {
return node.cmd.Process.Signal(syscall.SIGSTOP)
}
// Resume resumes the component and its underlying process.
func (node *BeaconNode) Resume() error {
return node.cmd.Process.Signal(syscall.SIGCONT)
}
// Stop stops the component and its underlying process.
func (node *BeaconNode) Stop() error {
return node.cmd.Process.Kill()
}
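
Pause, Resume and Stop are implemented by keeping the *exec.Cmd from Start and signalling its process: SIGSTOP freezes it, SIGCONT wakes it, Kill ends it. Below is a minimal sketch of that mechanism, assuming a POSIX system with a sleep binary on PATH; the names are illustrative, not Prysm code.

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// pausableProcess is a minimal stand-in for a component that keeps its
// *exec.Cmd around so it can later be paused, resumed, or killed.
type pausableProcess struct{ cmd *exec.Cmd }

func (p *pausableProcess) Pause() error  { return p.cmd.Process.Signal(syscall.SIGSTOP) }
func (p *pausableProcess) Resume() error { return p.cmd.Process.Signal(syscall.SIGCONT) }
func (p *pausableProcess) Stop() error   { return p.cmd.Process.Kill() }

func main() {
	// Assumes a POSIX system with a `sleep` binary on PATH.
	cmd := exec.Command("sleep", "30")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	p := &pausableProcess{cmd: cmd}

	fmt.Println("pausing child:", p.Pause()) // SIGSTOP freezes the process
	time.Sleep(time.Second)
	fmt.Println("resuming child:", p.Resume()) // SIGCONT lets it run again
	fmt.Println("stopping child:", p.Stop())   // SIGKILL tears it down
	_ = cmd.Wait()                             // reap the child; returns "signal: killed"
}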

View File

@@ -7,6 +7,7 @@ import (
"os"
"os/exec"
"strings"
"syscall"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
@@ -21,6 +22,7 @@ type BootNode struct {
e2etypes.ComponentRunner
started chan struct{}
enr string
cmd *exec.Cmd
}
// NewBootNode creates and returns boot node.
@@ -74,6 +76,7 @@ func (node *BootNode) Start(ctx context.Context) error {
// Mark node as ready.
close(node.started)
node.cmd = cmd
return cmd.Wait()
}
@@ -83,6 +86,21 @@ func (node *BootNode) Started() <-chan struct{} {
return node.started
}
// Pause pauses the component and its underlying process.
func (node *BootNode) Pause() error {
return node.cmd.Process.Signal(syscall.SIGSTOP)
}
// Resume resumes the component and its underlying process.
func (node *BootNode) Resume() error {
return node.cmd.Process.Signal(syscall.SIGCONT)
}
// Stop stops the component and its underlying process.
func (node *BootNode) Stop() error {
return node.cmd.Process.Kill()
}
func enrFromLogFile(name string) (string, error) {
byteContent, err := os.ReadFile(name) // #nosec G304
if err != nil {

View File

@@ -24,6 +24,7 @@ const staticFilesPath = "/testing/endtoend/static-files/eth1"
const timeGapPerMiningTX = 250 * time.Millisecond
var _ e2etypes.ComponentRunner = (*NodeSet)(nil)
var _ e2etypes.MultipleComponentRunners = (*NodeSet)(nil)
var _ e2etypes.ComponentRunner = (*Miner)(nil)
var _ e2etypes.ComponentRunner = (*Node)(nil)

View File

@@ -9,6 +9,7 @@ import (
"os/exec"
"path"
"strings"
"syscall"
"time"
"github.com/bazelbuild/rules_go/go/tools/bazel"
@@ -33,6 +34,7 @@ type Miner struct {
bootstrapEnr string
enr string
keystorePath string
cmd *exec.Cmd
}
// NewMiner creates and returns an ETH1 node miner.
@@ -227,6 +229,7 @@ func (m *Miner) Start(ctx context.Context) error {
// Mark node as ready.
close(m.started)
m.cmd = runCmd
return runCmd.Wait()
}
@@ -235,6 +238,21 @@ func (m *Miner) Started() <-chan struct{} {
return m.started
}
// Pause pauses the component and its underlying process.
func (m *Miner) Pause() error {
return m.cmd.Process.Signal(syscall.SIGSTOP)
}
// Resume resumes the component and its underlying process.
func (m *Miner) Resume() error {
return m.cmd.Process.Signal(syscall.SIGCONT)
}
// Stop kills the component and its underlying process.
func (m *Miner) Stop() error {
return m.cmd.Process.Kill()
}
func enodeFromLogFile(name string) (string, error) {
byteContent, err := os.ReadFile(name) // #nosec G304
if err != nil {

View File

@@ -8,6 +8,7 @@ import (
"path"
"strconv"
"strings"
"syscall"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/pkg/errors"
@@ -23,6 +24,7 @@ type Node struct {
started chan struct{}
index int
enr string
cmd *exec.Cmd
}
// NewNode creates and returns ETH1 node.
@@ -113,6 +115,7 @@ func (node *Node) Start(ctx context.Context) error {
// Mark node as ready.
close(node.started)
node.cmd = runCmd
return runCmd.Wait()
}
@@ -121,3 +124,18 @@ func (node *Node) Start(ctx context.Context) error {
func (node *Node) Started() <-chan struct{} {
return node.started
}
// Pause pauses the component and its underlying process.
func (node *Node) Pause() error {
return node.cmd.Process.Signal(syscall.SIGSTOP)
}
// Resume resumes the component and its underlying process.
func (node *Node) Resume() error {
return node.cmd.Process.Signal(syscall.SIGCONT)
}
// Stop kills the component and its underlying process.
func (node *Node) Stop() error {
return node.cmd.Process.Kill()
}

View File

@@ -3,6 +3,7 @@ package eth1
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
e2etypes "github.com/prysmaticlabs/prysm/testing/endtoend/types"
@@ -13,6 +14,7 @@ type NodeSet struct {
e2etypes.ComponentRunner
started chan struct{}
enr string
nodes []e2etypes.ComponentRunner
}
// NewNodeSet creates and returns a set of Eth1 nodes.
@@ -40,6 +42,7 @@ func (s *NodeSet) Start(ctx context.Context) error {
node := NewNode(i+1, s.enr)
nodes[i] = node
}
s.nodes = nodes
// Wait for all nodes to finish their job (blocking).
// Once nodes are ready passed in handler function will be called.
@@ -53,3 +56,57 @@ func (s *NodeSet) Start(ctx context.Context) error {
func (s *NodeSet) Started() <-chan struct{} {
return s.started
}
// Pause pauses the component and its underlying process.
func (s *NodeSet) Pause() error {
for _, n := range s.nodes {
if err := n.Pause(); err != nil {
return err
}
}
return nil
}
// Resume resumes the component and its underlying process.
func (s *NodeSet) Resume() error {
for _, n := range s.nodes {
if err := n.Resume(); err != nil {
return err
}
}
return nil
}
// Stop stops the component and its underlying process.
func (s *NodeSet) Stop() error {
for _, n := range s.nodes {
if err := n.Stop(); err != nil {
return err
}
}
return nil
}
// PauseAtIndex pauses the component and its underlying process at the desired index.
func (s *NodeSet) PauseAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Pause()
}
// ResumeAtIndex resumes the component and its underlying process at the desired index.
func (s *NodeSet) ResumeAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Resume()
}
// StopAtIndex stops the component and its underlying process at the desired index.
func (s *NodeSet) StopAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Stop()
}

View File

@@ -27,6 +27,7 @@ type TransactionGenerator struct {
keystore string
seed int64
started chan struct{}
cancel context.CancelFunc
}
func NewTransactionGenerator(keystore string, seed int64) *TransactionGenerator {
@@ -34,6 +35,10 @@ func NewTransactionGenerator(keystore string, seed int64) *TransactionGenerator
}
func (t *TransactionGenerator) Start(ctx context.Context) error {
// Wrap context with a cancel func
ctx, ccl := context.WithCancel(ctx)
t.cancel = ccl
client, err := rpc.DialHTTP(fmt.Sprintf("http://127.0.0.1:%d", e2e.TestParams.Ports.Eth1RPCPort))
if err != nil {
return err
@@ -118,3 +123,19 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler
}
return g.Wait()
}
// Pause pauses the component and its underlying process.
func (t *TransactionGenerator) Pause() error {
return nil
}
// Resume resumes the component and its underlying process.
func (t *TransactionGenerator) Resume() error {
return nil
}
// Stop stops the component and its underlying process.
func (t *TransactionGenerator) Stop() error {
t.cancel()
return nil
}
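
Components without a child process, such as the transaction generator here and the tracing sink below, stop themselves by cancelling a context captured at Start time instead of sending signals. A small self-contained sketch of that approach (illustrative names, not the actual component):

package main

import (
	"context"
	"fmt"
	"time"
)

// generator mirrors the TransactionGenerator approach: Start wraps its context
// with a cancel func and stores it, so Stop can end the work loop later.
type generator struct {
	cancel context.CancelFunc
}

func (g *generator) Start(ctx context.Context) error {
	ctx, cancel := context.WithCancel(ctx)
	g.cancel = cancel
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil // Stop was called (or the parent context ended)
		case <-ticker.C:
			fmt.Println("sending a batch of transactions")
		}
	}
}

func (g *generator) Pause() error  { return nil } // no-op, like the real component
func (g *generator) Resume() error { return nil } // no-op
func (g *generator) Stop() error   { g.cancel(); return nil }

func main() {
	g := &generator{}
	go func() { _ = g.Start(context.Background()) }()
	time.Sleep(time.Second)
	_ = g.Stop() // cancels the wrapped context and ends the loop
	time.Sleep(100 * time.Millisecond)
}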

View File

@@ -2,7 +2,6 @@ package components
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
@@ -10,8 +9,10 @@ import (
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/io/file"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
@@ -21,6 +22,7 @@ import (
var _ e2etypes.ComponentRunner = (*LighthouseBeaconNode)(nil)
var _ e2etypes.ComponentRunner = (*LighthouseBeaconNodeSet)(nil)
var _ e2etypes.MultipleComponentRunners = (*LighthouseBeaconNodeSet)(nil)
var _ e2etypes.BeaconNodeSet = (*LighthouseBeaconNodeSet)(nil)
// LighthouseBeaconNodeSet represents set of lighthouse beacon nodes.
@@ -29,6 +31,7 @@ type LighthouseBeaconNodeSet struct {
config *e2etypes.E2EConfig
enr string
started chan struct{}
nodes []e2etypes.ComponentRunner
}
// SetENR assigns ENR to the set of beacon nodes.
@@ -55,6 +58,7 @@ func (s *LighthouseBeaconNodeSet) Start(ctx context.Context) error {
for i := 0; i < e2e.TestParams.LighthouseBeaconNodeCount; i++ {
nodes[i] = NewLighthouseBeaconNode(s.config, i, s.enr)
}
s.nodes = nodes
// Wait for all nodes to finish their job (blocking).
// Once nodes are ready passed in handler function will be called.
@@ -69,6 +73,60 @@ func (s *LighthouseBeaconNodeSet) Started() <-chan struct{} {
return s.started
}
// Pause pauses the component and its underlying process.
func (s *LighthouseBeaconNodeSet) Pause() error {
for _, n := range s.nodes {
if err := n.Pause(); err != nil {
return err
}
}
return nil
}
// Resume resumes the component and its underlying process.
func (s *LighthouseBeaconNodeSet) Resume() error {
for _, n := range s.nodes {
if err := n.Resume(); err != nil {
return err
}
}
return nil
}
// Stop stops the component and its underlying process.
func (s *LighthouseBeaconNodeSet) Stop() error {
for _, n := range s.nodes {
if err := n.Stop(); err != nil {
return err
}
}
return nil
}
// PauseAtIndex pauses the component and its underlying process at the desired index.
func (s *LighthouseBeaconNodeSet) PauseAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Pause()
}
// ResumeAtIndex resumes the component and its underlying process at the desired index.
func (s *LighthouseBeaconNodeSet) ResumeAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Resume()
}
// StopAtIndex stops the component and its underlying process at the desired index.
func (s *LighthouseBeaconNodeSet) StopAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Stop()
}
// LighthouseBeaconNode represents a lighthouse beacon node.
type LighthouseBeaconNode struct {
e2etypes.ComponentRunner
@@ -76,6 +134,7 @@ type LighthouseBeaconNode struct {
started chan struct{}
index int
enr string
cmd *exec.Cmd
}
// NewLighthouseBeaconNode creates and returns a lighthouse beacon node.
@@ -164,6 +223,7 @@ func (node *LighthouseBeaconNode) Start(ctx context.Context) error {
// Mark node as ready.
close(node.started)
node.cmd = cmd
return cmd.Wait()
}
@@ -173,6 +233,21 @@ func (node *LighthouseBeaconNode) Started() <-chan struct{} {
return node.started
}
// Pause pauses the component and its underlying process.
func (node *LighthouseBeaconNode) Pause() error {
return node.cmd.Process.Signal(syscall.SIGSTOP)
}
// Resume resumes the component and its underlying process.
func (node *LighthouseBeaconNode) Resume() error {
return node.cmd.Process.Signal(syscall.SIGCONT)
}
// Stop stops the component and its underlying process.
func (node *LighthouseBeaconNode) Stop() error {
return node.cmd.Process.Kill()
}
func (node *LighthouseBeaconNode) createTestnetDir(index int) (string, error) {
testNetDir := e2e.TestParams.TestPath + fmt.Sprintf("/lighthouse-testnet-%d", index)
configPath := filepath.Join(testNetDir, "config.yaml")

View File

@@ -9,6 +9,7 @@ import (
"path"
"path/filepath"
"strings"
"syscall"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/google/uuid"
@@ -25,12 +26,14 @@ import (
var _ e2etypes.ComponentRunner = (*LighthouseValidatorNode)(nil)
var _ e2etypes.ComponentRunner = (*LighthouseValidatorNodeSet)(nil)
var _ e2etypes.MultipleComponentRunners = (*LighthouseValidatorNodeSet)(nil)
// LighthouseValidatorNodeSet represents set of lighthouse validator nodes.
type LighthouseValidatorNodeSet struct {
e2etypes.ComponentRunner
config *e2etypes.E2EConfig
started chan struct{}
nodes []e2etypes.ComponentRunner
}
// NewLighthouseValidatorNodeSet creates and returns a set of lighthouse validator nodes.
@@ -59,6 +62,7 @@ func (s *LighthouseValidatorNodeSet) Start(ctx context.Context) error {
offsetIdx := i + prysmBeaconNum
nodes[i] = NewLighthouseValidatorNode(s.config, validatorsPerNode, i, validatorsPerNode*offsetIdx)
}
s.nodes = nodes
// Wait for all nodes to finish their job (blocking).
// Once nodes are ready passed in handler function will be called.
@@ -73,6 +77,60 @@ func (s *LighthouseValidatorNodeSet) Started() <-chan struct{} {
return s.started
}
// Pause pauses the component and its underlying process.
func (s *LighthouseValidatorNodeSet) Pause() error {
for _, n := range s.nodes {
if err := n.Pause(); err != nil {
return err
}
}
return nil
}
// Resume resumes the component and its underlying process.
func (s *LighthouseValidatorNodeSet) Resume() error {
for _, n := range s.nodes {
if err := n.Resume(); err != nil {
return err
}
}
return nil
}
// Stop stops the component and its underlying process.
func (s *LighthouseValidatorNodeSet) Stop() error {
for _, n := range s.nodes {
if err := n.Stop(); err != nil {
return err
}
}
return nil
}
// PauseAtIndex pauses the component and its underlying process at the desired index.
func (s *LighthouseValidatorNodeSet) PauseAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Pause()
}
// ResumeAtIndex resumes the component and its underlying process at the desired index.
func (s *LighthouseValidatorNodeSet) ResumeAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Resume()
}
// StopAtIndex stops the component and its underlying process at the desired index.
func (s *LighthouseValidatorNodeSet) StopAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Stop()
}
// LighthouseValidatorNode represents a lighthouse validator node.
type LighthouseValidatorNode struct {
e2etypes.ComponentRunner
@@ -81,6 +139,7 @@ type LighthouseValidatorNode struct {
validatorNum int
index int
offset int
cmd *exec.Cmd
}
// NewLighthouseValidatorNode creates and returns a lighthouse validator node.
@@ -155,6 +214,7 @@ func (v *LighthouseValidatorNode) Start(ctx context.Context) error {
// Mark node as ready.
close(v.started)
v.cmd = cmd
return cmd.Wait()
}
@@ -164,6 +224,21 @@ func (v *LighthouseValidatorNode) Started() <-chan struct{} {
return v.started
}
// Pause pauses the component and its underlying process.
func (v *LighthouseValidatorNode) Pause() error {
return v.cmd.Process.Signal(syscall.SIGSTOP)
}
// Resume resumes the component and its underlying process.
func (v *LighthouseValidatorNode) Resume() error {
return v.cmd.Process.Signal(syscall.SIGCONT)
}
// Stop stops the component and its underlying process.
func (v *LighthouseValidatorNode) Stop() error {
return v.cmd.Process.Kill()
}
type KeystoreGenerator struct {
started chan struct{}
}
@@ -199,6 +274,24 @@ func (k *KeystoreGenerator) Started() <-chan struct{} {
return k.started
}
// Pause pauses the component and its underlying process.
func (k *KeystoreGenerator) Pause() error {
// no-op
return nil
}
// Resume resumes the component and its underlying process.
func (k *KeystoreGenerator) Resume() error {
// no-op
return nil
}
// Stop stops the component and its underlying process.
func (k *KeystoreGenerator) Stop() error {
// no-op
return nil
}
func setupKeystores(valClientIdx, startIdx, numOfKeys int) (string, error) {
testNetDir := e2e.TestParams.TestPath + fmt.Sprintf("/lighthouse-validator-%d", valClientIdx)
if err := file.MkdirAll(testNetDir); err != nil {

View File

@@ -27,6 +27,7 @@ import (
// the Prysm repository to replay requests to a jaeger collector endpoint. This
// can then be used to visualize the spans themselves in the jaeger UI.
type TracingSink struct {
cancel context.CancelFunc
started chan struct{}
endpoint string
server *http.Server
@@ -45,6 +46,8 @@ func (ts *TracingSink) Start(ctx context.Context) error {
if ts.endpoint == "" {
return errors.New("empty endpoint provided")
}
ctx, cancelF := context.WithCancel(ctx)
ts.cancel = cancelF
go ts.initializeSink(ctx)
close(ts.started)
return nil
@@ -55,6 +58,22 @@ func (ts *TracingSink) Started() <-chan struct{} {
return ts.started
}
// Pause pauses the component and its underlying process.
func (ts *TracingSink) Pause() error {
return nil
}
// Resume resumes the component and its underlying process.
func (ts *TracingSink) Resume() error {
return nil
}
// Stop stops the component and its underlying process.
func (ts *TracingSink) Stop() error {
ts.cancel()
return nil
}
// Initialize an http handler that writes all requests to a file.
func (ts *TracingSink) initializeSink(ctx context.Context) {
mux := &http.ServeMux{}

View File

@@ -9,6 +9,7 @@ import (
"os/exec"
"path"
"strings"
"syscall"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
@@ -35,12 +36,14 @@ const depositGasLimit = 4000000
var _ e2etypes.ComponentRunner = (*ValidatorNode)(nil)
var _ e2etypes.ComponentRunner = (*ValidatorNodeSet)(nil)
var _ e2etypes.MultipleComponentRunners = (*ValidatorNodeSet)(nil)
// ValidatorNodeSet represents set of validator nodes.
type ValidatorNodeSet struct {
e2etypes.ComponentRunner
config *e2etypes.E2EConfig
started chan struct{}
nodes []e2etypes.ComponentRunner
}
// NewValidatorNodeSet creates and returns a set of validator nodes.
@@ -66,6 +69,7 @@ func (s *ValidatorNodeSet) Start(ctx context.Context) error {
for i := 0; i < prysmBeaconNodeNum; i++ {
nodes[i] = NewValidatorNode(s.config, validatorsPerNode, i, validatorsPerNode*i)
}
s.nodes = nodes
// Wait for all nodes to finish their job (blocking).
// Once nodes are ready passed in handler function will be called.
@@ -80,6 +84,60 @@ func (s *ValidatorNodeSet) Started() <-chan struct{} {
return s.started
}
// Pause pauses the component and its underlying process.
func (s *ValidatorNodeSet) Pause() error {
for _, n := range s.nodes {
if err := n.Pause(); err != nil {
return err
}
}
return nil
}
// Resume resumes the component and its underlying process.
func (s *ValidatorNodeSet) Resume() error {
for _, n := range s.nodes {
if err := n.Resume(); err != nil {
return err
}
}
return nil
}
// Stop stops the component and its underlying process.
func (s *ValidatorNodeSet) Stop() error {
for _, n := range s.nodes {
if err := n.Stop(); err != nil {
return err
}
}
return nil
}
// PauseAtIndex pauses the component and its underlying process at the desired index.
func (s *ValidatorNodeSet) PauseAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Pause()
}
// ResumeAtIndex resumes the component and its underlying process at the desired index.
func (s *ValidatorNodeSet) ResumeAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Resume()
}
// StopAtIndex stops the component and its underlying process at the desired index.
func (s *ValidatorNodeSet) StopAtIndex(i int) error {
if i >= len(s.nodes) {
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
}
return s.nodes[i].Stop()
}
// ValidatorNode represents a validator node.
type ValidatorNode struct {
e2etypes.ComponentRunner
@@ -88,6 +146,7 @@ type ValidatorNode struct {
validatorNum int
index int
offset int
cmd *exec.Cmd
}
// NewValidatorNode creates and returns a validator node.
@@ -203,6 +262,7 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
// Mark node as ready.
close(v.started)
v.cmd = cmd
return cmd.Wait()
}
@@ -212,6 +272,21 @@ func (v *ValidatorNode) Started() <-chan struct{} {
return v.started
}
// Pause pauses the component and its underlying process.
func (v *ValidatorNode) Pause() error {
return v.cmd.Process.Signal(syscall.SIGSTOP)
}
// Resume resumes the component and its underlying process.
func (v *ValidatorNode) Resume() error {
return v.cmd.Process.Signal(syscall.SIGCONT)
}
// Stop stops the component and its underlying process.
func (v *ValidatorNode) Stop() error {
return v.cmd.Process.Kill()
}
// SendAndMineDeposits sends the requested amount of deposits and mines the chain after to ensure the deposits are seen.
func SendAndMineDeposits(keystorePath string, validatorNum, offset int, partial bool) error {
client, err := rpc.DialHTTP(fmt.Sprintf("http://127.0.0.1:%d", e2e.TestParams.Ports.Eth1RPCPort))

View File

@@ -12,6 +12,7 @@ import (
"path"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/bazelbuild/rules_go/go/tools/bazel"
@@ -130,6 +131,21 @@ func (w *Web3RemoteSigner) Started() <-chan struct{} {
return w.started
}
// Pause pauses the component and its underlying process.
func (w *Web3RemoteSigner) Pause() error {
return w.cmd.Process.Signal(syscall.SIGSTOP)
}
// Resume resumes the component and its underlying process.
func (w *Web3RemoteSigner) Resume() error {
return w.cmd.Process.Signal(syscall.SIGCONT)
}
// Stop stops the component and its underlying process.
func (w *Web3RemoteSigner) Stop() error {
return w.cmd.Process.Kill()
}
// monitorStart by polling server until it returns a 200 at /upcheck.
func (w *Web3RemoteSigner) monitorStart() {
client := &http.Client{}

View File

@@ -0,0 +1,181 @@
package endtoend
import (
"fmt"
"os"
"strconv"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ev "github.com/prysmaticlabs/prysm/testing/endtoend/evaluators"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2eParams "github.com/prysmaticlabs/prysm/testing/endtoend/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/types"
"github.com/prysmaticlabs/prysm/testing/require"
)
func e2eMinimal(t *testing.T, useWeb3RemoteSigner bool, extraEpochs uint64) *testRunner {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.E2ETestConfig().Copy())
require.NoError(t, e2eParams.Init(e2eParams.StandardBeaconCount))
// Run for 10 epochs when not in long-running mode, to confirm long-running has no issues.
var err error
epochsToRun := 10
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
if longRunning {
epochsToRun, err = strconv.Atoi(epochStr)
require.NoError(t, err)
}
seed := 0
seedStr, isValid := os.LookupEnv("E2E_SEED")
if isValid {
seed, err = strconv.Atoi(seedStr)
require.NoError(t, err)
}
tracingPort := e2eParams.TestParams.Ports.JaegerTracingPort
tracingEndpoint := fmt.Sprintf("127.0.0.1:%d", tracingPort)
evals := []types.Evaluator{
ev.PeersConnect,
ev.HealthzCheck,
ev.MetricsCheck,
ev.ValidatorsAreActive,
ev.ValidatorsParticipatingAtEpoch(2),
ev.FinalizationOccurs(3),
ev.VerifyBlockGraffiti,
ev.PeersCheck,
ev.ProposeVoluntaryExit,
ev.ValidatorHasExited,
ev.ProcessesDepositsInBlocks,
ev.ActivatesDepositedValidators,
ev.DepositedValidatorsAreActive,
ev.ValidatorsVoteWithTheMajority,
ev.ColdStateCheckpoint,
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.ValidatorSyncParticipation,
// ev.TransactionsPresent, TODO: Re-enable the transaction evaluator once its tx pool issues are fixed.
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{
fmt.Sprintf("--slots-per-archive-point=%d", params.BeaconConfig().SlotsPerEpoch*16),
fmt.Sprintf("--tracing-endpoint=http://%s", tracingEndpoint),
"--enable-tracing",
"--trace-sample-fraction=1.0",
},
ValidatorFlags: []string{},
EpochsToRun: uint64(epochsToRun),
TestSync: true,
TestFeature: true,
TestDeposits: true,
UsePrysmShValidator: false,
UsePprof: !longRunning,
UseWeb3RemoteSigner: useWeb3RemoteSigner,
TracingSinkEndpoint: tracingEndpoint,
Evaluators: evals,
EvalInterceptor: defaultInterceptor,
Seed: int64(seed),
ExtraEpochs: extraEpochs,
}
return newTestRunner(t, testConfig)
}
func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool) *testRunner {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.E2EMainnetTestConfig())
if useMultiClient {
require.NoError(t, e2eParams.InitMultiClient(e2eParams.StandardBeaconCount, e2eParams.StandardLighthouseNodeCount))
} else {
require.NoError(t, e2eParams.Init(e2eParams.StandardBeaconCount))
}
// Run for 10 epochs if not in long-running to confirm long-running has no issues.
var err error
epochsToRun := 10
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
if longRunning {
epochsToRun, err = strconv.Atoi(epochStr)
require.NoError(t, err)
}
_, crossClient := os.LookupEnv("RUN_CROSS_CLIENT")
if usePrysmSh {
// If using prysm.sh, run for only 6 epochs.
// TODO(#9166): remove this block once v2 changes are live.
epochsToRun = helpers.AltairE2EForkEpoch - 1
}
seed := 0
seedStr, isValid := os.LookupEnv("E2E_SEED")
if isValid {
seed, err = strconv.Atoi(seedStr)
require.NoError(t, err)
}
tracingPort := e2eParams.TestParams.Ports.JaegerTracingPort
tracingEndpoint := fmt.Sprintf("127.0.0.1:%d", tracingPort)
evals := []types.Evaluator{
ev.PeersConnect,
ev.HealthzCheck,
ev.MetricsCheck,
ev.ValidatorsAreActive,
ev.ValidatorsParticipatingAtEpoch(2),
ev.FinalizationOccurs(3),
ev.ProposeVoluntaryExit,
ev.ValidatorHasExited,
ev.ColdStateCheckpoint,
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
// ev.TransactionsPresent, TODO: Re-enable the transaction evaluator once its tx pool issues are fixed.
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{
fmt.Sprintf("--slots-per-archive-point=%d", params.BeaconConfig().SlotsPerEpoch*16),
fmt.Sprintf("--tracing-endpoint=http://%s", tracingEndpoint),
"--enable-tracing",
"--trace-sample-fraction=1.0",
},
ValidatorFlags: []string{},
EpochsToRun: uint64(epochsToRun),
TestSync: true,
TestFeature: true,
TestDeposits: true,
UseFixedPeerIDs: true,
UseValidatorCrossClient: crossClient,
UsePrysmShValidator: usePrysmSh,
UsePprof: !longRunning,
TracingSinkEndpoint: tracingEndpoint,
Evaluators: evals,
EvalInterceptor: defaultInterceptor,
Seed: int64(seed),
}
return newTestRunner(t, testConfig)
}
func scenarioEvals() []types.Evaluator {
return []types.Evaluator{
ev.PeersConnect,
ev.HealthzCheck,
ev.MetricsCheck,
ev.ValidatorsParticipatingAtEpoch(2),
ev.FinalizationOccurs(3),
ev.PeersCheck,
ev.VerifyBlockGraffiti,
ev.ProposeVoluntaryExit,
ev.ValidatorHasExited,
ev.ColdStateCheckpoint,
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.ValidatorSyncParticipation,
}
}

View File

@@ -48,8 +48,9 @@ func init() {
// testRunner abstracts E2E test configuration and running.
type testRunner struct {
t *testing.T
config *e2etypes.E2EConfig
t *testing.T
config *e2etypes.E2EConfig
comHandler *componentHandler
}
// newTestRunner creates E2E test runner.
@@ -62,236 +63,34 @@ func newTestRunner(t *testing.T, config *e2etypes.E2EConfig) *testRunner {
// run executes configured E2E test.
func (r *testRunner) run() {
t, config := r.t, r.config
t.Logf("Shard index: %d\n", e2e.TestParams.TestShardIndex)
t.Logf("Starting time: %s\n", time.Now().String())
t.Logf("Log Path: %s\n", e2e.TestParams.LogPath)
minGenesisActiveCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
multiClientActive := e2e.TestParams.LighthouseBeaconNodeCount > 0
var keyGen, lighthouseValidatorNodes e2etypes.ComponentRunner
var lighthouseNodes *components.LighthouseBeaconNodeSet
ctx, done := context.WithCancel(context.Background())
g, ctx := errgroup.WithContext(ctx)
tracingSink := components.NewTracingSink(config.TracingSinkEndpoint)
g.Go(func() error {
return tracingSink.Start(ctx)
})
if multiClientActive {
keyGen = components.NewKeystoreGenerator()
// Generate lighthouse keystores.
g.Go(func() error {
return keyGen.Start(ctx)
})
}
var web3RemoteSigner *components.Web3RemoteSigner
if config.UseWeb3RemoteSigner {
web3RemoteSigner = components.NewWeb3RemoteSigner()
g.Go(func() error {
if err := web3RemoteSigner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start web3 remote signer")
}
return nil
})
}
// Boot node.
bootNode := components.NewBootNode()
g.Go(func() error {
if err := bootNode.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start bootnode")
}
return nil
})
// ETH1 miner.
eth1Miner := eth1.NewMiner()
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{bootNode}); err != nil {
return errors.Wrap(err, "sending and mining deposits require ETH1 nodes to run")
}
eth1Miner.SetBootstrapENR(bootNode.ENR())
if err := eth1Miner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start the ETH1 miner")
}
return nil
})
// ETH1 non-mining nodes.
eth1Nodes := eth1.NewNodeSet()
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Miner}); err != nil {
return errors.Wrap(err, "sending and mining deposits require ETH1 nodes to run")
}
eth1Nodes.SetMinerENR(eth1Miner.ENR())
if err := eth1Nodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start ETH1 nodes")
}
return nil
})
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes}); err != nil {
return errors.Wrap(err, "sending and mining deposits require ETH1 nodes to run")
}
if err := components.SendAndMineDeposits(eth1Miner.KeystorePath(), minGenesisActiveCount, 0, true /* partial */); err != nil {
return errors.Wrap(err, "failed to send and mine deposits")
}
return nil
})
// Beacon nodes.
beaconNodes := components.NewBeaconNodes(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes, bootNode}); err != nil {
return errors.Wrap(err, "beacon nodes require ETH1 and boot node to run")
}
beaconNodes.SetENR(bootNode.ENR())
if err := beaconNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start beacon nodes")
}
return nil
})
if multiClientActive {
lighthouseNodes = components.NewLighthouseBeaconNodes(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes, bootNode, beaconNodes}); err != nil {
return errors.Wrap(err, "lighthouse beacon nodes require ETH1 and boot node to run")
}
lighthouseNodes.SetENR(bootNode.ENR())
if err := lighthouseNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start lighthouse beacon nodes")
}
return nil
})
}
// Validator nodes.
validatorNodes := components.NewValidatorNodeSet(config)
g.Go(func() error {
comps := []e2etypes.ComponentRunner{beaconNodes}
if config.UseWeb3RemoteSigner {
comps = append(comps, web3RemoteSigner)
}
if err := helpers.ComponentsStarted(ctx, comps); err != nil {
return errors.Wrap(err, "validator nodes require components to run")
}
if err := validatorNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start validator nodes")
}
return nil
})
if multiClientActive {
// Lighthouse Validator nodes.
lighthouseValidatorNodes = components.NewLighthouseValidatorNodeSet(config)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{keyGen, lighthouseNodes}); err != nil {
return errors.Wrap(err, "validator nodes require beacon nodes to run")
}
if err := lighthouseValidatorNodes.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start validator nodes")
}
return nil
})
}
r.comHandler = NewComponentHandler(r.config, r.t)
r.comHandler.setup()
// Run E2E evaluators and tests.
g.Go(func() error {
// When everything is done, cancel parent context (will stop all spawned nodes).
defer func() {
log.Info("All E2E evaluations are finished, cleaning up")
done()
}()
r.addEvent(r.defaultEndToEndRun)
// Wait for all required nodes to start.
requiredComponents := []e2etypes.ComponentRunner{
tracingSink, eth1Nodes, bootNode, beaconNodes, validatorNodes,
}
if multiClientActive {
requiredComponents = append(requiredComponents, []e2etypes.ComponentRunner{keyGen, lighthouseNodes, lighthouseValidatorNodes}...)
}
ctxAllNodesReady, cancel := context.WithTimeout(ctx, allNodesStartTimeout)
defer cancel()
if err := helpers.ComponentsStarted(ctxAllNodesReady, requiredComponents); err != nil {
return errors.Wrap(err, "components take too long to start")
}
// Since defer unwraps in LIFO order, parent context will be closed only after logs are written.
defer helpers.LogOutput(t)
if config.UsePprof {
defer func() {
log.Info("Writing output pprof files")
for i := 0; i < e2e.TestParams.BeaconNodeCount; i++ {
assert.NoError(t, helpers.WritePprofFiles(e2e.TestParams.LogPath, i))
}
}()
}
// Blocking, wait period varies depending on number of validators.
r.waitForChainStart()
// Failing early in case chain doesn't start.
if t.Failed() {
return errors.New("chain cannot start")
}
r.testDepositsAndTx(ctx, g, eth1Miner.KeystorePath(), []e2etypes.ComponentRunner{beaconNodes})
// Create GRPC connection to beacon nodes.
conns, closeConns, err := helpers.NewLocalConnections(ctx, e2e.TestParams.BeaconNodeCount)
require.NoError(t, err, "Cannot create local connections")
defer closeConns()
// Calculate genesis time.
nodeClient := eth.NewNodeClient(conns[0])
genesis, err := nodeClient.GetGenesis(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
tickingStartTime := helpers.EpochTickerStartTime(genesis)
// Run assigned evaluators.
if err := r.runEvaluators(conns, tickingStartTime); err != nil {
return errors.Wrap(err, "one or more evaluators failed")
}
// If requested, run sync test.
if !config.TestSync {
return nil
}
syncConn, err := r.testBeaconChainSync(ctx, g, conns, tickingStartTime, bootNode.ENR(), eth1Miner.ENR())
if err != nil {
return errors.Wrap(err, "beacon chain sync test failed")
}
conns = append(conns, syncConn)
if err := r.testDoppelGangerProtection(ctx); err != nil {
return errors.Wrap(err, "doppel ganger protection check failed")
}
if config.ExtraEpochs > 0 {
if err := r.waitExtra(ctx, types.Epoch(config.EpochsToRun+config.ExtraEpochs), conns[0], types.Epoch(config.ExtraEpochs)); err != nil {
return errors.Wrap(err, "error while waiting for ExtraEpochs")
}
syncEvaluators := []e2etypes.Evaluator{ev.FinishedSyncing, ev.AllNodesHaveSameHead}
for _, evaluator := range syncEvaluators {
t.Run(evaluator.Name, func(t *testing.T) {
assert.NoError(t, evaluator.Evaluation(conns...), "Evaluation failed for sync node")
})
}
}
return nil
})
if err := g.Wait(); err != nil && !errors.Is(err, context.Canceled) {
if err := r.comHandler.group.Wait(); err != nil && !errors.Is(err, context.Canceled) {
// At the end of the main evaluator goroutine all nodes are killed, no need to fail the test.
if strings.Contains(err.Error(), "signal: killed") {
return
}
t.Fatalf("E2E test ended in error: %v", err)
r.t.Fatalf("E2E test ended in error: %v", err)
}
}
func (r *testRunner) scenarioRunner() {
r.comHandler = NewComponentHandler(r.config, r.t)
r.comHandler.setup()
// Run E2E evaluators and tests.
r.addEvent(r.scenarioRun)
if err := r.comHandler.group.Wait(); err != nil && !errors.Is(err, context.Canceled) {
// At the end of the main evaluator goroutine all nodes are killed, no need to fail the test.
if strings.Contains(err.Error(), "signal: killed") {
return
}
r.t.Fatalf("E2E test ended in error: %v", err)
}
}
@@ -342,6 +141,9 @@ func (r *testRunner) runEvaluators(conns []*grpc.ClientConn, tickingStartTime ti
secondsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
ticker := helpers.NewEpochTicker(tickingStartTime, secondsPerEpoch)
for currentEpoch := range ticker.C() {
if config.EvalInterceptor(currentEpoch) {
continue
}
wg := new(sync.WaitGroup)
for _, eval := range config.Evaluators {
// Fix reference to evaluator as it will be running
@@ -498,3 +300,216 @@ func (r *testRunner) testDoppelGangerProtection(ctx context.Context) error {
}
return nil
}
func (r *testRunner) defaultEndToEndRun() error {
t, config, ctx, g := r.t, r.config, r.comHandler.ctx, r.comHandler.group
// When everything is done, cancel parent context (will stop all spawned nodes).
defer func() {
log.Info("All E2E evaluations are finished, cleaning up")
r.comHandler.done()
}()
// Wait for all required nodes to start.
ctxAllNodesReady, cancel := context.WithTimeout(ctx, allNodesStartTimeout)
defer cancel()
if err := helpers.ComponentsStarted(ctxAllNodesReady, r.comHandler.required()); err != nil {
return errors.Wrap(err, "components take too long to start")
}
// Since defer unwraps in LIFO order, parent context will be closed only after logs are written.
defer helpers.LogOutput(t)
if config.UsePprof {
defer func() {
log.Info("Writing output pprof files")
for i := 0; i < e2e.TestParams.BeaconNodeCount; i++ {
assert.NoError(t, helpers.WritePprofFiles(e2e.TestParams.LogPath, i))
}
}()
}
// Blocking, wait period varies depending on number of validators.
r.waitForChainStart()
// Failing early in case chain doesn't start.
if t.Failed() {
return errors.New("chain cannot start")
}
eth1Miner, ok := r.comHandler.eth1Miner.(*eth1.Miner)
if !ok {
return errors.New("incorrect component type")
}
beaconNodes, ok := r.comHandler.beaconNodes.(*components.BeaconNodeSet)
if !ok {
return errors.New("incorrect component type")
}
bootNode, ok := r.comHandler.bootnode.(*components.BootNode)
if !ok {
return errors.New("incorrect component type")
}
r.testDepositsAndTx(ctx, g, eth1Miner.KeystorePath(), []e2etypes.ComponentRunner{beaconNodes})
// Create GRPC connection to beacon nodes.
conns, closeConns, err := helpers.NewLocalConnections(ctx, e2e.TestParams.BeaconNodeCount)
require.NoError(t, err, "Cannot create local connections")
defer closeConns()
// Calculate genesis time.
nodeClient := eth.NewNodeClient(conns[0])
genesis, err := nodeClient.GetGenesis(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
tickingStartTime := helpers.EpochTickerStartTime(genesis)
// Run assigned evaluators.
if err := r.runEvaluators(conns, tickingStartTime); err != nil {
return errors.Wrap(err, "one or more evaluators failed")
}
// If requested, run sync test.
if !config.TestSync {
return nil
}
syncConn, err := r.testBeaconChainSync(ctx, g, conns, tickingStartTime, bootNode.ENR(), eth1Miner.ENR())
if err != nil {
return errors.Wrap(err, "beacon chain sync test failed")
}
conns = append(conns, syncConn)
if err := r.testDoppelGangerProtection(ctx); err != nil {
return errors.Wrap(err, "doppel ganger protection check failed")
}
if config.ExtraEpochs > 0 {
if err := r.waitExtra(ctx, types.Epoch(config.EpochsToRun+config.ExtraEpochs), conns[0], types.Epoch(config.ExtraEpochs)); err != nil {
return errors.Wrap(err, "error while waiting for ExtraEpochs")
}
syncEvaluators := []e2etypes.Evaluator{ev.FinishedSyncing, ev.AllNodesHaveSameHead}
for _, evaluator := range syncEvaluators {
t.Run(evaluator.Name, func(t *testing.T) {
assert.NoError(t, evaluator.Evaluation(conns...), "Evaluation failed for sync node")
})
}
}
return nil
}
func (r *testRunner) scenarioRun() error {
t, config, ctx := r.t, r.config, r.comHandler.ctx
// When everything is done, cancel parent context (will stop all spawned nodes).
defer func() {
log.Info("All E2E evaluations are finished, cleaning up")
r.comHandler.done()
}()
// Wait for all required nodes to start.
ctxAllNodesReady, cancel := context.WithTimeout(ctx, allNodesStartTimeout)
defer cancel()
if err := helpers.ComponentsStarted(ctxAllNodesReady, r.comHandler.required()); err != nil {
return errors.Wrap(err, "components take too long to start")
}
// Since defer unwraps in LIFO order, parent context will be closed only after logs are written.
defer helpers.LogOutput(t)
if config.UsePprof {
defer func() {
log.Info("Writing output pprof files")
for i := 0; i < e2e.TestParams.BeaconNodeCount; i++ {
assert.NoError(t, helpers.WritePprofFiles(e2e.TestParams.LogPath, i))
}
}()
}
// Blocking, wait period varies depending on number of validators.
r.waitForChainStart()
// Create GRPC connection to beacon nodes.
conns, closeConns, err := helpers.NewLocalConnections(ctx, e2e.TestParams.BeaconNodeCount)
require.NoError(t, err, "Cannot create local connections")
defer closeConns()
// Calculate genesis time.
nodeClient := eth.NewNodeClient(conns[0])
genesis, err := nodeClient.GetGenesis(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
tickingStartTime := helpers.EpochTickerStartTime(genesis)
// Run assigned evaluators.
return r.runEvaluators(conns, tickingStartTime)
}
func (r *testRunner) addEvent(ev func() error) {
r.comHandler.group.Go(ev)
}
func (r *testRunner) singleNodeOffline(epoch uint64) bool {
switch epoch {
case 9:
require.NoError(r.t, r.comHandler.beaconNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(0))
return true
case 10:
require.NoError(r.t, r.comHandler.beaconNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(0))
return true
case 11, 12:
// Allow 2 epochs for the network to finalize again.
return true
}
return false
}
func (r *testRunner) singleNodeOfflineMulticlient(epoch uint64) bool {
switch epoch {
case 9:
require.NoError(r.t, r.comHandler.beaconNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.lighthouseBeaconNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.lighthouseValidatorNodes.PauseAtIndex(0))
return true
case 10:
require.NoError(r.t, r.comHandler.beaconNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.lighthouseBeaconNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.lighthouseValidatorNodes.ResumeAtIndex(0))
return true
case 11, 12:
// Allow 2 epochs for the network to finalize again.
return true
}
return false
}
func (r *testRunner) eeOffline(epoch uint64) bool {
switch epoch {
case 9:
require.NoError(r.t, r.comHandler.eth1Miner.Pause())
return true
case 10:
require.NoError(r.t, r.comHandler.eth1Miner.Resume())
return true
case 11, 12:
// Allow 2 epochs for the network to finalize again.
return true
}
return false
}
func (r *testRunner) allValidatorsOffline(epoch uint64) bool {
switch epoch {
case 9:
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(1))
return true
case 10:
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(1))
return true
case 11, 12:
// Allow 2 epochs for the network to finalize again.
return true
}
return false
}
// All epochs are valid; the default interceptor never skips evaluators.
func defaultInterceptor(_ uint64) bool {
return false
}
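
The new EvalInterceptor hook is called with the current epoch on every tick of the epoch ticker; returning true skips that epoch's evaluators, which is how the scenario interceptors pause a component at epoch 9, resume it at epoch 10, and leave two quiet epochs for the network to finalize again. A toy sketch of the same loop, with print statements standing in for the real pause/resume calls:

package main

import "fmt"

// interceptor mirrors the EvalInterceptor contract: it sees each epoch and
// returns true when the normal evaluators should be skipped for that epoch.
type interceptor func(epoch uint64) bool

// singleNodeOffline is a toy version of the scenario in the diff: knock a node
// out at epoch 9, bring it back at 10, and give it two epochs to recover.
func singleNodeOffline(epoch uint64) bool {
	switch epoch {
	case 9:
		fmt.Println("pausing beacon node 0 and validator 0")
		return true
	case 10:
		fmt.Println("resuming beacon node 0 and validator 0")
		return true
	case 11, 12:
		return true // skip evaluators while the network finalizes again
	}
	return false
}

func runEpochs(totalEpochs uint64, intercept interceptor) {
	for epoch := uint64(0); epoch < totalEpochs; epoch++ {
		if intercept(epoch) {
			continue // the scenario took over this epoch; skip the evaluators
		}
		fmt.Printf("epoch %d: running evaluators\n", epoch)
	}
}

func main() {
	runEpochs(14, singleNodeOffline)
}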

View File

@@ -1,92 +1,10 @@
package endtoend
import (
"fmt"
"os"
"strconv"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ev "github.com/prysmaticlabs/prysm/testing/endtoend/evaluators"
"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
e2eParams "github.com/prysmaticlabs/prysm/testing/endtoend/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/types"
"github.com/prysmaticlabs/prysm/testing/require"
)
// Run mainnet e2e config with the current release validator against latest beacon node.
func TestEndToEnd_MainnetConfig_ValidatorAtCurrentRelease(t *testing.T) {
e2eMainnet(t, true, false)
}
func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.E2EMainnetTestConfig())
if useMultiClient {
require.NoError(t, e2eParams.InitMultiClient(e2eParams.StandardBeaconCount, e2eParams.StandardLighthouseNodeCount))
} else {
require.NoError(t, e2eParams.Init(e2eParams.StandardBeaconCount))
}
// Run for 10 epochs if not in long-running to confirm long-running has no issues.
var err error
epochsToRun := 10
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
if longRunning {
epochsToRun, err = strconv.Atoi(epochStr)
require.NoError(t, err)
}
_, crossClient := os.LookupEnv("RUN_CROSS_CLIENT")
if usePrysmSh {
// If using prysm.sh, run for only 6 epochs.
// TODO(#9166): remove this block once v2 changes are live.
epochsToRun = helpers.AltairE2EForkEpoch - 1
}
seed := 0
seedStr, isValid := os.LookupEnv("E2E_SEED")
if isValid {
seed, err = strconv.Atoi(seedStr)
require.NoError(t, err)
}
tracingPort := e2eParams.TestParams.Ports.JaegerTracingPort
tracingEndpoint := fmt.Sprintf("127.0.0.1:%d", tracingPort)
evals := []types.Evaluator{
ev.PeersConnect,
ev.HealthzCheck,
ev.MetricsCheck,
ev.ValidatorsAreActive,
ev.ValidatorsParticipatingAtEpoch(2),
ev.FinalizationOccurs(3),
ev.ProposeVoluntaryExit,
ev.ValidatorHasExited,
ev.ColdStateCheckpoint,
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
//ev.TransactionsPresent, TODO: Renable Transaction evaluator once it tx pool issues are fixed.
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{
fmt.Sprintf("--slots-per-archive-point=%d", params.BeaconConfig().SlotsPerEpoch*16),
fmt.Sprintf("--tracing-endpoint=http://%s", tracingEndpoint),
"--enable-tracing",
"--trace-sample-fraction=1.0",
},
ValidatorFlags: []string{},
EpochsToRun: uint64(epochsToRun),
TestSync: true,
TestFeature: true,
TestDeposits: true,
UseFixedPeerIDs: true,
UseValidatorCrossClient: crossClient,
UsePrysmShValidator: usePrysmSh,
UsePprof: !longRunning,
TracingSinkEndpoint: tracingEndpoint,
Evaluators: evals,
Seed: int64(seed),
}
newTestRunner(t, testConfig).run()
e2eMainnet(t, true, false).run()
}

View File

@@ -0,0 +1,17 @@
package endtoend
import (
"testing"
)
func TestEndToEnd_MainnetConfig_MultiClient(t *testing.T) {
e2eMainnet(t, false /*usePrysmSh*/, true /*useMultiClient*/).run()
}
func TestEndToEnd_ScenarioRun_BeaconOffline_Multiclient(t *testing.T) {
runner := e2eMainnet(t, false /*usePrysmSh*/, true /*useMultiClient*/)
runner.config.Evaluators = scenarioEvals()
runner.config.EvalInterceptor = runner.singleNodeOffline
runner.scenarioRunner()
}

View File

@@ -1,93 +1,13 @@
package endtoend
import (
"fmt"
"os"
"strconv"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ev "github.com/prysmaticlabs/prysm/testing/endtoend/evaluators"
e2eParams "github.com/prysmaticlabs/prysm/testing/endtoend/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/types"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestEndToEnd_MinimalConfig(t *testing.T) {
e2eMinimal(t, false, 3)
e2eMinimal(t, false, 3).run()
}
func TestEndToEnd_MinimalConfig_Web3Signer(t *testing.T) {
e2eMinimal(t, true, 0)
}
func e2eMinimal(t *testing.T, useWeb3RemoteSigner bool, extraEpochs uint64) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.E2ETestConfig().Copy())
require.NoError(t, e2eParams.Init(e2eParams.StandardBeaconCount))
// Run for 12 epochs if not in long-running to confirm long-running has no issues.
var err error
epochsToRun := 10
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
if longRunning {
epochsToRun, err = strconv.Atoi(epochStr)
require.NoError(t, err)
}
seed := 0
seedStr, isValid := os.LookupEnv("E2E_SEED")
if isValid {
seed, err = strconv.Atoi(seedStr)
require.NoError(t, err)
}
tracingPort := e2eParams.TestParams.Ports.JaegerTracingPort
tracingEndpoint := fmt.Sprintf("127.0.0.1:%d", tracingPort)
evals := []types.Evaluator{
ev.PeersConnect,
ev.HealthzCheck,
ev.MetricsCheck,
ev.ValidatorsAreActive,
ev.ValidatorsParticipatingAtEpoch(2),
ev.FinalizationOccurs(3),
ev.VerifyBlockGraffiti,
ev.PeersCheck,
ev.ProposeVoluntaryExit,
ev.ValidatorHasExited,
ev.ProcessesDepositsInBlocks,
ev.ActivatesDepositedValidators,
ev.DepositedValidatorsAreActive,
ev.ValidatorsVoteWithTheMajority,
ev.ColdStateCheckpoint,
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.APIMiddlewareVerifyIntegrity,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.ValidatorSyncParticipation,
//ev.TransactionsPresent, TODO: Renable Transaction evaluator once it tx pool issues are fixed.
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{
fmt.Sprintf("--slots-per-archive-point=%d", params.BeaconConfig().SlotsPerEpoch*16),
fmt.Sprintf("--tracing-endpoint=http://%s", tracingEndpoint),
"--enable-tracing",
"--trace-sample-fraction=1.0",
},
ValidatorFlags: []string{},
EpochsToRun: uint64(epochsToRun),
TestSync: true,
TestFeature: true,
TestDeposits: true,
UsePrysmShValidator: false,
UsePprof: !longRunning,
UseWeb3RemoteSigner: useWeb3RemoteSigner,
TracingSinkEndpoint: tracingEndpoint,
Evaluators: evals,
Seed: int64(seed),
ExtraEpochs: extraEpochs,
}
newTestRunner(t, testConfig).run()
e2eMinimal(t, true, 0).run()
}

View File

@@ -0,0 +1,28 @@
package endtoend
import "testing"
func TestEndToEnd_ScenarioRun_BeaconOffline(t *testing.T) {
runner := e2eMinimal(t, false, 0)
runner.config.Evaluators = scenarioEvals()
runner.config.EvalInterceptor = runner.singleNodeOffline
runner.scenarioRunner()
}
func TestEndToEnd_ScenarioRun_AllvalidatorsOffline(t *testing.T) {
runner := e2eMinimal(t, false, 0)
runner.config.Evaluators = scenarioEvals()
runner.config.EvalInterceptor = runner.allValidatorsOffline
runner.scenarioRunner()
}
func TestEndToEnd_ScenarioRun_EEOffline(t *testing.T) {
t.Skip("TODO(#10242) Prysm is current unable to handle an offline e2e")
runner := e2eMinimal(t, false, 0)
runner.config.Evaluators = scenarioEvals()
runner.config.EvalInterceptor = runner.eeOffline
runner.scenarioRunner()
}

View File

@@ -36,6 +36,7 @@ func TestEndToEnd_Slasher_MinimalConfig(t *testing.T) {
ev.InjectDoubleVoteOnEpoch(2),
ev.InjectDoubleBlockOnEpoch(2),
},
EvalInterceptor: defaultInterceptor,
TracingSinkEndpoint: tracingEndpoint,
}

View File

@@ -1,9 +0,0 @@
package endtoend
import (
"testing"
)
func TestEndToEnd_MainnetConfig_MultiClient(t *testing.T) {
e2eMainnet(t, false /*usePrysmSh*/, true /*useMultiClient*/)
}

View File

@@ -23,6 +23,7 @@ type E2EConfig struct {
Seed int64
TracingSinkEndpoint string
Evaluators []Evaluator
EvalInterceptor func(uint64) bool
BeaconFlags []string
ValidatorFlags []string
PeerIDs []string
@@ -43,6 +44,22 @@ type ComponentRunner interface {
Start(ctx context.Context) error
// Started checks whether an underlying component is started and ready to be queried.
Started() <-chan struct{}
// Pause pauses a component.
Pause() error
// Resume resumes a component.
Resume() error
// Stop stops a component.
Stop() error
}
type MultipleComponentRunners interface {
ComponentRunner
// PauseAtIndex pauses the grouped component element at the desired index.
PauseAtIndex(i int) error
// ResumeAtIndex resumes the grouped component element at the desired index.
ResumeAtIndex(i int) error
// StopAtIndex stops the grouped component element at the desired index.
StopAtIndex(i int) error
}
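
With Pause, Resume and Stop now part of ComponentRunner, every component must implement them, and components that own no external process (for example the KeystoreGenerator) satisfy the interface with no-ops. A minimal sketch of such an implementation; the interface body is copied from the diff above and noopComponent is a hypothetical example, not Prysm code:

package main

import "context"

// ComponentRunner is copied from the diff above; it is shown here only so the
// sketch compiles on its own.
type ComponentRunner interface {
	Start(ctx context.Context) error
	Started() <-chan struct{}
	Pause() error
	Resume() error
	Stop() error
}

// noopComponent is a hypothetical component with no external process, so the
// lifecycle methods are no-ops -- the same shape as KeystoreGenerator above.
type noopComponent struct{ started chan struct{} }

var _ ComponentRunner = (*noopComponent)(nil) // compile-time interface check

func newNoopComponent() *noopComponent {
	return &noopComponent{started: make(chan struct{})}
}

func (n *noopComponent) Start(ctx context.Context) error {
	close(n.started) // nothing to launch; report ready immediately
	<-ctx.Done()
	return nil
}

func (n *noopComponent) Started() <-chan struct{} { return n.started }
func (n *noopComponent) Pause() error             { return nil }
func (n *noopComponent) Resume() error            { return nil }
func (n *noopComponent) Stop() error              { return nil }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	c := newNoopComponent()
	go func() { _ = c.Start(ctx) }()
	<-c.Started()
	cancel()
}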
// BeaconNodeSet defines an interface for an object that fulfills the duties