Add Slasher to E2E (#5061)

* Start adding "inject slashing into pool"

* Attempt at slashing

* Remove unneeded

* Fix

* Begin adding slasher client to e2e

* Start slasher in e2e

* Get slashing detection working

* Get slashing evaluators working

* Progress on e2e

* Cleanup e2e

* Fix slasher e2e!

* lint

* Comment

* Fixes

* Improve accuracy of balance check

* Remove extra

* Remove extra

* Make more accurate
This commit is contained in:
Ivan Martinez
2020-03-15 01:09:23 -04:00
committed by GitHub
parent eeffa4fb30
commit 8143cc36bc
17 changed files with 400 additions and 110 deletions

View File

@@ -2,16 +2,18 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_test(
name = "go_default_test",
size = "enormous",
size = "large",
srcs = [
"demo_e2e_test.go",
"endtoend_test.go",
"minimal_e2e_test.go",
"minimal_slashing_e2e_test.go",
],
args = ["-test.v"],
data = [
"//beacon-chain",
"//validator",
"//slasher",
"@com_github_ethereum_go_ethereum//cmd/geth",
],
embed = [":go_default_library"],
@@ -43,6 +45,7 @@ go_library(
"epochTimer.go",
"eth1.go",
"helpers.go",
"slasher.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/endtoend",

View File

@@ -20,6 +20,7 @@ type end2EndConfig struct {
epochsToRun uint64
numValidators uint64
numBeaconNodes uint64
testSync bool
contractAddr common.Address
evaluators []ev.Evaluator
}
@@ -54,12 +55,13 @@ func startNewBeaconNode(t *testing.T, config *end2EndConfig, beaconNodes []*ev.B
}
args := []string{
fmt.Sprintf("--datadir=%s/eth2-beacon-node-%d", tmpPath, index),
fmt.Sprintf("--log-file=%s", stdOutFile.Name()),
"--force-clear-db",
"--no-discovery",
"--http-web3provider=http://127.0.0.1:8745",
"--web3provider=ws://127.0.0.1:8746",
fmt.Sprintf("--min-sync-peers=%d", config.numBeaconNodes),
fmt.Sprintf("--datadir=%s/eth2-beacon-node-%d", tmpPath, index),
fmt.Sprintf("--deposit-contract=%s", config.contractAddr.Hex()),
fmt.Sprintf("--rpc-port=%d", 4200+index),
fmt.Sprintf("--p2p-udp-port=%d", 12200+index),
@@ -68,7 +70,6 @@ func startNewBeaconNode(t *testing.T, config *end2EndConfig, beaconNodes []*ev.B
fmt.Sprintf("--grpc-gateway-port=%d", 3400+index),
fmt.Sprintf("--contract-deployment-block=%d", 0),
fmt.Sprintf("--rpc-max-page-size=%d", params.BeaconConfig().MinGenesisActiveValidatorCount),
fmt.Sprintf("--log-file=%s", stdOutFile.Name()),
}
args = append(args, config.beaconFlags...)
@@ -79,8 +80,8 @@ func startNewBeaconNode(t *testing.T, config *end2EndConfig, beaconNodes []*ev.B
}
}
t.Logf("Starting beacon chain %d with flags: %s", index, strings.Join(args, " "))
cmd := exec.Command(binaryPath, args...)
t.Logf("Starting beacon chain %d with flags: %s", index, strings.Join(args[2:], " "))
if err := cmd.Start(); err != nil {
t.Fatalf("Failed to start beacon node: %v", err)
}

View File

@@ -36,14 +36,6 @@ func runEndToEndTest(t *testing.T, config *end2EndConfig) {
defer logOutput(t, tmpPath, config)
defer killProcesses(t, processIDs)
if config.numBeaconNodes > 1 {
t.Run("all_peers_connect", func(t *testing.T) {
if err := ev.PeersConnect(beaconNodes); err != nil {
t.Fatalf("Failed to connect to peers: %v", err)
}
})
}
beaconLogFile, err := os.Open(path.Join(tmpPath, fmt.Sprintf(beaconNodeLogFileName, 0)))
if err != nil {
t.Fatal(err)
@@ -57,13 +49,18 @@ func runEndToEndTest(t *testing.T, config *end2EndConfig) {
return
}
conn, err := grpc.Dial("127.0.0.1:4200", grpc.WithInsecure())
if err != nil {
t.Fatalf("Failed to dial: %v", err)
}
beaconClient := eth.NewBeaconChainClient(conn)
nodeClient := eth.NewNodeClient(conn)
slasherPIDs := startSlashers(t, config)
defer killProcesses(t, slasherPIDs)
conns := make([]*grpc.ClientConn, len(beaconNodes))
for i := 0; i < len(conns); i++ {
conn, err := grpc.Dial(fmt.Sprintf("127.0.0.1:%d", beaconNodes[i].RPCPort), grpc.WithInsecure())
if err != nil {
t.Fatalf("Failed to dial: %v", err)
}
conns[i] = conn
}
nodeClient := eth.NewNodeClient(conns[0])
genesis, err := nodeClient.GetGenesis(context.Background(), &ptypes.Empty{})
if err != nil {
t.Fatal(err)
@@ -80,16 +77,13 @@ func runEndToEndTest(t *testing.T, config *end2EndConfig) {
continue
}
t.Run(fmt.Sprintf(evaluator.Name, currentEpoch), func(t *testing.T) {
if err := evaluator.Evaluation(beaconClient); err != nil {
if err := evaluator.Evaluation(conns...); err != nil {
t.Errorf("evaluation failed for epoch %d: %v", currentEpoch, err)
}
})
}
if t.Failed() || currentEpoch >= config.epochsToRun {
if err := conn.Close(); err != nil {
t.Fatal(err)
}
ticker.Done()
if t.Failed() {
return
@@ -98,8 +92,17 @@ func runEndToEndTest(t *testing.T, config *end2EndConfig) {
}
}
if !config.testSync {
return
}
syncNodeInfo := startNewBeaconNode(t, config, beaconNodes)
beaconNodes = append(beaconNodes, syncNodeInfo)
syncConn, err := grpc.Dial(fmt.Sprintf("127.0.0.1:%d", syncNodeInfo.RPCPort), grpc.WithInsecure())
if err != nil {
t.Fatalf("Failed to dial: %v", err)
}
conns = append(conns, syncConn)
index := uint64(len(beaconNodes) - 1)
// Sleep until the next epoch to give time for the newly started node to sync.
@@ -112,22 +115,18 @@ func runEndToEndTest(t *testing.T, config *end2EndConfig) {
if err != nil {
t.Fatal(err)
}
defer logErrorOutput(t, syncLogFile, "beacon chain node", index)
defer killProcesses(t, []int{syncNodeInfo.ProcessID})
if err := waitForTextInFile(syncLogFile, "Synced up to"); err != nil {
t.Fatalf("Failed to sync: %v", err)
}
t.Run("node_finishes_sync", func(t *testing.T) {
if err := ev.FinishedSyncing(syncNodeInfo.RPCPort); err != nil {
t.Fatal(err)
}
})
t.Run("all_nodes_have_correct_head", func(t *testing.T) {
if err := ev.AllChainsHaveSameHead(beaconNodes); err != nil {
t.Fatal(err)
}
})
defer logErrorOutput(t, syncLogFile, "beacon chain node", index)
defer killProcesses(t, []int{syncNodeInfo.ProcessID})
syncEvaluators := []ev.Evaluator{ev.FinishedSyncing, ev.AllNodesHaveSameHead}
for _, evaluator := range syncEvaluators {
t.Run(evaluator.Name, func(t *testing.T) {
if err := evaluator.Evaluation(conns...); err != nil {
t.Errorf("evaluation failed for sync node: %v", err)
}
})
}
}

View File

@@ -6,16 +6,22 @@ go_library(
srcs = [
"finality.go",
"node.go",
"slashing.go",
"types.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/endtoend/evaluators",
visibility = ["//endtoend:__subpackages__"],
deps = [
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//types:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
],
)

View File

@@ -7,6 +7,7 @@ import (
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"google.golang.org/grpc"
)
// FinalizationOccurs is an evaluator to make sure finalization is performing as it should.
@@ -17,7 +18,9 @@ var FinalizationOccurs = Evaluator{
Evaluation: finalizationOccurs,
}
func finalizationOccurs(client eth.BeaconChainClient) error {
func finalizationOccurs(conns ...*grpc.ClientConn) error {
conn := conns[0]
client := eth.NewBeaconChainClient(conn)
chainHead, err := client.GetChainHead(context.Background(), &ptypes.Empty{})
if err != nil {
return errors.Wrap(err, "failed to get chain head")

View File

@@ -4,10 +4,6 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
@@ -16,46 +12,55 @@ import (
)
// PeersConnect checks all beacon nodes and returns whether they are connected to each other as peers.
func PeersConnect(beaconNodes []*BeaconNodeInfo) error {
for _, bNode := range beaconNodes {
response, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/p2p", bNode.MonitorPort))
if err != nil {
return errors.Wrap(err, "failed to reach p2p metrics page")
}
dataInBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
return err
}
if err := response.Body.Close(); err != nil {
return err
}
var PeersConnect = Evaluator{
Name: "peers_connect_epoch_%d",
Policy: onEpoch(0),
Evaluation: peersConnect,
}
// Subtracting by 2 here since the libp2p page has "3 peers" as text.
// With a starting index before the "p", going two characters back should give us
// the number we need.
startIdx := strings.Index(string(dataInBytes), "peers") - 2
if startIdx == -3 {
return fmt.Errorf("could not find needed text in %s", dataInBytes)
}
peerCount, err := strconv.Atoi(string(dataInBytes)[startIdx : startIdx+1])
// FinishedSyncing returns whether the beacon node with the given rpc port has finished syncing.
// Its policy always fires; it is intended to run against the freshly started sync node
// (passed as the first connection).
var FinishedSyncing = Evaluator{
	Name:       "finished_syncing",
	Policy:     func(currentEpoch uint64) bool { return true },
	Evaluation: finishedSyncing,
}

// AllNodesHaveSameHead ensures all nodes have the same head epoch. Checks finality and justification as well.
// Not checking head block root as it may change irregularly for the validator connected nodes.
var AllNodesHaveSameHead = Evaluator{
	Name:       "all_nodes_have_same_head",
	Policy:     func(currentEpoch uint64) bool { return true },
	Evaluation: allNodesHaveSameHead,
}
// onEpoch builds an epoch policy that fires only when the current epoch
// is exactly the given epoch.
func onEpoch(epoch uint64) func(uint64) bool {
	return func(current uint64) bool {
		return epoch == current
	}
}
func peersConnect(conns ...*grpc.ClientConn) error {
if len(conns) == 1 {
return nil
}
ctx := context.Background()
for _, conn := range conns {
nodeClient := eth.NewNodeClient(conn)
peersResp, err := nodeClient.ListPeers(ctx, &ptypes.Empty{})
if err != nil {
return err
}
expectedPeers := uint64(len(beaconNodes) - 1)
if expectedPeers != uint64(peerCount) {
return fmt.Errorf("unexpected amount of peers, expected %d, received %d", expectedPeers, peerCount)
expectedPeers := len(conns) - 1
if expectedPeers != len(peersResp.Peers) {
return fmt.Errorf("unexpected amount of peers, expected %d, received %d", expectedPeers, len(peersResp.Peers))
}
}
return nil
}
// FinishedSyncing returns whether the beacon node with the given rpc port has finished syncing.
func FinishedSyncing(rpcPort uint64) error {
syncConn, err := grpc.Dial(fmt.Sprintf("127.0.0.1:%d", rpcPort), grpc.WithInsecure())
if err != nil {
return errors.Wrap(err, "failed to dial: %v")
}
syncNodeClient := eth.NewNodeClient(syncConn)
func finishedSyncing(conns ...*grpc.ClientConn) error {
conn := conns[0]
syncNodeClient := eth.NewNodeClient(conn)
syncStatus, err := syncNodeClient.GetSyncStatus(context.Background(), &ptypes.Empty{})
if err != nil {
return err
@@ -66,19 +71,12 @@ func FinishedSyncing(rpcPort uint64) error {
return nil
}
// AllChainsHaveSameHead connects to all RPC ports in the passed in array and ensures they have the same head epoch.
// Checks finality and justification as well.
// Not checking head block root as it may change irregularly for the validator connected nodes.
func AllChainsHaveSameHead(beaconNodes []*BeaconNodeInfo) error {
headEpochs := make([]uint64, len(beaconNodes))
justifiedRoots := make([][]byte, len(beaconNodes))
prevJustifiedRoots := make([][]byte, len(beaconNodes))
finalizedRoots := make([][]byte, len(beaconNodes))
for i, bNode := range beaconNodes {
conn, err := grpc.Dial(fmt.Sprintf("127.0.0.1:%d", bNode.RPCPort), grpc.WithInsecure())
if err != nil {
return errors.Wrap(err, "Failed to dial")
}
func allNodesHaveSameHead(conns ...*grpc.ClientConn) error {
headEpochs := make([]uint64, len(conns))
justifiedRoots := make([][]byte, len(conns))
prevJustifiedRoots := make([][]byte, len(conns))
finalizedRoots := make([][]byte, len(conns))
for i, conn := range conns {
beaconClient := eth.NewBeaconChainClient(conn)
chainHead, err := beaconClient.GetChainHead(context.Background(), &ptypes.Empty{})
if err != nil {

View File

@@ -0,0 +1,120 @@
package evaluators
import (
"context"
"github.com/gogo/protobuf/types"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"github.com/prysmaticlabs/prysm/shared/testutil"
"google.golang.org/grpc"
)
// InjectDoubleVote broadcasts a double vote into the beacon node pool for the slasher to detect.
// Its policy restricts it to epochs before epoch 2 (see beforeEpoch).
var InjectDoubleVote = Evaluator{
	Name:       "inject_double_vote_%d",
	Policy:     beforeEpoch(2),
	Evaluation: insertDoubleAttestationIntoPool,
}

// slashedIndices records the validator indices already double-voted by this
// evaluator, so repeat runs skip them and the balance/slashing evaluators in
// this package can verify the penalties against the same set.
var slashedIndices []uint64
// beforeEpoch builds an epoch policy that is active strictly before the
// given epoch (so beforeEpoch(2) fires on epochs 0 and 1).
func beforeEpoch(epoch uint64) func(uint64) bool {
	return func(current uint64) bool {
		return epoch > current
	}
}
// insertDoubleAttestationIntoPool signs and broadcasts a second, conflicting
// attestation for a couple of committee members so the slasher can detect a
// double vote. The conflicting vote reuses real attestation data but points
// at a bogus beacon block root.
func insertDoubleAttestationIntoPool(conns ...*grpc.ClientConn) error {
	// All queries go through the first node; the forged attestations are
	// broadcast to every connection at the end.
	conn := conns[0]
	valClient := eth.NewBeaconNodeValidatorClient(conn)
	beaconClient := eth.NewBeaconChainClient(conn)

	ctx := context.Background()
	chainHead, err := beaconClient.GetChainHead(ctx, &types.Empty{})
	if err != nil {
		return err
	}

	// Regenerate the deterministic interop keys so we can sign on behalf of
	// the test validators. NOTE(review): assumes 64 deterministic validators
	// in the e2e network — confirm against the e2e config.
	_, privKeys, err := testutil.DeterministicDepositsAndKeys(64)
	if err != nil {
		return err
	}
	pubKeys := make([][]byte, len(privKeys))
	for i, priv := range privKeys {
		pubKeys[i] = priv.PublicKey().Marshal()
	}

	// Use duties to find a committee that attested at the previous slot.
	duties, err := valClient.GetDuties(ctx, &eth.DutiesRequest{
		Epoch:      chainHead.HeadEpoch,
		PublicKeys: pubKeys,
	})
	if err != nil {
		return err
	}
	var committeeIndex uint64
	var committee []uint64
	for _, duty := range duties.Duties {
		if duty.AttesterSlot == chainHead.HeadSlot-1 {
			committeeIndex = duty.CommitteeIndex
			committee = duty.Committee
			break
		}
	}

	// Fetch real attestation data for that slot/committee, then corrupt the
	// beacon block root so it conflicts with the honest attestation.
	attDataReq := &eth.AttestationDataRequest{
		CommitteeIndex: committeeIndex,
		Slot:           chainHead.HeadSlot - 1,
	}
	attData, err := valClient.GetAttestationData(ctx, attDataReq)
	if err != nil {
		return err
	}
	blockRoot := bytesutil.ToBytes32([]byte("muahahahaha I'm an evil validator"))
	attData.BeaconBlockRoot = blockRoot[:]
	dataRoot, err := ssz.HashTreeRoot(attData)
	if err != nil {
		return err
	}

	// Signing domain for attestations at the target epoch.
	domainResp, err := valClient.DomainData(ctx, &eth.DomainRequest{
		Epoch:  attData.Target.Epoch,
		Domain: params.BeaconConfig().DomainBeaconAttester[:],
	})
	if err != nil {
		return err
	}

	// Sign and broadcast a conflicting attestation for 2 not-yet-slashed
	// committee members; skip (and compensate the target count for) members
	// already slashed by a previous run of this evaluator.
	valsToSlash := uint64(2)
	for i := uint64(0); i < valsToSlash && i < uint64(len(committee)); i++ {
		if len(sliceutil.IntersectionUint64(slashedIndices, []uint64{committee[i]})) > 0 {
			valsToSlash++
			continue
		}
		// Set the bits of half the committee to be slashed.
		attBitfield := bitfield.NewBitlist(uint64(len(committee)))
		attBitfield.SetBitAt(i, true)

		att := &eth.Attestation{
			AggregationBits: attBitfield,
			Data:            attData,
			Signature:       privKeys[committee[i]].Sign(dataRoot[:], domainResp.SignatureDomain).Marshal(),
		}
		// Propose through every beacon node so the double vote reaches all pools.
		for _, conn := range conns {
			client := eth.NewBeaconNodeValidatorClient(conn)
			_, err = client.ProposeAttestation(ctx, att)
			if err != nil {
				return err
			}
		}
		slashedIndices = append(slashedIndices, committee[i])
	}
	return nil
}

View File

@@ -1,7 +1,7 @@
package evaluators
import (
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"google.golang.org/grpc"
)
// Evaluator defines the structure of the evaluators used to
@@ -9,7 +9,7 @@ import (
type Evaluator struct {
Name string
Policy func(currentEpoch uint64) bool
Evaluation func(client eth.BeaconChainClient) error
Evaluation func(conn ...*grpc.ClientConn) error
}
// BeaconNodeInfo contains the info of ports and other required information

View File

@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
"google.golang.org/grpc"
)
// ValidatorsAreActive ensures the expected amount of validators are active.
@@ -23,6 +24,20 @@ var ValidatorsParticipating = Evaluator{
Evaluation: validatorsParticipating,
}
// ValidatorsSlashed ensures the expected amount of validators are slashed.
// Runs every epoch after genesis (see afterNthEpoch).
var ValidatorsSlashed = Evaluator{
	Name:       "validators_slashed_epoch_%d",
	Policy:     afterNthEpoch(0),
	Evaluation: validatorsSlashed,
}
// SlashedValidatorsLoseBalance checks that validators slashed during the run
// have lost at least the expected slashing penalty from their balance.
var SlashedValidatorsLoseBalance = Evaluator{
	// Fixed typo: "valance" -> "balance". The name only feeds t.Run output.
	Name:       "slashed_validators_lose_balance_epoch_%d",
	Policy:     afterNthEpoch(0),
	Evaluation: validatorsLoseBalance,
}
// Not including first epoch because of issues with genesis.
func afterNthEpoch(afterEpoch uint64) func(uint64) bool {
return func(currentEpoch uint64) bool {
@@ -30,7 +45,9 @@ func afterNthEpoch(afterEpoch uint64) func(uint64) bool {
}
}
func validatorsAreActive(client eth.BeaconChainClient) error {
func validatorsAreActive(conns ...*grpc.ClientConn) error {
conn := conns[0]
client := eth.NewBeaconChainClient(conn)
// Balances actually fluctuate but we just want to check initial balance.
validatorRequest := &eth.ListValidatorsRequest{
PageSize: int32(params.BeaconConfig().MinGenesisActiveValidatorCount),
@@ -83,7 +100,9 @@ func validatorsAreActive(client eth.BeaconChainClient) error {
}
// validatorsParticipating ensures the validators have an acceptable participation rate.
func validatorsParticipating(client eth.BeaconChainClient) error {
func validatorsParticipating(conns ...*grpc.ClientConn) error {
conn := conns[0]
client := eth.NewBeaconChainClient(conn)
validatorRequest := &eth.GetValidatorParticipationRequest{}
participation, err := client.GetValidatorParticipation(context.Background(), validatorRequest)
if err != nil {
@@ -102,3 +121,49 @@ func validatorsParticipating(client eth.BeaconChainClient) error {
}
return nil
}
// validatorsSlashed queries the first beacon node's active-set changes and
// verifies the number of slashed validator indices matches what the double
// vote injections should produce (2 per injection, so 2 or 4 in total).
func validatorsSlashed(conns ...*grpc.ClientConn) error {
	client := eth.NewBeaconChainClient(conns[0])
	changes, err := client.GetValidatorActiveSetChanges(
		context.Background(),
		&eth.GetValidatorActiveSetChangesRequest{},
	)
	if err != nil {
		return err
	}
	slashedCount := len(changes.SlashedIndices)
	switch slashedCount {
	case 2, 4:
		return nil
	default:
		return fmt.Errorf("expected 2 or 4 indices to be slashed, received %d", slashedCount)
	}
}
// validatorsLoseBalance checks that every validator recorded in slashedIndices
// has had its effective balance reduced below the expected post-slashing
// threshold on the first beacon node.
func validatorsLoseBalance(conns ...*grpc.ClientConn) error {
	ctx := context.Background()
	client := eth.NewBeaconChainClient(conns[0])

	// The threshold is loop-invariant; compute it once instead of per validator.
	// The EffectiveBalanceIncrement/10 slack tolerates ordinary balance
	// fluctuations (rewards/penalties) on top of the slashing penalty.
	slashedPenalty := params.BeaconConfig().MaxEffectiveBalance / params.BeaconConfig().MinSlashingPenaltyQuotient
	slashedBal := params.BeaconConfig().MaxEffectiveBalance - slashedPenalty + params.BeaconConfig().EffectiveBalanceIncrement/10

	for _, valIdx := range slashedIndices {
		req := &eth.GetValidatorRequest{
			QueryFilter: &eth.GetValidatorRequest_Index{
				Index: valIdx,
			},
		}
		valResp, err := client.GetValidator(ctx, req)
		if err != nil {
			return err
		}
		if valResp.EffectiveBalance >= slashedBal {
			// Report the actual validator index (the original reported the
			// loop position, which is misleading in the failure message).
			return fmt.Errorf(
				"expected slashed validator %d to have balance less than %d, received %d",
				valIdx,
				slashedBal,
				valResp.EffectiveBalance,
			)
		}
	}
	return nil
}

View File

@@ -96,6 +96,12 @@ func logOutput(t *testing.T, tmpPath string, config *end2EndConfig) {
t.Fatal(err)
}
logErrorOutput(t, validatorLogFile, "validator client", i)
slasherLogFile, err := os.Open(path.Join(tmpPath, fmt.Sprintf(slasherLogFileName, i)))
if err != nil {
t.Fatal(err)
}
logErrorOutput(t, slasherLogFile, "slasher client", i)
}
t.Logf("Ending time: %s\n", time.Now().String())
}
@@ -112,17 +118,17 @@ func logErrorOutput(t *testing.T, file io.Reader, title string, index uint64) {
}
if len(errorLines) < 1 {
t.Logf("No error logs detected for %s %d", title, index)
return
}
t.Log("===================================================================")
t.Logf("Start of %s %d error output:\n", title, index)
t.Logf("==================== Start of %s %d error output ==================\n", title, index)
var lines uint64
for _, err := range errorLines {
lines++
if lines >= 10 {
break
}
t.Log(err)
}
t.Logf("\nEnd of %s %d error output:", title, index)
t.Log("===================================================================")
t.Logf("===================== End of %s %d error output ====================\n", title, index)
}

View File

@@ -19,7 +19,9 @@ func TestEndToEnd_MinimalConfig(t *testing.T) {
epochsToRun: 6,
numBeaconNodes: 4,
numValidators: params.BeaconConfig().MinGenesisActiveValidatorCount,
testSync: true,
evaluators: []ev.Evaluator{
ev.PeersConnect,
ev.ValidatorsAreActive,
ev.ValidatorsParticipating,
ev.FinalizationOccurs,

View File

@@ -0,0 +1,31 @@
package endtoend
import (
"testing"
ev "github.com/prysmaticlabs/prysm/endtoend/evaluators"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
// TestEndToEnd_Slashing_MinimalConfig runs a short two-node e2e network with
// slasher clients attached and verifies that an injected double vote leads to
// validators being slashed and losing balance.
func TestEndToEnd_Slashing_MinimalConfig(t *testing.T) {
	testutil.ResetCache()
	params.UseMinimalConfig()

	minimalConfig := &end2EndConfig{
		beaconFlags:    append(featureconfig.E2EBeaconChainFlags, "--minimal-config", "--custom-genesis-delay=15"),
		validatorFlags: append(featureconfig.E2EValidatorFlags, "--minimal-config"),
		epochsToRun:    2,
		numBeaconNodes: 2,
		numValidators:  params.BeaconConfig().MinGenesisActiveValidatorCount,
		// Sync testing is skipped to keep the slashing run short.
		testSync: false,
		evaluators: []ev.Evaluator{
			ev.PeersConnect,
			ev.ValidatorsSlashed,
			ev.SlashedValidatorsLoseBalance,
			ev.InjectDoubleVote,
		},
	}
	runEndToEndTest(t, minimalConfig)
}

60
endtoend/slasher.go Normal file
View File

@@ -0,0 +1,60 @@
package endtoend
import (
"fmt"
"os"
"os/exec"
"path"
"strings"
"testing"
"github.com/bazelbuild/rules_go/go/tools/bazel"
)
var slasherLogFileName = "slasher-%d.log"
// startSlashers starts one slasher client per beacon node for use within E2E,
// each pointed at its corresponding beacon node's RPC endpoint.
// It returns the process IDs of the started slashers.
func startSlashers(t *testing.T, config *end2EndConfig) []int {
	tmpPath := config.tmpPath
	binaryPath, found := bazel.FindBinary("slasher", "slasher")
	if !found {
		t.Log(binaryPath)
		t.Fatal("Slasher binary not found")
	}

	var processIDs []int
	for i := uint64(0); i < config.numBeaconNodes; i++ {
		stdOutFile, err := deleteAndCreateFile(tmpPath, fmt.Sprintf(slasherLogFileName, i))
		if err != nil {
			t.Fatal(err)
		}
		args := []string{
			fmt.Sprintf("--log-file=%s", stdOutFile.Name()),
			fmt.Sprintf("--datadir=%s/slasher-data-%d/", tmpPath, i),
			"--force-clear-db",
			"--span-map-cache",
			"--verbosity=debug",
			fmt.Sprintf("--monitoring-port=%d", 3535+i),
			// Beacon node RPC ports start at 4200 — presumably matching the
			// beacon node starter; verify against startNewBeaconNode.
			fmt.Sprintf("--beacon-rpc-provider=localhost:%d", 4200+i),
		}
		// args[2:] omits the log-file and datadir flags to keep the log line short.
		t.Logf("Starting slasher %d with flags: %s", i, strings.Join(args[2:], " "))
		cmd := exec.Command(binaryPath, args...)
		if err := cmd.Start(); err != nil {
			t.Fatalf("Failed to start slasher client: %v", err)
		}
		processIDs = append(processIDs, cmd.Process.Pid)
	}

	// Block until the first slasher reports that detection has begun, so the
	// evaluators only run once slashing detection is live.
	stdOutFile, err := os.Open(path.Join(tmpPath, fmt.Sprintf(slasherLogFileName, 0)))
	if err != nil {
		t.Fatal(err)
	}
	if err = waitForTextInFile(stdOutFile, "Beacon node is fully synced, starting slashing detection"); err != nil {
		t.Fatalf("could not find starting logs for slasher, this means it had issues starting: %v", err)
	}
	return processIDs
}

View File

@@ -53,18 +53,18 @@ func initializeValidators(
t.Fatal(err)
}
args := []string{
fmt.Sprintf("--datadir=%s/eth2-val-%d", tmpPath, n),
fmt.Sprintf("--log-file=%s", file.Name()),
"--force-clear-db",
fmt.Sprintf("--interop-num-validators=%d", validatorsPerNode),
fmt.Sprintf("--interop-start-index=%d", validatorsPerNode*n),
fmt.Sprintf("--monitoring-port=%d", 9280+n),
fmt.Sprintf("--datadir=%s/eth2-val-%d", tmpPath, n),
fmt.Sprintf("--beacon-rpc-provider=localhost:%d", 4200+n),
fmt.Sprintf("--log-file=%s", file.Name()),
}
args = append(args, config.validatorFlags...)
cmd := exec.Command(binaryPath, args...)
t.Logf("Starting validator client %d with flags: %s", n, strings.Join(args, " "))
t.Logf("Starting validator client %d with flags: %s", n, strings.Join(args[2:], " "))
if err := cmd.Start(); err != nil {
t.Fatal(err)
}

View File

@@ -303,11 +303,9 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.
var E2EBeaconChainFlags = []string{
"--enable-ssz-cache",
"--cache-proposer-indices",
"--cache-filtered-block-tree",
"--enable-skip-slots-cache",
"--enable-eth1-data-vote-cache",
"--proto-array-forkchoice",
"--enable-byte-mempool",
"--enable-state-gen-sig-verify",
"--check-head-state",

View File

@@ -79,5 +79,7 @@ docker_push(
go_binary(
name = "slasher",
embed = [":go_default_library"],
visibility = ["//visibility:public"],
visibility = [
"//endtoend:__pkg__",
],
)

View File

@@ -143,18 +143,14 @@ func (ds *Service) detectHistoricalChainData(ctx context.Context) {
func (ds *Service) submitAttesterSlashings(ctx context.Context, slashings []*ethpb.AttesterSlashing, epoch uint64) {
ctx, span := trace.StartSpan(ctx, "detection.submitAttesterSlashings")
defer span.End()
var slashedIndices []uint64
if len(slashings) > 0 {
log.WithFields(logrus.Fields{
"targetEpoch": epoch,
"indices": slashedIndices,
}).Infof("Found %d attester slashings! Submitting to beacon node", len(slashings))
}
for i := 0; i < len(slashings); i++ {
slash := slashings[i]
if slash != nil && slash.Attestation_1 != nil && slash.Attestation_2 != nil {
slashableIndices := sliceutil.IntersectionUint64(slashings[i].Attestation_1.AttestingIndices, slashings[i].Attestation_2.AttestingIndices)
slashedIndices = append(slashedIndices, slashableIndices...)
log.WithFields(logrus.Fields{
"targetEpoch": epoch,
"indices": slashableIndices,
}).Infof("Found %d attester slashings! Submitting to beacon node", len(slashings))
ds.attesterSlashingsFeed.Send(slashings[i])
}
}