diff --git a/beacon-chain/state/stateutil/BUILD.bazel b/beacon-chain/state/stateutil/BUILD.bazel
index 2f806d5eb5..af2c346e49 100644
--- a/beacon-chain/state/stateutil/BUILD.bazel
+++ b/beacon-chain/state/stateutil/BUILD.bazel
@@ -21,8 +21,7 @@ go_library(
         "//slasher:__subpackages__",
         "//tools/blocktree:__pkg__",
         "//tools/pcli:__pkg__",
-        "//validator/client/streaming:__pkg__",
-        "//validator/client/polling:__pkg__",
+        "//validator/client:__pkg__",
     ],
     deps = [
         "//proto/beacon/p2p/v1:go_default_library",
diff --git a/nogo_config.json b/nogo_config.json
index 0693639b39..ca1a2b869b 100644
--- a/nogo_config.json
+++ b/nogo_config.json
@@ -13,8 +13,7 @@
   },
   "lostcancel": {
     "exclude_files": {
-      "validator/client/streaming/runner.go": "No need to cancel right when goroutines begin",
-      "validator/client/polling/runner.go": "No need to cancel right when goroutines begin",
+      "validator/client/runner.go": "No need to cancel right when goroutines begin",
       "external/.*": "Third party code"
     }
   },
diff --git a/shared/featureconfig/config.go b/shared/featureconfig/config.go
index 6b02edb581..d2bae92796 100644
--- a/shared/featureconfig/config.go
+++ b/shared/featureconfig/config.go
@@ -33,7 +33,6 @@ type Flags struct {
     // Testnet Flags.
     AltonaTestnet bool // AltonaTestnet defines the flag through which we can enable the node to run on the altona testnet.
     // Feature related flags.
-    EnableStreamDuties bool // Enable streaming of validator duties instead of a polling-based approach.
     WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory.
     InitSyncNoVerify bool // InitSyncNoVerify when initial syncing w/o verifying block's contents.
     DisableDynamicCommitteeSubnets bool // Disables dynamic attestation committee subnets via p2p.
@@ -252,10 +251,6 @@ func ConfigureValidator(ctx *cli.Context) {
         params.UseAltonaNetworkConfig()
         cfg.AltonaTestnet = true
     }
-    if ctx.Bool(enableStreamDuties.Name) {
-        log.Warn("Enabled validator duties streaming.")
-        cfg.EnableStreamDuties = true
-    }
     if ctx.Bool(enableProtectProposerFlag.Name) {
         log.Warn("Enabled validator proposal slashing protection.")
         cfg.ProtectProposer = true
diff --git a/shared/featureconfig/flags.go b/shared/featureconfig/flags.go
index 86fa3bbe02..dfbfa9fda3 100644
--- a/shared/featureconfig/flags.go
+++ b/shared/featureconfig/flags.go
@@ -131,10 +131,6 @@ var (
         Name:  "disable-reduce-attester-state-copy",
         Usage: "Disables the feature to reduce the amount of state copies for attester rpc",
     }
-    enableStreamDuties = &cli.BoolFlag{
-        Name:  "enable-stream-duties",
-        Usage: "Enables validator duties streaming in the validator client",
-    }
     disableGRPCConnectionLogging = &cli.BoolFlag{
         Name:  "disable-grpc-connection-logging",
         Usage: "Disables displaying logs for newly connected grpc clients",
@@ -528,7 +524,6 @@ var deprecatedFlags = []cli.Flag{
 var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
     enableProtectAttesterFlag,
     enableProtectProposerFlag,
-    enableStreamDuties,
     enableExternalSlasherProtectionFlag,
     disableDomainDataCacheFlag,
     waitForSyncedFlag,
@@ -546,7 +541,6 @@ var E2EValidatorFlags = []string{
     "--wait-for-synced",
     "--enable-protect-attester",
     "--enable-protect-proposer",
-    // "--enable-stream-duties", // Currently disabled due to e2e flakes.
 }
 
 // BeaconChainFlags contains a list of all the feature flags that apply to the beacon-chain client.
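(For readers skimming the diff: the removals above follow Prysm's usual feature-flag wiring — a cli.BoolFlag definition in flags.go, a ConfigureValidator branch that copies the parsed flag into the cached Flags struct, and call sites that read the cached config. The sketch below is an illustrative, self-contained reduction of that pattern, not Prysm's actual code; the urfave/cli/v2 import path, the lowercase configureValidator/flags names, and the main wrapper are assumptions made for the example. Only the flag name, usage string, and overall flow come from the diff itself.)

```go
// Minimal sketch (assumed names; not Prysm code) of the flag-wiring pattern
// that enable-stream-duties followed before its removal in this diff.
package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// flags mirrors the role of featureconfig.Flags: one bool per feature,
// populated once at startup and read cheaply everywhere else.
type flags struct {
	EnableStreamDuties bool
}

var cfg = &flags{}

// enableStreamDuties mirrors the BoolFlag deleted from shared/featureconfig/flags.go.
var enableStreamDuties = &cli.BoolFlag{
	Name:  "enable-stream-duties",
	Usage: "Enables validator duties streaming in the validator client",
}

// configureValidator mirrors ConfigureValidator: check the parsed CLI context
// once, warn, and cache the result in the shared config struct.
func configureValidator(ctx *cli.Context) {
	if ctx.Bool(enableStreamDuties.Name) {
		log.Println("Enabled validator duties streaming.")
		cfg.EnableStreamDuties = true
	}
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{enableStreamDuties},
		Action: func(ctx *cli.Context) error {
			configureValidator(ctx)
			if cfg.EnableStreamDuties {
				log.Println("would run the streaming duties loop")
				return nil
			}
			log.Println("falling back to polling-based duties")
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```

The point of the cached struct is visible elsewhere in this diff: hot paths such as attest.go check featureconfig.Get().ProtectAttester at runtime instead of re-reading the CLI context.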
diff --git a/validator/BUILD.bazel b/validator/BUILD.bazel index 73a602d407..75eb5ef998 100644 --- a/validator/BUILD.bazel +++ b/validator/BUILD.bazel @@ -23,7 +23,7 @@ go_library( "//shared/version:go_default_library", "//validator/accounts/v1:go_default_library", "//validator/accounts/v2:go_default_library", - "//validator/client/streaming:go_default_library", + "//validator/client:go_default_library", "//validator/flags:go_default_library", "//validator/node:go_default_library", "@com_github_joonix_log//:go_default_library", @@ -65,7 +65,7 @@ go_image( "//shared/version:go_default_library", "//validator/accounts/v1:go_default_library", "//validator/accounts/v2:go_default_library", - "//validator/client/streaming:go_default_library", + "//validator/client:go_default_library", "//validator/flags:go_default_library", "//validator/node:go_default_library", "@com_github_joonix_log//:go_default_library", diff --git a/validator/client/streaming/BUILD.bazel b/validator/client/BUILD.bazel similarity index 96% rename from validator/client/streaming/BUILD.bazel rename to validator/client/BUILD.bazel index af68a8fe68..df9f0e5967 100644 --- a/validator/client/streaming/BUILD.bazel +++ b/validator/client/BUILD.bazel @@ -6,7 +6,6 @@ go_library( srcs = [ "aggregate.go", "attest.go", - "duties.go", "log.go", "metrics.go", "propose.go", @@ -14,7 +13,7 @@ go_library( "service.go", "validator.go", ], - importpath = "github.com/prysmaticlabs/prysm/validator/client/streaming", + importpath = "github.com/prysmaticlabs/prysm/validator/client", visibility = ["//validator:__subpackages__"], deps = [ "//beacon-chain/core/helpers:go_default_library", @@ -29,7 +28,6 @@ go_library( "//shared/params:go_default_library", "//shared/roughtime:go_default_library", "//shared/slotutil:go_default_library", - "//validator/client/metrics:go_default_library", "//validator/db:go_default_library", "//validator/db/kv:go_default_library", "//validator/keymanager/v1:go_default_library", @@ -43,6 +41,8 @@ go_library( "@com_github_grpc_ecosystem_go_grpc_prometheus//:go_default_library", "@com_github_hashicorp_golang_lru//:go_default_library", "@com_github_pkg_errors//:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", "@com_github_prysmaticlabs_go_ssz//:go_default_library", @@ -63,7 +63,6 @@ go_test( srcs = [ "aggregate_test.go", "attest_test.go", - "duties_test.go", "fake_validator_test.go", "propose_test.go", "runner_test.go", diff --git a/validator/client/polling/aggregate.go b/validator/client/aggregate.go similarity index 92% rename from validator/client/polling/aggregate.go rename to validator/client/aggregate.go index d0d2fdd98c..3af0f7437d 100644 --- a/validator/client/polling/aggregate.go +++ b/validator/client/aggregate.go @@ -1,4 +1,4 @@ -package polling +package client import ( "context" @@ -11,7 +11,6 @@ import ( "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/roughtime" "github.com/prysmaticlabs/prysm/shared/slotutil" - "github.com/prysmaticlabs/prysm/validator/client/metrics" "go.opencensus.io/trace" ) @@ -30,7 +29,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu if err != nil { log.Errorf("Could not fetch validator assignment: %v", err) if v.emitAccountMetrics { - 
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -49,7 +48,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu if err != nil { log.Errorf("Could not sign slot: %v", err) if v.emitAccountMetrics { - metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -68,7 +67,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu if err != nil { log.WithField("slot", slot).Errorf("Could not submit slot signature to beacon node: %v", err) if v.emitAccountMetrics { - metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -86,7 +85,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu if err != nil { log.Errorf("Could not submit signed aggregate and proof to beacon node: %v", err) if v.emitAccountMetrics { - metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -94,12 +93,12 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu if err := v.addIndicesToLog(duty); err != nil { log.Errorf("Could not add aggregator indices to logs: %v", err) if v.emitAccountMetrics { - metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() } return } if v.emitAccountMetrics { - metrics.ValidatorAggSuccessVec.WithLabelValues(fmtKey).Inc() + ValidatorAggSuccessVec.WithLabelValues(fmtKey).Inc() } } diff --git a/validator/client/polling/aggregate_test.go b/validator/client/aggregate_test.go similarity index 99% rename from validator/client/polling/aggregate_test.go rename to validator/client/aggregate_test.go index f8c47f51de..1f15147bd0 100644 --- a/validator/client/polling/aggregate_test.go +++ b/validator/client/aggregate_test.go @@ -1,4 +1,4 @@ -package polling +package client import ( "context" diff --git a/validator/client/polling/attest.go b/validator/client/attest.go similarity index 94% rename from validator/client/polling/attest.go rename to validator/client/attest.go index ee0fee9b12..eec4fef968 100644 --- a/validator/client/polling/attest.go +++ b/validator/client/attest.go @@ -1,4 +1,4 @@ -package polling +package client import ( "bytes" @@ -18,7 +18,6 @@ import ( "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/roughtime" "github.com/prysmaticlabs/prysm/shared/slotutil" - "github.com/prysmaticlabs/prysm/validator/client/metrics" keymanager "github.com/prysmaticlabs/prysm/validator/keymanager/v1" "github.com/sirupsen/logrus" "go.opencensus.io/trace" @@ -39,7 +38,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [ if err != nil { log.WithError(err).Error("Could not fetch validator assignment") if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -58,7 +57,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [ if err != nil { log.WithError(err).Error("Could not request attestation to sign at slot") if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -74,7 +73,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [ if err != nil 
{ log.WithError(err).Error("Could not sign attestation") if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -91,7 +90,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [ if !found { log.Errorf("Validator ID %d not found in committee of %v", duty.ValidatorIndex, duty.Committee) if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -108,7 +107,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [ if err != nil { log.WithError(err).Error("Could not submit attestation to beacon node") if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -123,13 +122,13 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [ if err := v.saveAttesterIndexToData(data, duty.ValidatorIndex); err != nil { log.WithError(err).Error("Could not save validator index for logging") if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() } return } if v.emitAccountMetrics { - metrics.ValidatorAttestSuccessVec.WithLabelValues(fmtKey).Inc() + ValidatorAttestSuccessVec.WithLabelValues(fmtKey).Inc() } span.AddAttributes( @@ -155,7 +154,7 @@ func (v *validator) preSigningValidations(ctx context.Context, indexedAtt *ethpb "targetEpoch": indexedAtt.Data.Target.Epoch, }).Error("Attempted to make a slashable attestation, rejected") if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() + ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() } return fmt.Errorf("sourceEpoch: %dtargetEpoch: %d Attempted to make a slashable attestation,"+ " rejected by local slasher protection", indexedAtt.Data.Source.Epoch, indexedAtt.Data.Target.Epoch) @@ -168,7 +167,7 @@ func (v *validator) preSigningValidations(ctx context.Context, indexedAtt *ethpb "targetEpoch": indexedAtt.Data.Target.Epoch, }).Error("Attempted to make a slashable attestation, rejected by external slasher service") if v.emitAccountMetrics { - metrics.ValidatorAttestFailVecSlasher.WithLabelValues(fmtKey).Inc() + ValidatorAttestFailVecSlasher.WithLabelValues(fmtKey).Inc() } return fmt.Errorf("sourceEpoch: %dtargetEpoch: %d Attempted to make a slashable attestation,"+ " rejected by external slasher service", indexedAtt.Data.Source.Epoch, indexedAtt.Data.Target.Epoch) @@ -190,7 +189,7 @@ func (v *validator) postSignatureUpdate(ctx context.Context, indexedAtt *ethpb.I if featureconfig.Get().SlasherProtection && v.protector != nil { if !v.protector.CommitAttestation(ctx, indexedAtt) { if v.emitAccountMetrics { - metrics.ValidatorAttestFailVecSlasher.WithLabelValues(fmtKey).Inc() + ValidatorAttestFailVecSlasher.WithLabelValues(fmtKey).Inc() } return fmt.Errorf("made a slashable attestation, sourceEpoch: %dtargetEpoch: %d "+ " found by external slasher service", indexedAtt.Data.Source.Epoch, indexedAtt.Data.Target.Epoch) diff --git a/validator/client/polling/attest_test.go b/validator/client/attest_test.go similarity index 99% rename from validator/client/polling/attest_test.go rename to validator/client/attest_test.go index 5f13cd9f2b..b0610c2b03 100644 --- a/validator/client/polling/attest_test.go +++ b/validator/client/attest_test.go @@ -1,4 +1,4 @@ -package 
polling +package client import ( "context" diff --git a/validator/client/polling/fake_validator_test.go b/validator/client/fake_validator_test.go similarity index 99% rename from validator/client/polling/fake_validator_test.go rename to validator/client/fake_validator_test.go index 705dabc837..2bdb1eda36 100644 --- a/validator/client/polling/fake_validator_test.go +++ b/validator/client/fake_validator_test.go @@ -1,4 +1,4 @@ -package polling +package client import ( "context" diff --git a/validator/client/polling/log.go b/validator/client/log.go similarity index 98% rename from validator/client/polling/log.go rename to validator/client/log.go index 096f19bcb4..8d27a9a311 100644 --- a/validator/client/polling/log.go +++ b/validator/client/log.go @@ -1,4 +1,4 @@ -package polling +package client import ( "fmt" diff --git a/validator/client/polling/metrics.go b/validator/client/metrics.go similarity index 53% rename from validator/client/polling/metrics.go rename to validator/client/metrics.go index e09f857dbc..4caf0ad120 100644 --- a/validator/client/polling/metrics.go +++ b/validator/client/metrics.go @@ -1,16 +1,132 @@ -package polling +package client import ( "context" "fmt" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/validator/client/metrics" "github.com/sirupsen/logrus" ) +var ( + // ValidatorStatusesGaugeVec used to track validator statuses by public key. + ValidatorStatusesGaugeVec = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "validator", + Name: "statuses", + Help: "validator statuses: 0 UNKNOWN, 1 DEPOSITED, 2 PENDING, 3 ACTIVE, 4 EXITING, 5 SLASHING, 6 EXITED", + }, + []string{ + // Validator pubkey. + "pubkey", + }, + ) + // ValidatorAggSuccessVec used to count successful aggregations. + ValidatorAggSuccessVec = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "validator", + Name: "successful_aggregations", + }, + []string{ + // validator pubkey + "pubkey", + }, + ) + // ValidatorAggFailVec used to count failed aggregations. + ValidatorAggFailVec = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "validator", + Name: "failed_aggregations", + }, + []string{ + // validator pubkey + "pubkey", + }, + ) + // ValidatorProposeSuccessVec used to count successful proposals. + ValidatorProposeSuccessVec = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "validator", + Name: "successful_proposals", + }, + []string{ + // validator pubkey + "pubkey", + }, + ) + // ValidatorProposeFailVec used to count failed proposals. + ValidatorProposeFailVec = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "validator", + Name: "failed_proposals", + }, + []string{ + // validator pubkey + "pubkey", + }, + ) + // ValidatorProposeFailVecSlasher used to count failed proposals by slashing protection. + ValidatorProposeFailVecSlasher = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "validator_proposals_rejected_total", + Help: "Count the block proposals rejected by slashing protection.", + }, + []string{ + // validator pubkey + "pubkey", + }, + ) + // ValidatorBalancesGaugeVec used to keep track of validator balances by public key. 
+ ValidatorBalancesGaugeVec = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "validator", + Name: "balance", + Help: "current validator balance.", + }, + []string{ + // validator pubkey + "pubkey", + }, + ) + // ValidatorAttestSuccessVec used to count successful attestations. + ValidatorAttestSuccessVec = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "validator", + Name: "successful_attestations", + }, + []string{ + // validator pubkey + "pubkey", + }, + ) + // ValidatorAttestFailVec used to count failed attestations. + ValidatorAttestFailVec = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "validator", + Name: "failed_attestations", + }, + []string{ + // validator pubkey + "pubkey", + }, + ) + // ValidatorAttestFailVecSlasher used to count failed attestations by slashing protection. + ValidatorAttestFailVecSlasher = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "validator_attestations_rejected_total", + Help: "Count the attestations rejected by slashing protection.", + }, + []string{ + // validator pubkey + "pubkey", + }, + ) +) + // LogValidatorGainsAndLosses logs important metrics related to this validator client's // responsibilities throughout the beacon chain's lifecycle. It logs absolute accrued rewards // and penalties over time, percentage gain/loss, and gives the end user a better idea @@ -41,7 +157,7 @@ func (v *validator) LogValidatorGainsAndLosses(ctx context.Context, slot uint64) if v.emitAccountMetrics { for _, missingPubKey := range resp.MissingValidators { fmtKey := fmt.Sprintf("%#x", missingPubKey[:]) - metrics.ValidatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(0) + ValidatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(0) } } @@ -79,7 +195,7 @@ func (v *validator) LogValidatorGainsAndLosses(ctx context.Context, slot uint64) "percentChange": fmt.Sprintf("%.5f%%", percentNet*100), }).Info("Previous epoch voting summary") if v.emitAccountMetrics { - metrics.ValidatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(newBalance) + ValidatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(newBalance) } } diff --git a/validator/client/metrics/BUILD.bazel b/validator/client/metrics/BUILD.bazel deleted file mode 100644 index 95e19da646..0000000000 --- a/validator/client/metrics/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@prysm//tools/go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["metrics.go"], - importpath = "github.com/prysmaticlabs/prysm/validator/client/metrics", - visibility = ["//validator/client:__subpackages__"], - deps = [ - "@com_github_prometheus_client_golang//prometheus:go_default_library", - "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", - ], -) diff --git a/validator/client/metrics/metrics.go b/validator/client/metrics/metrics.go deleted file mode 100644 index f6ed5efcf4..0000000000 --- a/validator/client/metrics/metrics.go +++ /dev/null @@ -1,121 +0,0 @@ -package metrics - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -var ( - // ValidatorStatusesGaugeVec used to track validator statuses by public key. - ValidatorStatusesGaugeVec = promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: "validator", - Name: "statuses", - Help: "validator statuses: 0 UNKNOWN, 1 DEPOSITED, 2 PENDING, 3 ACTIVE, 4 EXITING, 5 SLASHING, 6 EXITED", - }, - []string{ - // Validator pubkey. - "pubkey", - }, - ) - // ValidatorAggSuccessVec used to count successful aggregations. 
- ValidatorAggSuccessVec = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "validator", - Name: "successful_aggregations", - }, - []string{ - // validator pubkey - "pubkey", - }, - ) - // ValidatorAggFailVec used to count failed aggregations. - ValidatorAggFailVec = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "validator", - Name: "failed_aggregations", - }, - []string{ - // validator pubkey - "pubkey", - }, - ) - // ValidatorProposeSuccessVec used to count successful proposals. - ValidatorProposeSuccessVec = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "validator", - Name: "successful_proposals", - }, - []string{ - // validator pubkey - "pubkey", - }, - ) - // ValidatorProposeFailVec used to count failed proposals. - ValidatorProposeFailVec = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "validator", - Name: "failed_proposals", - }, - []string{ - // validator pubkey - "pubkey", - }, - ) - // ValidatorProposeFailVecSlasher used to count failed proposals by slashing protection. - ValidatorProposeFailVecSlasher = promauto.NewCounterVec( - prometheus.CounterOpts{ - Name: "validator_proposals_rejected_total", - Help: "Count the block proposals rejected by slashing protection.", - }, - []string{ - // validator pubkey - "pubkey", - }, - ) - // ValidatorBalancesGaugeVec used to keep track of validator balances by public key. - ValidatorBalancesGaugeVec = promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: "validator", - Name: "balance", - Help: "current validator balance.", - }, - []string{ - // validator pubkey - "pubkey", - }, - ) - // ValidatorAttestSuccessVec used to count successful attestations. - ValidatorAttestSuccessVec = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "validator", - Name: "successful_attestations", - }, - []string{ - // validator pubkey - "pubkey", - }, - ) - // ValidatorAttestFailVec used to count failed attestations. - ValidatorAttestFailVec = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "validator", - Name: "failed_attestations", - }, - []string{ - // validator pubkey - "pubkey", - }, - ) - // ValidatorAttestFailVecSlasher used to count failed attestations by slashing protection. 
- ValidatorAttestFailVecSlasher = promauto.NewCounterVec( - prometheus.CounterOpts{ - Name: "validator_attestations_rejected_total", - Help: "Count the attestations rejected by slashing protection.", - }, - []string{ - // validator pubkey - "pubkey", - }, - ) -) diff --git a/validator/client/polling/BUILD.bazel b/validator/client/polling/BUILD.bazel deleted file mode 100644 index df02afee28..0000000000 --- a/validator/client/polling/BUILD.bazel +++ /dev/null @@ -1,98 +0,0 @@ -load("@prysm//tools/go:def.bzl", "go_library") -load("@io_bazel_rules_go//go:def.bzl", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "aggregate.go", - "attest.go", - "log.go", - "metrics.go", - "propose.go", - "runner.go", - "service.go", - "validator.go", - ], - importpath = "github.com/prysmaticlabs/prysm/validator/client/polling", - visibility = ["//validator:__subpackages__"], - deps = [ - "//beacon-chain/core/helpers:go_default_library", - "//beacon-chain/state/stateutil:go_default_library", - "//proto/slashing:go_default_library", - "//shared/blockutil:go_default_library", - "//shared/bls:go_default_library", - "//shared/bytesutil:go_default_library", - "//shared/featureconfig:go_default_library", - "//shared/grpcutils:go_default_library", - "//shared/hashutil:go_default_library", - "//shared/params:go_default_library", - "//shared/roughtime:go_default_library", - "//shared/slotutil:go_default_library", - "//validator/client/metrics:go_default_library", - "//validator/db:go_default_library", - "//validator/db/kv:go_default_library", - "//validator/keymanager/v1:go_default_library", - "//validator/slashing-protection:go_default_library", - "@com_github_dgraph_io_ristretto//:go_default_library", - "@com_github_gogo_protobuf//proto:go_default_library", - "@com_github_gogo_protobuf//types:go_default_library", - "@com_github_grpc_ecosystem_go_grpc_middleware//:go_default_library", - "@com_github_grpc_ecosystem_go_grpc_middleware//retry:go_default_library", - "@com_github_grpc_ecosystem_go_grpc_middleware//tracing/opentracing:go_default_library", - "@com_github_grpc_ecosystem_go_grpc_prometheus//:go_default_library", - "@com_github_hashicorp_golang_lru//:go_default_library", - "@com_github_pkg_errors//:go_default_library", - "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", - "@com_github_prysmaticlabs_go_bitfield//:go_default_library", - "@com_github_prysmaticlabs_go_ssz//:go_default_library", - "@com_github_sirupsen_logrus//:go_default_library", - "@io_opencensus_go//plugin/ocgrpc:go_default_library", - "@io_opencensus_go//trace:go_default_library", - "@org_golang_google_grpc//:go_default_library", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//credentials:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "aggregate_test.go", - "attest_test.go", - "fake_validator_test.go", - "propose_test.go", - "runner_test.go", - "service_test.go", - "validator_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//beacon-chain/core/helpers:go_default_library", - "//proto/slashing:go_default_library", - "//shared:go_default_library", - "//shared/bls:go_default_library", - "//shared/bytesutil:go_default_library", - "//shared/featureconfig:go_default_library", - "//shared/keystore:go_default_library", - "//shared/mock:go_default_library", - "//shared/params:go_default_library", 
- "//shared/roughtime:go_default_library", - "//shared/slotutil:go_default_library", - "//shared/testutil:go_default_library", - "//validator/accounts/v1:go_default_library", - "//validator/db/testing:go_default_library", - "//validator/keymanager/v1:go_default_library", - "//validator/testing:go_default_library", - "@com_github_gogo_protobuf//types:go_default_library", - "@com_github_golang_mock//gomock:go_default_library", - "@com_github_hashicorp_golang_lru//:go_default_library", - "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", - "@com_github_prysmaticlabs_go_bitfield//:go_default_library", - "@com_github_sirupsen_logrus//:go_default_library", - "@com_github_sirupsen_logrus//hooks/test:go_default_library", - "@in_gopkg_d4l3k_messagediff_v1//:go_default_library", - ], -) diff --git a/validator/client/polling/propose.go b/validator/client/propose.go similarity index 90% rename from validator/client/polling/propose.go rename to validator/client/propose.go index 88cb204df0..a1227f9af1 100644 --- a/validator/client/polling/propose.go +++ b/validator/client/propose.go @@ -1,4 +1,4 @@ -package polling +package client // Validator client proposer functions. import ( @@ -15,7 +15,6 @@ import ( "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/validator/client/metrics" km "github.com/prysmaticlabs/prysm/validator/keymanager/v1" "github.com/sirupsen/logrus" "go.opencensus.io/trace" @@ -44,7 +43,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by if err != nil { log.WithError(err).Error("Failed to sign randao reveal") if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() + ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -58,7 +57,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by if err != nil { log.WithField("blockSlot", slot).WithError(err).Error("Failed to request block from beacon node") if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() + ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -69,7 +68,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by if err != nil { log.WithError(err).Error("Failed to get proposal history") if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() + ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -78,7 +77,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by if slotBits.BitAt(slot % params.BeaconConfig().SlotsPerEpoch) { log.WithField("epoch", epoch).Error("Tried to sign a double proposal, rejected") if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() + ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -91,7 +90,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by if !v.protector.VerifyBlock(ctx, bh) { log.WithField("epoch", epoch).Error("Tried to sign a double proposal, rejected by external slasher") if v.emitAccountMetrics { - metrics.ValidatorProposeFailVecSlasher.WithLabelValues(fmtKey).Inc() + ValidatorProposeFailVecSlasher.WithLabelValues(fmtKey).Inc() } return } @@ -102,7 +101,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by if err != nil { log.WithError(err).Error("Failed to 
sign block") if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() + ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -116,7 +115,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by if err != nil { log.WithError(err).Error("Failed to propose block") if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() + ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() } return } @@ -129,7 +128,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by if !v.protector.CommitBlock(ctx, sbh) { log.WithField("epoch", epoch).Fatal("Tried to sign a double proposal, rejected by external slasher") if v.emitAccountMetrics { - metrics.ValidatorProposeFailVecSlasher.WithLabelValues(fmtKey).Inc() + ValidatorProposeFailVecSlasher.WithLabelValues(fmtKey).Inc() } return } @@ -139,14 +138,14 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by if err := v.db.SaveProposalHistoryForEpoch(ctx, pubKey[:], epoch, slotBits); err != nil { log.WithError(err).Error("Failed to save updated proposal history") if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() + ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() } return } } if v.emitAccountMetrics { - metrics.ValidatorProposeSuccessVec.WithLabelValues(fmtKey).Inc() + ValidatorProposeSuccessVec.WithLabelValues(fmtKey).Inc() } span.AddAttributes( diff --git a/validator/client/polling/propose_test.go b/validator/client/propose_test.go similarity index 99% rename from validator/client/polling/propose_test.go rename to validator/client/propose_test.go index 206974e89e..685d731185 100644 --- a/validator/client/polling/propose_test.go +++ b/validator/client/propose_test.go @@ -1,4 +1,4 @@ -package polling +package client import ( "context" diff --git a/validator/client/polling/runner.go b/validator/client/runner.go similarity index 99% rename from validator/client/polling/runner.go rename to validator/client/runner.go index aeb8a359b4..1aaa282500 100644 --- a/validator/client/polling/runner.go +++ b/validator/client/runner.go @@ -1,4 +1,4 @@ -package polling +package client import ( "context" diff --git a/validator/client/polling/runner_test.go b/validator/client/runner_test.go similarity index 99% rename from validator/client/polling/runner_test.go rename to validator/client/runner_test.go index 4f3b3b5005..9cb8816aac 100644 --- a/validator/client/polling/runner_test.go +++ b/validator/client/runner_test.go @@ -1,4 +1,4 @@ -package polling +package client import ( "context" diff --git a/validator/client/polling/service.go b/validator/client/service.go similarity index 99% rename from validator/client/polling/service.go rename to validator/client/service.go index 739441a910..9f9c27e75f 100644 --- a/validator/client/polling/service.go +++ b/validator/client/service.go @@ -1,4 +1,4 @@ -package polling +package client import ( "context" diff --git a/validator/client/polling/service_test.go b/validator/client/service_test.go similarity index 99% rename from validator/client/polling/service_test.go rename to validator/client/service_test.go index 0d3cdaadbe..16e3be6e4b 100644 --- a/validator/client/polling/service_test.go +++ b/validator/client/service_test.go @@ -1,4 +1,4 @@ -package polling +package client import ( "context" diff --git a/validator/client/streaming/aggregate.go b/validator/client/streaming/aggregate.go deleted file mode 100644 index 
42f71f19c2..0000000000 --- a/validator/client/streaming/aggregate.go +++ /dev/null @@ -1,166 +0,0 @@ -package streaming - -import ( - "context" - "fmt" - "time" - - "github.com/pkg/errors" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "go.opencensus.io/trace" - - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/shared/roughtime" - "github.com/prysmaticlabs/prysm/shared/slotutil" - "github.com/prysmaticlabs/prysm/validator/client/metrics" -) - -// SubmitAggregateAndProof submits the validator's signed slot signature to the beacon node -// via gRPC. Beacon node will verify the slot signature and determine if the validator is also -// an aggregator. If yes, then beacon node will broadcast aggregated signature and -// proof on the validator's behalf. -func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pubKey [48]byte) { - ctx, span := trace.StartSpan(ctx, "validator.SubmitAggregateAndProof") - defer span.End() - - span.AddAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey))) - fmtKey := fmt.Sprintf("%#x", pubKey[:]) - - epoch := slot / params.BeaconConfig().SlotsPerEpoch - duty, err := v.duty(pubKey, epoch) - if err != nil { - log.Errorf("Could not fetch validator assignment: %v", err) - if v.emitAccountMetrics { - metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - // Avoid sending beacon node duplicated aggregation requests. - k := validatorSubscribeKey(slot, duty.CommitteeIndex) - v.aggregatedSlotCommitteeIDCacheLock.Lock() - defer v.aggregatedSlotCommitteeIDCacheLock.Unlock() - if v.aggregatedSlotCommitteeIDCache.Contains(k) { - return - } - v.aggregatedSlotCommitteeIDCache.Add(k, true) - - slotSig, err := v.signSlot(ctx, pubKey, slot) - if err != nil { - log.Errorf("Could not sign slot: %v", err) - if v.emitAccountMetrics { - metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - // As specified in spec, an aggregator should wait until two thirds of the way through slot - // to broadcast the best aggregate to the global aggregate channel. 
- // https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#broadcast-aggregate - v.waitToSlotTwoThirds(ctx, slot) - - res, err := v.validatorClient.SubmitAggregateSelectionProof(ctx, ðpb.AggregateSelectionRequest{ - Slot: slot, - CommitteeIndex: duty.CommitteeIndex, - PublicKey: pubKey[:], - SlotSignature: slotSig, - }) - if err != nil { - log.WithField("slot", slot).Errorf("Could not submit slot signature to beacon node: %v", err) - if v.emitAccountMetrics { - metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - sig, err := v.aggregateAndProofSig(ctx, pubKey, res.AggregateAndProof) - if err != nil { - log.Errorf("Could not sign aggregate and proof: %v", err) - } - _, err = v.validatorClient.SubmitSignedAggregateSelectionProof(ctx, ðpb.SignedAggregateSubmitRequest{ - SignedAggregateAndProof: ðpb.SignedAggregateAttestationAndProof{ - Message: res.AggregateAndProof, - Signature: sig, - }, - }) - if err != nil { - log.Errorf("Could not submit signed aggregate and proof to beacon node: %v", err) - if v.emitAccountMetrics { - metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - if err := v.addIndicesToLog(duty); err != nil { - log.Errorf("Could not add aggregator indices to logs: %v", err) - if v.emitAccountMetrics { - metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - if v.emitAccountMetrics { - metrics.ValidatorAggSuccessVec.WithLabelValues(fmtKey).Inc() - } - -} - -// This implements selection logic outlined in: -// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#aggregation-selection -func (v *validator) signSlot(ctx context.Context, pubKey [48]byte, slot uint64) ([]byte, error) { - domain, err := v.domainData(ctx, helpers.SlotToEpoch(slot), params.BeaconConfig().DomainSelectionProof[:]) - if err != nil { - return nil, err - } - - sig, err := v.signObject(pubKey, slot, domain.SignatureDomain) - if err != nil { - return nil, errors.Wrap(err, "Failed to sign slot") - } - - return sig.Marshal(), nil -} - -// waitToSlotTwoThirds waits until two third through the current slot period -// such that any attestations from this slot have time to reach the beacon node -// before creating the aggregated attestation. -func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot uint64) { - _, span := trace.StartSpan(ctx, "validator.waitToSlotTwoThirds") - defer span.End() - - oneThird := slotutil.DivideSlotBy(3 /* one third of slot duration */) - twoThird := oneThird + oneThird - delay := twoThird - - startTime := slotutil.SlotStartTime(v.genesisTime, slot) - finalTime := startTime.Add(delay) - time.Sleep(roughtime.Until(finalTime)) -} - -// This returns the signature of validator signing over aggregate and -// proof object. 
-func (v *validator) aggregateAndProofSig(ctx context.Context, pubKey [48]byte, agg *ethpb.AggregateAttestationAndProof) ([]byte, error) { - d, err := v.domainData(ctx, helpers.SlotToEpoch(agg.Aggregate.Data.Slot), params.BeaconConfig().DomainAggregateAndProof[:]) - if err != nil { - return nil, err - } - sig, err := v.signObject(pubKey, agg, d.SignatureDomain) - if err != nil { - return nil, err - } - - return sig.Marshal(), nil -} - -func (v *validator) addIndicesToLog(duty *ethpb.DutiesResponse_Duty) error { - v.attLogsLock.Lock() - defer v.attLogsLock.Unlock() - - for _, log := range v.attLogs { - if duty.CommitteeIndex == log.data.CommitteeIndex { - log.aggregatorIndices = append(log.aggregatorIndices, duty.ValidatorIndex) - } - } - - return nil -} diff --git a/validator/client/streaming/aggregate_test.go b/validator/client/streaming/aggregate_test.go deleted file mode 100644 index 6e827a31af..0000000000 --- a/validator/client/streaming/aggregate_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package streaming - -import ( - "context" - "testing" - - "github.com/golang/mock/gomock" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - logTest "github.com/sirupsen/logrus/hooks/test" - - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/shared/roughtime" - "github.com/prysmaticlabs/prysm/shared/slotutil" - "github.com/prysmaticlabs/prysm/shared/testutil" -) - -func TestSubmitAggregateAndProof_GetDutiesRequestFailure(t *testing.T) { - hook := logTest.NewGlobal() - validator, _, finish := setup(t) - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{} - defer finish() - - validator.SubmitAggregateAndProof(context.Background(), 0, validatorPubKey) - - testutil.AssertLogsContain(t, hook, "Could not fetch validator assignment") -} - -func TestSubmitAggregateAndProof_Ok(t *testing.T) { - validator, m, finish := setup(t) - defer finish() - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - PublicKey: validatorKey.PublicKey.Marshal(), - }, - } - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().SubmitAggregateSelectionProof( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AggregateSelectionRequest{}), - ).Return(ðpb.AggregateSelectionResponse{ - AggregateAndProof: ðpb.AggregateAttestationAndProof{ - AggregatorIndex: 0, - Aggregate: ðpb.Attestation{Data: ðpb.AttestationData{}}, - SelectionProof: nil, - }, - }, nil) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().SubmitSignedAggregateSelectionProof( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.SignedAggregateSubmitRequest{}), - ).Return(ðpb.SignedAggregateSubmitResponse{}, nil) - - validator.SubmitAggregateAndProof(context.Background(), 0, validatorPubKey) -} - -func TestWaitForSlotTwoThird_WaitCorrectly(t *testing.T) { - validator, _, finish := setup(t) - defer finish() - currentTime := roughtime.Now() - numOfSlots := uint64(4) - validator.genesisTime = uint64(currentTime.Unix()) - (numOfSlots * params.BeaconConfig().SecondsPerSlot) - oneThird := slotutil.DivideSlotBy(3 /* one third of slot duration */) - timeToSleep := oneThird + oneThird - - 
twoThirdTime := currentTime.Add(timeToSleep) - validator.waitToSlotTwoThirds(context.Background(), numOfSlots) - currentTime = roughtime.Now() - if currentTime.Unix() != twoThirdTime.Unix() { - t.Errorf("Wanted %v time for slot two third but got %v", twoThirdTime, currentTime) - } -} - -func TestAggregateAndProofSignature_CanSignValidSignature(t *testing.T) { - validator, m, finish := setup(t) - defer finish() - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - ðpb.DomainRequest{Epoch: 0, Domain: params.BeaconConfig().DomainAggregateAndProof[:]}, - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - agg := ðpb.AggregateAttestationAndProof{ - AggregatorIndex: 0, - Aggregate: ðpb.Attestation{Data: ðpb.AttestationData{}}, - SelectionProof: nil, - } - sig, err := validator.aggregateAndProofSig(context.Background(), validatorPubKey, agg) - if err != nil { - t.Fatal(err) - } - if _, err := bls.SignatureFromBytes(sig); err != nil { - t.Fatal(err) - } -} diff --git a/validator/client/streaming/attest.go b/validator/client/streaming/attest.go deleted file mode 100644 index a7d3ae77ae..0000000000 --- a/validator/client/streaming/attest.go +++ /dev/null @@ -1,322 +0,0 @@ -package streaming - -import ( - "context" - "fmt" - "time" - - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/featureconfig" - "github.com/prysmaticlabs/prysm/shared/hashutil" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/shared/roughtime" - "github.com/prysmaticlabs/prysm/shared/slotutil" - "github.com/prysmaticlabs/prysm/validator/client/metrics" - keymanager "github.com/prysmaticlabs/prysm/validator/keymanager/v1" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -// SubmitAttestation completes the validator client's attester responsibility at a given slot. -// It fetches the latest beacon block head along with the latest canonical beacon state -// information in order to sign the block and include information about the validator's -// participation in voting on the block. 
-func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [48]byte) { - ctx, span := trace.StartSpan(ctx, "validator.SubmitAttestation") - defer span.End() - span.AddAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey))) - - fmtKey := fmt.Sprintf("%#x", pubKey[:]) - log := log.WithField("pubKey", fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:]))).WithField("slot", slot) - epoch := slot / params.BeaconConfig().SlotsPerEpoch - duty, err := v.duty(pubKey, epoch) - if err != nil { - log.WithError(err).Error("Could not fetch validator assignment") - if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - if len(duty.Committee) == 0 { - log.Debug("Empty committee for validator duty, not attesting") - return - } - - v.waitToSlotOneThird(ctx, slot) - - req := ðpb.AttestationDataRequest{ - Slot: slot, - CommitteeIndex: duty.CommitteeIndex, - } - data, err := v.validatorClient.GetAttestationData(ctx, req) - if err != nil { - log.WithError(err).Error("Could not request attestation to sign at slot") - if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - indexedAtt := ðpb.IndexedAttestation{ - AttestingIndices: []uint64{duty.ValidatorIndex}, - Data: data, - } - if err := v.preSigningValidations(ctx, indexedAtt, pubKey); err != nil { - return - } - sig, err := v.signAtt(ctx, pubKey, data) - if err != nil { - log.WithError(err).Error("Could not sign attestation") - if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - var indexInCommittee uint64 - var found bool - for i, vID := range duty.Committee { - if vID == duty.ValidatorIndex { - indexInCommittee = uint64(i) - found = true - break - } - } - if !found { - log.Errorf("Validator ID %d not found in committee of %v", duty.ValidatorIndex, duty.Committee) - if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - aggregationBitfield := bitfield.NewBitlist(uint64(len(duty.Committee))) - aggregationBitfield.SetBitAt(indexInCommittee, true) - attestation := ðpb.Attestation{ - Data: data, - AggregationBits: aggregationBitfield, - Signature: sig, - } - - attResp, err := v.validatorClient.ProposeAttestation(ctx, attestation) - if err != nil { - log.WithError(err).Error("Could not submit attestation to beacon node") - if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - indexedAtt.Signature = sig - if err := v.postSignatureUpdate(ctx, indexedAtt, pubKey); err != nil { - log.WithFields(logrus.Fields{ - "sourceEpoch": indexedAtt.Data.Source.Epoch, - "targetEpoch": indexedAtt.Data.Target.Epoch, - }).Fatal("made a slashable attestation, found by external slasher service") - return - } - - if err := v.saveAttesterIndexToData(data, duty.ValidatorIndex); err != nil { - log.WithError(err).Error("Could not save validator index for logging") - if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - if v.emitAccountMetrics { - metrics.ValidatorAttestSuccessVec.WithLabelValues(fmtKey).Inc() - } - - span.AddAttributes( - trace.Int64Attribute("slot", int64(slot)), - trace.StringAttribute("attestationHash", fmt.Sprintf("%#x", attResp.AttestationDataRoot)), - trace.Int64Attribute("committeeIndex", int64(data.CommitteeIndex)), - trace.StringAttribute("blockRoot", fmt.Sprintf("%#x", data.BeaconBlockRoot)), - 
trace.Int64Attribute("justifiedEpoch", int64(data.Source.Epoch)), - trace.Int64Attribute("targetEpoch", int64(data.Target.Epoch)), - trace.StringAttribute("bitfield", fmt.Sprintf("%#x", aggregationBitfield)), - ) -} - -// Given validator's public key, this returns the signature of an attestation data. -func (v *validator) signAtt(ctx context.Context, pubKey [48]byte, data *ethpb.AttestationData) ([]byte, error) { - domain, err := v.domainData(ctx, data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester[:]) - if err != nil { - return nil, err - } - - root, err := helpers.ComputeSigningRoot(data, domain.SignatureDomain) - if err != nil { - return nil, err - } - - var sig bls.Signature - if protectingKeymanager, supported := v.keyManager.(keymanager.ProtectingKeyManager); supported { - sig, err = protectingKeymanager.SignAttestation(pubKey, bytesutil.ToBytes32(domain.SignatureDomain), data) - } else { - sig, err = v.keyManager.Sign(pubKey, root) - } - if err != nil { - return nil, err - } - - return sig.Marshal(), nil -} - -// For logging, this saves the last submitted attester index to its attestation data. The purpose of this -// is to enhance attesting logs to be readable when multiple validator keys ran in a single client. -func (v *validator) saveAttesterIndexToData(data *ethpb.AttestationData, index uint64) error { - v.attLogsLock.Lock() - defer v.attLogsLock.Unlock() - - h, err := hashutil.HashProto(data) - if err != nil { - return err - } - - if v.attLogs[h] == nil { - v.attLogs[h] = &attSubmitted{data, []uint64{}, []uint64{}} - } - v.attLogs[h] = &attSubmitted{data, append(v.attLogs[h].attesterIndices, index), []uint64{}} - - return nil -} - -// isNewAttSlashable uses the attestation history to determine if an attestation of sourceEpoch -// and targetEpoch would be slashable. It can detect double, surrounding, and surrounded votes. -func isNewAttSlashable(history *slashpb.AttestationHistory, sourceEpoch uint64, targetEpoch uint64) bool { - farFuture := params.BeaconConfig().FarFutureEpoch - wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - - // Previously pruned, we should return false. - if targetEpoch+wsPeriod <= history.LatestEpochWritten { - return false - } - - // Check if there has already been a vote for this target epoch. - if safeTargetToSource(history, targetEpoch) != farFuture { - return true - } - - // Check if the new attestation would be surrounding another attestation. - for i := sourceEpoch; i <= targetEpoch; i++ { - // Unattested for epochs are marked as FAR_FUTURE_EPOCH. - if safeTargetToSource(history, i) == farFuture { - continue - } - if history.TargetToSource[i%wsPeriod] > sourceEpoch { - return true - } - } - - // Check if the new attestation is being surrounded. - for i := targetEpoch; i <= history.LatestEpochWritten; i++ { - if safeTargetToSource(history, i) < sourceEpoch { - return true - } - } - - return false -} - -// markAttestationForTargetEpoch returns the modified attestation history with the passed-in epochs marked -// as attested for. This is done to prevent the validator client from signing any slashable attestations. -func markAttestationForTargetEpoch(history *slashpb.AttestationHistory, sourceEpoch uint64, targetEpoch uint64) *slashpb.AttestationHistory { - wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - - if targetEpoch > history.LatestEpochWritten { - // If the target epoch to mark is ahead of latest written epoch, override the old targets and mark the requested epoch. 
- // Limit the overwriting to one weak subjectivity period as further is not needed. - maxToWrite := history.LatestEpochWritten + wsPeriod - for i := history.LatestEpochWritten + 1; i < targetEpoch && i <= maxToWrite; i++ { - history.TargetToSource[i%wsPeriod] = params.BeaconConfig().FarFutureEpoch - } - history.LatestEpochWritten = targetEpoch - } - history.TargetToSource[targetEpoch%wsPeriod] = sourceEpoch - return history -} - -// safeTargetToSource makes sure the epoch accessed is within bounds, and if it's not it at -// returns the "default" FAR_FUTURE_EPOCH value. -func safeTargetToSource(history *slashpb.AttestationHistory, targetEpoch uint64) uint64 { - wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - if targetEpoch > history.LatestEpochWritten || targetEpoch+wsPeriod < history.LatestEpochWritten { - return params.BeaconConfig().FarFutureEpoch - } - return history.TargetToSource[targetEpoch%wsPeriod] -} - -// waitToSlotOneThird waits until one third through the current slot period -// such that head block for beacon node can get updated. -func (v *validator) waitToSlotOneThird(ctx context.Context, slot uint64) { - _, span := trace.StartSpan(ctx, "validator.waitToSlotOneThird") - defer span.End() - - delay := slotutil.DivideSlotBy(3 /* a third of the slot duration */) - startTime := slotutil.SlotStartTime(v.genesisTime, slot) - finalTime := startTime.Add(delay) - time.Sleep(roughtime.Until(finalTime)) -} - -func (v *validator) preSigningValidations(ctx context.Context, indexedAtt *ethpb.IndexedAttestation, pubKey [48]byte) error { - fmtKey := fmt.Sprintf("%#x", pubKey[:]) - log := log.WithField("pubKey", fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:]))).WithField("slot", indexedAtt.Data.Slot) - if featureconfig.Get().ProtectAttester { - v.attesterHistoryByPubKeyLock.RLock() - attesterHistory := v.attesterHistoryByPubKey[pubKey] - v.attesterHistoryByPubKeyLock.RUnlock() - if isNewAttSlashable(attesterHistory, indexedAtt.Data.Source.Epoch, indexedAtt.Data.Target.Epoch) { - log.WithFields(logrus.Fields{ - "sourceEpoch": indexedAtt.Data.Source.Epoch, - "targetEpoch": indexedAtt.Data.Target.Epoch, - }).Error("Attempted to make a slashable attestation, rejected") - if v.emitAccountMetrics { - metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc() - } - return fmt.Errorf("sourceEpoch: %dtargetEpoch: %d Attempted to make a slashable attestation,"+ - " rejected by local slasher protection", indexedAtt.Data.Source.Epoch, indexedAtt.Data.Target.Epoch) - } - } - if featureconfig.Get().SlasherProtection && v.protector != nil { - if !v.protector.VerifyAttestation(ctx, indexedAtt) { - log.WithFields(logrus.Fields{ - "sourceEpoch": indexedAtt.Data.Source.Epoch, - "targetEpoch": indexedAtt.Data.Target.Epoch, - }).Error("Attempted to make a slashable attestation, rejected by external slasher service") - if v.emitAccountMetrics { - metrics.ValidatorAttestFailVecSlasher.WithLabelValues(fmtKey).Inc() - } - return fmt.Errorf("sourceEpoch: %dtargetEpoch: %d Attempted to make a slashable attestation,"+ - " rejected by external slasher service", indexedAtt.Data.Source.Epoch, indexedAtt.Data.Target.Epoch) - } - } - return nil -} - -func (v *validator) postSignatureUpdate(ctx context.Context, indexedAtt *ethpb.IndexedAttestation, pubKey [48]byte) error { - fmtKey := fmt.Sprintf("%#x", pubKey[:]) - if featureconfig.Get().ProtectAttester { - v.attesterHistoryByPubKeyLock.Lock() - attesterHistory := v.attesterHistoryByPubKey[pubKey] - attesterHistory = markAttestationForTargetEpoch(attesterHistory, 
indexedAtt.Data.Source.Epoch, indexedAtt.Data.Target.Epoch) - v.attesterHistoryByPubKey[pubKey] = attesterHistory - v.attesterHistoryByPubKeyLock.Unlock() - } - - if featureconfig.Get().SlasherProtection && v.protector != nil { - if !v.protector.CommitAttestation(ctx, indexedAtt) { - if v.emitAccountMetrics { - metrics.ValidatorAttestFailVecSlasher.WithLabelValues(fmtKey).Inc() - } - return fmt.Errorf("made a slashable attestation, sourceEpoch: %dtargetEpoch: %d "+ - " found by external slasher service", indexedAtt.Data.Source.Epoch, indexedAtt.Data.Target.Epoch) - } - } - return nil -} diff --git a/validator/client/streaming/attest_test.go b/validator/client/streaming/attest_test.go deleted file mode 100644 index 53c1c68087..0000000000 --- a/validator/client/streaming/attest_test.go +++ /dev/null @@ -1,641 +0,0 @@ -package streaming - -import ( - "context" - "errors" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/featureconfig" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/shared/roughtime" - "github.com/prysmaticlabs/prysm/shared/testutil" - mockSlasher "github.com/prysmaticlabs/prysm/validator/testing" - logTest "github.com/sirupsen/logrus/hooks/test" - "gopkg.in/d4l3k/messagediff.v1" -) - -func TestRequestAttestation_ValidatorDutiesRequestFailure(t *testing.T) { - hook := logTest.NewGlobal() - validator, _, finish := setup(t) - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{} - defer finish() - - validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Could not fetch validator assignment") -} - -func TestAttestToBlockHead_SubmitAttestation_EmptyCommittee(t *testing.T) { - hook := logTest.NewGlobal() - - validator, _, finish := setup(t) - defer finish() - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - PublicKey: validatorKey.PublicKey.Marshal(), - CommitteeIndex: 0, - Committee: make([]uint64, 0), - ValidatorIndex: 0, - }, - } - validator.SubmitAttestation(context.Background(), 0, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Empty committee") -} - -func TestAttestToBlockHead_SubmitAttestation_RequestFailure(t *testing.T) { - hook := logTest.NewGlobal() - - validator, m, finish := setup(t) - defer finish() - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - PublicKey: validatorKey.PublicKey.Marshal(), - CommitteeIndex: 5, - Committee: make([]uint64, 111), - ValidatorIndex: 0, - }, - } - m.validatorClient.EXPECT().GetAttestationData( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AttestationDataRequest{}), - ).Return(ðpb.AttestationData{ - BeaconBlockRoot: []byte{}, - Target: ðpb.Checkpoint{}, - Source: ðpb.Checkpoint{}, - }, nil) - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch2 - ).Return(ðpb.DomainResponse{}, nil /*err*/) - m.validatorClient.EXPECT().ProposeAttestation( - gomock.Any(), // ctx - 
gomock.AssignableToTypeOf(ðpb.Attestation{}), - ).Return(nil, errors.New("something went wrong")) - - validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Could not submit attestation to beacon node") -} - -func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) { - config := &featureconfig.Flags{ - ProtectAttester: true, - } - reset := featureconfig.InitWithReset(config) - defer reset() - validator, m, finish := setup(t) - defer finish() - hook := logTest.NewGlobal() - validatorIndex := uint64(7) - committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10} - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - PublicKey: validatorKey.PublicKey.Marshal(), - CommitteeIndex: 5, - Committee: committee, - ValidatorIndex: validatorIndex, - }, - } - - beaconBlockRoot := bytesutil.ToBytes32([]byte("A")) - targetRoot := bytesutil.ToBytes32([]byte("B")) - sourceRoot := bytesutil.ToBytes32([]byte("C")) - m.validatorClient.EXPECT().GetAttestationData( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AttestationDataRequest{}), - ).Return(ðpb.AttestationData{ - BeaconBlockRoot: beaconBlockRoot[:], - Target: ðpb.Checkpoint{Root: targetRoot[:]}, - Source: ðpb.Checkpoint{Root: sourceRoot[:], Epoch: 3}, - }, nil) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{SignatureDomain: []byte{}}, nil /*err*/) - - var generatedAttestation *ethpb.Attestation - m.validatorClient.EXPECT().ProposeAttestation( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.Attestation{}), - ).Do(func(_ context.Context, att *ethpb.Attestation) { - generatedAttestation = att - }).Return(ðpb.AttestResponse{}, nil /* error */) - - validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - - aggregationBitfield := bitfield.NewBitlist(uint64(len(committee))) - aggregationBitfield.SetBitAt(4, true) - expectedAttestation := ðpb.Attestation{ - Data: ðpb.AttestationData{ - BeaconBlockRoot: beaconBlockRoot[:], - Target: ðpb.Checkpoint{Root: targetRoot[:]}, - Source: ðpb.Checkpoint{Root: sourceRoot[:], Epoch: 3}, - }, - AggregationBits: aggregationBitfield, - } - - root, err := helpers.ComputeSigningRoot(expectedAttestation.Data, []byte{}) - if err != nil { - t.Fatal(err) - } - - sig, err := validator.keyManager.Sign(validatorPubKey, root) - if err != nil { - t.Fatal(err) - } - expectedAttestation.Signature = sig.Marshal() - if !reflect.DeepEqual(generatedAttestation, expectedAttestation) { - t.Errorf("Incorrectly attested head, wanted %v, received %v", expectedAttestation, generatedAttestation) - diff, _ := messagediff.PrettyDiff(expectedAttestation, generatedAttestation) - t.Log(diff) - } - testutil.AssertLogsDoNotContain(t, hook, "Could not") -} - -func TestAttestToBlockHead_BlocksDoubleAtt(t *testing.T) { - config := &featureconfig.Flags{ - ProtectAttester: true, - } - reset := featureconfig.InitWithReset(config) - defer reset() - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - validatorIndex := uint64(7) - committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10} - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - PublicKey: validatorKey.PublicKey.Marshal(), - CommitteeIndex: 5, - Committee: committee, - ValidatorIndex: validatorIndex, - }, - } - beaconBlockRoot := 
bytesutil.ToBytes32([]byte("A")) - targetRoot := bytesutil.ToBytes32([]byte("B")) - sourceRoot := bytesutil.ToBytes32([]byte("C")) - - m.validatorClient.EXPECT().GetAttestationData( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AttestationDataRequest{}), - ).Times(2).Return(ðpb.AttestationData{ - BeaconBlockRoot: beaconBlockRoot[:], - Target: ðpb.Checkpoint{Root: targetRoot[:], Epoch: 4}, - Source: ðpb.Checkpoint{Root: sourceRoot[:], Epoch: 3}, - }, nil) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeAttestation( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.Attestation{}), - ).Return(ðpb.AttestResponse{}, nil /* error */) - - validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Attempted to make a slashable attestation, rejected") -} - -func TestPostSignatureUpdate(t *testing.T) { - config := &featureconfig.Flags{ - ProtectAttester: false, - SlasherProtection: true, - } - reset := featureconfig.InitWithReset(config) - defer reset() - validator, _, finish := setup(t) - defer finish() - att := ðpb.IndexedAttestation{ - AttestingIndices: []uint64{1, 2}, - Data: ðpb.AttestationData{ - Slot: 5, - CommitteeIndex: 2, - BeaconBlockRoot: []byte("great block"), - Source: ðpb.Checkpoint{ - Epoch: 4, - Root: []byte("good source"), - }, - Target: ðpb.Checkpoint{ - Epoch: 10, - Root: []byte("good target"), - }, - }, - } - mockProtector := &mockSlasher.MockProtector{AllowAttestation: false} - validator.protector = mockProtector - err := validator.postSignatureUpdate(context.Background(), att, validatorPubKey) - if err == nil || !strings.Contains(err.Error(), "made a slashable attestation,") { - t.Fatalf("Expected error to be thrown when post signature update is detected as slashable. got: %v", err) - } - mockProtector.AllowAttestation = true - err = validator.postSignatureUpdate(context.Background(), att, validatorPubKey) - if err != nil { - t.Fatalf("Expected allowed attestation not to throw error. got: %v", err) - } -} - -func TestPreSignatureValidation(t *testing.T) { - config := &featureconfig.Flags{ - ProtectAttester: false, - SlasherProtection: true, - } - reset := featureconfig.InitWithReset(config) - defer reset() - validator, _, finish := setup(t) - defer finish() - hook := logTest.NewGlobal() - att := ðpb.IndexedAttestation{ - AttestingIndices: []uint64{1, 2}, - Data: ðpb.AttestationData{ - Slot: 5, - CommitteeIndex: 2, - BeaconBlockRoot: []byte("great block"), - Source: ðpb.Checkpoint{ - Epoch: 4, - Root: []byte("good source"), - }, - Target: ðpb.Checkpoint{ - Epoch: 10, - Root: []byte("good target"), - }, - }, - } - mockProtector := &mockSlasher.MockProtector{AllowAttestation: false} - validator.protector = mockProtector - err := validator.preSigningValidations(context.Background(), att, validatorPubKey) - if err == nil || !strings.Contains(err.Error(), "rejected by external slasher service") { - t.Fatal(err) - } - testutil.AssertLogsContain(t, hook, "Attempted to make a slashable attestation, rejected by external slasher service") - mockProtector.AllowAttestation = true - err = validator.preSigningValidations(context.Background(), att, validatorPubKey) - if err != nil { - t.Fatalf("Expected allowed attestation not to throw error. 
got: %v", err) - } -} - -func TestAttestToBlockHead_BlocksSurroundAtt(t *testing.T) { - config := &featureconfig.Flags{ - ProtectAttester: true, - } - reset := featureconfig.InitWithReset(config) - defer reset() - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - validatorIndex := uint64(7) - committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10} - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - PublicKey: validatorKey.PublicKey.Marshal(), - CommitteeIndex: 5, - Committee: committee, - ValidatorIndex: validatorIndex, - }, - } - beaconBlockRoot := bytesutil.ToBytes32([]byte("A")) - targetRoot := bytesutil.ToBytes32([]byte("B")) - sourceRoot := bytesutil.ToBytes32([]byte("C")) - - m.validatorClient.EXPECT().GetAttestationData( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AttestationDataRequest{}), - ).Times(2).Return(ðpb.AttestationData{ - BeaconBlockRoot: beaconBlockRoot[:], - Target: ðpb.Checkpoint{Root: targetRoot[:], Epoch: 2}, - Source: ðpb.Checkpoint{Root: sourceRoot[:], Epoch: 1}, - }, nil) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeAttestation( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.Attestation{}), - ).Return(ðpb.AttestResponse{}, nil /* error */) - - validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Attempted to make a slashable attestation, rejected") -} - -func TestAttestToBlockHead_BlocksSurroundedAtt(t *testing.T) { - config := &featureconfig.Flags{ - ProtectAttester: true, - } - reset := featureconfig.InitWithReset(config) - defer reset() - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - validatorIndex := uint64(7) - committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10} - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - PublicKey: validatorKey.PublicKey.Marshal(), - CommitteeIndex: 5, - Committee: committee, - ValidatorIndex: validatorIndex, - }, - } - beaconBlockRoot := bytesutil.ToBytes32([]byte("A")) - targetRoot := bytesutil.ToBytes32([]byte("B")) - sourceRoot := bytesutil.ToBytes32([]byte("C")) - - m.validatorClient.EXPECT().GetAttestationData( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AttestationDataRequest{}), - ).Return(ðpb.AttestationData{ - BeaconBlockRoot: beaconBlockRoot[:], - Target: ðpb.Checkpoint{Root: targetRoot[:], Epoch: 3}, - Source: ðpb.Checkpoint{Root: sourceRoot[:], Epoch: 0}, - }, nil) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeAttestation( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.Attestation{}), - ).Return(ðpb.AttestResponse{}, nil /* error */) - - validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - - m.validatorClient.EXPECT().GetAttestationData( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AttestationDataRequest{}), - ).Return(ðpb.AttestationData{ - BeaconBlockRoot: []byte("A"), - Target: ðpb.Checkpoint{Root: []byte("B"), Epoch: 2}, - Source: ðpb.Checkpoint{Root: []byte("C"), Epoch: 1}, - }, nil) - - 
validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Attempted to make a slashable attestation, rejected") -} - -func TestAttestToBlockHead_DoesNotAttestBeforeDelay(t *testing.T) { - validator, m, finish := setup(t) - defer finish() - - validator.genesisTime = uint64(roughtime.Now().Unix()) - m.validatorClient.EXPECT().GetDuties( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.DutiesRequest{}), - gomock.Any(), - ).Times(0) - - m.validatorClient.EXPECT().GetAttestationData( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AttestationDataRequest{}), - ).Times(0) - - m.validatorClient.EXPECT().ProposeAttestation( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.Attestation{}), - ).Return(ðpb.AttestResponse{}, nil /* error */).Times(0) - - timer := time.NewTimer(1 * time.Second) - go validator.SubmitAttestation(context.Background(), 0, validatorPubKey) - <-timer.C -} - -func TestAttestToBlockHead_DoesAttestAfterDelay(t *testing.T) { - validator, m, finish := setup(t) - defer finish() - - var wg sync.WaitGroup - wg.Add(1) - defer wg.Wait() - - validator.genesisTime = uint64(roughtime.Now().Unix()) - validatorIndex := uint64(5) - committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10} - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - PublicKey: validatorKey.PublicKey.Marshal(), - CommitteeIndex: 5, - Committee: committee, - ValidatorIndex: validatorIndex, - }, - } - - m.validatorClient.EXPECT().GetAttestationData( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AttestationDataRequest{}), - ).Return(ðpb.AttestationData{ - BeaconBlockRoot: []byte("A"), - Target: ðpb.Checkpoint{Root: []byte("B")}, - Source: ðpb.Checkpoint{Root: []byte("C"), Epoch: 3}, - }, nil).Do(func(arg0, arg1 interface{}) { - wg.Done() - }) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeAttestation( - gomock.Any(), // ctx - gomock.Any(), - ).Return(ðpb.AttestResponse{}, nil).Times(1) - - validator.SubmitAttestation(context.Background(), 0, validatorPubKey) -} - -func TestAttestToBlockHead_CorrectBitfieldLength(t *testing.T) { - validator, m, finish := setup(t) - defer finish() - validatorIndex := uint64(2) - committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10} - validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - PublicKey: validatorKey.PublicKey.Marshal(), - CommitteeIndex: 5, - Committee: committee, - ValidatorIndex: validatorIndex, - }, - } - m.validatorClient.EXPECT().GetAttestationData( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AttestationDataRequest{}), - ).Return(ðpb.AttestationData{ - Target: ðpb.Checkpoint{Root: []byte("B")}, - Source: ðpb.Checkpoint{Root: []byte("C"), Epoch: 3}, - }, nil) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - var generatedAttestation *ethpb.Attestation - m.validatorClient.EXPECT().ProposeAttestation( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.Attestation{}), - ).Do(func(_ context.Context, att *ethpb.Attestation) { - generatedAttestation = att - }).Return(ðpb.AttestResponse{}, nil /* error */) - - validator.SubmitAttestation(context.Background(), 30, validatorPubKey) - - if 
len(generatedAttestation.AggregationBits) != 2 { - t.Errorf("Wanted length %d, received %d", 2, len(generatedAttestation.AggregationBits)) - } -} - -func TestAttestationHistory_BlocksDoubleAttestation(t *testing.T) { - newMap := make(map[uint64]uint64) - newMap[0] = params.BeaconConfig().FarFutureEpoch - attestations := &slashpb.AttestationHistory{ - TargetToSource: newMap, - LatestEpochWritten: 0, - } - - // Mark an attestation spanning epochs 0 to 3. - newAttSource := uint64(0) - newAttTarget := uint64(3) - attestations = markAttestationForTargetEpoch(attestations, newAttSource, newAttTarget) - if attestations.LatestEpochWritten != newAttTarget { - t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten) - } - - // Try an attestation that should be slashable (double att) spanning epochs 1 to 3. - newAttSource = uint64(1) - newAttTarget = uint64(3) - if !isNewAttSlashable(attestations, newAttSource, newAttTarget) { - t.Fatalf("Expected attestation of source %d and target %d to be considered slashable", newAttSource, newAttTarget) - } -} - -func TestAttestationHistory_Prunes(t *testing.T) { - wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - newMap := make(map[uint64]uint64) - newMap[0] = params.BeaconConfig().FarFutureEpoch - attestations := &slashpb.AttestationHistory{ - TargetToSource: newMap, - LatestEpochWritten: 0, - } - - // Try an attestation on totally unmarked history, should not be slashable. - if isNewAttSlashable(attestations, 0, wsPeriod+5) { - t.Fatalf("Expected attestation of source 0, target %d to be considered slashable", wsPeriod+5) - } - - // Mark attestations spanning epochs 0 to 3 and 6 to 9. - prunedNewAttSource := uint64(0) - prunedNewAttTarget := uint64(3) - attestations = markAttestationForTargetEpoch(attestations, prunedNewAttSource, prunedNewAttTarget) - newAttSource := prunedNewAttSource + 6 - newAttTarget := prunedNewAttTarget + 6 - attestations = markAttestationForTargetEpoch(attestations, newAttSource, newAttTarget) - if attestations.LatestEpochWritten != newAttTarget { - t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten) - } - - // Mark an attestation spanning epochs 54000 to 54003. - farNewAttSource := newAttSource + wsPeriod - farNewAttTarget := newAttTarget + wsPeriod - attestations = markAttestationForTargetEpoch(attestations, farNewAttSource, farNewAttTarget) - if attestations.LatestEpochWritten != farNewAttTarget { - t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten) - } - - if safeTargetToSource(attestations, prunedNewAttTarget) != params.BeaconConfig().FarFutureEpoch { - t.Fatalf("Expected attestation at target epoch %d to not be marked", prunedNewAttTarget) - } - - if safeTargetToSource(attestations, farNewAttTarget) != farNewAttSource { - t.Fatalf("Expected attestation at target epoch %d to not be marked", farNewAttSource) - } - - // Try an attestation from existing source to outside prune, should slash. - if !isNewAttSlashable(attestations, newAttSource, farNewAttTarget) { - t.Fatalf("Expected attestation of source %d, target %d to be considered slashable", newAttSource, farNewAttTarget) - } - // Try an attestation from before existing target to outside prune, should slash. 
- if !isNewAttSlashable(attestations, newAttTarget-1, farNewAttTarget) { - t.Fatalf("Expected attestation of source %d, target %d to be considered slashable", newAttTarget-1, farNewAttTarget) - } - // Try an attestation larger than pruning amount, should slash. - if !isNewAttSlashable(attestations, 0, farNewAttTarget+5) { - t.Fatalf("Expected attestation of source 0, target %d to be considered slashable", farNewAttTarget+5) - } -} - -func TestAttestationHistory_BlocksSurroundedAttestation(t *testing.T) { - newMap := make(map[uint64]uint64) - newMap[0] = params.BeaconConfig().FarFutureEpoch - attestations := &slashpb.AttestationHistory{ - TargetToSource: newMap, - LatestEpochWritten: 0, - } - - // Mark an attestation spanning epochs 0 to 3. - newAttSource := uint64(0) - newAttTarget := uint64(3) - attestations = markAttestationForTargetEpoch(attestations, newAttSource, newAttTarget) - if attestations.LatestEpochWritten != newAttTarget { - t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten) - } - - // Try an attestation that should be slashable (being surrounded) spanning epochs 1 to 2. - newAttSource = uint64(1) - newAttTarget = uint64(2) - if !isNewAttSlashable(attestations, newAttSource, newAttTarget) { - t.Fatalf("Expected attestation of source %d and target %d to be considered slashable", newAttSource, newAttTarget) - } -} - -func TestAttestationHistory_BlocksSurroundingAttestation(t *testing.T) { - newMap := make(map[uint64]uint64) - newMap[0] = params.BeaconConfig().FarFutureEpoch - attestations := &slashpb.AttestationHistory{ - TargetToSource: newMap, - LatestEpochWritten: 0, - } - - // Mark an attestation spanning epochs 1 to 2. - newAttSource := uint64(1) - newAttTarget := uint64(2) - attestations = markAttestationForTargetEpoch(attestations, newAttSource, newAttTarget) - if attestations.LatestEpochWritten != newAttTarget { - t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten) - } - if attestations.TargetToSource[newAttTarget] != newAttSource { - t.Fatalf("Expected source epoch to be %d, received %d", newAttSource, attestations.TargetToSource[newAttTarget]) - } - - // Try an attestation that should be slashable (surrounding) spanning epochs 0 to 3. - newAttSource = uint64(0) - newAttTarget = uint64(3) - if !isNewAttSlashable(attestations, newAttSource, newAttTarget) { - t.Fatalf("Expected attestation of source %d and target %d to be considered slashable", newAttSource, newAttTarget) - } -} diff --git a/validator/client/streaming/duties.go b/validator/client/streaming/duties.go deleted file mode 100644 index 2aa36399d4..0000000000 --- a/validator/client/streaming/duties.go +++ /dev/null @@ -1,205 +0,0 @@ -package streaming - -import ( - "bytes" - "context" - "fmt" - "io" - - "github.com/pkg/errors" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/params" - "go.opencensus.io/trace" -) - -// StreamDuties consumes a server-side stream of validator duties from a beacon node -// for a set of validating keys passed in as a request type. New duties will be -// sent over the stream upon a new epoch being reached or from a a chain reorg happening -// across epochs in the beacon node. 
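// The doc comment above describes a standard gRPC server-streaming consume
// loop: receive until the server closes the stream with io.EOF, and stop early
// on context cancellation. A stripped-down sketch of that control flow,
// relying only on the "context" and "io" imports already listed above;
// dutiesStream and consumeDuties are hypothetical stand-ins, not the
// generated ethpb client API.
type dutiesStream interface {
	Recv() (interface{}, error) // stand-in for the generated Recv() (*ethpb.DutiesResponse, error)
}

func consumeDuties(ctx context.Context, stream dutiesStream, handle func(msg interface{})) error {
	for {
		msg, err := stream.Recv()
		if err == io.EOF {
			return nil // server closed the stream cleanly
		}
		if ctx.Err() == context.Canceled {
			return ctx.Err() // caller canceled; shut the loop down
		}
		if err != nil {
			return err
		}
		handle(msg)
	}
}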
-func (v *validator) StreamDuties(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "validator.StreamDuties") - defer span.End() - - validatingKeys, err := v.keyManager.FetchValidatingKeys() - if err != nil { - return err - } - numValidatingKeys := len(validatingKeys) - req := ðpb.DutiesRequest{ - PublicKeys: bytesutil.FromBytes48Array(validatingKeys), - } - stream, err := v.validatorClient.StreamDuties(ctx, req) - if err != nil { - return errors.Wrap(err, "Could not setup validator duties streaming client") - } - for { - res, err := stream.Recv() - // If the stream is closed, we stop the loop. - if err == io.EOF { - break - } - // If context is canceled we stop the loop. - if ctx.Err() == context.Canceled { - return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop") - } - if err != nil { - return errors.Wrap(err, "Could not receive duties from stream") - } - // Updates validator duties and requests the beacon node to subscribe - // to attestation subnets in advance. - v.updateDuties(ctx, res, numValidatingKeys) - if err := v.requestSubnetSubscriptions(ctx, res, numValidatingKeys); err != nil { - log.WithError(err).Error("Could not request beacon node to subscribe to subnets") - } - } - return nil -} - -// RolesAt slot returns the validator roles at the given slot. Returns nil if the -// validator is known to not have a roles at the at slot. Returns UNKNOWN if the -// validator assignments are unknown. Otherwise returns a valid validatorRole map. -func (v *validator) RolesAt(ctx context.Context, slot uint64) (map[[48]byte][]validatorRole, error) { - epoch := slot / params.BeaconConfig().SlotsPerEpoch - rolesAt := make(map[[48]byte][]validatorRole) - v.dutiesLock.RLock() - duty, ok := v.dutiesByEpoch[epoch] - if !ok { - v.dutiesLock.RUnlock() - log.Debugf("No assigned duties yet for epoch %d", epoch) - return rolesAt, nil - } - v.dutiesLock.RUnlock() - for _, dt := range duty { - var roles []validatorRole - - if dt == nil { - continue - } - if len(dt.ProposerSlots) > 0 { - for _, proposerSlot := range dt.ProposerSlots { - if proposerSlot != 0 && proposerSlot == slot { - roles = append(roles, roleProposer) - break - } - } - } - if dt.AttesterSlot == slot { - roles = append(roles, roleAttester) - - aggregator, err := v.isAggregator(ctx, dt.Committee, slot, bytesutil.ToBytes48(dt.PublicKey)) - if err != nil { - return nil, errors.Wrap(err, "could not check if a validator is an aggregator") - } - if aggregator { - roles = append(roles, roleAggregator) - } - - } - if len(roles) == 0 { - roles = append(roles, roleUnknown) - } - - var pubKey [48]byte - copy(pubKey[:], dt.PublicKey) - rolesAt[pubKey] = roles - } - return rolesAt, nil -} - -// Update duties sets the received validator duties in-memory for the validator client -// and determines which validating keys were selected as attestation aggregators -// for the epoch. Additionally, this function uses that information to notify -// the beacon node it should subscribe the assigned attestation p2p subnets. 
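// RolesAt, deleted above, reduces to a small per-duty mapping: a key proposes
// if the slot appears in its ProposerSlots, attests (and possibly aggregates)
// if the slot equals its AttesterSlot, and otherwise has no role. A
// dependency-free sketch of that mapping; dutySketch and the plain string
// role names are illustrative stand-ins for the ethpb duty type and the
// package's validatorRole constants.
type dutySketch struct {
	AttesterSlot  uint64
	ProposerSlots []uint64
}

func rolesForSlot(d dutySketch, slot uint64, isAggregator bool) []string {
	var roles []string
	for _, ps := range d.ProposerSlots {
		if ps != 0 && ps == slot { // slot 0 never counts as a proposer assignment
			roles = append(roles, "proposer")
			break
		}
	}
	if d.AttesterSlot == slot {
		roles = append(roles, "attester")
		if isAggregator {
			roles = append(roles, "aggregator")
		}
	}
	if len(roles) == 0 {
		roles = append(roles, "unknown")
	}
	return roles
}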
-func (v *validator) updateDuties(ctx context.Context, dutiesResp *ethpb.DutiesResponse, numKeys int) { - ctx, span := trace.StartSpan(ctx, "validator.updateDuties") - defer span.End() - currentSlot := v.CurrentSlot() - currentEpoch := currentSlot / params.BeaconConfig().SlotsPerEpoch - - v.dutiesLock.Lock() - v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty, 2) - v.dutiesByEpoch[currentEpoch] = dutiesResp.CurrentEpochDuties - v.dutiesByEpoch[currentEpoch+1] = dutiesResp.NextEpochDuties - v.dutiesLock.Unlock() - - v.logDuties(currentSlot, dutiesResp.CurrentEpochDuties) - v.logDuties(currentSlot+params.BeaconConfig().SlotsPerEpoch, dutiesResp.NextEpochDuties) -} - -// Given the validator public key and an epoch, this gets the validator assignment. -func (v *validator) duty(pubKey [48]byte, epoch uint64) (*ethpb.DutiesResponse_Duty, error) { - v.dutiesLock.RLock() - defer v.dutiesLock.RUnlock() - duty, ok := v.dutiesByEpoch[epoch] - if !ok { - return nil, fmt.Errorf("no duty found for epoch %d", epoch) - } - for _, d := range duty { - if bytes.Equal(pubKey[:], d.PublicKey) { - return d, nil - } - } - return nil, fmt.Errorf("pubkey %#x not in duties", bytesutil.Trunc(pubKey[:])) -} - -func (v *validator) requestSubnetSubscriptions(ctx context.Context, dutiesResp *ethpb.DutiesResponse, numKeys int) error { - subscribeSlots := make([]uint64, 0, numKeys) - subscribeCommitteeIDs := make([]uint64, 0, numKeys) - subscribeIsAggregator := make([]bool, 0, numKeys) - alreadySubscribed := make(map[[64]byte]bool) - for _, duty := range dutiesResp.CurrentEpochDuties { - if duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING { - attesterSlot := duty.AttesterSlot - committeeIndex := duty.CommitteeIndex - - alreadySubscribedKey := validatorSubscribeKey(attesterSlot, committeeIndex) - if _, ok := alreadySubscribed[alreadySubscribedKey]; ok { - continue - } - - aggregator, err := v.isAggregator(ctx, duty.Committee, attesterSlot, bytesutil.ToBytes48(duty.PublicKey)) - if err != nil { - return errors.Wrap(err, "could not check if a validator is an aggregator") - } - if aggregator { - alreadySubscribed[alreadySubscribedKey] = true - } - - subscribeSlots = append(subscribeSlots, attesterSlot) - subscribeCommitteeIDs = append(subscribeCommitteeIDs, committeeIndex) - subscribeIsAggregator = append(subscribeIsAggregator, aggregator) - } - } - - for _, duty := range dutiesResp.NextEpochDuties { - if duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING { - attesterSlot := duty.AttesterSlot - committeeIndex := duty.CommitteeIndex - - alreadySubscribedKey := validatorSubscribeKey(attesterSlot, committeeIndex) - if _, ok := alreadySubscribed[alreadySubscribedKey]; ok { - continue - } - - aggregator, err := v.isAggregator(ctx, duty.Committee, attesterSlot, bytesutil.ToBytes48(duty.PublicKey)) - if err != nil { - return errors.Wrap(err, "could not check if a validator is an aggregator") - } - if aggregator { - alreadySubscribed[alreadySubscribedKey] = true - } - - subscribeSlots = append(subscribeSlots, attesterSlot) - subscribeCommitteeIDs = append(subscribeCommitteeIDs, committeeIndex) - subscribeIsAggregator = append(subscribeIsAggregator, aggregator) - } - } - - _, err := v.validatorClient.SubscribeCommitteeSubnets(ctx, ðpb.CommitteeSubnetsSubscribeRequest{ - Slots: subscribeSlots, - CommitteeIds: subscribeCommitteeIDs, - IsAggregator: subscribeIsAggregator, - }) - return err -} diff --git 
a/validator/client/streaming/duties_test.go b/validator/client/streaming/duties_test.go deleted file mode 100644 index a3f21820e6..0000000000 --- a/validator/client/streaming/duties_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package streaming - -import ( - "context" - "errors" - "io" - "strings" - "testing" - "time" - - "github.com/golang/mock/gomock" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - - "github.com/prysmaticlabs/prysm/shared/mock" - "github.com/prysmaticlabs/prysm/shared/params" -) - -func TestStreamDuties_ReturnsError(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - v.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{ - { - CommitteeIndex: 1, - }, - } - - expected := errors.New("bad") - - client.EXPECT().StreamDuties( - gomock.Any(), - gomock.Any(), - ).Return(nil, expected) - - if err := v.StreamDuties(context.Background()); !strings.Contains(err.Error(), "bad") { - t.Errorf("Bad error; want=%v got=%v", expected, err) - } -} - -func TestStreamDuties_OK(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - resp := ðpb.DutiesResponse{ - CurrentEpochDuties: []*ethpb.DutiesResponse_Duty{ - { - AttesterSlot: params.BeaconConfig().SlotsPerEpoch, - ValidatorIndex: 200, - CommitteeIndex: 100, - Committee: []uint64{0, 1, 2, 3}, - PublicKey: []byte("testPubKey_1"), - ProposerSlots: []uint64{params.BeaconConfig().SlotsPerEpoch + 1}, - }, - { - AttesterSlot: params.BeaconConfig().SlotsPerEpoch, - ValidatorIndex: 201, - CommitteeIndex: 101, - Committee: []uint64{0, 1, 2, 3}, - PublicKey: []byte("testPubKey_2"), - ProposerSlots: []uint64{params.BeaconConfig().SlotsPerEpoch + 2}, - }, - }, - } - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - v.genesisTime = uint64(time.Now().Unix()) + 500 - v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty) - stream := mock.NewMockBeaconNodeValidator_StreamDutiesClient(ctrl) - client.EXPECT().StreamDuties( - gomock.Any(), - gomock.Any(), - ).Return(stream, nil) - ctx := context.Background() - stream.EXPECT().Context().Return(ctx).AnyTimes() - stream.EXPECT().Recv().Return( - resp, - nil, - ) - - client.EXPECT().SubscribeCommitteeSubnets( - gomock.Any(), - gomock.Any(), - ).Return(nil, nil) - - stream.EXPECT().Recv().Return( - nil, - io.EOF, - ) - - if err := v.StreamDuties(ctx); err != nil { - t.Fatalf("Could not update assignments: %v", err) - } - if v.dutiesByEpoch[0][0].ProposerSlots[0] != params.BeaconConfig().SlotsPerEpoch+1 { - t.Errorf( - "Unexpected validator assignments. want=%v got=%v", - params.BeaconConfig().SlotsPerEpoch+1, - v.dutiesByEpoch[0][0].ProposerSlots[0], - ) - } - if v.dutiesByEpoch[0][0].AttesterSlot != params.BeaconConfig().SlotsPerEpoch { - t.Errorf( - "Unexpected validator assignments. want=%v got=%v", - params.BeaconConfig().SlotsPerEpoch, - v.dutiesByEpoch[0][0].AttesterSlot, - ) - } - if v.dutiesByEpoch[0][0].CommitteeIndex != resp.CurrentEpochDuties[0].CommitteeIndex { - t.Errorf( - "Unexpected validator assignments. want=%v got=%v", - resp.Duties[0].CommitteeIndex, - v.dutiesByEpoch[0][0].CommitteeIndex, - ) - } - if v.dutiesByEpoch[0][0].ValidatorIndex != resp.CurrentEpochDuties[0].ValidatorIndex { - t.Errorf( - "Unexpected validator assignments. 
want=%v got=%v", - resp.CurrentEpochDuties[0].ValidatorIndex, - v.dutiesByEpoch[0][0].ValidatorIndex, - ) - } - if v.dutiesByEpoch[0][1].ValidatorIndex != resp.CurrentEpochDuties[1].ValidatorIndex { - t.Errorf( - "Unexpected validator assignments. want=%v got=%v", - resp.CurrentEpochDuties[1].ValidatorIndex, - v.dutiesByEpoch[0][1].ValidatorIndex, - ) - } -} diff --git a/validator/client/streaming/fake_validator_test.go b/validator/client/streaming/fake_validator_test.go deleted file mode 100644 index dfb0bd85d0..0000000000 --- a/validator/client/streaming/fake_validator_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package streaming - -import ( - "context" - "time" -) - -var _ = Validator(&fakeValidator{}) - -type fakeValidator struct { - DoneCalled bool - WaitForActivationCalled bool - WaitForChainStartCalled bool - WaitForSyncCalled bool - WaitForSyncedCalled bool - NextSlotCalled bool - StreamDutiesCalled bool - UpdateProtectionsCalled bool - RoleAtCalled bool - AttestToBlockHeadCalled bool - ProposeBlockCalled bool - LogValidatorGainsAndLossesCalled bool - SaveProtectionsCalled bool - SlotDeadlineCalled bool - ProposeBlockArg1 uint64 - AttestToBlockHeadArg1 uint64 - RoleAtArg1 uint64 - NextSlotRet <-chan uint64 - PublicKey string - StreamDutiesRet error - RolesAtRet []validatorRole -} - -func (fv *fakeValidator) Done() { - fv.DoneCalled = true -} - -func (fv *fakeValidator) WaitForChainStart(_ context.Context) error { - fv.WaitForChainStartCalled = true - return nil -} - -func (fv *fakeValidator) WaitForActivation(_ context.Context) error { - fv.WaitForActivationCalled = true - return nil -} - -func (fv *fakeValidator) WaitForSync(_ context.Context) error { - fv.WaitForSyncCalled = true - return nil -} - -func (fv *fakeValidator) WaitForSynced(_ context.Context) error { - fv.WaitForSyncedCalled = true - return nil -} - -func (fv *fakeValidator) SlotDeadline(_ uint64) time.Time { - fv.SlotDeadlineCalled = true - return time.Now() -} - -func (fv *fakeValidator) NextSlot() <-chan uint64 { - fv.NextSlotCalled = true - return fv.NextSlotRet -} - -func (fv *fakeValidator) StreamDuties(_ context.Context) error { - fv.StreamDutiesCalled = true - return fv.StreamDutiesRet -} - -func (fv *fakeValidator) UpdateProtections(_ context.Context, slot uint64) error { - fv.UpdateProtectionsCalled = true - return nil -} - -func (fv *fakeValidator) LogValidatorGainsAndLosses(_ context.Context, slot uint64) error { - fv.LogValidatorGainsAndLossesCalled = true - return nil -} - -func (fv *fakeValidator) SaveProtections(_ context.Context) error { - fv.SaveProtectionsCalled = true - return nil -} - -func (fv *fakeValidator) RolesAt(_ context.Context, slot uint64) (map[[48]byte][]validatorRole, error) { - fv.RoleAtCalled = true - fv.RoleAtArg1 = slot - vr := make(map[[48]byte][]validatorRole) - vr[[48]byte{1}] = fv.RolesAtRet - return vr, nil -} - -func (fv *fakeValidator) SubmitAttestation(_ context.Context, slot uint64, pubKey [48]byte) { - fv.AttestToBlockHeadCalled = true - fv.AttestToBlockHeadArg1 = slot -} - -func (fv *fakeValidator) ProposeBlock(_ context.Context, slot uint64, pubKey [48]byte) { - fv.ProposeBlockCalled = true - fv.ProposeBlockArg1 = slot -} - -func (fv *fakeValidator) SubmitAggregateAndProof(_ context.Context, slot uint64, pubKey [48]byte) {} - -func (fv *fakeValidator) LogAttestationsSubmitted() {} - -func (fv *fakeValidator) UpdateDomainDataCaches(context.Context, uint64) {} - -func (fv *fakeValidator) CurrentSlot() uint64 { return 0 } diff --git a/validator/client/streaming/log.go 
b/validator/client/streaming/log.go deleted file mode 100644 index d943815abc..0000000000 --- a/validator/client/streaming/log.go +++ /dev/null @@ -1,37 +0,0 @@ -package streaming - -import ( - "fmt" - - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/sirupsen/logrus" - - "github.com/prysmaticlabs/prysm/shared/bytesutil" -) - -type attSubmitted struct { - data *ethpb.AttestationData - attesterIndices []uint64 - aggregatorIndices []uint64 -} - -func (v *validator) LogAttestationsSubmitted() { - v.attLogsLock.Lock() - defer v.attLogsLock.Unlock() - - for _, attLog := range v.attLogs { - log.WithFields(logrus.Fields{ - "Slot": attLog.data.Slot, - "CommitteeIndex": attLog.data.CommitteeIndex, - "BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(attLog.data.BeaconBlockRoot)), - "SourceEpoch": attLog.data.Source.Epoch, - "SourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(attLog.data.Source.Root)), - "TargetEpoch": attLog.data.Target.Epoch, - "TargetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(attLog.data.Target.Root)), - "AttesterIndices": attLog.attesterIndices, - "AggregatorIndices": attLog.aggregatorIndices, - }).Info("Submitted new attestations") - } - - v.attLogs = make(map[[32]byte]*attSubmitted) -} diff --git a/validator/client/streaming/metrics.go b/validator/client/streaming/metrics.go deleted file mode 100644 index 2f3c756bcc..0000000000 --- a/validator/client/streaming/metrics.go +++ /dev/null @@ -1,109 +0,0 @@ -package streaming - -import ( - "context" - "fmt" - - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/validator/client/metrics" - "github.com/sirupsen/logrus" -) - -// LogValidatorGainsAndLosses logs important metrics related to this validator client's -// responsibilities throughout the beacon chain's lifecycle. It logs absolute accrued rewards -// and penalties over time, percentage gain/loss, and gives the end user a better idea -// of how the validator performs with respect to the rest. -func (v *validator) LogValidatorGainsAndLosses(ctx context.Context, slot uint64) error { - if slot%params.BeaconConfig().SlotsPerEpoch != 0 || slot <= params.BeaconConfig().SlotsPerEpoch { - // Do nothing unless we are at the start of the epoch, and not in the first epoch. 
- return nil - } - if !v.logValidatorBalances { - return nil - } - - pks, err := v.keyManager.FetchValidatingKeys() - if err != nil { - return err - } - pubKeys := bytesutil.FromBytes48Array(pks) - - req := ðpb.ValidatorPerformanceRequest{ - PublicKeys: pubKeys, - } - resp, err := v.beaconClient.GetValidatorPerformance(ctx, req) - if err != nil { - return err - } - - if v.emitAccountMetrics { - for _, missingPubKey := range resp.MissingValidators { - fmtKey := fmt.Sprintf("%#x", missingPubKey[:]) - metrics.ValidatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(0) - } - } - - included := 0 - votedSource := 0 - votedTarget := 0 - votedHead := 0 - prevEpoch := uint64(0) - if slot >= params.BeaconConfig().SlotsPerEpoch { - prevEpoch = (slot / params.BeaconConfig().SlotsPerEpoch) - 1 - } - gweiPerEth := float64(params.BeaconConfig().GweiPerEth) - for i, pubKey := range resp.PublicKeys { - pubKeyBytes := bytesutil.ToBytes48(pubKey) - if slot < params.BeaconConfig().SlotsPerEpoch { - v.prevBalance[pubKeyBytes] = params.BeaconConfig().MaxEffectiveBalance - } - - truncatedKey := fmt.Sprintf("%#x", pubKey[:8]) - if v.prevBalance[pubKeyBytes] > 0 { - newBalance := float64(resp.BalancesAfterEpochTransition[i]) / gweiPerEth - prevBalance := float64(resp.BalancesBeforeEpochTransition[i]) / gweiPerEth - percentNet := (newBalance - prevBalance) / prevBalance - log.WithFields(logrus.Fields{ - "pubKey": truncatedKey, - "epoch": prevEpoch, - "correctlyVotedSource": resp.CorrectlyVotedSource[i], - "correctlyVotedTarget": resp.CorrectlyVotedTarget[i], - "correctlyVotedHead": resp.CorrectlyVotedHead[i], - "inclusionSlot": resp.InclusionSlots[i], - "inclusionDistance": resp.InclusionDistances[i], - "oldBalance": prevBalance, - "newBalance": newBalance, - "percentChange": fmt.Sprintf("%.5f%%", percentNet*100), - }).Info("Previous epoch voting summary") - if v.emitAccountMetrics { - metrics.ValidatorBalancesGaugeVec.WithLabelValues(truncatedKey).Set(newBalance) - } - } - - if resp.InclusionSlots[i] != ^uint64(0) { - included++ - } - if resp.CorrectlyVotedSource[i] { - votedSource++ - } - if resp.CorrectlyVotedTarget[i] { - votedTarget++ - } - if resp.CorrectlyVotedHead[i] { - votedHead++ - } - v.prevBalance[pubKeyBytes] = resp.BalancesBeforeEpochTransition[i] - } - - log.WithFields(logrus.Fields{ - "epoch": prevEpoch, - "attestationInclusionPercentage": fmt.Sprintf("%.0f%%", (float64(included)/float64(len(resp.InclusionSlots)))*100), - "correctlyVotedSourcePercentage": fmt.Sprintf("%.0f%%", (float64(votedSource)/float64(len(resp.CorrectlyVotedSource)))*100), - "correctlyVotedTargetPercentage": fmt.Sprintf("%.0f%%", (float64(votedTarget)/float64(len(resp.CorrectlyVotedTarget)))*100), - "correctlyVotedHeadPercentage": fmt.Sprintf("%.0f%%", (float64(votedHead)/float64(len(resp.CorrectlyVotedHead)))*100), - }).Info("Previous epoch aggregated voting summary") - - return nil -} diff --git a/validator/client/streaming/propose.go b/validator/client/streaming/propose.go deleted file mode 100644 index 96df45e009..0000000000 --- a/validator/client/streaming/propose.go +++ /dev/null @@ -1,222 +0,0 @@ -package streaming - -// Validator client proposer functions. 
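// The epoch-summary logging in metrics.go above comes down to two pieces of
// arithmetic: the per-key fractional balance change across the epoch
// transition, and the share of keys whose attestation was included. A
// self-contained sketch of that arithmetic, assuming balances are reported in
// Gwei (1e9 Gwei per ETH) as in the deleted code; names are illustrative.
const gweiPerEth = 1e9

// percentChange returns the fractional gain or loss between the balances
// before and after an epoch transition, e.g. 0.0001 for a +0.01% change.
func percentChange(beforeGwei, afterGwei uint64) float64 {
	before := float64(beforeGwei) / gweiPerEth
	after := float64(afterGwei) / gweiPerEth
	if before == 0 {
		return 0
	}
	return (after - before) / before
}

// inclusionPercentage returns the percentage of validators whose attestation
// was included, where an inclusion slot of ^uint64(0) means "not included".
func inclusionPercentage(inclusionSlots []uint64) float64 {
	if len(inclusionSlots) == 0 {
		return 0
	}
	included := 0
	for _, s := range inclusionSlots {
		if s != ^uint64(0) {
			included++
		}
	}
	return float64(included) / float64(len(inclusionSlots)) * 100
}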
-import ( - "context" - "fmt" - - "github.com/pkg/errors" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" - "github.com/prysmaticlabs/prysm/shared/blockutil" - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/featureconfig" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/validator/client/metrics" - keymanager "github.com/prysmaticlabs/prysm/validator/keymanager/v1" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -// ProposeBlock A new beacon block for a given slot. This method collects the -// previous beacon block, any pending deposits, and ETH1 data from the beacon -// chain node to construct the new block. The new block is then processed with -// the state root computation, and finally signed by the validator before being -// sent back to the beacon node for broadcasting. -func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]byte) { - if slot == 0 { - log.Debug("Assigned to genesis slot, skipping proposal") - return - } - ctx, span := trace.StartSpan(ctx, "validator.ProposeBlock") - defer span.End() - fmtKey := fmt.Sprintf("%#x", pubKey[:]) - - span.AddAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey))) - log := log.WithField("pubKey", fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:]))) - - // Sign randao reveal, it's used to request block from beacon node - epoch := slot / params.BeaconConfig().SlotsPerEpoch - randaoReveal, err := v.signRandaoReveal(ctx, pubKey, epoch) - if err != nil { - log.WithError(err).Error("Failed to sign randao reveal") - if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - // Request block from beacon node - b, err := v.validatorClient.GetBlock(ctx, ðpb.BlockRequest{ - Slot: slot, - RandaoReveal: randaoReveal, - Graffiti: v.graffiti, - }) - if err != nil { - log.WithField("blockSlot", slot).WithError(err).Error("Failed to request block from beacon node") - if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - var slotBits bitfield.Bitlist - if featureconfig.Get().ProtectProposer { - slotBits, err = v.db.ProposalHistoryForEpoch(ctx, pubKey[:], epoch) - if err != nil { - log.WithError(err).Error("Failed to get proposal history") - if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - // If the bit for the current slot is marked, do not propose. 
- if slotBits.BitAt(slot % params.BeaconConfig().SlotsPerEpoch) { - log.WithField("epoch", epoch).Error("Tried to sign a double proposal, rejected") - if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - } - - if featureconfig.Get().SlasherProtection && v.protector != nil { - bh, err := blockutil.BeaconBlockHeaderFromBlock(b) - if err != nil { - log.WithError(err).Error("Failed to get block header from block") - } - if !v.protector.VerifyBlock(ctx, bh) { - log.WithField("epoch", epoch).Error("Tried to sign a double proposal, rejected by external slasher") - if v.emitAccountMetrics { - metrics.ValidatorProposeFailVecSlasher.WithLabelValues(fmtKey).Inc() - } - return - } - } - - // Sign returned block from beacon node - sig, err := v.signBlock(ctx, pubKey, epoch, b) - if err != nil { - log.WithError(err).Error("Failed to sign block") - if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - blk := ðpb.SignedBeaconBlock{ - Block: b, - Signature: sig, - } - - // Propose and broadcast block via beacon node - blkResp, err := v.validatorClient.ProposeBlock(ctx, blk) - if err != nil { - log.WithError(err).Error("Failed to propose block") - if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - - if featureconfig.Get().SlasherProtection && v.protector != nil { - sbh, err := blockutil.SignedBeaconBlockHeaderFromBlock(blk) - if err != nil { - log.WithError(err).Error("Failed to get block header from block") - } - if !v.protector.CommitBlock(ctx, sbh) { - log.WithField("epoch", epoch).Fatal("Tried to sign a double proposal, rejected by external slasher") - if v.emitAccountMetrics { - metrics.ValidatorProposeFailVecSlasher.WithLabelValues(fmtKey).Inc() - } - return - } - } - - if featureconfig.Get().ProtectProposer { - slotBits.SetBitAt(slot%params.BeaconConfig().SlotsPerEpoch, true) - if err := v.db.SaveProposalHistoryForEpoch(ctx, pubKey[:], epoch, slotBits); err != nil { - log.WithError(err).Error("Failed to save updated proposal history") - if v.emitAccountMetrics { - metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - } - - if v.emitAccountMetrics { - metrics.ValidatorProposeSuccessVec.WithLabelValues(fmtKey).Inc() - } - - span.AddAttributes( - trace.StringAttribute("blockRoot", fmt.Sprintf("%#x", blkResp.BlockRoot)), - trace.Int64Attribute("numDeposits", int64(len(b.Body.Deposits))), - trace.Int64Attribute("numAttestations", int64(len(b.Body.Attestations))), - ) - - blkRoot := fmt.Sprintf("%#x", bytesutil.Trunc(blkResp.BlockRoot)) - log.WithFields(logrus.Fields{ - "slot": b.Slot, - "blockRoot": blkRoot, - "numAttestations": len(b.Body.Attestations), - "numDeposits": len(b.Body.Deposits), - }).Info("Submitted new block") -} - -// ProposeExit -- -func (v *validator) ProposeExit(ctx context.Context, exit *ethpb.VoluntaryExit) error { - return errors.New("unimplemented") -} - -// Sign randao reveal with randao domain and private key. 
-func (v *validator) signRandaoReveal(ctx context.Context, pubKey [48]byte, epoch uint64) ([]byte, error) { - domain, err := v.domainData(ctx, epoch, params.BeaconConfig().DomainRandao[:]) - if err != nil { - return nil, errors.Wrap(err, "could not get domain data") - } - - randaoReveal, err := v.signObject(pubKey, epoch, domain.SignatureDomain) - if err != nil { - return nil, errors.Wrap(err, "could not sign reveal") - } - return randaoReveal.Marshal(), nil -} - -// Sign block with proposer domain and private key. -func (v *validator) signBlock(ctx context.Context, pubKey [48]byte, epoch uint64, b *ethpb.BeaconBlock) ([]byte, error) { - domain, err := v.domainData(ctx, epoch, params.BeaconConfig().DomainBeaconProposer[:]) - if err != nil { - return nil, errors.Wrap(err, "could not get domain data") - } - var sig bls.Signature - if protectingKeymanager, supported := v.keyManager.(keymanager.ProtectingKeyManager); supported { - bodyRoot, err := stateutil.BlockBodyRoot(b.Body) - if err != nil { - return nil, errors.Wrap(err, "could not get signing root") - } - blockHeader := ðpb.BeaconBlockHeader{ - Slot: b.Slot, - ProposerIndex: b.ProposerIndex, - StateRoot: b.StateRoot, - ParentRoot: b.ParentRoot, - BodyRoot: bodyRoot[:], - } - sig, err = protectingKeymanager.SignProposal(pubKey, bytesutil.ToBytes32(domain.SignatureDomain), blockHeader) - if err != nil { - return nil, errors.Wrap(err, "could not sign block proposal") - } - } else { - blockRoot, err := helpers.ComputeSigningRoot(b, domain.SignatureDomain) - if err != nil { - return nil, errors.Wrap(err, "could not get signing root") - } - sig, err = v.keyManager.Sign(pubKey, blockRoot) - if err != nil { - return nil, errors.Wrap(err, "could not sign block proposal") - } - } - return sig.Marshal(), nil -} diff --git a/validator/client/streaming/propose_test.go b/validator/client/streaming/propose_test.go deleted file mode 100644 index 69f56e20af..0000000000 --- a/validator/client/streaming/propose_test.go +++ /dev/null @@ -1,345 +0,0 @@ -package streaming - -import ( - "context" - "errors" - "testing" - - "github.com/golang/mock/gomock" - lru "github.com/hashicorp/golang-lru" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - dbTest "github.com/prysmaticlabs/prysm/validator/db/testing" - logTest "github.com/sirupsen/logrus/hooks/test" - - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" - "github.com/prysmaticlabs/prysm/shared/featureconfig" - "github.com/prysmaticlabs/prysm/shared/mock" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/shared/testutil" -) - -type mocks struct { - validatorClient *mock.MockBeaconNodeValidatorClient -} - -func setup(t *testing.T) (*validator, *mocks, func()) { - valDB := dbTest.SetupDB(t, [][48]byte{validatorPubKey}) - ctrl := gomock.NewController(t) - m := &mocks{ - validatorClient: mock.NewMockBeaconNodeValidatorClient(ctrl), - } - - aggregatedSlotCommitteeIDCache, err := lru.New(int(params.BeaconConfig().MaxCommitteesPerSlot)) - if err != nil { - t.Fatal(err) - } - cleanMap := make(map[uint64]uint64) - cleanMap[0] = params.BeaconConfig().FarFutureEpoch - clean := &slashpb.AttestationHistory{ - TargetToSource: cleanMap, - } - attHistoryByPubKey := make(map[[48]byte]*slashpb.AttestationHistory) - attHistoryByPubKey[validatorPubKey] = clean - - validator := &validator{ - db: valDB, - validatorClient: m.validatorClient, - keyManager: testKeyManager, - graffiti: []byte{}, - attLogs: make(map[[32]byte]*attSubmitted), - aggregatedSlotCommitteeIDCache: 
aggregatedSlotCommitteeIDCache, - attesterHistoryByPubKey: attHistoryByPubKey, - } - - return validator, m, ctrl.Finish -} - -func TestProposeBlock_DoesNotProposeGenesisBlock(t *testing.T) { - hook := logTest.NewGlobal() - validator, _, finish := setup(t) - defer finish() - validator.ProposeBlock(context.Background(), 0, validatorPubKey) - - testutil.AssertLogsContain(t, hook, "Assigned to genesis slot, skipping proposal") -} - -func TestProposeBlock_DomainDataFailed(t *testing.T) { - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(nil /*response*/, errors.New("uh oh")) - - validator.ProposeBlock(context.Background(), 1, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Failed to sign randao reveal") -} - -func TestProposeBlock_RequestBlockFailed(t *testing.T) { - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().GetBlock( - gomock.Any(), // ctx - gomock.Any(), // block request - ).Return(nil /*response*/, errors.New("uh oh")) - - validator.ProposeBlock(context.Background(), 1, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Failed to request block from beacon node") -} - -func TestProposeBlock_ProposeBlockFailed(t *testing.T) { - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().GetBlock( - gomock.Any(), // ctx - gomock.Any(), - ).Return(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}, nil /*err*/) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeBlock( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.SignedBeaconBlock{}), - ).Return(nil /*response*/, errors.New("uh oh")) - - validator.ProposeBlock(context.Background(), 1, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Failed to propose block") -} - -func TestProposeBlock_BlocksDoubleProposal(t *testing.T) { - cfg := &featureconfig.Flags{ - ProtectProposer: true, - } - reset := featureconfig.InitWithReset(cfg) - defer reset() - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Times(2).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().GetBlock( - gomock.Any(), // ctx - gomock.Any(), - ).Times(2).Return(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}, nil /*err*/) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeBlock( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.SignedBeaconBlock{}), - ).Return(ðpb.ProposeResponse{}, nil /*error*/) - - slot := params.BeaconConfig().SlotsPerEpoch*5 + 2 - validator.ProposeBlock(context.Background(), slot, validatorPubKey) - testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal") - - validator.ProposeBlock(context.Background(), slot, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Tried to sign a double proposal") -} - -func 
TestProposeBlock_BlocksDoubleProposal_After54KEpochs(t *testing.T) { - cfg := &featureconfig.Flags{ - ProtectProposer: true, - } - reset := featureconfig.InitWithReset(cfg) - defer reset() - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Times(2).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().GetBlock( - gomock.Any(), // ctx - gomock.Any(), - ).Times(2).Return(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}, nil /*err*/) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeBlock( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.SignedBeaconBlock{}), - ).Return(ðpb.ProposeResponse{}, nil /*error*/) - - farFuture := (params.BeaconConfig().WeakSubjectivityPeriod + 9) * params.BeaconConfig().SlotsPerEpoch - validator.ProposeBlock(context.Background(), farFuture, validatorPubKey) - testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal") - - validator.ProposeBlock(context.Background(), farFuture, validatorPubKey) - testutil.AssertLogsContain(t, hook, "Tried to sign a double proposal") -} - -func TestProposeBlock_AllowsPastProposals(t *testing.T) { - cfg := &featureconfig.Flags{ - ProtectProposer: true, - } - reset := featureconfig.InitWithReset(cfg) - defer reset() - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Times(2).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().GetBlock( - gomock.Any(), // ctx - gomock.Any(), - ).Times(2).Return(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}, nil /*err*/) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Times(2).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeBlock( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.SignedBeaconBlock{}), - ).Times(2).Return(ðpb.ProposeResponse{}, nil /*error*/) - - farAhead := (params.BeaconConfig().WeakSubjectivityPeriod + 9) * params.BeaconConfig().SlotsPerEpoch - validator.ProposeBlock(context.Background(), farAhead, validatorPubKey) - testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal") - - past := (params.BeaconConfig().WeakSubjectivityPeriod - 400) * params.BeaconConfig().SlotsPerEpoch - validator.ProposeBlock(context.Background(), past, validatorPubKey) - testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal") -} - -func TestProposeBlock_AllowsSameEpoch(t *testing.T) { - cfg := &featureconfig.Flags{ - ProtectProposer: true, - } - reset := featureconfig.InitWithReset(cfg) - defer reset() - hook := logTest.NewGlobal() - validator, m, finish := setup(t) - defer finish() - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Times(2).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().GetBlock( - gomock.Any(), // ctx - gomock.Any(), - ).Times(2).Return(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}, nil /*err*/) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Times(2).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeBlock( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.SignedBeaconBlock{}), - 
).Times(2).Return(ðpb.ProposeResponse{}, nil /*error*/) - - pubKey := validatorPubKey - farAhead := (params.BeaconConfig().WeakSubjectivityPeriod + 9) * params.BeaconConfig().SlotsPerEpoch - validator.ProposeBlock(context.Background(), farAhead, pubKey) - testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal") - - validator.ProposeBlock(context.Background(), farAhead-4, pubKey) - testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal") -} - -func TestProposeBlock_BroadcastsBlock(t *testing.T) { - validator, m, finish := setup(t) - defer finish() - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().GetBlock( - gomock.Any(), // ctx - gomock.Any(), - ).Return(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}, nil /*err*/) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().ProposeBlock( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.SignedBeaconBlock{}), - ).Return(ðpb.ProposeResponse{}, nil /*error*/) - - validator.ProposeBlock(context.Background(), 1, validatorPubKey) -} - -func TestProposeBlock_BroadcastsBlock_WithGraffiti(t *testing.T) { - validator, m, finish := setup(t) - defer finish() - - validator.graffiti = []byte("12345678901234567890123456789012") - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - m.validatorClient.EXPECT().GetBlock( - gomock.Any(), // ctx - gomock.Any(), - ).Return(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{Graffiti: validator.graffiti}}, nil /*err*/) - - m.validatorClient.EXPECT().DomainData( - gomock.Any(), // ctx - gomock.Any(), //epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) - - var sentBlock *ethpb.SignedBeaconBlock - - m.validatorClient.EXPECT().ProposeBlock( - gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.SignedBeaconBlock{}), - ).DoAndReturn(func(ctx context.Context, block *ethpb.SignedBeaconBlock) (*ethpb.ProposeResponse, error) { - sentBlock = block - return ðpb.ProposeResponse{}, nil - }) - - validator.ProposeBlock(context.Background(), 1, validatorPubKey) - - if string(sentBlock.Block.Body.Graffiti) != string(validator.graffiti) { - t.Errorf("Block was broadcast with the wrong graffiti field, wanted \"%v\", got \"%v\"", string(validator.graffiti), string(sentBlock.Block.Body.Graffiti)) - } -} diff --git a/validator/client/streaming/runner.go b/validator/client/streaming/runner.go deleted file mode 100644 index 981f9c3a87..0000000000 --- a/validator/client/streaming/runner.go +++ /dev/null @@ -1,153 +0,0 @@ -package streaming - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/featureconfig" - "github.com/prysmaticlabs/prysm/shared/params" - "go.opencensus.io/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// Validator interface defines the primary methods of a validator client. 
-type Validator interface { - Done() - WaitForChainStart(ctx context.Context) error - WaitForSync(ctx context.Context) error - WaitForSynced(ctx context.Context) error - WaitForActivation(ctx context.Context) error - NextSlot() <-chan uint64 - CurrentSlot() uint64 - SlotDeadline(slot uint64) time.Time - LogValidatorGainsAndLosses(ctx context.Context, slot uint64) error - StreamDuties(ctx context.Context) error - UpdateProtections(ctx context.Context, slot uint64) error - RolesAt(ctx context.Context, slot uint64) (map[[48]byte][]validatorRole, error) // validator pubKey -> roles - SubmitAttestation(ctx context.Context, slot uint64, pubKey [48]byte) - ProposeBlock(ctx context.Context, slot uint64, pubKey [48]byte) - SubmitAggregateAndProof(ctx context.Context, slot uint64, pubKey [48]byte) - LogAttestationsSubmitted() - SaveProtections(ctx context.Context) error - UpdateDomainDataCaches(ctx context.Context, slot uint64) -} - -// Run the main validator routine. This routine exits if the context is -// canceled. -// -// Order of operations: -// 1 - Initialize validator data -// 2 - Wait for validator activation -// 3 - Listen to a server-side stream of validator duties -// 4 - Wait for the next slot start -// 5 - Determine role at current slot -// 6 - Perform assigned role, if any -func run(ctx context.Context, v Validator) { - defer v.Done() - if featureconfig.Get().WaitForSynced { - if err := v.WaitForSynced(ctx); err != nil { - log.Fatalf("Could not determine if chain started and beacon node is synced: %v", err) - } - } else { - if err := v.WaitForChainStart(ctx); err != nil { - log.Fatalf("Could not determine if beacon chain started: %v", err) - } - if err := v.WaitForSync(ctx); err != nil { - log.Fatalf("Could not determine if beacon node synced: %v", err) - } - } - if err := v.WaitForActivation(ctx); err != nil { - log.Fatalf("Could not wait for validator activation: %v", err) - } - // We listen to a server-side stream of validator duties in the - // background of the validator client. - go func() { - if err := v.StreamDuties(ctx); err != nil { - handleAssignmentError(err, v.CurrentSlot()) - } - }() - for { - ctx, span := trace.StartSpan(ctx, "validator.processSlot") - - select { - case <-ctx.Done(): - log.Info("Context canceled, stopping validator") - return // Exit if context is canceled. - case slot := <-v.NextSlot(): - span.AddAttributes(trace.Int64Attribute("slot", int64(slot))) - deadline := v.SlotDeadline(slot) - slotCtx, _ := context.WithDeadline(ctx, deadline) - // Report this validator client's rewards and penalties throughout its lifecycle. - log := log.WithField("slot", slot) - log.WithField("deadline", deadline).Debug("Set deadline for proposals and attestations") - if err := v.LogValidatorGainsAndLosses(slotCtx, slot); err != nil { - log.WithError(err).Error("Could not report validator's rewards/penalties") - } - - if featureconfig.Get().ProtectAttester { - if err := v.UpdateProtections(ctx, slot); err != nil { - log.WithError(err).Error("Could not update validator protection") - } - } - - // Start fetching domain data for the next epoch. 
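The hunk that follows kicks off that prefetch only on the last slot of an epoch. A small self-contained sketch of the epoch-end check it relies on, assuming the usual 32-slots-per-epoch layout; helpers.IsEpochEnd is Prysm's own helper and may differ in detail.

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, assumed for this sketch

// isEpochEnd reports whether slot is the final slot of its epoch, which is the
// cheapest moment to warm caches for slot+1 (the first slot of the next epoch).
func isEpochEnd(slot uint64) bool {
	return (slot+1)%slotsPerEpoch == 0
}

func main() {
	fmt.Println(isEpochEnd(30), isEpochEnd(31), isEpochEnd(32)) // false true false
}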
- if helpers.IsEpochEnd(slot) { - go v.UpdateDomainDataCaches(ctx, slot+1) - } - - var wg sync.WaitGroup - - allRoles, err := v.RolesAt(ctx, slot) - if err != nil { - log.WithError(err).Error("Could not get validator roles") - continue - } - for id, roles := range allRoles { - wg.Add(len(roles)) - for _, role := range roles { - go func(role validatorRole, id [48]byte) { - defer wg.Done() - switch role { - case roleAttester: - v.SubmitAttestation(slotCtx, slot, id) - case roleProposer: - v.ProposeBlock(slotCtx, slot, id) - case roleAggregator: - v.SubmitAggregateAndProof(slotCtx, slot, id) - case roleUnknown: - log.WithField("pubKey", fmt.Sprintf("%#x", bytesutil.Trunc(id[:]))).Trace("No active roles, doing nothing") - default: - log.Warnf("Unhandled role %v", role) - } - }(role, id) - } - } - // Wait for all processes to complete, then report span complete. - go func() { - wg.Wait() - v.LogAttestationsSubmitted() - if featureconfig.Get().ProtectAttester { - if err := v.SaveProtections(ctx); err != nil { - log.WithError(err).Error("Could not save validator protection") - } - } - span.End() - }() - } - } -} - -func handleAssignmentError(err error, slot uint64) { - if errCode, ok := status.FromError(err); ok && errCode.Code() == codes.NotFound { - log.WithField( - "epoch", slot/params.BeaconConfig().SlotsPerEpoch, - ).Warn("Validator not yet assigned to epoch") - } else { - log.WithField("error", err).Error("Failed to update assignments") - } -} diff --git a/validator/client/streaming/runner_test.go b/validator/client/streaming/runner_test.go deleted file mode 100644 index 626d6e87cc..0000000000 --- a/validator/client/streaming/runner_test.go +++ /dev/null @@ -1,153 +0,0 @@ -package streaming - -import ( - "context" - "testing" - "time" - - "github.com/prysmaticlabs/prysm/shared/featureconfig" -) - -func cancelledContext() context.Context { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - return ctx -} - -func TestCancelledContext_CleansUpValidator(t *testing.T) { - v := &fakeValidator{} - run(cancelledContext(), v) - if !v.DoneCalled { - t.Error("Expected Done() to be called") - } -} - -func TestCancelledContext_WaitsForChainStart(t *testing.T) { - v := &fakeValidator{} - run(cancelledContext(), v) - if !v.WaitForChainStartCalled { - t.Error("Expected WaitForChainStart() to be called") - } -} - -func TestCancelledContext_WaitsForSynced(t *testing.T) { - cfg := &featureconfig.Flags{ - WaitForSynced: true, - } - reset := featureconfig.InitWithReset(cfg) - defer reset() - v := &fakeValidator{} - run(cancelledContext(), v) - if !v.WaitForSyncedCalled { - t.Error("Expected WaitForSynced() to be called") - } -} - -func TestCancelledContext_WaitsForActivation(t *testing.T) { - v := &fakeValidator{} - run(cancelledContext(), v) - if !v.WaitForActivationCalled { - t.Error("Expected WaitForActivation() to be called") - } -} - -func TestRoleAt_NextSlot(t *testing.T) { - v := &fakeValidator{} - ctx, cancel := context.WithCancel(context.Background()) - - slot := uint64(55) - ticker := make(chan uint64) - v.NextSlotRet = ticker - go func() { - ticker <- slot - - cancel() - }() - - run(ctx, v) - - if !v.RoleAtCalled { - t.Fatalf("Expected RoleAt(%d) to be called", slot) - } - if v.RoleAtArg1 != slot { - t.Errorf("RoleAt called with the wrong arg. 
Want=%d, got=%d", slot, v.RoleAtArg1) - } -} - -func TestAttests_NextSlot(t *testing.T) { - v := &fakeValidator{} - ctx, cancel := context.WithCancel(context.Background()) - - slot := uint64(55) - ticker := make(chan uint64) - v.NextSlotRet = ticker - v.RolesAtRet = []validatorRole{roleAttester} - go func() { - ticker <- slot - - cancel() - }() - timer := time.NewTimer(200 * time.Millisecond) - run(ctx, v) - <-timer.C - if !v.AttestToBlockHeadCalled { - t.Fatalf("SubmitAttestation(%d) was not called", slot) - } - if v.AttestToBlockHeadArg1 != slot { - t.Errorf("SubmitAttestation was called with wrong arg. Want=%d, got=%d", slot, v.AttestToBlockHeadArg1) - } -} - -func TestProposes_NextSlot(t *testing.T) { - v := &fakeValidator{} - ctx, cancel := context.WithCancel(context.Background()) - - slot := uint64(55) - ticker := make(chan uint64) - v.NextSlotRet = ticker - v.RolesAtRet = []validatorRole{roleProposer} - go func() { - ticker <- slot - - cancel() - }() - timer := time.NewTimer(200 * time.Millisecond) - run(ctx, v) - <-timer.C - if !v.ProposeBlockCalled { - t.Fatalf("ProposeBlock(%d) was not called", slot) - } - if v.ProposeBlockArg1 != slot { - t.Errorf("ProposeBlock was called with wrong arg. Want=%d, got=%d", slot, v.AttestToBlockHeadArg1) - } -} - -func TestBothProposesAndAttests_NextSlot(t *testing.T) { - v := &fakeValidator{} - ctx, cancel := context.WithCancel(context.Background()) - - slot := uint64(55) - ticker := make(chan uint64) - v.NextSlotRet = ticker - v.RolesAtRet = []validatorRole{roleAttester, roleProposer} - go func() { - ticker <- slot - - cancel() - }() - timer := time.NewTimer(200 * time.Millisecond) - run(ctx, v) - <-timer.C - if !v.AttestToBlockHeadCalled { - t.Fatalf("SubmitAttestation(%d) was not called", slot) - } - if v.AttestToBlockHeadArg1 != slot { - t.Errorf("SubmitAttestation was called with wrong arg. Want=%d, got=%d", slot, v.AttestToBlockHeadArg1) - } - if !v.ProposeBlockCalled { - t.Fatalf("ProposeBlock(%d) was not called", slot) - } - if v.ProposeBlockArg1 != slot { - t.Errorf("ProposeBlock was called with wrong arg. 
Want=%d, got=%d", slot, v.AttestToBlockHeadArg1) - } -} diff --git a/validator/client/streaming/service.go b/validator/client/streaming/service.go deleted file mode 100644 index 5be6af371c..0000000000 --- a/validator/client/streaming/service.go +++ /dev/null @@ -1,249 +0,0 @@ -package streaming - -import ( - "context" - "strings" - - "github.com/dgraph-io/ristretto" - middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" - grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - lru "github.com/hashicorp/golang-lru" - "github.com/pkg/errors" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/grpcutils" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/validator/db/kv" - keymanager "github.com/prysmaticlabs/prysm/validator/keymanager/v1" - slashingprotection "github.com/prysmaticlabs/prysm/validator/slashing-protection" - "github.com/sirupsen/logrus" - "go.opencensus.io/plugin/ocgrpc" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" -) - -var log = logrus.WithField("prefix", "validator") - -// ValidatorService represents a service to manage the validator client -// routine. -type ValidatorService struct { - ctx context.Context - cancel context.CancelFunc - validator Validator - graffiti []byte - conn *grpc.ClientConn - endpoint string - withCert string - dataDir string - keyManager keymanager.KeyManager - logValidatorBalances bool - emitAccountMetrics bool - maxCallRecvMsgSize int - grpcRetries uint - grpcHeaders []string - protector slashingprotection.Protector -} - -// Config for the validator service. -type Config struct { - Endpoint string - DataDir string - CertFlag string - GraffitiFlag string - KeyManager keymanager.KeyManager - LogValidatorBalances bool - EmitAccountMetrics bool - GrpcMaxCallRecvMsgSizeFlag int - GrpcRetriesFlag uint - GrpcHeadersFlag string - Protector slashingprotection.Protector -} - -// NewValidatorService creates a new validator service for the service -// registry. -func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, error) { - ctx, cancel := context.WithCancel(ctx) - return &ValidatorService{ - ctx: ctx, - cancel: cancel, - endpoint: cfg.Endpoint, - withCert: cfg.CertFlag, - dataDir: cfg.DataDir, - graffiti: []byte(cfg.GraffitiFlag), - keyManager: cfg.KeyManager, - logValidatorBalances: cfg.LogValidatorBalances, - emitAccountMetrics: cfg.EmitAccountMetrics, - maxCallRecvMsgSize: cfg.GrpcMaxCallRecvMsgSizeFlag, - grpcRetries: cfg.GrpcRetriesFlag, - grpcHeaders: strings.Split(cfg.GrpcHeadersFlag, ","), - protector: cfg.Protector, - }, nil -} - -// Start the validator service. Launches the main go routine for the validator -// client. 
-func (v *ValidatorService) Start() { - streamInterceptor := grpc.WithStreamInterceptor(middleware.ChainStreamClient( - grpc_opentracing.StreamClientInterceptor(), - grpc_prometheus.StreamClientInterceptor, - grpc_retry.StreamClientInterceptor(), - )) - dialOpts := ConstructDialOptions( - v.maxCallRecvMsgSize, v.withCert, v.grpcHeaders, v.grpcRetries, streamInterceptor) - if dialOpts == nil { - return - } - conn, err := grpc.DialContext(v.ctx, v.endpoint, dialOpts...) - if err != nil { - log.Errorf("Could not dial endpoint: %s, %v", v.endpoint, err) - return - } - log.Debug("Successfully started gRPC connection") - - pubkeys, err := v.keyManager.FetchValidatingKeys() - if err != nil { - log.Errorf("Could not get validating keys: %v", err) - return - } - - valDB, err := kv.NewKVStore(v.dataDir, pubkeys) - if err != nil { - log.Errorf("Could not initialize db: %v", err) - return - } - - v.conn = conn - cache, err := ristretto.NewCache(&ristretto.Config{ - NumCounters: 1920, // number of keys to track. - MaxCost: 192, // maximum cost of cache, 1 item = 1 cost. - BufferItems: 64, // number of keys per Get buffer. - }) - if err != nil { - panic(err) - } - - aggregatedSlotCommitteeIDCache, err := lru.New(int(params.BeaconConfig().MaxCommitteesPerSlot)) - if err != nil { - log.Errorf("Could not initialize cache: %v", err) - return - } - - v.validator = &validator{ - db: valDB, - dutiesByEpoch: make(map[uint64][]*ethpb.DutiesResponse_Duty, 2), // 2 epochs worth of duties. - validatorClient: ethpb.NewBeaconNodeValidatorClient(v.conn), - beaconClient: ethpb.NewBeaconChainClient(v.conn), - node: ethpb.NewNodeClient(v.conn), - keyManager: v.keyManager, - graffiti: v.graffiti, - logValidatorBalances: v.logValidatorBalances, - emitAccountMetrics: v.emitAccountMetrics, - prevBalance: make(map[[48]byte]uint64), - attLogs: make(map[[32]byte]*attSubmitted), - domainDataCache: cache, - aggregatedSlotCommitteeIDCache: aggregatedSlotCommitteeIDCache, - protector: v.protector, - } - go run(v.ctx, v.validator) -} - -// Stop the validator service. -func (v *ValidatorService) Stop() error { - v.cancel() - log.Info("Stopping service") - if v.conn != nil { - return v.conn.Close() - } - return nil -} - -// Status of the validator service's health. -func (v *ValidatorService) Status() error { - if v.conn == nil { - return errors.New("no connection to beacon RPC") - } - return nil -} - -// signObject signs a generic object, with protection if available. 
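signObject, shown next, branches on whether the key manager also implements the protecting interface. A self-contained sketch of that capability check follows; it uses sha256 as a stand-in for the real signing-root computation and hypothetical interface names rather than Prysm's keymanager API.

package main

import (
	"crypto/sha256"
	"fmt"
)

// signer is the baseline capability; protectingSigner is the optional one.
type signer interface {
	Sign(root [32]byte) ([]byte, error)
}

type protectingSigner interface {
	SignGeneric(root [32]byte, domain [32]byte) ([]byte, error)
}

// signObject prefers the protecting path when the key manager supports it,
// so remote signers can apply their own slashing checks.
func signObject(km signer, data, domain []byte) ([]byte, error) {
	root := sha256.Sum256(append(append([]byte{}, data...), domain...))
	if p, ok := km.(protectingSigner); ok {
		return p.SignGeneric(root, sha256.Sum256(domain))
	}
	return km.Sign(root)
}

type plainSigner struct{}

func (plainSigner) Sign(root [32]byte) ([]byte, error) { return root[:], nil }

func main() {
	sig, err := signObject(plainSigner{}, []byte("block"), []byte("domain"))
	fmt.Println(len(sig), err) // 32 <nil>
}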
-func (v *validator) signObject(pubKey [48]byte, object interface{}, domain []byte) (bls.Signature, error) { - if protectingKeymanager, supported := v.keyManager.(keymanager.ProtectingKeyManager); supported { - root, err := ssz.HashTreeRoot(object) - if err != nil { - return nil, err - } - return protectingKeymanager.SignGeneric(pubKey, root, bytesutil.ToBytes32(domain)) - } - - root, err := helpers.ComputeSigningRoot(object, domain) - if err != nil { - return nil, err - } - return v.keyManager.Sign(pubKey, root) -} - -// ConstructDialOptions constructs a list of grpc dial options -func ConstructDialOptions( - maxCallRecvMsgSize int, - withCert string, - grpcHeaders []string, - grpcRetries uint, - extraOpts ...grpc.DialOption, -) []grpc.DialOption { - var transportSecurity grpc.DialOption - if withCert != "" { - creds, err := credentials.NewClientTLSFromFile(withCert, "") - if err != nil { - log.Errorf("Could not get valid credentials: %v", err) - return nil - } - transportSecurity = grpc.WithTransportCredentials(creds) - } else { - transportSecurity = grpc.WithInsecure() - log.Warn("You are using an insecure gRPC connection! Please provide a certificate and key to use a secure connection.") - } - - if maxCallRecvMsgSize == 0 { - maxCallRecvMsgSize = 10 * 5 << 20 // Default 50Mb - } - - md := make(metadata.MD) - for _, hdr := range grpcHeaders { - if hdr != "" { - ss := strings.Split(hdr, "=") - if len(ss) != 2 { - log.Warnf("Incorrect gRPC header flag format. Skipping %v", hdr) - continue - } - md.Set(ss[0], ss[1]) - } - } - - dialOpts := []grpc.DialOption{ - transportSecurity, - grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(maxCallRecvMsgSize), - grpc_retry.WithMax(grpcRetries), - grpc.Header(&md), - ), - grpc.WithStatsHandler(&ocgrpc.ClientHandler{}), - grpc.WithUnaryInterceptor(middleware.ChainUnaryClient( - grpc_opentracing.UnaryClientInterceptor(), - grpc_prometheus.UnaryClientInterceptor, - grpc_retry.UnaryClientInterceptor(), - grpcutils.LogGRPCRequests, - )), - } - - for _, opt := range extraOpts { - dialOpts = append(dialOpts, opt) - } - - return dialOpts -} diff --git a/validator/client/streaming/service_test.go b/validator/client/streaming/service_test.go deleted file mode 100644 index f9a7487bf0..0000000000 --- a/validator/client/streaming/service_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package streaming - -import ( - "context" - "os" - "strings" - "testing" - "time" - - "github.com/prysmaticlabs/prysm/shared" - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/keystore" - "github.com/prysmaticlabs/prysm/shared/testutil" - v1 "github.com/prysmaticlabs/prysm/validator/accounts/v1" - keymanager "github.com/prysmaticlabs/prysm/validator/keymanager/v1" - logTest "github.com/sirupsen/logrus/hooks/test" -) - -var _ = shared.Service(&ValidatorService{}) -var validatorKey *keystore.Key -var validatorPubKey [48]byte -var keyMap map[[48]byte]*keystore.Key -var keyMapThreeValidators map[[48]byte]*keystore.Key -var testKeyManager keymanager.KeyManager -var testKeyManagerThreeValidators keymanager.KeyManager - -func keySetup() { - keyMap = make(map[[48]byte]*keystore.Key) - keyMapThreeValidators = make(map[[48]byte]*keystore.Key) - - var err error - validatorKey, err = keystore.NewKey() - if err != nil { - log.WithError(err).Debug("Cannot create key") - } - copy(validatorPubKey[:], validatorKey.PublicKey.Marshal()) - keyMap[validatorPubKey] = validatorKey - - sks := make([]bls.SecretKey, 1) - sks[0] = validatorKey.SecretKey - testKeyManager = 
keymanager.NewDirect(sks) - - sks = make([]bls.SecretKey, 3) - for i := 0; i < 3; i++ { - vKey, err := keystore.NewKey() - if err != nil { - log.WithError(err).Debug("Cannot create key") - } - var pubKey [48]byte - copy(pubKey[:], vKey.PublicKey.Marshal()) - keyMapThreeValidators[pubKey] = vKey - sks[i] = vKey.SecretKey - } - testKeyManagerThreeValidators = keymanager.NewDirect(sks) -} - -func TestMain(m *testing.M) { - dir := testutil.TempDir() + "/keystore1" - defer func() { - if err := os.RemoveAll(dir); err != nil { - log.WithError(err).Debug("Cannot remove keystore folder") - } - }() - if err := v1.NewValidatorAccount(dir, "1234"); err != nil { - log.WithError(err).Debug("Cannot create validator account") - } - keySetup() - os.Exit(m.Run()) -} - -func TestStop_CancelsContext(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - vs := &ValidatorService{ - ctx: ctx, - cancel: cancel, - } - - if err := vs.Stop(); err != nil { - t.Error(err) - } - - select { - case <-time.After(1 * time.Second): - t.Error("Context not canceled within 1s") - case <-vs.ctx.Done(): - } -} - -func TestLifecycle(t *testing.T) { - hook := logTest.NewGlobal() - // Use canceled context so that the run function exits immediately.. - ctx, cancel := context.WithCancel(context.Background()) - cancel() - validatorService := &ValidatorService{ - ctx: ctx, - cancel: cancel, - endpoint: "merkle tries", - withCert: "alice.crt", - keyManager: keymanager.NewDirect(nil), - } - validatorService.Start() - if err := validatorService.Stop(); err != nil { - t.Fatalf("Could not stop service: %v", err) - } - testutil.AssertLogsContain(t, hook, "Stopping service") -} - -func TestLifecycle_Insecure(t *testing.T) { - hook := logTest.NewGlobal() - // Use canceled context so that the run function exits immediately. - ctx, cancel := context.WithCancel(context.Background()) - cancel() - validatorService := &ValidatorService{ - ctx: ctx, - cancel: cancel, - endpoint: "merkle tries", - keyManager: keymanager.NewDirect(nil), - } - validatorService.Start() - testutil.AssertLogsContain(t, hook, "You are using an insecure gRPC connection") - if err := validatorService.Stop(); err != nil { - t.Fatalf("Could not stop service: %v", err) - } - testutil.AssertLogsContain(t, hook, "Stopping service") -} - -func TestStatus_NoConnectionError(t *testing.T) { - validatorService := &ValidatorService{} - if err := validatorService.Status(); !strings.Contains(err.Error(), "no connection") { - t.Errorf("Expected status check to fail if no connection is found, received: %v", err) - } -} diff --git a/validator/client/streaming/validator.go b/validator/client/streaming/validator.go deleted file mode 100644 index fb950bd84f..0000000000 --- a/validator/client/streaming/validator.go +++ /dev/null @@ -1,462 +0,0 @@ -// Package streaming represents a gRPC stream-based implementation -// of an eth2 validator client. 
-package streaming - -import ( - "context" - "encoding/binary" - "encoding/hex" - "fmt" - "io" - "strconv" - "strings" - "sync" - "time" - - "github.com/dgraph-io/ristretto" - "github.com/gogo/protobuf/proto" - ptypes "github.com/gogo/protobuf/types" - lru "github.com/hashicorp/golang-lru" - "github.com/pkg/errors" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/featureconfig" - "github.com/prysmaticlabs/prysm/shared/hashutil" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/shared/roughtime" - "github.com/prysmaticlabs/prysm/shared/slotutil" - "github.com/prysmaticlabs/prysm/validator/client/metrics" - vdb "github.com/prysmaticlabs/prysm/validator/db" - keymanager "github.com/prysmaticlabs/prysm/validator/keymanager/v1" - slashingprotection "github.com/prysmaticlabs/prysm/validator/slashing-protection" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -type validatorRole int8 - -const ( - roleUnknown = iota - roleAttester - roleProposer - roleAggregator -) - -type validator struct { - genesisTime uint64 - ticker *slotutil.SlotTicker - db vdb.Database - dutiesLock sync.RWMutex - dutiesByEpoch map[uint64][]*ethpb.DutiesResponse_Duty - validatorClient ethpb.BeaconNodeValidatorClient - beaconClient ethpb.BeaconChainClient - graffiti []byte - node ethpb.NodeClient - keyManager keymanager.KeyManager - prevBalance map[[48]byte]uint64 - logValidatorBalances bool - emitAccountMetrics bool - attLogs map[[32]byte]*attSubmitted - attLogsLock sync.Mutex - domainDataLock sync.Mutex - domainDataCache *ristretto.Cache - aggregatedSlotCommitteeIDCache *lru.Cache - aggregatedSlotCommitteeIDCacheLock sync.Mutex - attesterHistoryByPubKey map[[48]byte]*slashpb.AttestationHistory - attesterHistoryByPubKeyLock sync.RWMutex - protector slashingprotection.Protector -} - -// Done cleans up the validator. -func (v *validator) Done() { - v.ticker.Done() -} - -// WaitForChainStart checks whether the beacon node has started its runtime. That is, -// it calls to the beacon node which then verifies the ETH1.0 deposit contract logs to check -// for the ChainStart log to have been emitted. If so, it starts a ticker based on the ChainStart -// unix timestamp which will be used to keep track of time within the validator client. -func (v *validator) WaitForChainStart(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "validator.WaitForChainStart") - defer span.End() - // First, check if the beacon chain has started. - stream, err := v.validatorClient.WaitForChainStart(ctx, &ptypes.Empty{}) - if err != nil { - return errors.Wrap(err, "could not setup beacon chain ChainStart streaming client") - } - for { - log.Info("Waiting for beacon chain start log from the ETH 1.0 deposit contract") - chainStartRes, err := stream.Recv() - // If the stream is closed, we stop the loop. - if err == io.EOF { - break - } - // If context is canceled we stop the loop. 
- if ctx.Err() == context.Canceled { - return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop") - } - if err != nil { - return errors.Wrap(err, "could not receive ChainStart from stream") - } - v.genesisTime = chainStartRes.GenesisTime - break - } - // Once the ChainStart log is received, we update the genesis time of the validator client - // and begin a slot ticker used to track the current slot the beacon node is in. - v.ticker = slotutil.GetSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot) - log.WithField("genesisTime", time.Unix(int64(v.genesisTime), 0)).Info("Beacon chain genesis") - return nil -} - -// WaitForSync checks whether the beacon node has sync to the latest head. -func (v *validator) WaitForSync(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "validator.WaitForSync") - defer span.End() - - s, err := v.node.GetSyncStatus(ctx, &ptypes.Empty{}) - if err != nil { - return errors.Wrap(err, "could not get sync status") - } - if !s.Syncing { - return nil - } - - for { - select { - // Poll every half slot. - case <-time.After(slotutil.DivideSlotBy(2 /* twice per slot */)): - s, err := v.node.GetSyncStatus(ctx, &ptypes.Empty{}) - if err != nil { - return errors.Wrap(err, "could not get sync status") - } - if !s.Syncing { - return nil - } - log.Info("Waiting for beacon node to sync to latest chain head") - case <-ctx.Done(): - return errors.New("context has been canceled, exiting goroutine") - } - } -} - -// WaitForSynced opens a stream with the beacon chain node so it can be informed of when the beacon node is -// fully synced and ready to communicate with the validator. -func (v *validator) WaitForSynced(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "validator.WaitForSynced") - defer span.End() - // First, check if the beacon chain has started. - stream, err := v.validatorClient.WaitForSynced(ctx, &ptypes.Empty{}) - if err != nil { - return errors.Wrap(err, "could not setup beacon chain Synced streaming client") - } - for { - log.Info("Waiting for chainstart to occur and the beacon node to be fully synced") - syncedRes, err := stream.Recv() - // If the stream is closed, we stop the loop. - if err == io.EOF { - break - } - // If context is canceled we stop the loop. - if ctx.Err() == context.Canceled { - return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop") - } - if err != nil { - return errors.Wrap(err, "could not receive Synced from stream") - } - v.genesisTime = syncedRes.GenesisTime - break - } - // Once the Synced log is received, we update the genesis time of the validator client - // and begin a slot ticker used to track the current slot the beacon node is in. - v.ticker = slotutil.GetSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot) - log.WithField("genesisTime", time.Unix(int64(v.genesisTime), 0)).Info("Chain has started and the beacon node is synced") - return nil -} - -// WaitForActivation checks whether the validator pubkey is in the active -// validator set. If not, this operation will block until an activation message is -// received. 
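WaitForActivation, shown next, blocks on a server stream until one of the tracked keys reports an active status. A minimal sketch of that blocking pattern, with a channel standing in for the gRPC stream and plain strings standing in for the status enum:

package main

import (
	"context"
	"errors"
	"fmt"
)

// waitForActive keeps receiving status updates until one reports ACTIVE,
// bailing out if the stream closes or the context is canceled.
func waitForActive(ctx context.Context, updates <-chan string) error {
	for {
		select {
		case <-ctx.Done():
			return errors.New("context has been canceled so shutting down the loop")
		case status, ok := <-updates:
			if !ok {
				return errors.New("stream closed before activation")
			}
			if status == "ACTIVE" {
				return nil
			}
			// Anything else (UNKNOWN_STATUS, DEPOSITED, PENDING, ...) keeps waiting.
		}
	}
}

func main() {
	updates := make(chan string, 3)
	updates <- "DEPOSITED"
	updates <- "PENDING"
	updates <- "ACTIVE"
	fmt.Println(waitForActive(context.Background(), updates)) // <nil>
}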
-func (v *validator) WaitForActivation(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "validator.WaitForActivation") - defer span.End() - validatingKeys, err := v.keyManager.FetchValidatingKeys() - if err != nil { - return errors.Wrap(err, "could not fetch validating keys") - } - req := ðpb.ValidatorActivationRequest{ - PublicKeys: bytesutil.FromBytes48Array(validatingKeys), - } - stream, err := v.validatorClient.WaitForActivation(ctx, req) - if err != nil { - return errors.Wrap(err, "could not setup validator WaitForActivation streaming client") - } - for { - res, err := stream.Recv() - // If the stream is closed, we stop the loop. - if err == io.EOF { - break - } - // If context is canceled we stop the loop. - if ctx.Err() == context.Canceled { - return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop") - } - if err != nil { - return errors.Wrap(err, "could not receive validator activation from stream") - } - valActivated := v.checkAndLogValidatorStatus(res.Statuses) - - if valActivated { - for _, statusResp := range res.Statuses { - if statusResp.Status.Status != ethpb.ValidatorStatus_ACTIVE { - continue - } - log.WithFields(logrus.Fields{ - "publicKey": fmt.Sprintf("%#x", bytesutil.Trunc(statusResp.PublicKey)), - "index": statusResp.Index, - }).Info("Validator activated") - } - break - } - } - v.ticker = slotutil.GetSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot) - - return nil -} - -func (v *validator) checkAndLogValidatorStatus(validatorStatuses []*ethpb.ValidatorActivationResponse_Status) bool { - nonexistentIndex := ^uint64(0) - var validatorActivated bool - for _, status := range validatorStatuses { - fields := logrus.Fields{ - "pubKey": fmt.Sprintf("%#x", bytesutil.Trunc(status.PublicKey[:])), - "status": status.Status.Status.String(), - } - if status.Index != nonexistentIndex { - fields["index"] = status.Index - } - log := log.WithFields(fields) - if v.emitAccountMetrics { - fmtKey := fmt.Sprintf("%#x", status.PublicKey) - metrics.ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(status.Status.Status)) - } - switch status.Status.Status { - case ethpb.ValidatorStatus_UNKNOWN_STATUS: - log.Info("Waiting for deposit to be observed by beacon node") - case ethpb.ValidatorStatus_DEPOSITED: - if status.Status.DepositInclusionSlot != 0 { - log.WithFields(logrus.Fields{ - "expectedInclusionSlot": status.Status.DepositInclusionSlot, - "eth1DepositBlockNumber": status.Status.Eth1DepositBlockNumber, - }).Info("Deposit for validator received but not processed into the beacon state") - } else { - log.WithField( - "positionInActivationQueue", status.Status.PositionInActivationQueue, - ).Info("Deposit processed, entering activation queue after finalization") - } - case ethpb.ValidatorStatus_PENDING: - if status.Status.ActivationEpoch == params.BeaconConfig().FarFutureEpoch { - log.WithFields(logrus.Fields{ - "positionInActivationQueue": status.Status.PositionInActivationQueue, - }).Info("Waiting to be assigned activation epoch") - } else { - log.WithFields(logrus.Fields{ - "activationEpoch": status.Status.ActivationEpoch, - }).Info("Waiting for activation") - } - case ethpb.ValidatorStatus_ACTIVE: - validatorActivated = true - case ethpb.ValidatorStatus_EXITED: - log.Info("Validator exited") - default: - log.WithFields(logrus.Fields{ - "activationEpoch": status.Status.ActivationEpoch, - }).Info("Validator status") - } - } - return validatorActivated -} - -// NextSlot emits the next slot number at the 
start time of that slot. -func (v *validator) NextSlot() <-chan uint64 { - return v.ticker.C() -} - -// SlotDeadline is the start time of the next slot. -func (v *validator) SlotDeadline(slot uint64) time.Time { - secs := (slot + 1) * params.BeaconConfig().SecondsPerSlot - return time.Unix(int64(v.genesisTime), 0 /*ns*/).Add(time.Duration(secs) * time.Second) -} - -// UpdateProtections goes through the duties of the given slot and fetches the required validator history, -// assigning it in validator. -func (v *validator) UpdateProtections(ctx context.Context, slot uint64) error { - epoch := slot / params.BeaconConfig().SlotsPerEpoch - v.dutiesLock.RLock() - duty, ok := v.dutiesByEpoch[epoch] - if !ok { - v.dutiesLock.RUnlock() - log.Debugf("No assigned duties yet for epoch %d", epoch) - return nil - } - v.dutiesLock.RUnlock() - attestingPubKeys := make([][48]byte, 0, len(duty)) - for _, dt := range duty { - if dt == nil { - continue - } - if dt.AttesterSlot == slot { - attestingPubKeys = append(attestingPubKeys, bytesutil.ToBytes48(dt.PublicKey)) - } - } - attHistoryByPubKey, err := v.db.AttestationHistoryForPubKeys(ctx, attestingPubKeys) - if err != nil { - return errors.Wrap(err, "could not get attester history") - } - v.attesterHistoryByPubKey = attHistoryByPubKey - return nil -} - -// SaveProtections saves the attestation information currently in validator state. -func (v *validator) SaveProtections(ctx context.Context) error { - if err := v.db.SaveAttestationHistoryForPubKeys(ctx, v.attesterHistoryByPubKey); err != nil { - return errors.Wrap(err, "could not save attester history to DB") - } - v.attesterHistoryByPubKey = make(map[[48]byte]*slashpb.AttestationHistory) - return nil -} - -// isAggregator checks if a validator is an aggregator of a given slot, it uses the selection algorithm outlined in: -// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#aggregation-selection -func (v *validator) isAggregator(ctx context.Context, committee []uint64, slot uint64, pubKey [48]byte) (bool, error) { - modulo := uint64(1) - if len(committee)/int(params.BeaconConfig().TargetAggregatorsPerCommittee) > 1 { - modulo = uint64(len(committee)) / params.BeaconConfig().TargetAggregatorsPerCommittee - } - - slotSig, err := v.signSlot(ctx, pubKey, slot) - if err != nil { - return false, err - } - - b := hashutil.Hash(slotSig) - - return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil -} - -// UpdateDomainDataCaches by making calls for all of the possible domain data. These can change when -// the fork version changes which can happen once per epoch. Although changing for the fork version -// is very rare, a validator should check these data every epoch to be sure the validator is -// participating on the correct fork version. -func (v *validator) UpdateDomainDataCaches(ctx context.Context, slot uint64) { - if !featureconfig.Get().EnableDomainDataCache { - return - } - - for _, d := range [][]byte{ - params.BeaconConfig().DomainRandao[:], - params.BeaconConfig().DomainBeaconAttester[:], - params.BeaconConfig().DomainBeaconProposer[:], - params.BeaconConfig().DomainSelectionProof[:], - params.BeaconConfig().DomainAggregateAndProof[:], - } { - _, err := v.domainData(ctx, helpers.SlotToEpoch(slot), d) - if err != nil { - log.WithError(err).Errorf("Failed to update domain data for domain %v", d) - } - } -} - -// CurrentSlot based on the chain genesis time. 
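CurrentSlot, shown next, derives the slot number from the genesis timestamp. A small sketch of the same arithmetic, assuming the mainnet value of 12 seconds per slot:

package main

import (
	"fmt"
	"time"
)

// currentSlot returns the number of whole slot intervals elapsed since genesis,
// or 0 if genesis has not happened yet.
func currentSlot(genesis, now time.Time, secondsPerSlot uint64) uint64 {
	if !genesis.Before(now) {
		return 0
	}
	return uint64(now.Sub(genesis).Seconds()) / secondsPerSlot
}

func main() {
	genesis := time.Now().Add(-61 * time.Second)
	fmt.Println(currentSlot(genesis, time.Now(), 12)) // 5
}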
-func (v *validator) CurrentSlot() uint64 { - var currentSlot uint64 - genesisTime := time.Unix(int64(v.genesisTime), 0) - if genesisTime.Before(roughtime.Now()) { - currentSlot = slotutil.SlotsSinceGenesis(genesisTime) - } - return currentSlot -} - -func (v *validator) domainData(ctx context.Context, epoch uint64, domain []byte) (*ethpb.DomainResponse, error) { - v.domainDataLock.Lock() - defer v.domainDataLock.Unlock() - - req := ðpb.DomainRequest{ - Epoch: epoch, - Domain: domain, - } - - key := strings.Join([]string{strconv.FormatUint(req.Epoch, 10), hex.EncodeToString(req.Domain)}, ",") - - if featureconfig.Get().EnableDomainDataCache { - if val, ok := v.domainDataCache.Get(key); ok { - return proto.Clone(val.(proto.Message)).(*ethpb.DomainResponse), nil - } - } - - res, err := v.validatorClient.DomainData(ctx, req) - if err != nil { - return nil, err - } - - if featureconfig.Get().EnableDomainDataCache { - v.domainDataCache.Set(key, proto.Clone(res), 1) - } - - return res, nil -} - -func (v *validator) logDuties(slot uint64, duties []*ethpb.DutiesResponse_Duty) { - attesterKeys := make([][]string, params.BeaconConfig().SlotsPerEpoch) - for i := range attesterKeys { - attesterKeys[i] = make([]string, 0) - } - proposerKeys := make([]string, params.BeaconConfig().SlotsPerEpoch) - slotOffset := helpers.StartSlot(helpers.SlotToEpoch(slot)) - - for _, duty := range duties { - if v.emitAccountMetrics { - fmtKey := fmt.Sprintf("%#x", duty.PublicKey) - metrics.ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(duty.Status)) - } - - // Only interested in validators who are attesting/proposing. - // Note that SLASHING validators will have duties but their results are ignored by the network so we don't bother with them. - if duty.Status != ethpb.ValidatorStatus_ACTIVE && duty.Status != ethpb.ValidatorStatus_EXITING { - continue - } - - validatorKey := fmt.Sprintf("%#x", bytesutil.Trunc(duty.PublicKey)) - attesterIndex := duty.AttesterSlot - slotOffset - if attesterIndex >= params.BeaconConfig().SlotsPerEpoch { - log.WithField("duty", duty).Warn("Invalid attester slot") - } else { - attesterKeys[duty.AttesterSlot-slotOffset] = append(attesterKeys[duty.AttesterSlot-slotOffset], validatorKey) - } - - for _, proposerSlot := range duty.ProposerSlots { - proposerIndex := proposerSlot - slotOffset - if proposerIndex >= params.BeaconConfig().SlotsPerEpoch { - log.WithField("duty", duty).Warn("Invalid proposer slot") - } else { - proposerKeys[proposerIndex] = validatorKey - } - } - } - - for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch; i++ { - if len(attesterKeys[i]) > 0 { - log.WithField("slot", slotOffset+i).WithField("attesters", len(attesterKeys[i])).WithField("pubKeys", attesterKeys[i]).Info("Attestation schedule") - } - if proposerKeys[i] != "" { - log.WithField("slot", slotOffset+i).WithField("pubKey", proposerKeys[i]).Info("Proposal schedule") - } - } -} - -// This constructs a validator subscribed key, it's used to track -// which subnet has already been pending requested. 
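validatorSubscribeKey, shown next, packs a slot and a committee ID into a single fixed-size map key so a (slot, committee) pair can be tracked with one lookup. A sketch of that construction using plain binary encoding in place of Prysm's bytesutil helpers:

package main

import (
	"encoding/binary"
	"fmt"
)

// subscribeKey packs two 32-byte fields (slot, then committee ID) into one
// 64-byte array. The exact byte layout here is illustrative.
func subscribeKey(slot, committeeID uint64) [64]byte {
	var key [64]byte
	binary.LittleEndian.PutUint64(key[:8], slot)           // first half holds the slot
	binary.LittleEndian.PutUint64(key[32:40], committeeID) // second half holds the committee ID
	return key
}

func main() {
	a := subscribeKey(10, 3)
	b := subscribeKey(10, 4)
	fmt.Println(a == b) // false: distinct committees map to distinct keys
}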
-func validatorSubscribeKey(slot uint64, committeeID uint64) [64]byte { - return bytesutil.ToBytes64(append(bytesutil.Bytes32(slot), bytesutil.Bytes32(committeeID)...)) -} diff --git a/validator/client/streaming/validator_test.go b/validator/client/streaming/validator_test.go deleted file mode 100644 index e19e6d544c..0000000000 --- a/validator/client/streaming/validator_test.go +++ /dev/null @@ -1,874 +0,0 @@ -package streaming - -import ( - "context" - "errors" - "io/ioutil" - "reflect" - "strings" - "testing" - "time" - - ptypes "github.com/gogo/protobuf/types" - "github.com/golang/mock/gomock" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/mock" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/shared/testutil" - dbTest "github.com/prysmaticlabs/prysm/validator/db/testing" - keymanager "github.com/prysmaticlabs/prysm/validator/keymanager/v1" - "github.com/sirupsen/logrus" - logTest "github.com/sirupsen/logrus/hooks/test" -) - -func init() { - logrus.SetLevel(logrus.DebugLevel) - logrus.SetOutput(ioutil.Discard) -} - -var _ = Validator(&validator{}) - -const cancelledCtx = "context has been canceled" - -func publicKeys(km keymanager.KeyManager) [][]byte { - keys, err := km.FetchValidatingKeys() - if err != nil { - log.WithError(err).Debug("Cannot fetch validating keys") - } - res := make([][]byte, len(keys)) - for i := range keys { - res[i] = keys[i][:] - } - return res -} - -func generateMockStatusResponse(pubkeys [][]byte) *ethpb.ValidatorActivationResponse { - multipleStatus := make([]*ethpb.ValidatorActivationResponse_Status, len(pubkeys)) - for i, key := range pubkeys { - multipleStatus[i] = ðpb.ValidatorActivationResponse_Status{ - PublicKey: key, - Status: ðpb.ValidatorStatusResponse{ - Status: ethpb.ValidatorStatus_UNKNOWN_STATUS, - }, - } - } - return ðpb.ValidatorActivationResponse{Statuses: multipleStatus} -} - -func TestWaitForChainStart_SetsChainStartGenesisTime(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - genesis := uint64(time.Unix(1, 0).Unix()) - clientStream := mock.NewMockBeaconNodeValidator_WaitForChainStartClient(ctrl) - client.EXPECT().WaitForChainStart( - gomock.Any(), - &ptypes.Empty{}, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - ðpb.ChainStartResponse{ - Started: true, - GenesisTime: genesis, - }, - nil, - ) - if err := v.WaitForChainStart(context.Background()); err != nil { - t.Fatal(err) - } - if v.genesisTime != genesis { - t.Errorf("Expected chain start time to equal %d, received %d", genesis, v.genesisTime) - } - if v.ticker == nil { - t.Error("Expected ticker to be set, received nil") - } -} - -func TestWaitForChainStart_ContextCanceled(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - genesis := uint64(time.Unix(0, 0).Unix()) - clientStream := mock.NewMockBeaconNodeValidator_WaitForChainStartClient(ctrl) - client.EXPECT().WaitForChainStart( - gomock.Any(), - &ptypes.Empty{}, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - ðpb.ChainStartResponse{ - 
Started: true, - GenesisTime: genesis, - }, - nil, - ) - ctx, cancel := context.WithCancel(context.Background()) - cancel() - err := v.WaitForChainStart(ctx) - want := cancelledCtx - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func TestWaitForChainStart_StreamSetupFails(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - clientStream := mock.NewMockBeaconNodeValidator_WaitForChainStartClient(ctrl) - client.EXPECT().WaitForChainStart( - gomock.Any(), - &ptypes.Empty{}, - ).Return(clientStream, errors.New("failed stream")) - err := v.WaitForChainStart(context.Background()) - want := "could not setup beacon chain ChainStart streaming client" - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func TestWaitForChainStart_ReceiveErrorFromStream(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - clientStream := mock.NewMockBeaconNodeValidator_WaitForChainStartClient(ctrl) - client.EXPECT().WaitForChainStart( - gomock.Any(), - &ptypes.Empty{}, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - nil, - errors.New("fails"), - ) - err := v.WaitForChainStart(context.Background()) - want := "could not receive ChainStart from stream" - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func TestWaitForSynced_SetsGenesisTime(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - genesis := uint64(time.Unix(1, 0).Unix()) - clientStream := mock.NewMockBeaconNodeValidator_WaitForSyncedClient(ctrl) - client.EXPECT().WaitForSynced( - gomock.Any(), - &ptypes.Empty{}, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - ðpb.SyncedResponse{ - Synced: true, - GenesisTime: genesis, - }, - nil, - ) - if err := v.WaitForSynced(context.Background()); err != nil { - t.Fatal(err) - } - if v.genesisTime != genesis { - t.Errorf("Expected chain start time to equal %d, received %d", genesis, v.genesisTime) - } - if v.ticker == nil { - t.Error("Expected ticker to be set, received nil") - } -} - -func TestWaitForSynced_ContextCanceled(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - genesis := uint64(time.Unix(0, 0).Unix()) - clientStream := mock.NewMockBeaconNodeValidator_WaitForSyncedClient(ctrl) - client.EXPECT().WaitForSynced( - gomock.Any(), - &ptypes.Empty{}, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - ðpb.SyncedResponse{ - Synced: true, - GenesisTime: genesis, - }, - nil, - ) - ctx, cancel := context.WithCancel(context.Background()) - cancel() - err := v.WaitForSynced(ctx) - want := cancelledCtx - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func TestWaitForSynced_StreamSetupFails(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := 
validator{ - keyManager: testKeyManager, - validatorClient: client, - } - clientStream := mock.NewMockBeaconNodeValidator_WaitForSyncedClient(ctrl) - client.EXPECT().WaitForSynced( - gomock.Any(), - &ptypes.Empty{}, - ).Return(clientStream, errors.New("failed stream")) - err := v.WaitForSynced(context.Background()) - want := "could not setup beacon chain Synced streaming client" - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func TestWaitForSynced_ReceiveErrorFromStream(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - clientStream := mock.NewMockBeaconNodeValidator_WaitForSyncedClient(ctrl) - client.EXPECT().WaitForSynced( - gomock.Any(), - &ptypes.Empty{}, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - nil, - errors.New("fails"), - ) - err := v.WaitForSynced(context.Background()) - want := "could not receive Synced from stream" - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func TestWaitActivation_ContextCanceled(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl) - - client.EXPECT().WaitForActivation( - gomock.Any(), - ðpb.ValidatorActivationRequest{ - PublicKeys: publicKeys(v.keyManager), - }, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - ðpb.ValidatorActivationResponse{}, - nil, - ) - ctx, cancel := context.WithCancel(context.Background()) - cancel() - err := v.WaitForActivation(ctx) - want := cancelledCtx - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func TestWaitActivation_StreamSetupFails(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl) - client.EXPECT().WaitForActivation( - gomock.Any(), - ðpb.ValidatorActivationRequest{ - PublicKeys: publicKeys(v.keyManager), - }, - ).Return(clientStream, errors.New("failed stream")) - err := v.WaitForActivation(context.Background()) - want := "could not setup validator WaitForActivation streaming client" - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func TestWaitActivation_ReceiveErrorFromStream(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - } - clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl) - client.EXPECT().WaitForActivation( - gomock.Any(), - ðpb.ValidatorActivationRequest{ - PublicKeys: publicKeys(v.keyManager), - }, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - nil, - errors.New("fails"), - ) - err := v.WaitForActivation(context.Background()) - want := "could not receive validator activation from stream" - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func 
TestWaitActivation_LogsActivationEpochOK(t *testing.T) { - hook := logTest.NewGlobal() - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManager, - validatorClient: client, - genesisTime: 1, - } - resp := generateMockStatusResponse(publicKeys(v.keyManager)) - resp.Statuses[0].Status.Status = ethpb.ValidatorStatus_ACTIVE - clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl) - client.EXPECT().WaitForActivation( - gomock.Any(), - ðpb.ValidatorActivationRequest{ - PublicKeys: publicKeys(v.keyManager), - }, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - resp, - nil, - ) - if err := v.WaitForActivation(context.Background()); err != nil { - t.Errorf("Could not wait for activation: %v", err) - } - testutil.AssertLogsContain(t, hook, "Validator activated") -} - -func TestWaitMultipleActivation_LogsActivationEpochOK(t *testing.T) { - hook := logTest.NewGlobal() - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManagerThreeValidators, - validatorClient: client, - genesisTime: 1, - } - publicKeys := publicKeys(v.keyManager) - resp := generateMockStatusResponse(publicKeys) - resp.Statuses[0].Status.Status = ethpb.ValidatorStatus_ACTIVE - resp.Statuses[1].Status.Status = ethpb.ValidatorStatus_ACTIVE - clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl) - client.EXPECT().WaitForActivation( - gomock.Any(), - ðpb.ValidatorActivationRequest{ - PublicKeys: publicKeys, - }, - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - resp, - nil, - ) - if err := v.WaitForActivation(context.Background()); err != nil { - t.Errorf("Could not wait for activation: %v", err) - } - testutil.AssertLogsContain(t, hook, "Validator activated") -} - -func TestWaitActivation_NotAllValidatorsActivatedOK(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - client := mock.NewMockBeaconNodeValidatorClient(ctrl) - - v := validator{ - keyManager: testKeyManagerThreeValidators, - validatorClient: client, - genesisTime: 1, - } - resp := generateMockStatusResponse(publicKeys(v.keyManager)) - resp.Statuses[0].Status.Status = ethpb.ValidatorStatus_ACTIVE - clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl) - client.EXPECT().WaitForActivation( - gomock.Any(), - gomock.Any(), - ).Return(clientStream, nil) - clientStream.EXPECT().Recv().Return( - ðpb.ValidatorActivationResponse{}, - nil, - ) - clientStream.EXPECT().Recv().Return( - resp, - nil, - ) - if err := v.WaitForActivation(context.Background()); err != nil { - t.Errorf("Could not wait for activation: %v", err) - } -} - -func TestWaitSync_ContextCanceled(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - n := mock.NewMockNodeClient(ctrl) - - v := validator{ - node: n, - } - - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - n.EXPECT().GetSyncStatus( - gomock.Any(), - gomock.Any(), - ).Return(ðpb.SyncStatus{Syncing: true}, nil) - - err := v.WaitForSync(ctx) - want := cancelledCtx - if !strings.Contains(err.Error(), want) { - t.Errorf("Expected %v, received %v", want, err) - } -} - -func TestWaitSync_NotSyncing(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - n := mock.NewMockNodeClient(ctrl) - - v := validator{ - node: n, - } - - n.EXPECT().GetSyncStatus( - gomock.Any(), - 
		gomock.Any(),
-	).Return(&ethpb.SyncStatus{Syncing: false}, nil)
-
-	err := v.WaitForSync(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestWaitSync_Syncing(t *testing.T) {
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-	n := mock.NewMockNodeClient(ctrl)
-
-	v := validator{
-		node: n,
-	}
-
-	n.EXPECT().GetSyncStatus(
-		gomock.Any(),
-		gomock.Any(),
-	).Return(&ethpb.SyncStatus{Syncing: true}, nil)
-
-	n.EXPECT().GetSyncStatus(
-		gomock.Any(),
-		gomock.Any(),
-	).Return(&ethpb.SyncStatus{Syncing: false}, nil)
-
-	err := v.WaitForSync(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestUpdateProtections_OK(t *testing.T) {
-	pubKey1 := [48]byte{1}
-	pubKey2 := [48]byte{2}
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-	client := mock.NewMockBeaconNodeValidatorClient(ctrl)
-	db := dbTest.SetupDB(t, [][48]byte{pubKey1, pubKey2})
-
-	newMap := make(map[uint64]uint64)
-	newMap[0] = params.BeaconConfig().FarFutureEpoch
-	newMap[1] = 0
-	newMap[2] = 1
-	history := &slashpb.AttestationHistory{
-		TargetToSource: newMap,
-		LatestEpochWritten: 2,
-	}
-
-	newMap2 := make(map[uint64]uint64)
-	newMap2[0] = params.BeaconConfig().FarFutureEpoch
-	newMap2[1] = params.BeaconConfig().FarFutureEpoch
-	newMap2[2] = params.BeaconConfig().FarFutureEpoch
-	newMap2[3] = 2
-	history2 := &slashpb.AttestationHistory{
-		TargetToSource: newMap,
-		LatestEpochWritten: 3,
-	}
-
-	histories := make(map[[48]byte]*slashpb.AttestationHistory)
-	histories[pubKey1] = history
-	histories[pubKey2] = history2
-	if err := db.SaveAttestationHistoryForPubKeys(context.Background(), histories); err != nil {
-		t.Fatal(err)
-	}
-
-	slot := params.BeaconConfig().SlotsPerEpoch
-	epoch := slot / params.BeaconConfig().SlotsPerEpoch
-	v := validator{
-		db: db,
-		keyManager: testKeyManager,
-		validatorClient: client,
-	}
-	v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
-	v.dutiesByEpoch[epoch] = []*ethpb.DutiesResponse_Duty{
-		{
-			AttesterSlot: slot,
-			ValidatorIndex: 200,
-			CommitteeIndex: 100,
-			Committee: []uint64{0, 1, 2, 3},
-			PublicKey: pubKey1[:],
-		},
-		{
-			AttesterSlot: slot,
-			ValidatorIndex: 201,
-			CommitteeIndex: 100,
-			Committee: []uint64{0, 1, 2, 3},
-			PublicKey: pubKey2[:],
-		},
-	}
-
-	if err := v.UpdateProtections(context.Background(), slot); err != nil {
-		t.Fatalf("Could not update assignments: %v", err)
-	}
-	if !reflect.DeepEqual(v.attesterHistoryByPubKey[pubKey1], history) {
-		t.Fatalf("Expected retrieved history to be equal to %v, received %v", history, v.attesterHistoryByPubKey[pubKey1])
-	}
-	if !reflect.DeepEqual(v.attesterHistoryByPubKey[pubKey2], history2) {
-		t.Fatalf("Expected retrieved history to be equal to %v, received %v", history2, v.attesterHistoryByPubKey[pubKey2])
-	}
-}
-
-func TestSaveProtections_OK(t *testing.T) {
-	pubKey1 := [48]byte{1}
-	pubKey2 := [48]byte{2}
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-	client := mock.NewMockBeaconNodeValidatorClient(ctrl)
-	db := dbTest.SetupDB(t, [][48]byte{pubKey1, pubKey2})
-
-	cleanHistories, err := db.AttestationHistoryForPubKeys(context.Background(), [][48]byte{pubKey1, pubKey2})
-	if err != nil {
-		t.Fatal(err)
-	}
-	v := validator{
-		db: db,
-		keyManager: testKeyManager,
-		validatorClient: client,
-		attesterHistoryByPubKey: cleanHistories,
-	}
-
-	history1 := cleanHistories[pubKey1]
-	history1 = markAttestationForTargetEpoch(history1, 0, 1)
-
-	history2 := cleanHistories[pubKey1]
-	history2 = markAttestationForTargetEpoch(history1, 2, 3)
-
-	cleanHistories[pubKey1] = history1
-	cleanHistories[pubKey2] = history2
-
-	v.attesterHistoryByPubKey = cleanHistories
-	if err := v.SaveProtections(context.Background()); err != nil {
-		t.Fatalf("Could not update assignments: %v", err)
-	}
-	savedHistories, err := db.AttestationHistoryForPubKeys(context.Background(), [][48]byte{pubKey1, pubKey2})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if !reflect.DeepEqual(savedHistories[pubKey1], history1) {
-		t.Fatalf("Expected retrieved history to be equal to %v, received %v", history1, v.attesterHistoryByPubKey[pubKey1])
-	}
-	if !reflect.DeepEqual(savedHistories[pubKey2], history2) {
-		t.Fatalf("Expected retrieved history to be equal to %v, received %v", history2, v.attesterHistoryByPubKey[pubKey2])
-	}
-}
-
-func TestRolesAt_OK(t *testing.T) {
-	v, m, finish := setup(t)
-	defer finish()
-
-	sks := make([]bls.SecretKey, 4)
-	sks[0] = bls.RandKey()
-	sks[1] = bls.RandKey()
-	sks[2] = bls.RandKey()
-	sks[3] = bls.RandKey()
-	v.keyManager = keymanager.NewDirect(sks)
-
-	v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
-	v.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
-		{
-			CommitteeIndex: 1,
-			AttesterSlot: 1,
-			PublicKey: sks[0].PublicKey().Marshal(),
-		},
-		{
-			CommitteeIndex: 2,
-			ProposerSlots: []uint64{1},
-			PublicKey: sks[1].PublicKey().Marshal(),
-		},
-		{
-			CommitteeIndex: 1,
-			AttesterSlot: 2,
-			PublicKey: sks[2].PublicKey().Marshal(),
-		},
-		{
-			CommitteeIndex: 2,
-			AttesterSlot: 1,
-			ProposerSlots: []uint64{1, 5},
-			PublicKey: sks[3].PublicKey().Marshal(),
-		},
-	}
-
-	m.validatorClient.EXPECT().DomainData(
-		gomock.Any(), // ctx
-		gomock.Any(), // epoch
-	).Return(&ethpb.DomainResponse{}, nil /*err*/)
-
-	m.validatorClient.EXPECT().DomainData(
-		gomock.Any(), // ctx
-		gomock.Any(), // epoch
-	).Return(&ethpb.DomainResponse{}, nil /*err*/)
-
-	roleMap, err := v.RolesAt(context.Background(), 1)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if roleMap[bytesutil.ToBytes48(sks[0].PublicKey().Marshal())][0] != roleAttester {
-		t.Errorf("Unexpected validator role. want: roleProposer")
-	}
-	if roleMap[bytesutil.ToBytes48(sks[1].PublicKey().Marshal())][0] != roleProposer {
-		t.Errorf("Unexpected validator role. want: roleAttester")
-	}
-	if roleMap[bytesutil.ToBytes48(sks[2].PublicKey().Marshal())][0] != roleUnknown {
-		t.Errorf("Unexpected validator role. want: UNKNOWN")
-	}
-	if roleMap[bytesutil.ToBytes48(sks[3].PublicKey().Marshal())][0] != roleProposer {
-		t.Errorf("Unexpected validator role. want: roleProposer")
-	}
-	if roleMap[bytesutil.ToBytes48(sks[3].PublicKey().Marshal())][1] != roleAttester {
-		t.Errorf("Unexpected validator role. want: roleAttester")
-	}
-	if roleMap[bytesutil.ToBytes48(sks[3].PublicKey().Marshal())][2] != roleAggregator {
-		t.Errorf("Unexpected validator role. want: roleAggregator")
-	}
-}
-
-func TestRolesAt_DoesNotAssignProposer_Slot0(t *testing.T) {
-	v, m, finish := setup(t)
-	defer finish()
-
-	sks := make([]bls.SecretKey, 3)
-	sks[0] = bls.RandKey()
-	sks[1] = bls.RandKey()
-	sks[2] = bls.RandKey()
-	v.keyManager = keymanager.NewDirect(sks)
-
-	v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
-	v.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
-		{
-			CommitteeIndex: 1,
-			AttesterSlot: 0,
-			ProposerSlots: []uint64{0},
-			PublicKey: sks[0].PublicKey().Marshal(),
-		},
-		{
-			CommitteeIndex: 2,
-			AttesterSlot: 4,
-			ProposerSlots: nil,
-			PublicKey: sks[1].PublicKey().Marshal(),
-		},
-		{
-			CommitteeIndex: 1,
-			AttesterSlot: 3,
-			ProposerSlots: nil,
-			PublicKey: sks[2].PublicKey().Marshal(),
-		},
-	}
-
-	m.validatorClient.EXPECT().DomainData(
-		gomock.Any(), // ctx
-		gomock.Any(), // epoch
-	).Return(&ethpb.DomainResponse{}, nil /*err*/)
-
-	roleMap, err := v.RolesAt(context.Background(), 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if roleMap[bytesutil.ToBytes48(sks[0].PublicKey().Marshal())][0] != roleAttester {
-		t.Errorf("Unexpected validator role. want: roleProposer")
-	}
-	if roleMap[bytesutil.ToBytes48(sks[1].PublicKey().Marshal())][0] != roleUnknown {
-		t.Errorf("Unexpected validator role. want: roleAttester")
-	}
-	if roleMap[bytesutil.ToBytes48(sks[2].PublicKey().Marshal())][0] != roleUnknown {
-		t.Errorf("Unexpected validator role. want: UNKNOWN")
-	}
-}
-
-func TestCheckAndLogValidatorStatus_OK(t *testing.T) {
-	nonexistentIndex := ^uint64(0)
-	type statusTest struct {
-		name string
-		status *ethpb.ValidatorActivationResponse_Status
-		log string
-		active bool
-	}
-	pubKeys := [][]byte{
-		bytesutil.Uint64ToBytes(0),
-		bytesutil.Uint64ToBytes(1),
-		bytesutil.Uint64ToBytes(2),
-		bytesutil.Uint64ToBytes(3),
-	}
-	tests := []statusTest{
-		{
-			name: "UNKNOWN_STATUS, no deposit found yet",
-			status: &ethpb.ValidatorActivationResponse_Status{
-				PublicKey: pubKeys[0],
-				Index: nonexistentIndex,
-				Status: &ethpb.ValidatorStatusResponse{
-					Status: ethpb.ValidatorStatus_UNKNOWN_STATUS,
-				},
-			},
-			log: "Waiting for deposit to be observed by beacon node",
-		},
-		{
-			name: "DEPOSITED, deposit found",
-			status: &ethpb.ValidatorActivationResponse_Status{
-				PublicKey: pubKeys[0],
-				Index: nonexistentIndex,
-				Status: &ethpb.ValidatorStatusResponse{
-					Status: ethpb.ValidatorStatus_DEPOSITED,
-					DepositInclusionSlot: 50,
-					Eth1DepositBlockNumber: 400,
-				},
-			},
-			log: "Deposit for validator received but not processed into the beacon state\" eth1DepositBlockNumber=400 expectedInclusionSlot=50",
-		},
-		{
-			name: "DEPOSITED into state",
-			status: &ethpb.ValidatorActivationResponse_Status{
-				PublicKey: pubKeys[0],
-				Index: 30,
-				Status: &ethpb.ValidatorStatusResponse{
-					Status: ethpb.ValidatorStatus_DEPOSITED,
-					PositionInActivationQueue: 30,
-				},
-			},
-			log: "Deposit processed, entering activation queue after finalization\" index=30 positionInActivationQueue=30",
-		},
-		{
-			name: "PENDING",
-			status: &ethpb.ValidatorActivationResponse_Status{
-				PublicKey: pubKeys[0],
-				Index: 50,
-				Status: &ethpb.ValidatorStatusResponse{
-					Status: ethpb.ValidatorStatus_PENDING,
-					ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
-					PositionInActivationQueue: 6,
-				},
-			},
-			log: "Waiting to be assigned activation epoch\" index=50 positionInActivationQueue=6",
-		},
-		{
-			name: "PENDING",
-			status: &ethpb.ValidatorActivationResponse_Status{
-				PublicKey: pubKeys[0],
-				Index: 89,
-				Status: &ethpb.ValidatorStatusResponse{
-					Status: ethpb.ValidatorStatus_PENDING,
-					ActivationEpoch: 60,
-					PositionInActivationQueue: 5,
-				},
-			},
-			log: "Waiting for activation\" activationEpoch=60 index=89",
-		},
-		{
-			name: "EXITED",
-			status: &ethpb.ValidatorActivationResponse_Status{
-				PublicKey: pubKeys[0],
-				Status: &ethpb.ValidatorStatusResponse{
-					Status: ethpb.ValidatorStatus_EXITED,
-				},
-			},
-			log: "Validator exited",
-		},
-	}
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			hook := logTest.NewGlobal()
-			ctrl := gomock.NewController(t)
-			defer ctrl.Finish()
-			client := mock.NewMockBeaconNodeValidatorClient(ctrl)
-			v := validator{
-				keyManager: testKeyManager,
-				validatorClient: client,
-			}
-			v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
-			v.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
-				{
-					CommitteeIndex: 1,
-				},
-			}
-
-			active := v.checkAndLogValidatorStatus([]*ethpb.ValidatorActivationResponse_Status{test.status})
-			if active != test.active {
-				t.Fatalf("expected key to be active, expected %t, received %t", test.active, active)
-			}
-
-			testutil.AssertLogsContain(t, hook, test.log)
-		})
-	}
-}
diff --git a/validator/client/polling/validator.go b/validator/client/validator.go
similarity index 98%
rename from validator/client/polling/validator.go
rename to validator/client/validator.go
index ef0d0be5de..c8965879d4 100644
--- a/validator/client/polling/validator.go
+++ b/validator/client/validator.go
@@ -1,6 +1,6 @@
-// Package polling represents a gRPC polling-based implementation
+// Package client represents a gRPC polling-based implementation
 // of an eth2 validator client.
-package polling
+package client

 import (
 	"context"
@@ -26,7 +26,6 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/hashutil"
 	"github.com/prysmaticlabs/prysm/shared/params"
 	"github.com/prysmaticlabs/prysm/shared/slotutil"
-	"github.com/prysmaticlabs/prysm/validator/client/metrics"
 	vdb "github.com/prysmaticlabs/prysm/validator/db"
 	keymanager "github.com/prysmaticlabs/prysm/validator/keymanager/v1"
 	slashingprotection "github.com/prysmaticlabs/prysm/validator/slashing-protection"
@@ -237,7 +236,7 @@ func (v *validator) checkAndLogValidatorStatus(validatorStatuses []*ethpb.Valida
 		log := log.WithFields(fields)
 		if v.emitAccountMetrics {
 			fmtKey := fmt.Sprintf("%#x", status.PublicKey)
-			metrics.ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(status.Status.Status))
+			ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(status.Status.Status))
 		}
 		switch status.Status.Status {
 		case ethpb.ValidatorStatus_UNKNOWN_STATUS:
@@ -553,7 +552,7 @@ func (v *validator) logDuties(slot uint64, duties []*ethpb.DutiesResponse_Duty)
 	for _, duty := range duties {
 		if v.emitAccountMetrics {
 			fmtKey := fmt.Sprintf("%#x", duty.PublicKey)
-			metrics.ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(duty.Status))
+			ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(duty.Status))
 		}

 		// Only interested in validators who are attesting/proposing.
diff --git a/validator/client/polling/validator_test.go b/validator/client/validator_test.go
similarity index 99%
rename from validator/client/polling/validator_test.go
rename to validator/client/validator_test.go
index e8ff3bc1f8..93f09b15b4 100644
--- a/validator/client/polling/validator_test.go
+++ b/validator/client/validator_test.go
@@ -1,4 +1,4 @@
-package polling
+package client

 import (
 	"context"
diff --git a/validator/main.go b/validator/main.go
index 44a7b5b5c6..273b4ebe21 100644
--- a/validator/main.go
+++ b/validator/main.go
@@ -23,7 +23,7 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/version"
 	v1 "github.com/prysmaticlabs/prysm/validator/accounts/v1"
 	v2 "github.com/prysmaticlabs/prysm/validator/accounts/v2"
-	"github.com/prysmaticlabs/prysm/validator/client/streaming"
+	"github.com/prysmaticlabs/prysm/validator/client"
 	"github.com/prysmaticlabs/prysm/validator/flags"
 	"github.com/prysmaticlabs/prysm/validator/node"
 	"github.com/sirupsen/logrus"
@@ -186,7 +186,7 @@ contract in order to activate the validator client`,
 			ctx, cancel := context.WithTimeout(
 				context.Background(), 10*time.Second /* Cancel if cannot connect to beacon node in 10 seconds. */)
 			defer cancel()
-			dialOpts := streaming.ConstructDialOptions(
+			dialOpts := client.ConstructDialOptions(
 				cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name),
 				cliCtx.String(flags.CertFlag.Name),
 				strings.Split(cliCtx.String(flags.GrpcHeadersFlag.Name), ","),
diff --git a/validator/node/BUILD.bazel b/validator/node/BUILD.bazel
index 3618189d54..c5402a729c 100644
--- a/validator/node/BUILD.bazel
+++ b/validator/node/BUILD.bazel
@@ -27,8 +27,7 @@ go_library(
         "//shared/prometheus:go_default_library",
         "//shared/tracing:go_default_library",
         "//shared/version:go_default_library",
-        "//validator/client/polling:go_default_library",
-        "//validator/client/streaming:go_default_library",
+        "//validator/client:go_default_library",
         "//validator/db/kv:go_default_library",
         "//validator/flags:go_default_library",
         "//validator/keymanager/v1:go_default_library",
diff --git a/validator/node/node.go b/validator/node/node.go
index 0a0164b83e..bd0bd45bf9 100644
--- a/validator/node/node.go
+++ b/validator/node/node.go
@@ -14,6 +14,9 @@ import (
 	"syscall"

 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/urfave/cli/v2"
+
 	"github.com/prysmaticlabs/prysm/shared"
 	"github.com/prysmaticlabs/prysm/shared/cmd"
 	"github.com/prysmaticlabs/prysm/shared/debug"
@@ -22,14 +25,11 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/prometheus"
 	"github.com/prysmaticlabs/prysm/shared/tracing"
 	"github.com/prysmaticlabs/prysm/shared/version"
-	"github.com/prysmaticlabs/prysm/validator/client/polling"
-	"github.com/prysmaticlabs/prysm/validator/client/streaming"
+	"github.com/prysmaticlabs/prysm/validator/client"
 	"github.com/prysmaticlabs/prysm/validator/db/kv"
 	"github.com/prysmaticlabs/prysm/validator/flags"
 	keymanager "github.com/prysmaticlabs/prysm/validator/keymanager/v1"
 	slashing_protection "github.com/prysmaticlabs/prysm/validator/slashing-protection"
-	"github.com/sirupsen/logrus"
-	"github.com/urfave/cli/v2"
 )

 var log = logrus.WithField("prefix", "node")
@@ -204,27 +204,7 @@ func (s *ValidatorClient) registerClientService(keyManager keymanager.KeyManager
 	if err := s.services.FetchService(&sp); err == nil {
 		protector = sp
 	}
-	if featureconfig.Get().EnableStreamDuties {
-		v, err := streaming.NewValidatorService(context.Background(), &streaming.Config{
-			Endpoint: endpoint,
-			DataDir: dataDir,
-			KeyManager: keyManager,
-			LogValidatorBalances: logValidatorBalances,
-			EmitAccountMetrics: emitAccountMetrics,
-			CertFlag: cert,
-			GraffitiFlag: graffiti,
-			GrpcMaxCallRecvMsgSizeFlag: maxCallRecvMsgSize,
-			GrpcRetriesFlag: grpcRetries,
-			GrpcHeadersFlag: s.cliCtx.String(flags.GrpcHeadersFlag.Name),
-			Protector: protector,
-		})
-
-		if err != nil {
-			return errors.Wrap(err, "could not initialize client service")
-		}
-		return s.services.RegisterService(v)
-	}
-	v, err := polling.NewValidatorService(context.Background(), &polling.Config{
+	v, err := client.NewValidatorService(context.Background(), &client.Config{
 		Endpoint: endpoint,
 		DataDir: dataDir,
 		KeyManager: keyManager,