Compare commits

...

33 Commits

Author SHA1 Message Date
terence tsao
ed7ad4525e Method to retrieve block slot via block root (#5084)
* blockRootSlot

* Tests

* Gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-12 16:04:24 -05:00
terence tsao
7fcc07fb45 Save hot state (#5083)
* loadEpochBoundaryRoot
* Tests
* Span
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into save-hot-state
* Starting test
* Tests
* Merge refs/heads/master into save-hot-state
* Merge branch 'master' into save-hot-state
* Use copy
* Merge branch 'save-hot-state' of https://github.com/prysmaticlabs/prysm into save-hot-state
* Merge refs/heads/master into save-hot-state
2020-03-12 20:48:07 +00:00
shayzluf
f937713fe9 Broadcast slashing (#5073)
* add flag
* broadcast slashings
* Merge branch 'master' of github.com:prysmaticlabs/prysm into broadcast_slashing

# Conflicts:
#	beacon-chain/rpc/beacon/slashings_test.go
* fix tests
* goimports
* goimports
* Merge branch 'master' into broadcast_slashing
* Merge branch 'master' into broadcast_slashing
* Merge branch 'master' into broadcast_slashing
2020-03-12 20:29:23 +00:00
terence tsao
359e0abe1d Load epoch boundary root (#5079)
* loadEpochBoundaryRoot

* Tests

* Span

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-12 15:00:37 -05:00
tzapu
0704ba685a Return statuses on duties (#5069)
* try to return something for everything
* default to unknown
* debug
* moar debug
* move else to outer check
* working
* reorder imports
* cleanup
* fix TestGetDuties_NextEpoch_CantFindValidatorIdx
* Merge branch 'master' into return-statuses-on-duties
* Update validator/client/validator.go
* Merge branch 'master' into return-statuses-on-duties
* Merge branch 'master' into return-statuses-on-duties
2020-03-12 19:07:37 +00:00
shayzluf
0f95b797af Save slashings to slasher DB (#5081)
* fix tests, add error type handling

* Update slasher/detection/detect_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>

* goimports

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-03-12 22:08:58 +05:30
terence tsao
43722e45f4 Save cold state (#5077) 2020-03-12 05:58:06 -07:00
terence tsao
ff4ed413a3 State migration from hot to cold (archived) (#5076)
* Starting

* Test

* Tests

* comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-11 21:27:16 -05:00
Raul Jordan
f1a42eb589 Verify Slashing Signatures Before Putting Into Blocks (#5071)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* verify slashing
* added in test for pending att slashing
* tests starting to pass
* sig failed verify regression test
* tests passing for ops pool
* Update beacon-chain/operations/slashings/service.go
* Merge refs/heads/master into verify-slash-sig
* verify on insert
* tests starting to pass
* all code paths fixed
* imports
* fix build
* fix rpc errors
* Merge refs/heads/master into verify-slash-sig
2020-03-12 01:16:55 +00:00
terence tsao
a90ffaba49 Archived point retrieval and recovery (#5075) 2020-03-11 17:38:30 -07:00
Raul Jordan
663d919b6f Include Bazel Genrule for Fast SSZ (#5070)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* included new ssz bzl rule
* Merge branch 'master' into add-in-starlark-rule
* Update tools/ssz.bzl

Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
2020-03-11 19:50:22 +00:00
Victor Farazdagi
7b30845c01 fixes races in blocks fetcher (#5068) 2020-03-11 14:21:41 +03:00
Victor Farazdagi
46eb228379 fixes data race in state.Slot (#5067)
* fixes data race in state/getters
2020-03-11 09:11:07 +00:00
Raul Jordan
8d3fc1ad3e Add in Slasher Metrics (#5060)
* added in slasher metrics
* Merge branch 'master' into slasher-metrics
* add in prom bolt metrics for slasher
* Merge branch 'slasher-metrics' of github.com:prysmaticlabs/prysm into slasher-metrics
* imports
* include all metrics
* no dup bolt collector
* Update slasher/detection/attestations/spanner.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* naming best practices for prom, thx Terence
* Merge branch 'slasher-metrics' of github.com:prysmaticlabs/prysm into slasher-metrics
2020-03-10 19:41:55 +00:00
Nishant Das
93195b762b Improve HTR of State (#5058)
* add cache
* Update beacon-chain/state/stateutil/blocks.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/stateutil/blocks.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/stateutil/hash_function.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Merge branch 'master' into improveHTR
* add back string casting
* fix imports
2020-03-10 16:26:54 +00:00
Jim McDonald
f0abf0d7d5 Reduce frequency of 'eth1 client not syncing' messages (#5057) 2020-03-10 09:51:41 -05:00
Nishant Das
9d27449212 Discovery Fixes (#5050)
* connect to dv5 bootnodes

* fix test

* change polling period

* ignore

* Update beacon-chain/p2p/service.go

* Update beacon-chain/p2p/service_test.go

* fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-09 19:53:37 -07:00
Preston Van Loon
edb6590764 Build herumi's BLS from source (#5055)
* Build herumi from source. Working so far on linux_amd64 for compile, but tests fail to initialize the curve appropriately

* Add copts to go_default_library

* llvm toolchain, still WIP

* Fixes, make llvm a config flag

* fix gazelle resolution

* comment

* comment

* update herumi to the v0.9.4 version

* Apply @nisdas patch from https://github.com/herumi/bls-eth-go-binary/pull/5
2020-03-09 21:22:41 -05:00
Raul Jordan
e77cf724b8 Better Nil Check in Slasher (#5053)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* some nil checks in slasher
2020-03-09 21:21:39 +00:00
Ivan Martinez
b633dfe880 Change detection and updating in Slasher to per attestation (#5043)
* Change span updates to update multiple validators at once

* Change detection to perform on multiple validators at once

* Fix minspan issue

* Fix indices

* Fix test

* Remove logs

* Remove more logs

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/detect.go

* nil check

* fix unused import

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-09 13:14:19 -05:00
Ivan Martinez
8334aac111 Batch saving of attestations from stream for slasher (#5041)
* Batch saving of attestations from stream for slasher

* Progress on test

* Fixes

* Fix test

* Rename

* Modify logs and timing

* Change

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-09 12:49:40 -05:00
Preston Van Loon
4c1e2ba196 Add prysm.sh script (#5042)
* Add prysm.sh script

* Add dist to gitignore

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-09 12:19:53 -05:00
terence tsao
25c13663d2 Add hot state by slot retrieval (#5052)
* Update replay conditions

* loadHotStateBySlot

* Tests and gaz

* Tests
2020-03-09 11:22:45 -05:00
Jim McDonald
0c3af32274 Use BeaconBlockHeader in place of BeaconBlock (#5049) 2020-03-09 21:08:30 +08:00
shayzluf
01cb01a8f2 On eviction test fix (#5046) 2020-03-09 01:35:39 -04:00
Raul Jordan
0c9e99e04a Aggregate Attestations Before Streaming to Slasher (#5029)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* aggregate before streaming
* Merge branch 'agg-idx-atts' of github.com:prysmaticlabs/prysm into agg-idx-atts
* collect atts and increase buffer size
* fix test for func
* Merge refs/heads/master into agg-idx-atts
* Update beacon-chain/rpc/beacon/attestations.go
* Merge refs/heads/master into agg-idx-atts
* naming
* Merge branch 'agg-idx-atts' of github.com:prysmaticlabs/prysm into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* comment terence feedback
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Fix tests
2020-03-08 21:39:54 +00:00
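
The aggregation this commit performs before streaming reduces duplicate indexed attestations sent to the slasher: attestations are grouped by the hash tree root of their data and each group is aggregated. A minimal Go sketch of that pattern follows; the package name and the go-ssz import path are assumptions for illustration, while the helpers.AggregateAttestations and ssz.HashTreeRoot calls mirror the ones visible in the attestations.go diff further down.

package attaggregation // hypothetical package name for this sketch

import (
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	ssz "github.com/prysmaticlabs/go-ssz" // import path assumed for illustration
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
)

// groupAndAggregate batches attestations by the hash tree root of their data
// and aggregates each batch, which is the general pattern applied before
// streaming indexed attestations to the slasher.
func groupAndAggregate(atts []*ethpb.Attestation) ([]*ethpb.Attestation, error) {
	byRoot := make(map[[32]byte][]*ethpb.Attestation)
	for _, att := range atts {
		root, err := ssz.HashTreeRoot(att.Data)
		if err != nil {
			return nil, err
		}
		byRoot[root] = append(byRoot[root], att)
	}
	aggregated := make([]*ethpb.Attestation, 0, len(byRoot))
	for _, group := range byRoot {
		aggAtts, err := helpers.AggregateAttestations(group)
		if err != nil {
			return nil, err
		}
		aggregated = append(aggregated, aggAtts...)
	}
	return aggregated, nil
}
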
Ivan Martinez
d4cd51f23e Change slasher cache to LRU cache (#5037)
* Change cache to LRU cache

* fixes

* Reduce db usage

* Fix function name

* Merge issues

* Save on eviction

* Fixes

* Fix

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-08 17:11:59 -04:00
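
For the pattern this commit adopts, here is a minimal sketch of an LRU cache with an eviction callback, assuming the hashicorp/golang-lru package; the cache size, key/value types, and the idea of persisting evicted entries to the slasher DB are illustrative assumptions rather than the actual Prysm wiring.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// LRU cache with an eviction callback, so entries pushed out by the
	// LRU policy can still be persisted ("Save on eviction" above). The
	// size, key, and value here are illustrative assumptions.
	cache, err := lru.NewWithEvict(256, func(key interface{}, value interface{}) {
		fmt.Printf("evicted %v; a real implementation would persist it to the slasher DB\n", key)
	})
	if err != nil {
		panic(err)
	}
	cache.Add(uint64(1), "epoch spans for validator 1")
	if v, ok := cache.Get(uint64(1)); ok {
		fmt.Println("cache hit:", v)
	}
}
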
terence tsao
962fe8552d Compute state up to slot (#5035) 2020-03-08 21:41:24 +01:00
Raul Jordan
eddaea869b Prepare Slasher for Production (#5020)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* add a bit more better logging
* Empty db fix
* Improve logs
* Fix small issues in spanner, improvements
* Change costs back to 1 for now
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Change the cache back to 0
* Cleanup
* Merge branch 'master' into cleanup-slasher
* lint
* added in better spans
* log
* rem spanner in super intensive operation
* Merge branch 'master' into cleanup-slasher
* add todo
* Merge branch 'cleanup-slasher' of github.com:prysmaticlabs/prysm into cleanup-slasher
* Merge branch 'master' into cleanup-slasher
* Apply suggestions from code review
* no logrus
* Merge branch 'master' into cleanup-slasher
* Merge branch 'cleanup-slasher' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Remove spammy logs
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* gaz
* Rename func
* Add back needed code
* Add todo
* Add span to cache func
2020-03-08 17:56:43 +00:00
Nishant Das
300d072456 Add Config Change for Validator (#5038)
* add config for validator
* gaz
* Merge refs/heads/master into configureValidator
* Merge refs/heads/master into configureValidator
2020-03-08 06:45:36 +00:00
Nishant Das
ac1c92e241 Add Prometheus Service for Slasher (#5039)
* add prometheus service
* Update slasher/node/node.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Merge refs/heads/master into addPromServiceSlasher
2020-03-08 06:35:37 +00:00
terence tsao
2452c7403b Load hot state by root (#5034)
* Add loadHotStateByRoot

* Touchup loadHotStateByRoot

* Tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-08 14:24:57 +08:00
Preston Van Loon
b97e22107c Update rbe_autoconf (#5036)
* Update rbe_autoconf
* Update timestamps
2020-03-07 21:18:16 +00:00
101 changed files with 3591 additions and 1157 deletions


@@ -35,6 +35,10 @@ build:release --workspace_status_command=./scripts/workspace_status.sh
build:release --stamp
build:release --compilation_mode=opt
# LLVM compiler for building C/C++ dependencies.
build:llvm --crosstool_top=@llvm_toolchain//:toolchain
build:llvm --define compiler=llvm
# multi-arch cross-compiling toolchain configs:
# -----------------------------------------------
build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain

.gitignore

@@ -29,3 +29,6 @@ password.txt
# go dependency
/go.mod
/go.sum
# Dist files
dist


@@ -13,6 +13,28 @@ http_archive(
],
)
http_archive(
name = "com_grail_bazel_toolchain",
sha256 = "0bec89e35d8a141c87f28cfc506d6d344785c8eb2ff3a453140a1fe972ada79d",
strip_prefix = "bazel-toolchain-77a87103145f86f03f90475d19c2c8854398a444",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/77a87103145f86f03f90475d19c2c8854398a444.tar.gz"],
)
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
bazel_toolchain_dependencies()
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "9.0.0",
)
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
llvm_register_toolchains()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
configure_prysm_toolchains()
@@ -70,7 +92,7 @@ git_repository(
name = "graknlabs_bazel_distribution",
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
remote = "https://github.com/graknlabs/bazel-distribution",
shallow_since = "1563544980 +0300",
shallow_since = "1569509514 +0300",
)
# Override default import in rules_go with special patch until
@@ -84,7 +106,7 @@ git_repository(
"//third_party:com_github_gogo_protobuf-equal.patch",
],
remote = "https://github.com/gogo/protobuf",
shallow_since = "1567336231 +0200",
shallow_since = "1571033717 +0200",
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
@@ -95,6 +117,10 @@ load(
container_repositories()
load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
bls_dependencies()
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
go_rules_dependencies()
@@ -202,13 +228,6 @@ http_archive(
url = "https://github.com/bazelbuild/buildtools/archive/bf564b4925ab5876a3f64d8b90fab7f769013d42.zip",
)
http_archive(
name = "com_github_herumi_bls_eth_go_binary",
sha256 = "b5628a95bd1e6ff84f73d87c134bb1e7e9c1a5a2a10b831867d9dad7d8defc3e",
strip_prefix = "bls-go-binary-8ee33d1a2e8ba8dcf0c3d0b459d75d42d163339d",
url = "https://github.com/nisdas/bls-go-binary/archive/8ee33d1a2e8ba8dcf0c3d0b459d75d42d163339d.zip",
)
load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")
buildifier_dependencies()
@@ -235,9 +254,9 @@ all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//v
http_archive(
name = "rules_foreign_cc",
sha256 = "450563dc2938f38566a59596bb30a3e905fbbcc35b3fff5a1791b122bc140465",
strip_prefix = "rules_foreign_cc-456425521973736ef346d93d3d6ba07d807047df",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/456425521973736ef346d93d3d6ba07d807047df.zip",
sha256 = "450563dc2938f38566a59596bb30a3e905fbbcc35b3fff5a1791b122bc140465",
)
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
@@ -1283,8 +1302,9 @@ go_repository(
go_repository(
name = "com_github_cloudflare_roughtime",
commit = "d41fdcee702eb3e5c3296288a453b9340184d37e",
importpath = "github.com/cloudflare/roughtime",
sum = "h1:jeSxE3fepJdhASERvBHI6RFkMhISv6Ir2JUybYLIVXs=",
version = "v0.0.0-20200205191924-a69ef1dab727",
)
go_repository(
@@ -1571,3 +1591,10 @@ go_repository(
sum = "h1:J1gHJRNFEk7NdiaPQQqAvxEy+7hhCsVv3uzduWybmqY=",
version = "v0.0.0-20200302201340-8c54356e12c9",
)
go_repository(
name = "com_github_ferranbt_fastssz",
importpath = "github.com/ferranbt/fastssz",
sum = "h1:oUQredbOIzWIMmeGR9dTLzSi4DqRVwxrPzSDiLJBp4Q=",
version = "v0.0.0-20200310214500-3283b9706406",
)


@@ -4,17 +4,23 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"metrics.go",
"service.go",
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -27,9 +33,9 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
],


@@ -0,0 +1,57 @@
package slashings
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
numPendingAttesterSlashingFailedSigVerify = promauto.NewCounter(
prometheus.CounterOpts{
Name: "pending_attester_slashing_fail_sig_verify_total",
Help: "Times an pending attester slashing fails sig verification",
},
)
numPendingAttesterSlashings = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "num_pending_attester_slashings",
Help: "Number of pending attester slashings in the pool",
},
)
numAttesterSlashingsIncluded = promauto.NewCounter(
prometheus.CounterOpts{
Name: "attester_slashings_included_total",
Help: "Number of attester slashings included in blocks",
},
)
attesterSlashingReattempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "attester_slashing_reattempts_total",
Help: "Times an attester slashing for an already slashed validator is received",
},
)
numPendingProposerSlashingFailedSigVerify = promauto.NewCounter(
prometheus.CounterOpts{
Name: "pending_proposer_slashing_fail_sig_verify_total",
Help: "Times an pending proposer slashing fails sig verification",
},
)
numPendingProposerSlashings = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "num_pending_proposer_slashings",
Help: "Number of pending proposer slashings in the pool",
},
)
numProposerSlashingsIncluded = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proposer_slashings_included_total",
Help: "Number of proposer slashings included in blocks",
},
)
proposerSlashingReattempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proposer_slashing_reattempts_total",
Help: "Times a proposer slashing for an already slashed validator is received",
},
)
)
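
The counters and gauges above are created with promauto, which registers them against the default Prometheus registry, so they surface through the standard client_golang HTTP handler. A minimal, self-contained sketch of that exposure follows; the listen address is arbitrary, and in Prysm this role is played by its Prometheus/monitoring service (see the "Add Prometheus Service for Slasher" commit above).

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// promauto-created collectors register themselves with the default
	// registry, so exposing them only needs the default handler. The
	// listen address below is arbitrary for this sketch.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
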


@@ -1,15 +1,18 @@
package slashings
import (
"errors"
"context"
"fmt"
"sort"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"go.opencensus.io/trace"
)
// NewPool returns an initialized attester slashing and proposer slashing pool.
@@ -23,9 +26,14 @@ func NewPool() *Pool {
// PendingAttesterSlashings returns attester slashings that are able to be included into a block.
// This method will not return more than the block enforced MaxAttesterSlashings.
func (p *Pool) PendingAttesterSlashings() []*ethpb.AttesterSlashing {
func (p *Pool) PendingAttesterSlashings(ctx context.Context) []*ethpb.AttesterSlashing {
p.lock.RLock()
defer p.lock.RUnlock()
ctx, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
defer span.End()
// Update prom metric.
numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))
included := make(map[uint64]bool)
pending := make([]*ethpb.AttesterSlashing, 0, params.BeaconConfig().MaxAttesterSlashings)
@@ -34,6 +42,7 @@ func (p *Pool) PendingAttesterSlashings() []*ethpb.AttesterSlashing {
break
}
if included[slashing.validatorToSlash] {
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
continue
}
attSlashing := slashing.attesterSlashing
@@ -41,6 +50,7 @@ func (p *Pool) PendingAttesterSlashings() []*ethpb.AttesterSlashing {
for _, idx := range slashedVal {
included[idx] = true
}
pending = append(pending, attSlashing)
}
@@ -49,9 +59,15 @@ func (p *Pool) PendingAttesterSlashings() []*ethpb.AttesterSlashing {
// PendingProposerSlashings returns proposer slashings that are able to be included into a block.
// This method will not return more than the block enforced MaxProposerSlashings.
func (p *Pool) PendingProposerSlashings() []*ethpb.ProposerSlashing {
func (p *Pool) PendingProposerSlashings(ctx context.Context) []*ethpb.ProposerSlashing {
p.lock.RLock()
defer p.lock.RUnlock()
ctx, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
defer span.End()
// Update prom metric.
numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))
pending := make([]*ethpb.ProposerSlashing, 0, params.BeaconConfig().MaxProposerSlashings)
for i, slashing := range p.pendingProposerSlashing {
if i >= int(params.BeaconConfig().MaxProposerSlashings) {
@@ -64,9 +80,20 @@ func (p *Pool) PendingProposerSlashings() []*ethpb.ProposerSlashing {
// InsertAttesterSlashing into the pool. This method is a no-op if the attester slashing already exists in the pool,
// has been included into a block recently, or the validator is already exited.
func (p *Pool) InsertAttesterSlashing(state *beaconstate.BeaconState, slashing *ethpb.AttesterSlashing) error {
func (p *Pool) InsertAttesterSlashing(
ctx context.Context,
state *beaconstate.BeaconState,
slashing *ethpb.AttesterSlashing,
) error {
p.lock.Lock()
defer p.lock.Unlock()
ctx, span := trace.StartSpan(ctx, "operations.InsertAttesterSlashing")
defer span.End()
if err := blocks.VerifyAttesterSlashing(ctx, state, slashing); err != nil {
numPendingAttesterSlashingFailedSigVerify.Inc()
return errors.Wrap(err, "could not verify attester slashing")
}
slashedVal := sliceutil.IntersectionUint64(slashing.Attestation_1.AttestingIndices, slashing.Attestation_2.AttestingIndices)
for _, val := range slashedVal {
@@ -79,6 +106,7 @@ func (p *Pool) InsertAttesterSlashing(state *beaconstate.BeaconState, slashing *
// has been recently included in the pool of slashings, do not process this new
// slashing.
if !ok {
attesterSlashingReattempts.Inc()
return fmt.Errorf("validator at index %d cannot be slashed", val)
}
@@ -107,9 +135,21 @@ func (p *Pool) InsertAttesterSlashing(state *beaconstate.BeaconState, slashing *
// InsertProposerSlashing into the pool. This method is a no-op if the pending slashing already exists,
// has been included recently, the validator is already exited, or the validator was already slashed.
func (p *Pool) InsertProposerSlashing(state *beaconstate.BeaconState, slashing *ethpb.ProposerSlashing) error {
func (p *Pool) InsertProposerSlashing(
ctx context.Context,
state *beaconstate.BeaconState,
slashing *ethpb.ProposerSlashing,
) error {
p.lock.Lock()
defer p.lock.Unlock()
ctx, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
defer span.End()
if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {
numPendingAttesterSlashingFailedSigVerify.Inc()
return errors.Wrap(err, "could not verify proposer slashing")
}
idx := slashing.ProposerIndex
ok, err := p.validatorSlashingPreconditionCheck(state, idx)
if err != nil {
@@ -119,6 +159,7 @@ func (p *Pool) InsertProposerSlashing(state *beaconstate.BeaconState, slashing *
// has been recently included in the pool of slashings, do not process this new
// slashing.
if !ok {
proposerSlashingReattempts.Inc()
return fmt.Errorf("validator at index %d cannot be slashed", idx)
}
@@ -154,6 +195,7 @@ func (p *Pool) MarkIncludedAttesterSlashing(as *ethpb.AttesterSlashing) {
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
}
p.included[val] = true
numAttesterSlashingsIncluded.Inc()
}
}
@@ -170,6 +212,7 @@ func (p *Pool) MarkIncludedProposerSlashing(ps *ethpb.ProposerSlashing) {
p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
}
p.included[ps.ProposerIndex] = true
numProposerSlashingsIncluded.Inc()
}
// this function checks a few items about a validator before proceeding with inserting
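
A consumer-side sketch of the updated pool API follows, using only signatures visible in this diff (the context-aware Pending* getters and the MarkIncluded* helpers); the wrapping package and the assumption that the pool was populated elsewhere via the Insert* methods are illustrative.

package example // hypothetical wrapper package for this sketch

import (
	"context"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
)

// packSlashings drains the pool while building a block body, then marks the
// returned objects as included so they are not proposed again. The pool is
// assumed to have been populated elsewhere via InsertAttesterSlashing and
// InsertProposerSlashing.
func packSlashings(ctx context.Context, pool *slashings.Pool) ([]*ethpb.AttesterSlashing, []*ethpb.ProposerSlashing) {
	attSlashings := pool.PendingAttesterSlashings(ctx)
	propSlashings := pool.PendingProposerSlashings(ctx)
	for _, as := range attSlashings {
		pool.MarkIncludedAttesterSlashing(as)
	}
	for _, ps := range propSlashings {
		pool.MarkIncludedProposerSlashing(ps)
	}
	return attSlashings, propSlashings
}
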


@@ -1,15 +1,16 @@
package slashings
import (
"context"
"reflect"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func attesterSlashingForValIdx(valIdx ...uint64) *ethpb.AttesterSlashing {
@@ -30,25 +31,6 @@ func pendingSlashingForValIdx(valIdx ...uint64) *PendingAttesterSlashing {
}
}
func generateNPendingSlashings(n uint64) []*PendingAttesterSlashing {
pendingAttSlashings := make([]*PendingAttesterSlashing, n)
for i := uint64(0); i < n; i++ {
pendingAttSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: attesterSlashingForValIdx(i),
validatorToSlash: i,
}
}
return pendingAttSlashings
}
func generateNAttSlashings(n uint64) []*ethpb.AttesterSlashing {
attSlashings := make([]*ethpb.AttesterSlashing, n)
for i := uint64(0); i < n; i++ {
attSlashings[i] = attesterSlashingForValIdx(i)
}
return attSlashings
}
func TestPool_InsertAttesterSlashing(t *testing.T) {
type fields struct {
pending []*PendingAttesterSlashing
@@ -57,8 +39,44 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
err string
}
type args struct {
slashing *ethpb.AttesterSlashing
slashings []*ethpb.AttesterSlashing
}
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
pendingSlashings := make([]*PendingAttesterSlashing, 20)
slashings := make([]*ethpb.AttesterSlashing, 20)
for i := 0; i < len(pendingSlashings); i++ {
sl, err := testutil.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
pendingSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: sl,
validatorToSlash: uint64(i),
}
slashings[i] = sl
}
if err := beaconState.SetSlot(helpers.StartSlot(1)); err != nil {
t.Fatal(err)
}
// We mark the following validators with some preconditions.
exitedVal, _ := beaconState.ValidatorAtIndex(uint64(2))
exitedVal.ExitEpoch = 0
futureExitedVal, _ := beaconState.ValidatorAtIndex(uint64(4))
futureExitedVal.ExitEpoch = 17
slashedVal, _ := beaconState.ValidatorAtIndex(uint64(5))
slashedVal.Slashed = true
if err := beaconState.UpdateValidatorAtIndex(uint64(2), exitedVal); err != nil {
t.Fatal(err)
}
if err := beaconState.UpdateValidatorAtIndex(uint64(4), futureExitedVal); err != nil {
t.Fatal(err)
}
if err := beaconState.UpdateValidatorAtIndex(uint64(5), slashedVal); err != nil {
t.Fatal(err)
}
tests := []struct {
name string
fields fields
@@ -73,12 +91,12 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(1),
slashings: slashings[0:1],
},
want: []*PendingAttesterSlashing{
{
attesterSlashing: attesterSlashingForValIdx(1),
validatorToSlash: 1,
attesterSlashing: slashings[0],
validatorToSlash: 0,
},
},
},
@@ -89,97 +107,33 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
},
},
want: []*PendingAttesterSlashing{
{
attesterSlashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
},
validatorToSlash: 0,
},
{
attesterSlashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
},
validatorToSlash: 1,
},
},
},
{
name: "Empty list two validators slashed out of three",
fields: fields{
pending: make([]*PendingAttesterSlashing, 0),
included: make(map[uint64]bool),
},
args: args{
slashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 2, 3},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 3},
},
},
},
want: []*PendingAttesterSlashing{
{
attesterSlashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 2, 3},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 3},
},
},
validatorToSlash: 1,
},
{
attesterSlashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 2, 3},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 3},
},
},
validatorToSlash: 3,
},
slashings: slashings[0:2],
},
want: pendingSlashings[0:2],
},
{
name: "Duplicate identical slashing",
fields: fields{
pending: []*PendingAttesterSlashing{
pendingSlashingForValIdx(1),
pendingSlashings[1],
},
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: []*PendingAttesterSlashing{
pendingSlashingForValIdx(1),
want: pendingSlashings[1:2],
},
{
name: "Slashing for already slashed validator",
fields: fields{
pending: []*PendingAttesterSlashing{},
included: make(map[uint64]bool),
},
args: args{
slashings: slashings[5:6],
},
want: []*PendingAttesterSlashing{},
},
{
name: "Slashing for exited validator",
@@ -188,7 +142,7 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(2),
slashings: slashings[2:3],
},
want: []*PendingAttesterSlashing{},
},
@@ -199,24 +153,9 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(4),
slashings: slashings[4:5],
},
want: []*PendingAttesterSlashing{
pendingSlashingForValIdx(4),
},
},
{
name: "Slashing for slashed validator",
fields: fields{
pending: []*PendingAttesterSlashing{},
included: make(map[uint64]bool),
wantErr: true,
err: "cannot be slashed",
},
args: args{
slashing: attesterSlashingForValIdx(5),
},
want: []*PendingAttesterSlashing{},
want: pendingSlashings[4:5],
},
{
name: "Already included",
@@ -227,7 +166,7 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
},
},
args: args{
slashing: attesterSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: []*PendingAttesterSlashing{},
},
@@ -235,36 +174,15 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
name: "Maintains sorted order",
fields: fields{
pending: []*PendingAttesterSlashing{
pendingSlashingForValIdx(0),
pendingSlashingForValIdx(2),
pendingSlashings[0],
pendingSlashings[2],
},
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: generateNPendingSlashings(3),
},
}
validators := []*ethpb.Validator{
{ // 0
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 1
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 2 - Already exited.
ExitEpoch: 15,
},
{ // 3
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 4 - Will be exited.
ExitEpoch: 17,
},
{ // 5 - Slashed.
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
want: pendingSlashings[0:3],
},
}
for _, tt := range tests {
@@ -273,19 +191,19 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
pendingAttesterSlashing: tt.fields.pending,
included: tt.fields.included,
}
s, err := beaconstate.InitializeFromProtoUnsafe(&p2ppb.BeaconState{
Slot: 16 * params.BeaconConfig().SlotsPerEpoch,
Validators: validators,
})
if err != nil {
t.Fatal(err)
var err error
for i := 0; i < len(tt.args.slashings); i++ {
err = p.InsertAttesterSlashing(context.Background(), beaconState, tt.args.slashings[i])
}
err = p.InsertAttesterSlashing(s, tt.args.slashing)
if err != nil && tt.fields.wantErr && !strings.Contains(err.Error(), tt.fields.err) {
t.Fatalf("Wanted err: %v, received %v", tt.fields.err, err)
}
if len(p.pendingAttesterSlashing) != len(tt.want) {
t.Fatalf("Mismatched lengths of pending list. Got %d, wanted %d.", len(p.pendingAttesterSlashing), len(tt.want))
t.Fatalf(
"Mismatched lengths of pending list. Got %d, wanted %d.",
len(p.pendingAttesterSlashing),
len(tt.want),
)
}
for i := range p.pendingAttesterSlashing {
if p.pendingAttesterSlashing[i].validatorToSlash != tt.want[i].validatorToSlash {
@@ -309,6 +227,52 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
}
}
func TestPool_InsertAttesterSlashing_SigFailsVerify_ClearPool(t *testing.T) {
conf := params.BeaconConfig()
conf.MaxAttesterSlashings = 2
params.OverrideBeaconConfig(conf)
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
pendingSlashings := make([]*PendingAttesterSlashing, 2)
slashings := make([]*ethpb.AttesterSlashing, 2)
for i := 0; i < 2; i++ {
sl, err := testutil.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
pendingSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: sl,
validatorToSlash: uint64(i),
}
slashings[i] = sl
}
// We mess up the signature of the second slashing.
badSig := make([]byte, 96)
copy(badSig, "muahaha")
pendingSlashings[1].attesterSlashing.Attestation_1.Signature = badSig
slashings[1].Attestation_1.Signature = badSig
p := &Pool{
pendingAttesterSlashing: make([]*PendingAttesterSlashing, 0),
}
if err := p.InsertAttesterSlashing(
context.Background(),
beaconState,
slashings[0],
); err != nil {
t.Fatal(err)
}
if err := p.InsertAttesterSlashing(
context.Background(),
beaconState,
slashings[1],
); err == nil {
t.Error("Expected error when inserting slashing with bad sig, got nil")
}
// We expect to only have 1 pending attester slashing in the pool.
if len(p.pendingAttesterSlashing) != 1 {
t.Error("Expected failed attester slashing to have been cleared from pool")
}
}
func TestPool_MarkIncludedAttesterSlashing(t *testing.T) {
type fields struct {
pending []*PendingAttesterSlashing
@@ -450,6 +414,23 @@ func TestPool_PendingAttesterSlashings(t *testing.T) {
type fields struct {
pending []*PendingAttesterSlashing
}
conf := params.BeaconConfig()
conf.MaxAttesterSlashings = 1
params.OverrideBeaconConfig(conf)
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
pendingSlashings := make([]*PendingAttesterSlashing, 20)
slashings := make([]*ethpb.AttesterSlashing, 20)
for i := 0; i < len(pendingSlashings); i++ {
sl, err := testutil.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
pendingSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: sl,
validatorToSlash: uint64(i),
}
slashings[i] = sl
}
tests := []struct {
name string
fields fields
@@ -465,34 +446,16 @@ func TestPool_PendingAttesterSlashings(t *testing.T) {
{
name: "All eligible",
fields: fields{
pending: generateNPendingSlashings(1),
pending: pendingSlashings,
},
want: generateNAttSlashings(1),
want: slashings[0:1],
},
{
name: "Multiple indices",
fields: fields{
pending: []*PendingAttesterSlashing{
pendingSlashingForValIdx(1, 5, 8),
},
pending: pendingSlashings[3:6],
},
want: []*ethpb.AttesterSlashing{
attesterSlashingForValIdx(1, 5, 8),
},
},
{
name: "All eligible, over max",
fields: fields{
pending: generateNPendingSlashings(6),
},
want: generateNAttSlashings(1),
},
{
name: "No duplicate slashings for grouped",
fields: fields{
pending: generateNPendingSlashings(16),
},
want: generateNAttSlashings(1),
want: slashings[3:4],
},
}
for _, tt := range tests {
@@ -500,70 +463,43 @@ func TestPool_PendingAttesterSlashings(t *testing.T) {
p := &Pool{
pendingAttesterSlashing: tt.fields.pending,
}
if got := p.PendingAttesterSlashings(); !reflect.DeepEqual(tt.want, got) {
if got := p.PendingAttesterSlashings(
context.Background(),
); !reflect.DeepEqual(tt.want, got) {
t.Errorf("Unexpected return from PendingAttesterSlashings, wanted %v, received %v", tt.want, got)
}
})
}
}
func TestPool_PendingAttesterSlashings_2Max(t *testing.T) {
func TestPool_PendingAttesterSlashings_NoDuplicates(t *testing.T) {
conf := params.BeaconConfig()
conf.MaxAttesterSlashings = 2
params.OverrideBeaconConfig(conf)
type fields struct {
pending []*PendingAttesterSlashing
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
pendingSlashings := make([]*PendingAttesterSlashing, 3)
slashings := make([]*ethpb.AttesterSlashing, 3)
for i := 0; i < 2; i++ {
sl, err := testutil.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
pendingSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: sl,
validatorToSlash: uint64(i),
}
slashings[i] = sl
}
tests := []struct {
name string
fields fields
want []*ethpb.AttesterSlashing
}{
{
name: "No duplicates with grouped att slashings",
fields: fields{
pending: []*PendingAttesterSlashing{
{
attesterSlashing: attesterSlashingForValIdx(4, 12, 40),
validatorToSlash: 4,
},
{
attesterSlashing: attesterSlashingForValIdx(6, 8, 24),
validatorToSlash: 6,
},
{
attesterSlashing: attesterSlashingForValIdx(6, 8, 24),
validatorToSlash: 8,
},
{
attesterSlashing: attesterSlashingForValIdx(4, 12, 40),
validatorToSlash: 12,
},
{
attesterSlashing: attesterSlashingForValIdx(6, 8, 24),
validatorToSlash: 24,
},
{
attesterSlashing: attesterSlashingForValIdx(4, 12, 40),
validatorToSlash: 40,
},
},
},
want: []*ethpb.AttesterSlashing{
attesterSlashingForValIdx(4, 12, 40),
attesterSlashingForValIdx(6, 8, 24),
},
},
// We duplicate the last slashing.
pendingSlashings[2] = pendingSlashings[1]
slashings[2] = slashings[1]
p := &Pool{
pendingAttesterSlashing: pendingSlashings,
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &Pool{
pendingAttesterSlashing: tt.fields.pending,
}
if got := p.PendingAttesterSlashings(); !reflect.DeepEqual(tt.want, got) {
t.Errorf("Unexpected return from PendingAttesterSlashings, wanted %v, received %v", tt.want, got)
}
})
want := slashings[0:2]
if got := p.PendingAttesterSlashings(
context.Background(),
); !reflect.DeepEqual(want, got) {
t.Errorf("Unexpected return from PendingAttesterSlashings, wanted %v, received %v", want, got)
}
}


@@ -1,15 +1,16 @@
package slashings
import (
"context"
"reflect"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func proposerSlashingForValIdx(valIdx uint64) *ethpb.ProposerSlashing {
@@ -18,14 +19,6 @@ func proposerSlashingForValIdx(valIdx uint64) *ethpb.ProposerSlashing {
}
}
func generateNProposerSlashings(n uint64) []*ethpb.ProposerSlashing {
proposerSlashings := make([]*ethpb.ProposerSlashing, n)
for i := uint64(0); i < n; i++ {
proposerSlashings[i] = proposerSlashingForValIdx(i)
}
return proposerSlashings
}
func TestPool_InsertProposerSlashing(t *testing.T) {
type fields struct {
wantErr bool
@@ -34,8 +27,40 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
included map[uint64]bool
}
type args struct {
slashing *ethpb.ProposerSlashing
slashings []*ethpb.ProposerSlashing
}
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
slashings := make([]*ethpb.ProposerSlashing, 20)
for i := 0; i < len(slashings); i++ {
sl, err := testutil.GenerateProposerSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
slashings[i] = sl
}
if err := beaconState.SetSlot(helpers.StartSlot(1)); err != nil {
t.Fatal(err)
}
// We mark the following validators with some preconditions.
exitedVal, _ := beaconState.ValidatorAtIndex(uint64(2))
exitedVal.ExitEpoch = 0
futureExitedVal, _ := beaconState.ValidatorAtIndex(uint64(4))
futureExitedVal.ExitEpoch = 17
slashedVal, _ := beaconState.ValidatorAtIndex(uint64(5))
slashedVal.Slashed = true
if err := beaconState.UpdateValidatorAtIndex(uint64(2), exitedVal); err != nil {
t.Fatal(err)
}
if err := beaconState.UpdateValidatorAtIndex(uint64(4), futureExitedVal); err != nil {
t.Fatal(err)
}
if err := beaconState.UpdateValidatorAtIndex(uint64(5), slashedVal); err != nil {
t.Fatal(err)
}
tests := []struct {
name string
fields fields
@@ -49,22 +74,22 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: proposerSlashingForValIdx(0),
slashings: slashings[0:1],
},
want: generateNProposerSlashings(1),
want: slashings[0:1],
},
{
name: "Duplicate identical slashing",
fields: fields{
pending: generateNProposerSlashings(1),
pending: slashings[0:1],
included: make(map[uint64]bool),
wantErr: true,
err: "slashing object already exists in pending proposer slashings",
},
args: args{
slashing: proposerSlashingForValIdx(0),
slashings: slashings[0:1],
},
want: generateNProposerSlashings(1),
want: slashings[0:1],
},
{
name: "Slashing for exited validator",
@@ -75,7 +100,7 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
err: "cannot be slashed",
},
args: args{
slashing: proposerSlashingForValIdx(2),
slashings: slashings[2:3],
},
want: []*ethpb.ProposerSlashing{},
},
@@ -86,11 +111,9 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: proposerSlashingForValIdx(4),
},
want: []*ethpb.ProposerSlashing{
proposerSlashingForValIdx(4),
slashings: slashings[4:5],
},
want: slashings[4:5],
},
{
name: "Slashing for slashed validator",
@@ -98,10 +121,10 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
pending: []*ethpb.ProposerSlashing{},
included: make(map[uint64]bool),
wantErr: true,
err: "cannot be slashed",
err: "not slashable",
},
args: args{
slashing: proposerSlashingForValIdx(5),
slashings: slashings[5:6],
},
want: []*ethpb.ProposerSlashing{},
},
@@ -116,7 +139,7 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
err: "cannot be slashed",
},
args: args{
slashing: proposerSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: []*ethpb.ProposerSlashing{},
},
@@ -124,56 +147,31 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
name: "Maintains sorted order",
fields: fields{
pending: []*ethpb.ProposerSlashing{
proposerSlashingForValIdx(0),
proposerSlashingForValIdx(4),
slashings[0],
slashings[2],
},
included: make(map[uint64]bool),
},
args: args{
slashing: proposerSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: []*ethpb.ProposerSlashing{
proposerSlashingForValIdx(0),
proposerSlashingForValIdx(1),
proposerSlashingForValIdx(4),
slashings[0],
slashings[1],
slashings[2],
},
},
}
validators := []*ethpb.Validator{
{ // 0
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 1
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 2 - Already exited.
ExitEpoch: 15,
},
{ // 3
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 4 - Will be exited.
ExitEpoch: 17,
},
{ // 5 - Slashed.
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &Pool{
pendingProposerSlashing: tt.fields.pending,
included: tt.fields.included,
}
beaconState, err := beaconstate.InitializeFromProtoUnsafe(&p2ppb.BeaconState{
Slot: 16 * params.BeaconConfig().SlotsPerEpoch,
Validators: validators,
})
if err != nil {
t.Fatal(err)
var err error
for i := 0; i < len(tt.args.slashings); i++ {
err = p.InsertProposerSlashing(context.Background(), beaconState, tt.args.slashings[i])
}
err = p.InsertProposerSlashing(beaconState, tt.args.slashing)
if err != nil && tt.fields.wantErr && !strings.Contains(err.Error(), tt.fields.err) {
t.Fatalf("Wanted err: %v, received %v", tt.fields.err, err)
}
@@ -200,6 +198,47 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
}
}
func TestPool_InsertProposerSlashing_SigFailsVerify_ClearPool(t *testing.T) {
conf := params.BeaconConfig()
conf.MaxAttesterSlashings = 2
params.OverrideBeaconConfig(conf)
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
slashings := make([]*ethpb.ProposerSlashing, 2)
for i := 0; i < 2; i++ {
sl, err := testutil.GenerateProposerSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
slashings[i] = sl
}
// We mess up the signature of the second slashing.
badSig := make([]byte, 96)
copy(badSig, "muahaha")
slashings[1].Header_1.Signature = badSig
p := &Pool{
pendingProposerSlashing: make([]*ethpb.ProposerSlashing, 0),
}
// We only want a single slashing to remain.
if err := p.InsertProposerSlashing(
context.Background(),
beaconState,
slashings[0],
); err != nil {
t.Fatal(err)
}
if err := p.InsertProposerSlashing(
context.Background(),
beaconState,
slashings[1],
); err == nil {
t.Error("Expected slashing with bad signature to fail, received nil")
}
// We expect to only have 1 pending proposer slashing in the pool.
if len(p.pendingProposerSlashing) != 1 {
t.Error("Expected failed proposer slashing to have been cleared from pool")
}
}
func TestPool_MarkIncludedProposerSlashing(t *testing.T) {
type fields struct {
pending []*ethpb.ProposerSlashing
@@ -336,8 +375,14 @@ func TestPool_PendingProposerSlashings(t *testing.T) {
type fields struct {
pending []*ethpb.ProposerSlashing
}
type args struct {
validatorToSlash uint64
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
slashings := make([]*ethpb.ProposerSlashing, 20)
for i := 0; i < len(slashings); i++ {
sl, err := testutil.GenerateProposerSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
slashings[i] = sl
}
tests := []struct {
name string
@@ -354,16 +399,16 @@ func TestPool_PendingProposerSlashings(t *testing.T) {
{
name: "All eligible",
fields: fields{
pending: generateNProposerSlashings(6),
pending: slashings[:params.BeaconConfig().MaxProposerSlashings],
},
want: generateNProposerSlashings(6),
want: slashings[:params.BeaconConfig().MaxProposerSlashings],
},
{
name: "All eligible, more than max",
name: "Multiple indices",
fields: fields{
pending: generateNProposerSlashings(24),
pending: slashings[3:6],
},
want: generateNProposerSlashings(16),
want: slashings[3:6],
},
}
for _, tt := range tests {
@@ -371,8 +416,10 @@ func TestPool_PendingProposerSlashings(t *testing.T) {
p := &Pool{
pendingProposerSlashing: tt.fields.pending,
}
if got := p.PendingProposerSlashings(); !reflect.DeepEqual(got, tt.want) {
t.Errorf("PendingProposerSlashings() = %v, want %v", got, tt.want)
if got := p.PendingProposerSlashings(
context.Background(),
); !reflect.DeepEqual(tt.want, got) {
t.Errorf("Unexpected return from PendingProposerSlashings, wanted %v, received %v", tt.want, got)
}
})
}


@@ -132,7 +132,7 @@ func TestMultiAddrConversion_OK(t *testing.T) {
}
func TestStaticPeering_PeersAreAdded(t *testing.T) {
cfg := &Config{Encoding: "ssz"}
cfg := &Config{Encoding: "ssz", MaxPeers: 30}
port := 3000
var staticPeers []string
var hosts []host.Host


@@ -26,11 +26,13 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/shared"
"github.com/prysmaticlabs/prysm/shared/runutil"
"github.com/sirupsen/logrus"
)
var _ = shared.Service(&Service{})
var pollingPeriod = 1 * time.Second
// Check local table every 5 seconds for newly added peers.
var pollingPeriod = 5 * time.Second
const prysmProtocolPrefix = "/prysm/0.0.0"
@@ -158,7 +160,7 @@ func (s *Service) Start() {
s.startupErr = err
return
}
err = s.addBootNodesToExclusionList()
err = s.connectToBootnodes()
if err != nil {
log.WithError(err).Error("Could not add bootnode to the exclusion list")
s.startupErr = err
@@ -293,12 +295,8 @@ func (s *Service) Peers() *peers.Status {
// listenForNewNodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
bootNode, err := enode.Parse(enode.ValidSchemes, s.cfg.Discv5BootStrapAddr[0])
if err != nil {
log.Fatal(err)
}
runutil.RunEvery(s.ctx, pollingPeriod, func() {
nodes := s.dv5Listener.Lookup(bootNode.ID())
nodes := s.dv5Listener.LookupRandom()
multiAddresses := convertToMultiAddr(nodes)
s.connectWithAllPeers(multiAddresses)
})
@@ -313,6 +311,11 @@ func (s *Service) connectWithAllPeers(multiAddrs []ma.Multiaddr) {
for _, info := range addrInfos {
// make each dial non-blocking
go func(info peer.AddrInfo) {
if len(s.Peers().Active()) >= int(s.cfg.MaxPeers) {
log.WithFields(logrus.Fields{"peer": info.ID.String(),
"reason": "at peer limit"}).Trace("Not dialing peer")
return
}
if info.ID == s.host.ID() {
return
}
@@ -327,24 +330,17 @@ func (s *Service) connectWithAllPeers(multiAddrs []ma.Multiaddr) {
}
}
func (s *Service) addBootNodesToExclusionList() error {
func (s *Service) connectToBootnodes() error {
nodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddr))
for _, addr := range s.cfg.Discv5BootStrapAddr {
bootNode, err := enode.Parse(enode.ValidSchemes, addr)
if err != nil {
return err
}
multAddr, err := convertToSingleMultiAddr(bootNode)
if err != nil {
return err
}
addrInfo, err := peer.AddrInfoFromP2pAddr(multAddr)
if err != nil {
return err
}
// bootnode is never dialled, so ttl is tentatively 1 year
s.exclusionList.Set(addrInfo.ID.String(), true, 1)
nodes = append(nodes, bootNode)
}
multiAddresses := convertToMultiAddr(nodes)
s.connectWithAllPeers(multiAddresses)
return nil
}


@@ -130,12 +130,20 @@ func TestListenForNewNodes(t *testing.T) {
bootListener := createListener(ipAddr, pkey, cfg)
defer bootListener.Close()
// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
defer func() {
pollingPeriod = currentPeriod
}()
bootNode := bootListener.Self()
cfg = &Config{
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
Encoding: "ssz",
MaxPeers: 30,
}
var listeners []*discover.UDPv5
var hosts []host.Host


@@ -30,6 +30,7 @@ go_library(
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind:go_default_library",


@@ -31,6 +31,7 @@ import (
protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/trieutil"
"github.com/sirupsen/logrus"
)
@@ -523,7 +524,7 @@ func (s *Service) handleDelayTicker() {
// (analyzed the time of the block from 2018-09-01 to 2019-02-13)
fiveMinutesTimeout := time.Now().Add(-5 * time.Minute)
// check that web3 client is syncing
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) {
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) && roughtime.Now().Second()%15 == 0 {
log.Warn("eth1 client is not syncing")
}
if !s.chainStartData.Chainstarted {


@@ -31,12 +31,14 @@ go_library(
"//beacon-chain/flags:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/pagination:go_default_library",
"//shared/params:go_default_library",
@@ -81,10 +83,12 @@ go_test(
"//beacon-chain/flags:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",


@@ -4,6 +4,7 @@ import (
"context"
"sort"
"strconv"
"time"
ptypes "github.com/gogo/protobuf/types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -16,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/pagination"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -249,6 +251,7 @@ func (bs *Server) StreamIndexedAttestations(
attestationsChannel := make(chan *feed.Event, 1)
attSub := bs.AttestationNotifier.OperationFeed().Subscribe(attestationsChannel)
defer attSub.Unsubscribe()
go bs.collectReceivedAttestations(stream.Context())
for {
select {
case event := <-attestationsChannel:
@@ -262,21 +265,33 @@ func (bs *Server) StreamIndexedAttestations(
// One nil attestation shouldn't stop the stream.
continue
}
epoch := helpers.SlotToEpoch(bs.HeadFetcher.HeadSlot())
committeesBySlot, _, err := bs.retrieveCommitteesForEpoch(stream.Context(), epoch)
if err != nil {
return status.Errorf(
codes.Internal,
"Could not retrieve committees for epoch %d: %v",
epoch,
err,
)
}
// We use the retrieved committees for the epoch to convert all attestations
// into indexed form effectively.
startSlot := helpers.StartSlot(epoch)
endSlot := startSlot + params.BeaconConfig().SlotsPerEpoch
att := data.Attestation
bs.ReceivedAttestationsBuffer <- data.Attestation
}
case atts := <-bs.CollectedAttestationsBuffer:
// We aggregate the received attestations.
aggAtts, err := helpers.AggregateAttestations(atts)
if err != nil {
return status.Errorf(
codes.Internal,
"Could not aggregate attestations: %v",
err,
)
}
epoch := helpers.SlotToEpoch(bs.HeadFetcher.HeadSlot())
committeesBySlot, _, err := bs.retrieveCommitteesForEpoch(stream.Context(), epoch)
if err != nil {
return status.Errorf(
codes.Internal,
"Could not retrieve committees for epoch %d: %v",
epoch,
err,
)
}
// We use the retrieved committees for the epoch to convert all attestations
// into indexed form effectively.
startSlot := helpers.StartSlot(epoch)
endSlot := startSlot + params.BeaconConfig().SlotsPerEpoch
for _, att := range aggAtts {
// Out of range check, the attestation slot cannot be greater
// the last slot of the requested epoch or smaller than its start slot
// given committees are accessed as a map of slot -> committees list, where there are
@@ -284,7 +299,6 @@ func (bs *Server) StreamIndexedAttestations(
if att.Data.Slot < startSlot || att.Data.Slot > endSlot {
continue
}
committeesForSlot, ok := committeesBySlot[att.Data.Slot]
if !ok || committeesForSlot.Committees == nil {
continue
@@ -311,6 +325,36 @@ func (bs *Server) StreamIndexedAttestations(
}
}
// TODO(#5031): Instead of doing aggregation here, leverage the aggregation
// already being done by the attestation pool in the operations service.
func (bs *Server) collectReceivedAttestations(ctx context.Context) {
attsByRoot := make(map[[32]byte][]*ethpb.Attestation)
halfASlot := time.Duration(params.BeaconConfig().SecondsPerSlot / 2)
ticker := time.NewTicker(time.Second * halfASlot)
for {
select {
case <-ticker.C:
for root, atts := range attsByRoot {
if len(atts) > 0 {
bs.CollectedAttestationsBuffer <- atts
attsByRoot[root] = make([]*ethpb.Attestation, 0)
}
}
case att := <-bs.ReceivedAttestationsBuffer:
attDataRoot, err := ssz.HashTreeRoot(att.Data)
if err != nil {
logrus.Errorf("Could not hash tree root data: %v", err)
continue
}
attsByRoot[attDataRoot] = append(attsByRoot[attDataRoot], att)
case <-ctx.Done():
return
case <-bs.Ctx.Done():
return
}
}
}
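The collector above buckets incoming attestations by the hash tree root of their data and flushes every bucket once per half slot. A minimal, self-contained sketch of the same collect-and-flush pattern, using a plain string key and placeholder types instead of the ssz hashing and protobuf messages (every name below is illustrative, not part of the codebase):

package main

import (
	"context"
	"fmt"
	"time"
)

// att is a stand-in for an attestation; key would normally be the hash
// tree root of the attestation data.
type att struct {
	key  string
	data string
}

// collect buckets attestations by key and flushes every interval,
// mirroring the collect-and-flush loop above.
func collect(ctx context.Context, in <-chan att, out chan<- []att, interval time.Duration) {
	buckets := make(map[string][]att)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			for k, group := range buckets {
				if len(group) > 0 {
					out <- group
					buckets[k] = nil
				}
			}
		case a := <-in:
			buckets[a.key] = append(buckets[a.key], a)
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	in := make(chan att, 16)
	out := make(chan []att, 16)
	go collect(ctx, in, out, 100*time.Millisecond)
	in <- att{key: "root-a", data: "att1"}
	in <- att{key: "root-a", data: "att2"}
	fmt.Println(len(<-out)) // 2: both attestations share the same data root
}

The buffered channels play the role of ReceivedAttestationsBuffer and CollectedAttestationsBuffer: producers do not block on a slow consumer for small bursts.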
// AttestationPool retrieves pending attestations.
//
// The server returns a list of attestations that have been seen but not

View File

@@ -883,7 +883,7 @@ func TestServer_StreamIndexedAttestations_ContextCanceled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mockRPC.NewMockBeaconChain_StreamIndexedAttestationsServer(ctrl)
mockStream.EXPECT().Context().Return(ctx)
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
go func(tt *testing.T) {
if err := server.StreamIndexedAttestations(
&ptypes.Empty{},
@@ -897,7 +897,7 @@ func TestServer_StreamIndexedAttestations_ContextCanceled(t *testing.T) {
exitRoutine <- true
}
func TestServer_StreamIndexedAttestations_OnSlotTick(t *testing.T) {
func TestServer_StreamIndexedAttestations_OK(t *testing.T) {
db := dbTest.SetupDB(t)
defer dbTest.TeardownDB(t, db)
exitRoutine := make(chan bool)
@@ -977,10 +977,16 @@ func TestServer_StreamIndexedAttestations_OnSlotTick(t *testing.T) {
atts = append(atts, attExample)
}
}
aggAtts, err := helpers.AggregateAttestations(atts)
if err != nil {
t.Fatal(err)
}
// Next up we convert the test attestations to indexed form.
indexedAtts := make([]*ethpb.IndexedAttestation, len(atts), len(atts))
indexedAtts := make([]*ethpb.IndexedAttestation, len(aggAtts), len(aggAtts))
for i := 0; i < len(indexedAtts); i++ {
att := atts[i]
att := aggAtts[i]
committee := committees[att.Data.Slot].Committees[att.Data.CommitteeIndex]
idxAtt, err := attestationutil.ConvertToIndexed(ctx, att, committee.ValidatorIndices)
if err != nil {
@@ -999,7 +1005,8 @@ func TestServer_StreamIndexedAttestations_OnSlotTick(t *testing.T) {
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now(),
},
AttestationNotifier: chainService.OperationNotifier(),
AttestationNotifier: chainService.OperationNotifier(),
CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 1),
}
mockStream := mockRPC.NewMockBeaconChain_StreamIndexedAttestationsServer(ctrl)
@@ -1020,15 +1027,7 @@ func TestServer_StreamIndexedAttestations_OnSlotTick(t *testing.T) {
}
}(t)
for i := 0; i < len(atts); i++ {
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = server.AttestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{Attestation: atts[i]},
})
}
}
server.CollectedAttestationsBuffer <- atts
<-exitRoutine
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
@@ -12,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
@@ -20,20 +22,23 @@ import (
// providing RPC endpoints to access data relevant to the Ethereum 2.0 phase 0
// beacon chain.
type Server struct {
BeaconDB db.ReadOnlyDatabase
Ctx context.Context
ChainStartFetcher powchain.ChainStartFetcher
HeadFetcher blockchain.HeadFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ParticipationFetcher blockchain.ParticipationFetcher
DepositFetcher depositcache.DepositFetcher
BlockFetcher powchain.POWBlockFetcher
GenesisTimeFetcher blockchain.TimeFetcher
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
AttestationNotifier operation.Notifier
AttestationsPool attestations.Pool
SlashingsPool *slashings.Pool
CanonicalStateChan chan *pbp2p.BeaconState
ChainStartChan chan time.Time
BeaconDB db.ReadOnlyDatabase
Ctx context.Context
ChainStartFetcher powchain.ChainStartFetcher
HeadFetcher blockchain.HeadFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ParticipationFetcher blockchain.ParticipationFetcher
DepositFetcher depositcache.DepositFetcher
BlockFetcher powchain.POWBlockFetcher
GenesisTimeFetcher blockchain.TimeFetcher
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
AttestationNotifier operation.Notifier
Broadcaster p2p.Broadcaster
AttestationsPool attestations.Pool
SlashingsPool *slashings.Pool
CanonicalStateChan chan *pbp2p.BeaconState
ChainStartChan chan time.Time
ReceivedAttestationsBuffer chan *ethpb.Attestation
CollectedAttestationsBuffer chan []*ethpb.Attestation
}

View File

@@ -4,6 +4,7 @@ import (
"context"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -20,9 +21,12 @@ func (bs *Server) SubmitProposerSlashing(
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
}
if err := bs.SlashingsPool.InsertProposerSlashing(beaconState, req); err != nil {
if err := bs.SlashingsPool.InsertProposerSlashing(ctx, beaconState, req); err != nil {
return nil, status.Errorf(codes.Internal, "Could not insert proposer slashing into pool: %v", err)
}
if featureconfig.Get().BroadcastSlashings {
bs.Broadcaster.Broadcast(ctx, req)
}
return &ethpb.SubmitSlashingResponse{
SlashedIndices: []uint64{req.ProposerIndex},
}, nil
@@ -39,9 +43,12 @@ func (bs *Server) SubmitAttesterSlashing(
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
}
if err := bs.SlashingsPool.InsertAttesterSlashing(beaconState, req); err != nil {
if err := bs.SlashingsPool.InsertAttesterSlashing(ctx, beaconState, req); err != nil {
return nil, status.Errorf(codes.Internal, "Could not insert attester slashing into pool: %v", err)
}
if featureconfig.Get().BroadcastSlashings {
bs.Broadcaster.Broadcast(ctx, req)
}
slashedIndices := sliceutil.IntersectionUint64(req.Attestation_1.AttestingIndices, req.Attestation_2.AttestingIndices)
return &ethpb.SubmitSlashingResponse{
SlashedIndices: slashedIndices,

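Both handlers follow the same gate: insert the slashing into the pool first, then broadcast only when the BroadcastSlashings feature flag is enabled. A rough sketch of that flow with hypothetical pool and broadcaster interfaces (not the actual Prysm types):

package main

import (
	"context"
	"fmt"
)

// pool and broadcaster are trimmed-down, illustrative interfaces; the
// real server uses the slashings pool and the p2p broadcaster.
type pool interface {
	Insert(ctx context.Context, msg string) error
}

type broadcaster interface {
	Broadcast(ctx context.Context, msg string)
}

type memPool struct{ items []string }

func (p *memPool) Insert(_ context.Context, msg string) error {
	p.items = append(p.items, msg)
	return nil
}

type recordingBroadcaster struct{ called bool }

func (b *recordingBroadcaster) Broadcast(_ context.Context, msg string) {
	b.called = true
	fmt.Println("broadcast:", msg)
}

// submitSlashing mirrors the flag-gated flow: insert first, then
// broadcast only when the feature flag is enabled.
func submitSlashing(ctx context.Context, p pool, b broadcaster, broadcastEnabled bool, msg string) error {
	if err := p.Insert(ctx, msg); err != nil {
		return err
	}
	if broadcastEnabled {
		b.Broadcast(ctx, msg)
	}
	return nil
}

func main() {
	ctx := context.Background()
	p, b := &memPool{}, &recordingBroadcaster{}
	_ = submitSlashing(ctx, p, b, false, "attester-slashing")
	fmt.Println("broadcast called:", b.called) // false: flag off by default
	_ = submitSlashing(ctx, p, b, true, "attester-slashing")
	fmt.Println("broadcast called:", b.called) // true: flag enabled
}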
View File

@@ -2,47 +2,37 @@ package beacon
import (
"context"
"strconv"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestServer_SubmitProposerSlashing(t *testing.T) {
ctx := context.Background()
vals := make([]*ethpb.Validator, 10)
for i := 0; i < len(vals); i++ {
key := make([]byte, 48)
copy(key, strconv.Itoa(i))
vals[i] = &ethpb.Validator{
PublicKey: key[:],
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
Slashed: false,
}
}
// We mark the validator at index 5 as already slashed.
vals[5].Slashed = true
st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
Slot: 0,
Validators: vals,
})
st, privs := testutil.DeterministicGenesisState(t, 64)
slashedVal, err := st.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
// We mark the validator at index 5 as already slashed.
slashedVal.Slashed = true
if err := st.UpdateValidatorAtIndex(5, slashedVal); err != nil {
t.Fatal(err)
}
mb := &mockp2p.MockBroadcaster{}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
SlashingsPool: slashings.NewPool(),
Broadcaster: mb,
}
// We want a proposer slashing for validator with index 2 to
@@ -50,27 +40,11 @@ func TestServer_SubmitProposerSlashing(t *testing.T) {
wanted := &ethpb.SubmitSlashingResponse{
SlashedIndices: []uint64{2},
}
slashing := &ethpb.ProposerSlashing{
ProposerIndex: 2,
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 0,
ParentRoot: nil,
StateRoot: nil,
BodyRoot: nil,
},
Signature: make([]byte, 96),
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 0,
ParentRoot: nil,
StateRoot: nil,
BodyRoot: nil,
},
Signature: make([]byte, 96),
},
slashing, err := testutil.GenerateProposerSlashingForValidator(st, privs[2], uint64(2))
if err != nil {
t.Fatal(err)
}
res, err := bs.SubmitProposerSlashing(ctx, slashing)
if err != nil {
t.Fatal(err)
@@ -79,59 +53,103 @@ func TestServer_SubmitProposerSlashing(t *testing.T) {
t.Errorf("Wanted %v, received %v", wanted, res)
}
if mb.BroadcastCalled {
t.Errorf("Expected broadcast not to be called by default")
}
slashing, err = testutil.GenerateProposerSlashingForValidator(st, privs[5], uint64(5))
if err != nil {
t.Fatal(err)
}
// We do not want a proposer slashing for an already slashed validator
// (the validator at index 5) to be included in the pool.
slashing.ProposerIndex = 5
if _, err := bs.SubmitProposerSlashing(ctx, slashing); err == nil {
t.Error("Expected including a proposer slashing for an already slashed validator to fail")
}
}
func TestServer_SubmitAttesterSlashing(t *testing.T) {
func TestServer_SubmitProposerSlashingBroadcast(t *testing.T) {
ctx := context.Background()
vals := make([]*ethpb.Validator, 10)
for i := 0; i < len(vals); i++ {
key := make([]byte, 48)
copy(key, strconv.Itoa(i))
vals[i] = &ethpb.Validator{
PublicKey: key[:],
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
Slashed: false,
}
}
// We mark the validators at index 5, 6, 7 as already slashed.
vals[5].Slashed = true
vals[6].Slashed = true
vals[7].Slashed = true
cfg := featureconfig.Get()
cfg.BroadcastSlashings = true
featureconfig.Init(cfg)
defer func() {
cfg.BroadcastSlashings = false
featureconfig.Init(cfg)
}()
st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
Slot: 0,
Validators: vals,
})
st, privs := testutil.DeterministicGenesisState(t, 64)
slashedVal, err := st.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
// We mark the validator at index 5 as already slashed.
slashedVal.Slashed = true
if err := st.UpdateValidatorAtIndex(5, slashedVal); err != nil {
t.Fatal(err)
}
mb := &mockp2p.MockBroadcaster{}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
SlashingsPool: slashings.NewPool(),
Broadcaster: mb,
}
slashing := &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 2, 3},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{2, 3, 4},
},
// We want a proposer slashing for validator with index 2 to
// be included in the pool.
slashing, err := testutil.GenerateProposerSlashingForValidator(st, privs[2], uint64(2))
if err != nil {
t.Fatal(err)
}
_, err = bs.SubmitProposerSlashing(ctx, slashing)
if err != nil {
t.Fatal(err)
}
if !mb.BroadcastCalled {
t.Errorf("Expected broadcast to be called")
}
}
func TestServer_SubmitAttesterSlashing(t *testing.T) {
ctx := context.Background()
// We use a deterministic genesis state of 64 validators.
st, privs := testutil.DeterministicGenesisState(t, 64)
slashedVal, err := st.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
// We mark the validator at index 5 as already slashed.
slashedVal.Slashed = true
if err := st.UpdateValidatorAtIndex(5, slashedVal); err != nil {
t.Fatal(err)
}
mb := &mockp2p.MockBroadcaster{}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
SlashingsPool: slashings.NewPool(),
Broadcaster: mb,
}
slashing, err := testutil.GenerateAttesterSlashingForValidator(st, privs[2], uint64(2))
if err != nil {
t.Fatal(err)
}
// We want the intersection of the slashing attesting indices
// to be slashed, so we expect validator 2 to be in the response
// slashed indices.
wanted := &ethpb.SubmitSlashingResponse{
SlashedIndices: []uint64{2, 3},
SlashedIndices: []uint64{2},
}
res, err := bs.SubmitAttesterSlashing(ctx, slashing)
if err != nil {
@@ -140,18 +158,65 @@ func TestServer_SubmitAttesterSlashing(t *testing.T) {
if !proto.Equal(wanted, res) {
t.Errorf("Wanted %v, received %v", wanted, res)
}
if mb.BroadcastCalled {
t.Errorf("Expected broadcast not to be called by default")
}
slashing, err = testutil.GenerateAttesterSlashingForValidator(st, privs[5], uint64(5))
if err != nil {
t.Fatal(err)
}
// If any of the attesting indices in the slashing object have already
// been slashed, the insert into the attester slashing pool should fail.
slashing = &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{5, 6, 7},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{6, 7, 8},
},
}
if _, err := bs.SubmitAttesterSlashing(ctx, slashing); err == nil {
t.Error("Expected including an attester slashing for an already slashed validator to fail")
}
}
func TestServer_SubmitAttesterSlashingBroadcast(t *testing.T) {
ctx := context.Background()
cfg := featureconfig.Get()
cfg.BroadcastSlashings = true
featureconfig.Init(cfg)
defer func() {
cfg.BroadcastSlashings = false
featureconfig.Init(cfg)
}()
// We use a deterministic genesis state of 64 validators.
st, privs := testutil.DeterministicGenesisState(t, 64)
slashedVal, err := st.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
// We mark the validator at index 5 as already slashed.
slashedVal.Slashed = true
if err := st.UpdateValidatorAtIndex(5, slashedVal); err != nil {
t.Fatal(err)
}
mb := &mockp2p.MockBroadcaster{}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
SlashingsPool: slashings.NewPool(),
Broadcaster: mb,
}
slashing, err := testutil.GenerateAttesterSlashingForValidator(st, privs[2], uint64(2))
if err != nil {
t.Fatal(err)
}
// We submit a valid attester slashing for validator 2 and expect
// it to be broadcast because the feature flag is set.
_, err = bs.SubmitAttesterSlashing(ctx, slashing)
if err != nil {
t.Fatal(err)
}
if !mb.BroadcastCalled {
t.Errorf("Expected broadcast to be called when flag is set")
}
}

View File

@@ -242,21 +242,24 @@ func (s *Service) Start() {
PeersFetcher: s.peersFetcher,
}
beaconChainServer := &beacon.Server{
Ctx: s.ctx,
BeaconDB: s.beaconDB,
AttestationsPool: s.attestationsPool,
SlashingsPool: s.slashingsPool,
HeadFetcher: s.headFetcher,
FinalizationFetcher: s.finalizationFetcher,
ParticipationFetcher: s.participationFetcher,
ChainStartFetcher: s.chainStartFetcher,
DepositFetcher: s.depositFetcher,
BlockFetcher: s.powChainService,
CanonicalStateChan: s.canonicalStateChan,
GenesisTimeFetcher: s.genesisTimeFetcher,
StateNotifier: s.stateNotifier,
BlockNotifier: s.blockNotifier,
AttestationNotifier: s.operationNotifier,
Ctx: s.ctx,
BeaconDB: s.beaconDB,
AttestationsPool: s.attestationsPool,
SlashingsPool: s.slashingsPool,
HeadFetcher: s.headFetcher,
FinalizationFetcher: s.finalizationFetcher,
ParticipationFetcher: s.participationFetcher,
ChainStartFetcher: s.chainStartFetcher,
DepositFetcher: s.depositFetcher,
BlockFetcher: s.powChainService,
CanonicalStateChan: s.canonicalStateChan,
GenesisTimeFetcher: s.genesisTimeFetcher,
StateNotifier: s.stateNotifier,
BlockNotifier: s.blockNotifier,
AttestationNotifier: s.operationNotifier,
Broadcaster: s.p2p,
ReceivedAttestationsBuffer: make(chan *ethpb.Attestation, 100),
CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 100),
}
aggregatorServer := &aggregator.Server{ValidatorServer: validatorServer}
pb.RegisterAggregatorServiceServer(s.grpcServer, aggregatorServer)

View File

@@ -64,8 +64,10 @@ func (vs *Server) GetDuties(ctx context.Context, req *ethpb.DutiesRequest) (*eth
assignment.ProposerSlot = proposerIndexToSlot[idx]
assignment.CommitteeIndex = ca.CommitteeIndex
}
} else {
vs := vs.validatorStatus(ctx, pubKey, s)
assignment.Status = vs.Status
}
validatorAssignments = append(validatorAssignments, assignment)
}

View File

@@ -6,13 +6,16 @@ import (
"fmt"
"strings"
"testing"
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
blk "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -67,10 +70,19 @@ func TestGetDuties_NextEpoch_CantFindValidatorIdx(t *testing.T) {
t.Fatalf("Could not get signing root %v", err)
}
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
0: uint64(height),
},
}
vs := &Server{
BeaconDB: db,
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BeaconDB: db,
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
Eth1InfoFetcher: p,
DepositFetcher: depositcache.NewDepositCache(),
}
pubKey := pubKey(99999)

View File

@@ -89,8 +89,8 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
Deposits: deposits,
Attestations: atts,
RandaoReveal: req.RandaoReveal,
ProposerSlashings: vs.SlashingsPool.PendingProposerSlashings(),
AttesterSlashings: vs.SlashingsPool.PendingAttesterSlashings(),
ProposerSlashings: vs.SlashingsPool.PendingProposerSlashings(ctx),
AttesterSlashings: vs.SlashingsPool.PendingAttesterSlashings(ctx),
VoluntaryExits: vs.ExitPool.PendingExits(head, req.Slot),
Graffiti: graffiti[:],
},

View File

@@ -99,7 +99,7 @@ func TestGetBlock_OK(t *testing.T) {
privKeys[0],
0, /* validator index */
)
if err := proposerServer.SlashingsPool.InsertProposerSlashing(beaconState, proposerSlashing); err != nil {
if err := proposerServer.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing); err != nil {
t.Fatal(err)
}
@@ -109,7 +109,7 @@ func TestGetBlock_OK(t *testing.T) {
privKeys[1],
1, /* validator index */
)
if err := proposerServer.SlashingsPool.InsertAttesterSlashing(beaconState, attesterSlashing); err != nil {
if err := proposerServer.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing); err != nil {
t.Fatal(err)
}

View File

@@ -34,6 +34,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"getters_test.go",
"references_test.go",
"types_test.go",
],

View File

@@ -134,6 +134,9 @@ func (b *BeaconState) Slot() uint64 {
if !b.HasInnerState() {
return 0
}
b.lock.RLock()
defer b.lock.RUnlock()
return b.state.Slot
}
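The getter above now takes a read lock so Slot() can run concurrently with SetSlot without a data race. A generic sketch of the same getter/setter locking discipline, independent of the BeaconState type:

package main

import (
	"fmt"
	"sync"
)

// slotHolder guards a single field with an RWMutex, the same discipline
// the state getter uses: readers take RLock, writers take Lock.
type slotHolder struct {
	lock sync.RWMutex
	slot uint64
}

func (s *slotHolder) Slot() uint64 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.slot
}

func (s *slotHolder) SetSlot(v uint64) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.slot = v
}

func main() {
	h := &slotHolder{}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); h.SetSlot(1) }()
	go func() { defer wg.Done(); _ = h.Slot() }()
	wg.Wait()
	fmt.Println(h.Slot()) // 1; running under the race detector reports no conflict
}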

View File

@@ -0,0 +1,25 @@
package state
import (
"sync"
"testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
func TestBeaconState_SlotDataRace(t *testing.T) {
headState, _ := InitializeFromProto(&pb.BeaconState{Slot: 1})
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
headState.SetSlot(uint64(0))
wg.Done()
}()
go func() {
headState.Slot()
wg.Done()
}()
wg.Wait()
}

View File

@@ -3,19 +3,25 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"cold.go",
"epoch_boundary_root.go",
"errors.go",
"hot.go",
"log.go",
"migrate.go",
"replay.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
@@ -30,7 +36,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"cold_test.go",
"epoch_boundary_root_test.go",
"hot_test.go",
"migrate_test.go",
"replay_test.go",
"service_test.go",
],
@@ -44,7 +53,9 @@ go_test(
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -0,0 +1,115 @@
package stategen
import (
"context"
"encoding/hex"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// This saves a pre-finalized beacon state in the cold section of the DB. It returns an error
// and does not store anything if the state does not lie on an archived point boundary.
func (s *State) saveColdState(ctx context.Context, blockRoot [32]byte, state *state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "stateGen.saveColdState")
defer span.End()
if state.Slot()%s.slotsPerArchivedPoint != 0 {
return errSlotNonArchivedPoint
}
archivedPointIndex := state.Slot() / s.slotsPerArchivedPoint
if err := s.beaconDB.SaveArchivedPointState(ctx, state, archivedPointIndex); err != nil {
return err
}
if err := s.beaconDB.SaveArchivedPointRoot(ctx, blockRoot, archivedPointIndex); err != nil {
return err
}
log.WithFields(logrus.Fields{
"slot": state.Slot(),
"blockRoot": hex.EncodeToString(bytesutil.Trunc(blockRoot[:]))}).Info("Saved full state on archived point")
return nil
}
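saveColdState only accepts states whose slot lands exactly on an archived point and derives the archive index by integer division. A tiny sketch of that arithmetic; errSlotNonArchivedPoint here is a local stand-in for the package's sentinel error:

package main

import (
	"errors"
	"fmt"
)

// errSlotNonArchivedPoint is a local stand-in for the package's sentinel error.
var errSlotNonArchivedPoint = errors.New("slot is not an archived point")

// archiveIndex returns the archived point a slot maps to, or an error
// when the slot does not sit exactly on an archived point boundary.
func archiveIndex(slot, slotsPerArchivedPoint uint64) (uint64, error) {
	if slot%slotsPerArchivedPoint != 0 {
		return 0, errSlotNonArchivedPoint
	}
	return slot / slotsPerArchivedPoint, nil
}

func main() {
	idx, err := archiveIndex(2048, 256)
	fmt.Println(idx, err) // 8 <nil>
	_, err = archiveIndex(2049, 256)
	fmt.Println(err) // slot is not an archived point
}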
// Given the archive index, this returns the archived cold state from the DB.
// If the archived state does not exist in the DB, it will compute and save it.
func (s *State) archivedPointByIndex(ctx context.Context, archiveIndex uint64) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadArchivedPointByIndex")
defer span.End()
if s.beaconDB.HasArchivedPoint(ctx, archiveIndex) {
return s.beaconDB.ArchivedPointState(ctx, archiveIndex)
}
// If, for whatever reason, the archived point does not exist in the DB,
// the node regenerates it and saves it.
return s.recoverArchivedPointByIndex(ctx, archiveIndex)
}
// This recovers an archived point by index. For certain reasons (e.g. the user toggled a feature flag),
// an archived point may not be present in the DB. This regenerates the archived point state via
// playback and saves the archived root and state to the DB.
func (s *State) recoverArchivedPointByIndex(ctx context.Context, archiveIndex uint64) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.recoverArchivedPointByIndex")
defer span.End()
archivedSlot := archiveIndex * s.slotsPerArchivedPoint
archivedState, err := s.ComputeStateUpToSlot(ctx, archivedSlot)
if err != nil {
return nil, errors.Wrap(err, "could not compute state up to archived index slot")
}
if archivedState == nil {
return nil, errUnknownArchivedState
}
lastRoot, _, err := s.lastSavedBlock(ctx, archivedSlot)
if err != nil {
return nil, errors.Wrap(err, "could not get last valid block up to archived index slot")
}
if err := s.beaconDB.SaveArchivedPointRoot(ctx, lastRoot, archiveIndex); err != nil {
return nil, err
}
if err := s.beaconDB.SaveArchivedPointState(ctx, archivedState, archiveIndex); err != nil {
return nil, err
}
return archivedState, nil
}
// Given a block root, this returns the slot of that block using a state summary lookup in the DB.
// If the state summary does not exist in the DB, it recovers the summary and saves it back to the DB.
// This covers corner cases where users toggle the new state service's feature flag.
func (s *State) blockRootSlot(ctx context.Context, blockRoot [32]byte) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.blockRootSlot")
defer span.End()
if s.beaconDB.HasStateSummary(ctx, blockRoot) {
summary, err := s.beaconDB.StateSummary(ctx, blockRoot)
if err != nil {
return 0, err
}
if summary == nil {
return 0, errUnknownStateSummary
}
return summary.Slot, nil
}
// Couldn't find state summary in DB. Retry with block bucket to get block slot.
b, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
return 0, err
}
if b == nil || b.Block == nil {
return 0, errUnknownBlock
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: blockRoot[:], Slot: b.Block.Slot}); err != nil {
return 0, errors.Wrap(err, "could not save state summary")
}
return b.Block.Slot, nil
}
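blockRootSlot prefers the state summary bucket and falls back to the block bucket, backfilling a summary so the next lookup is cheap. A stripped-down sketch of that read-through pattern with map-backed stores in place of the real DB (all types below are illustrative):

package main

import (
	"errors"
	"fmt"
)

var errUnknownBlock = errors.New("unknown block root")

// store is a toy stand-in for the state summary and block buckets.
type store struct {
	summaries map[[32]byte]uint64 // root -> slot
	blocks    map[[32]byte]uint64 // root -> slot
}

// slotByRoot checks the summary bucket first and falls back to the block
// bucket, backfilling a summary on the slow path, like blockRootSlot above.
func (s *store) slotByRoot(root [32]byte) (uint64, error) {
	if slot, ok := s.summaries[root]; ok {
		return slot, nil
	}
	slot, ok := s.blocks[root]
	if !ok {
		return 0, errUnknownBlock
	}
	s.summaries[root] = slot // future lookups hit the fast path
	return slot, nil
}

func main() {
	s := &store{
		summaries: map[[32]byte]uint64{},
		blocks:    map[[32]byte]uint64{{'a'}: 100},
	}
	slot, _ := s.slotByRoot([32]byte{'a'})
	fmt.Println(slot)             // 100, served from the block bucket
	fmt.Println(len(s.summaries)) // 1, the summary was backfilled
}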

View File

@@ -0,0 +1,212 @@
package stategen
import (
"context"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestSaveColdState_NonArchivedPoint(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = 2
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(1)
if err := service.saveColdState(ctx, [32]byte{}, beaconState); err != errSlotNonArchivedPoint {
t.Error("Did not get wanted error")
}
}
func TestSaveColdState_CanSave(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = 1
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(1)
r := [32]byte{'a'}
if err := service.saveColdState(ctx, r, beaconState); err != nil {
t.Fatal(err)
}
if !service.beaconDB.HasArchivedPoint(ctx, 1) {
t.Error("Did not save cold state")
}
if service.beaconDB.ArchivedPointRoot(ctx, 1) != r {
t.Error("Did not get wanted root")
}
receivedState, err := service.beaconDB.ArchivedPointState(ctx, 1)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(receivedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
t.Error("Did not get wanted state")
}
}
func TestArchivedPointByIndex_HasPoint(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
index := uint64(999)
if err := service.beaconDB.SaveArchivedPointState(ctx, beaconState, index); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveArchivedPointRoot(ctx, [32]byte{'A'}, index); err != nil {
t.Fatal(err)
}
savedArchivedState, err := service.archivedPointByIndex(ctx, index)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(beaconState.InnerStateUnsafe(), savedArchivedState.InnerStateUnsafe()) {
t.Error("Diff saved state")
}
}
func TestArchivedPointByIndex_DoesntHavePoint(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
gBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
gRoot, err := ssz.HashTreeRoot(gBlk.Block)
if err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveBlock(ctx, gBlk); err != nil {
t.Fatal(err)
}
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.beaconDB.SaveState(ctx, beaconState, gRoot); err != nil {
t.Fatal(err)
}
service.slotsPerArchivedPoint = 32
recoveredState, err := service.archivedPointByIndex(ctx, 2)
if err != nil {
t.Fatal(err)
}
if recoveredState.Slot() != service.slotsPerArchivedPoint*2 {
t.Error("Diff state slot")
}
savedArchivedState, err := service.beaconDB.ArchivedPointState(ctx, 2)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(recoveredState.InnerStateUnsafe(), savedArchivedState.InnerStateUnsafe()) {
t.Error("Diff saved archived state")
}
}
func TestRecoverArchivedPointByIndex_CanRecover(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
gBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
gRoot, err := ssz.HashTreeRoot(gBlk.Block)
if err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveBlock(ctx, gBlk); err != nil {
t.Fatal(err)
}
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.beaconDB.SaveState(ctx, beaconState, gRoot); err != nil {
t.Fatal(err)
}
service.slotsPerArchivedPoint = 32
recoveredState, err := service.recoverArchivedPointByIndex(ctx, 1)
if err != nil {
t.Fatal(err)
}
if recoveredState.Slot() != service.slotsPerArchivedPoint {
t.Error("Diff state slot")
}
savedArchivedState, err := service.beaconDB.ArchivedPointState(ctx, 1)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(recoveredState.InnerStateUnsafe(), savedArchivedState.InnerStateUnsafe()) {
t.Error("Diff saved state")
}
}
func TestBlockRootSlot_Exists(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
bRoot := [32]byte{'A'}
bSlot := uint64(100)
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: bSlot,
Root: bRoot[:],
}); err != nil {
t.Fatal(err)
}
slot, err := service.blockRootSlot(ctx, bRoot)
if err != nil {
t.Fatal(err)
}
if slot != bSlot {
t.Error("Did not get correct block root slot")
}
}
func TestBlockRootSlot_CanRecoverAndSave(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
bSlot := uint64(100)
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: bSlot}}
bRoot, _ := ssz.HashTreeRoot(b.Block)
if err := service.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
slot, err := service.blockRootSlot(ctx, bRoot)
if err != nil {
t.Fatal(err)
}
if slot != bSlot {
t.Error("Did not get correct block root slot")
}
// Verify state summary is saved.
if !service.beaconDB.HasStateSummary(ctx, bRoot) {
t.Error("State summary not saved")
}
}

View File

@@ -0,0 +1,191 @@
package stategen
import (
"context"
"encoding/hex"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// This saves a post-finalized beacon state in the hot section of the DB. On an epoch boundary,
// it saves a full state. On an intermediate slot, it saves a back pointer to the
// nearest epoch boundary state.
func (s *State) saveHotState(ctx context.Context, blockRoot [32]byte, state *state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "stateGen.saveHotState")
defer span.End()
// If the hot state is already in the cache, one can be sure the state was processed and is in the DB.
if s.hotStateCache.Has(blockRoot) {
return nil
}
// Only on an epoch boundary slot does it save the whole state.
if helpers.IsEpochStart(state.Slot()) {
if err := s.beaconDB.SaveState(ctx, state, blockRoot); err != nil {
return err
}
log.WithFields(logrus.Fields{
"slot": state.Slot(),
"blockRoot": hex.EncodeToString(bytesutil.Trunc(blockRoot[:]))}).Info("Saved full state on epoch boundary")
}
// On intermediate slots, save the hot state summary.
epochRoot, err := s.loadEpochBoundaryRoot(ctx, blockRoot, state)
if err != nil {
return errors.Wrap(err, "could not get epoch boundary root to save hot state")
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: state.Slot(),
Root: blockRoot[:],
BoundaryRoot: epochRoot[:],
}); err != nil {
return err
}
// Store the copied state in the cache.
s.hotStateCache.Put(blockRoot, state.Copy())
return nil
}
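The hot save path stores a full state only on epoch boundary slots and otherwise records just a summary pointing back to the boundary root. A sketch of that decision with a local struct standing in for the protobuf state summary; the slots-per-epoch value is hard-coded here for illustration:

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, hard-coded here for illustration

// stateSummary mirrors the shape of the hot state summary: the slot, the
// block root, and the nearest epoch boundary root to replay from.
type stateSummary struct {
	Slot         uint64
	Root         [32]byte
	BoundaryRoot [32]byte
}

// planHotSave reports whether a full state should be written (only on an
// epoch boundary) and builds the summary that is always written.
func planHotSave(slot uint64, blockRoot, boundaryRoot [32]byte) (bool, stateSummary) {
	saveFull := slot%slotsPerEpoch == 0
	return saveFull, stateSummary{Slot: slot, Root: blockRoot, BoundaryRoot: boundaryRoot}
}

func main() {
	full, _ := planHotSave(64, [32]byte{'b'}, [32]byte{'a'})
	fmt.Println(full) // true: slot 64 starts an epoch, so the whole state is stored
	full, sum := planHotSave(70, [32]byte{'c'}, [32]byte{'b'})
	fmt.Println(full, sum.Slot) // false 70: only the summary is stored
}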
// This loads a post-finalized beacon state from the hot section of the DB. If necessary, it will
// replay blocks starting from the nearest epoch boundary. It returns the beacon state that
// corresponds to the input block root.
func (s *State) loadHotStateByRoot(ctx context.Context, blockRoot [32]byte) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadHotStateByRoot")
defer span.End()
// Load the hot state cache.
cachedState := s.hotStateCache.Get(blockRoot)
if cachedState != nil {
return cachedState, nil
}
summary, err := s.beaconDB.StateSummary(ctx, blockRoot)
if err != nil {
return nil, err
}
if summary == nil {
return nil, errUnknownStateSummary
}
boundaryState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(summary.BoundaryRoot))
if err != nil {
return nil, err
}
if boundaryState == nil {
return nil, errUnknownBoundaryState
}
// There is no need to replay the blocks if we're already on an epoch boundary;
// the target slot is the same as the state slot.
var hotState *state.BeaconState
targetSlot := summary.Slot
if targetSlot == boundaryState.Slot() {
hotState = boundaryState
} else {
blks, err := s.LoadBlocks(ctx, boundaryState.Slot()+1, targetSlot, bytesutil.ToBytes32(summary.Root))
if err != nil {
return nil, errors.Wrap(err, "could not load blocks for hot state using root")
}
hotState, err = s.ReplayBlocks(ctx, boundaryState, blks, targetSlot)
if err != nil {
return nil, errors.Wrap(err, "could not replay blocks for hot state using root")
}
}
// Save a copy of the state because the reference is also returned at the end.
s.hotStateCache.Put(blockRoot, hotState.Copy())
return hotState, nil
}
// This loads a hot state by slot where the slot lies between the epoch boundary points.
// This is a slower implementation (versus ByRoot) as slot is the only argument. It requires fetching
// all the blocks between the epoch boundary points for playback.
// Use `loadHotStateByRoot` unless you really don't know the root.
func (s *State) loadHotStateBySlot(ctx context.Context, slot uint64) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadHotStateBySlot")
defer span.End()
// Gather the epoch boundary information; that is where the node starts replaying the blocks.
boundarySlot := helpers.StartSlot(helpers.SlotToEpoch(slot))
boundaryRoot, ok := s.epochBoundaryRoot(boundarySlot)
if !ok {
return nil, errUnknownBoundaryRoot
}
// Try the cache first then try the DB.
boundaryState := s.hotStateCache.Get(boundaryRoot)
var err error
if boundaryState == nil {
boundaryState, err = s.beaconDB.State(ctx, boundaryRoot)
if err != nil {
return nil, err
}
if boundaryState == nil {
return nil, errUnknownBoundaryState
}
}
// Gather the last saved block root and the slot number.
lastValidRoot, lastValidSlot, err := s.lastSavedBlock(ctx, slot)
if err != nil {
return nil, errors.Wrap(err, "could not get last valid block for hot state using slot")
}
// Load and replay blocks to get the intermediate state.
replayBlks, err := s.LoadBlocks(ctx, boundaryState.Slot()+1, lastValidSlot, lastValidRoot)
if err != nil {
return nil, err
}
return s.ReplayBlocks(ctx, boundaryState, replayBlks, slot)
}
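Loading by slot first derives the epoch boundary slot for the requested slot and then replays every block from there up to the target. The boundary arithmetic is plain integer division; a small sketch with illustrative numbers (slots per epoch hard-coded here):

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, hard-coded here for illustration

// boundarySlot returns the first slot of the epoch containing slot,
// i.e. the slot whose saved boundary state the loader replays from.
func boundarySlot(slot uint64) uint64 {
	return (slot / slotsPerEpoch) * slotsPerEpoch
}

func main() {
	slot := uint64(70)
	start := boundarySlot(slot)
	fmt.Println(start)        // 64
	fmt.Println(slot - start) // 6: up to six slots of blocks are replayed on top of the boundary state
}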
// This loads the epoch boundary root of a given state based on the state slot.
// If the epoch boundary does not have a valid root, it then recovers by going
// back to find the last slot before the boundary that has a valid block.
func (s *State) loadEpochBoundaryRoot(ctx context.Context, blockRoot [32]byte, state *state.BeaconState) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadEpochBoundaryRoot")
defer span.End()
boundarySlot := helpers.CurrentEpoch(state) * params.BeaconConfig().SlotsPerEpoch
// First checks if epoch boundary root already exists in cache.
r, ok := s.epochBoundarySlotToRoot[boundarySlot]
if ok {
return r, nil
}
// At the epoch boundary, the boundary root is just the input block root itself.
if state.Slot() == boundarySlot {
return blockRoot, nil
}
// The node uses the genesis getters if the epoch boundary slot is the genesis slot.
if boundarySlot == 0 {
r, err := s.genesisRoot(ctx)
if err != nil {
return [32]byte{}, err
}
s.setEpochBoundaryRoot(boundarySlot, r)
return r, nil
}
// Now to find the epoch boundary root via DB.
r, _, err := s.lastSavedBlock(ctx, boundarySlot)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get last saved block for epoch boundary root")
}
// Set the epoch boundary root cache.
s.setEpochBoundaryRoot(boundarySlot, r)
return r, nil
}
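setEpochBoundaryRoot and epochBoundaryRoot are referenced above but not shown in this diff. A plausible sketch of such helpers, assuming they are simple RWMutex-guarded accessors over the epochBoundarySlotToRoot map; the real implementation may differ in detail:

package main

import (
	"fmt"
	"sync"
)

// boundaryCache is an assumed shape for the slot -> root cache; the real
// helpers in the stategen package may differ.
type boundaryCache struct {
	lock  sync.RWMutex
	roots map[uint64][32]byte
}

func (c *boundaryCache) set(slot uint64, root [32]byte) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.roots[slot] = root
}

func (c *boundaryCache) get(slot uint64) ([32]byte, bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	r, ok := c.roots[slot]
	return r, ok
}

func main() {
	c := &boundaryCache{roots: make(map[uint64][32]byte)}
	c.set(32, [32]byte{'a'})
	r, ok := c.get(32)
	fmt.Println(ok, r[0] == 'a') // true true
}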

View File

@@ -0,0 +1,316 @@
package stategen
import (
"context"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestSaveHotState_AlreadyHas(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch)
r := [32]byte{'A'}
// Pre cache the hot state.
service.hotStateCache.Put(r, beaconState)
if err := service.saveHotState(ctx, r, beaconState); err != nil {
t.Fatal(err)
}
// Should not save the state and state summary.
if service.beaconDB.HasState(ctx, r) {
t.Error("Should not have saved the state")
}
if service.beaconDB.HasStateSummary(ctx, r) {
t.Error("Should not have saved the state summary")
}
testutil.AssertLogsDoNotContain(t, hook, "Saved full state on epoch boundary")
}
func TestSaveHotState_CanSaveOnEpochBoundary(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch)
r := [32]byte{'A'}
if err := service.saveHotState(ctx, r, beaconState); err != nil {
t.Fatal(err)
}
// Should save both state and state summary.
if !service.beaconDB.HasState(ctx, r) {
t.Error("Should have saved the state")
}
if !service.beaconDB.HasStateSummary(ctx, r) {
t.Error("Should have saved the state summary")
}
testutil.AssertLogsContain(t, hook, "Saved full state on epoch boundary")
}
func TestSaveHotState_NoSaveNotEpochBoundary(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch - 1)
r := [32]byte{'A'}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := db.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
gRoot, _ := ssz.HashTreeRoot(b.Block)
if err := db.SaveGenesisBlockRoot(ctx, gRoot); err != nil {
t.Fatal(err)
}
if err := service.saveHotState(ctx, r, beaconState); err != nil {
t.Fatal(err)
}
// Should only save state summary.
if service.beaconDB.HasState(ctx, r) {
t.Error("Should not have saved the state")
}
if !service.beaconDB.HasStateSummary(ctx, r) {
t.Error("Should have saved the state summary")
}
testutil.AssertLogsDoNotContain(t, hook, "Saved full state on epoch boundary")
}
func TestLoadHoteStateByRoot_Cached(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
r := [32]byte{'A'}
service.hotStateCache.Put(r, beaconState)
// This tests the case where the hot state was already cached.
loadedState, err := service.loadHotStateByRoot(ctx, r)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
t.Error("Did not correctly cache state")
}
}
func TestLoadHoteStateByRoot_FromDBCanProcess(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
boundaryRoot := [32]byte{'A'}
blkRoot := [32]byte{'B'}
if err := service.beaconDB.SaveState(ctx, beaconState, boundaryRoot); err != nil {
t.Fatal(err)
}
targetSlot := uint64(10)
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: targetSlot,
Root: blkRoot[:],
BoundaryRoot: boundaryRoot[:],
}); err != nil {
t.Fatal(err)
}
// This tests the case where the hot state was not cached and needs processing.
loadedState, err := service.loadHotStateByRoot(ctx, blkRoot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != targetSlot {
t.Error("Did not correctly load state")
}
}
func TestLoadHoteStateByRoot_FromDBBoundaryCase(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
boundaryRoot := [32]byte{'A'}
if err := service.beaconDB.SaveState(ctx, beaconState, boundaryRoot); err != nil {
t.Fatal(err)
}
targetSlot := uint64(0)
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: targetSlot,
Root: boundaryRoot[:],
BoundaryRoot: boundaryRoot[:],
}); err != nil {
t.Fatal(err)
}
// This tests the case where the hot state was not cached but doesn't need processing
// because it is on the epoch boundary slot.
loadedState, err := service.loadHotStateByRoot(ctx, boundaryRoot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != targetSlot {
t.Error("Did not correctly load state")
}
}
func TestLoadHoteStateBySlot_CanAdvanceSlotUsingCache(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
r := [32]byte{'A'}
service.hotStateCache.Put(r, beaconState)
service.setEpochBoundaryRoot(0, r)
slot := uint64(10)
loadedState, err := service.loadHotStateBySlot(ctx, slot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != slot {
t.Error("Did not correctly load state")
}
}
func TestLoadHoteStateBySlot_CanAdvanceSlotUsingDB(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
r := [32]byte{'A'}
service.setEpochBoundaryRoot(0, r)
if err := service.beaconDB.SaveState(ctx, beaconState, r); err != nil {
t.Fatal(err)
}
slot := uint64(10)
loadedState, err := service.loadHotStateBySlot(ctx, slot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != slot {
t.Error("Did not correctly load state")
}
}
func TestLoadEpochBoundaryRoot_Exists(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
r := [32]byte{'a'}
service.setEpochBoundaryRoot(params.BeaconConfig().SlotsPerEpoch, r)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
boundaryRoot, err := service.loadEpochBoundaryRoot(ctx, r, beaconState)
if err != nil {
t.Fatal(err)
}
if r != boundaryRoot {
t.Error("Did not correctly load boundary root")
}
}
func TestLoadEpochBoundaryRoot_SameSlot(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
r := [32]byte{'a'}
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch)
boundaryRoot, err := service.loadEpochBoundaryRoot(ctx, r, beaconState)
if err != nil {
t.Fatal(err)
}
if r != boundaryRoot {
t.Error("Did not correctly load boundary root")
}
}
func TestLoadEpochBoundaryRoot_Genesis(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
r := [32]byte{'a'}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := db.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
gRoot, _ := ssz.HashTreeRoot(b.Block)
if err := db.SaveGenesisBlockRoot(ctx, gRoot); err != nil {
t.Fatal(err)
}
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(1)
boundaryRoot, err := service.loadEpochBoundaryRoot(ctx, r, beaconState)
if err != nil {
t.Fatal(err)
}
if boundaryRoot != gRoot {
t.Error("Did not correctly load boundary root")
}
}
func TestLoadEpochBoundaryRoot_LastSavedBlock(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
b1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: service.lastArchivedSlot + 5}}
if err := service.beaconDB.SaveBlock(ctx, b1); err != nil {
t.Fatal(err)
}
b1Root, _ := ssz.HashTreeRoot(b1.Block)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch + 10)
boundaryRoot, err := service.loadEpochBoundaryRoot(ctx, [32]byte{}, beaconState)
if err != nil {
t.Fatal(err)
}
if boundaryRoot != b1Root {
t.Error("Did not correctly load boundary root")
}
}

View File

@@ -0,0 +1,99 @@
package stategen
import (
"context"
"encoding/hex"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// MigrateToCold advances the split point between the cold and hot state sections.
// It moves the recently finalized states from the hot section to the cold section and
// only preserves the ones that fall on an archived point.
func (s *State) MigrateToCold(ctx context.Context, finalizedState *state.BeaconState, finalizedRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "stateGen.MigrateToCold")
defer span.End()
// Verify the migration is sensible. The new finalized point must increase the current split slot and
// lie on an epoch boundary for the hot state summary scheme to work.
currentSplitSlot := s.splitInfo.slot
if currentSplitSlot > finalizedState.Slot() {
return nil
}
if !helpers.IsEpochStart(finalizedState.Slot()) {
return nil
}
// Move the states between the split slot and the finalized slot from the hot section to the cold section.
filter := filters.NewFilter().SetStartSlot(currentSplitSlot).SetEndSlot(finalizedState.Slot() - 1)
blockRoots, err := s.beaconDB.BlockRoots(ctx, filter)
if err != nil {
return err
}
for _, r := range blockRoots {
stateSummary, err := s.beaconDB.StateSummary(ctx, r)
if err != nil {
return err
}
if stateSummary == nil || stateSummary.Slot == 0 {
continue
}
if stateSummary.Slot%s.slotsPerArchivedPoint == 0 {
archivePointIndex := stateSummary.Slot / s.slotsPerArchivedPoint
if s.beaconDB.HasState(ctx, r) {
hotState, err := s.beaconDB.State(ctx, r)
if err != nil {
return err
}
if err := s.beaconDB.SaveArchivedPointState(ctx, hotState.Copy(), archivePointIndex); err != nil {
return err
}
} else {
hotState, err := s.ComputeStateUpToSlot(ctx, stateSummary.Slot)
if err != nil {
return err
}
if err := s.beaconDB.SaveArchivedPointState(ctx, hotState.Copy(), archivePointIndex); err != nil {
return err
}
}
if err := s.beaconDB.SaveArchivedPointRoot(ctx, r, archivePointIndex); err != nil {
return err
}
log.WithFields(logrus.Fields{
"slot": stateSummary.Slot,
"archiveIndex": archivePointIndex,
"root": hex.EncodeToString(bytesutil.Trunc(r[:])),
}).Info("Saved archived point during state migration")
}
if s.beaconDB.HasState(ctx, r) {
if err := s.beaconDB.DeleteState(ctx, r); err != nil {
return err
}
log.WithFields(logrus.Fields{
"slot": stateSummary.Slot,
"root": hex.EncodeToString(bytesutil.Trunc(r[:])),
}).Info("Deleted state during migration")
}
s.deleteEpochBoundaryRoot(stateSummary.Slot)
}
// Update the split slot and root.
s.splitInfo = &splitSlotAndRoot{slot: finalizedState.Slot(), root: finalizedRoot}
log.WithFields(logrus.Fields{
"slot": s.splitInfo.slot,
"root": hex.EncodeToString(bytesutil.Trunc(s.splitInfo.root[:])),
}).Info("Set hot and cold state split point")
return nil
}
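Migration walks every block root between the old split slot and the new finalized slot, copies the ones that land on an archived point into the cold section, deletes the hot states in that range, and finally advances the split. A compact sketch of that selection logic over plain slot numbers (the real code operates on block roots and also regenerates missing states and persists roots and summaries):

package main

import "fmt"

// migratePlan classifies slots in [splitSlot, finalizedSlot): slots on an
// archived point are copied into the cold bucket before every hot state
// in the range is deleted. Genesis (slot 0) is left untouched.
func migratePlan(splitSlot, finalizedSlot, slotsPerArchivedPoint uint64) (archive, deleteHot []uint64) {
	for slot := splitSlot; slot < finalizedSlot; slot++ {
		if slot == 0 {
			continue
		}
		if slot%slotsPerArchivedPoint == 0 {
			archive = append(archive, slot)
		}
		deleteHot = append(deleteHot, slot)
	}
	return archive, deleteHot
}

func main() {
	archive, deleteHot := migratePlan(0, 8, 4)
	fmt.Println(archive)        // [4]
	fmt.Println(len(deleteHot)) // 7 hot states fall below the new split and can go
}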

View File

@@ -0,0 +1,104 @@
package stategen
import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestMigrateToCold_NoBlock(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch)
if err := service.MigrateToCold(ctx, beaconState, [32]byte{}); err != nil {
t.Fatal(err)
}
testutil.AssertLogsContain(t, hook, "Set hot and cold state split point")
}
func TestMigrateToCold_HigherSplitSlot(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.splitInfo.slot = 2
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(1)
if err := service.MigrateToCold(ctx, beaconState, [32]byte{}); err != nil {
t.Fatal(err)
}
testutil.AssertLogsDoNotContain(t, hook, "Set hot and cold state split point")
}
func TestMigrateToCold_NotEpochStart(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch + 1)
if err := service.MigrateToCold(ctx, beaconState, [32]byte{}); err != nil {
t.Fatal(err)
}
testutil.AssertLogsDoNotContain(t, hook, "Set hot and cold state split point")
}
func TestMigrateToCold_MigrationCompletes(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch)
b := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{Slot: 2},
}
if err := service.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
bRoot, _ := ssz.HashTreeRoot(b.Block)
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bRoot[:], Slot: 2}); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveState(ctx, beaconState, bRoot); err != nil {
t.Fatal(err)
}
service.slotsPerArchivedPoint = 2 // Ensure we can land on archived point.
if err := service.MigrateToCold(ctx, beaconState, [32]byte{}); err != nil {
t.Fatal(err)
}
if !service.beaconDB.HasArchivedPoint(ctx, 1) {
t.Error("Did not preserve archived point")
}
testutil.AssertLogsContain(t, hook, "Saved archived point during state migration")
testutil.AssertLogsContain(t, hook, "Deleted state during migration")
testutil.AssertLogsContain(t, hook, "Set hot and cold state split point")
}

View File

@@ -13,9 +13,57 @@ import (
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
// ComputeStateUpToSlot returns a processed state up to the input target slot.
// If the last processed block is at slot 32 and the input target slot is 40, this
// returns the state processed up to slot 40 via empty slots.
// If there are duplicated blocks in a single slot, the canonical block will be used.
func (s *State) ComputeStateUpToSlot(ctx context.Context, targetSlot uint64) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.ComputeStateUpToSlot")
defer span.End()
// Return genesis state if target slot is 0.
if targetSlot == 0 {
return s.beaconDB.GenesisState(ctx)
}
lastBlockRoot, lastBlockSlot, err := s.lastSavedBlock(ctx, targetSlot)
if err != nil {
return nil, errors.Wrap(err, "could not get last saved block")
}
lastBlockRootForState, err := s.lastSavedState(ctx, targetSlot)
if err != nil {
return nil, errors.Wrap(err, "could not get last valid state")
}
lastState, err := s.beaconDB.State(ctx, lastBlockRootForState)
if err != nil {
return nil, err
}
if lastState == nil {
return nil, errUnknownState
}
// Return if the last valid state's slot is higher than the target slot.
if lastState.Slot() >= targetSlot {
return lastState, nil
}
blks, err := s.LoadBlocks(ctx, lastState.Slot()+1, lastBlockSlot, lastBlockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not load blocks")
}
lastState, err = s.ReplayBlocks(ctx, lastState, blks, targetSlot)
if err != nil {
return nil, errors.Wrap(err, "could not replay blocks")
}
return lastState, nil
}
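ComputeStateUpToSlot only replays when the last saved state sits below the target slot; otherwise the saved state is returned as-is. A small sketch of that decision, reduced to slot arithmetic:

package main

import "fmt"

// replaySpan works out how far ComputeStateUpToSlot has to advance: from
// the last saved state's slot up to the target slot. A zero span means
// the saved state can be returned as-is.
func replaySpan(lastSavedStateSlot, targetSlot uint64) uint64 {
	if lastSavedStateSlot >= targetSlot {
		return 0
	}
	return targetSlot - lastSavedStateSlot
}

func main() {
	fmt.Println(replaySpan(32, 40)) // 8: replay blocks (or empty slots) from 33 through 40
	fmt.Println(replaySpan(64, 40)) // 0: the saved state already covers the target
}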
// ReplayBlocks replays the input blocks on the input state until the target slot is reached.
func (s *State) ReplayBlocks(ctx context.Context, state *state.BeaconState, signed []*ethpb.SignedBeaconBlock, targetSlot uint64) (*state.BeaconState, error) {
var err error
@@ -68,11 +116,15 @@ func (s *State) LoadBlocks(ctx context.Context, startSlot uint64, endSlot uint64
if len(blocks) != len(blockRoots) {
return nil, errors.New("length of blocks and roots don't match")
}
// Return early if there are no blocks for the given input.
length := len(blocks)
if length == 0 {
return nil, nil
}
// The last retrieved block root has to match the input end block root.
// This covers the edge case where there are multiple blocks at the same end slot,
// in which case the end root may not be the last index in `blockRoots`.
length := len(blocks)
for length >= 3 && blocks[length-1].Block.Slot == blocks[length-2].Block.Slot && blockRoots[length-1] != endBlockRoot {
length--
if blockRoots[length-2] == endBlockRoot {
@@ -196,7 +248,8 @@ func (s *State) lastSavedBlock(ctx context.Context, slot uint64) ([32]byte, uint
return [32]byte{}, 0, err
}
if len(rs) == 0 {
return [32]byte{}, 0, errors.New("block root has 0 length")
// Return zero hash if there hasn't been any block in the DB yet.
return params.BeaconConfig().ZeroHash, 0, nil
}
lastRoot := rs[len(rs)-1]
@@ -235,7 +288,8 @@ func (s *State) lastSavedState(ctx context.Context, slot uint64) ([32]byte, erro
return [32]byte{}, err
}
if len(rs) == 0 {
return [32]byte{}, errors.New("block root has 0 length")
// Return zero hash if there hasn't been any block in the DB yet.
return params.BeaconConfig().ZeroHash, nil
}
for i := len(rs) - 1; i >= 0; i-- {
// Stop until a state is saved.

View File

@@ -5,6 +5,7 @@ import (
"reflect"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
@@ -17,6 +18,70 @@ import (
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestComputeStateUpToSlot_GenesisState(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
gBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
gRoot, err := ssz.HashTreeRoot(gBlk.Block)
if err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveBlock(ctx, gBlk); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveGenesisBlockRoot(ctx, gRoot); err != nil {
t.Fatal(err)
}
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.beaconDB.SaveState(ctx, beaconState, gRoot); err != nil {
t.Fatal(err)
}
s, err := service.ComputeStateUpToSlot(ctx, 0)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(s.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
t.Error("Did not receive correct genesis state")
}
}
func TestComputeStateUpToSlot_CanProcessUpTo(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
gBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
gRoot, err := ssz.HashTreeRoot(gBlk.Block)
if err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveBlock(ctx, gBlk); err != nil {
t.Fatal(err)
}
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.beaconDB.SaveState(ctx, beaconState, gRoot); err != nil {
t.Fatal(err)
}
s, err := service.ComputeStateUpToSlot(ctx, params.BeaconConfig().SlotsPerEpoch+1)
if err != nil {
t.Fatal(err)
}
if s.Slot() != params.BeaconConfig().SlotsPerEpoch+1 {
t.Log(s.Slot())
t.Error("Did not receive correct processed state")
}
}
func TestReplayBlocks_AllSkipSlots(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
@@ -350,7 +415,7 @@ func TestLastSavedBlock_CanGet(t *testing.T) {
}
}
func TestLastSavedBlock_OutOfRange(t *testing.T) {
func TestLastSavedBlock_NoSavedBlock(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
@@ -364,9 +429,12 @@ func TestLastSavedBlock_OutOfRange(t *testing.T) {
t.Fatal(err)
}
_, _, err := s.lastSavedBlock(ctx, s.lastArchivedSlot+1)
if err.Error() != "block root has 0 length" {
t.Error("Did not get wanted error")
r, slot, err := s.lastSavedBlock(ctx, s.lastArchivedSlot+1)
if err != nil {
t.Fatal(err)
}
if slot != 0 || r != params.BeaconConfig().ZeroHash {
t.Error("Did not get no saved block info")
}
}
@@ -439,7 +507,7 @@ func TestLastSavedState_CanGet(t *testing.T) {
}
}
func TestLastSavedState_OutOfRange(t *testing.T) {
func TestLastSavedState_NoSavedBlockState(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
@@ -453,9 +521,12 @@ func TestLastSavedState_OutOfRange(t *testing.T) {
t.Fatal(err)
}
_, err := s.lastSavedState(ctx, s.lastArchivedSlot+1)
if err.Error() != "block root has 0 length" {
t.Error("Did not get wanted error")
r, err := s.lastSavedState(ctx, s.lastArchivedSlot+1)
if err != nil {
t.Fatal(err)
}
if r != params.BeaconConfig().ZeroHash {
t.Error("Did not get no saved block info")
}
}

View File

@@ -3,6 +3,7 @@ package stategen
import (
"sync"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/shared/params"
)
@@ -12,8 +13,18 @@ import (
type State struct {
beaconDB db.NoHeadAccessDatabase
lastArchivedSlot uint64
slotsPerArchivedPoint uint64
epochBoundarySlotToRoot map[uint64][32]byte
epochBoundaryLock sync.RWMutex
hotStateCache *cache.HotStateCache
splitInfo *splitSlotAndRoot
}
// splitSlotAndRoot tracks the split point: the slot and block root at which
// the DB is divided into cold and hot sections.
type splitSlotAndRoot struct {
slot uint64
root [32]byte
}
// New returns a new state management object.
@@ -21,6 +32,8 @@ func New(db db.NoHeadAccessDatabase) *State {
return &State{
beaconDB: db,
epochBoundarySlotToRoot: make(map[uint64][32]byte),
hotStateCache: cache.NewHotStateCache(),
splitInfo: &splitSlotAndRoot{slot: 0, root: params.BeaconConfig().ZeroHash},
}
}
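The splitInfo field marks the boundary between the cold (archived) and hot sections of the DB: states below the split slot live in the cold section, states at or beyond it in the hot section. A trivial, self-contained sketch of that routing decision (illustrative only, not the stategen API):
package main

import "fmt"

// splitPoint mirrors the idea of splitSlotAndRoot above (illustrative only).
type splitPoint struct {
	slot uint64
}

// section reports which part of the DB a state at the given slot belongs to.
func section(sp splitPoint, slot uint64) string {
	if slot < sp.slot {
		return "cold (archived)"
	}
	return "hot"
}

func main() {
	sp := splitPoint{slot: 1024}
	for _, s := range []uint64{0, 1023, 1024, 5000} {
		fmt.Printf("slot %d -> %s section\n", s, section(sp, s))
	}
}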

View File

@@ -6,6 +6,7 @@ go_library(
"arrays.go",
"attestations.go",
"blocks.go",
"hash_function.go",
"helpers.go",
"state_root.go",
"validators.go",
@@ -24,7 +25,6 @@ go_library(
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_minio_sha256_simd//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_protolambda_zssz//htr:go_default_library",
"@com_github_protolambda_zssz//merkle:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

View File

@@ -7,6 +7,8 @@ import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
@@ -34,6 +36,7 @@ func BlockHeaderRoot(header *ethpb.BeaconBlockHeader) ([32]byte, error) {
// a BeaconBlockHeader struct according to the eth2
// Simple Serialize specification.
func Eth1Root(eth1Data *ethpb.Eth1Data) ([32]byte, error) {
enc := make([]byte, 0, 96)
fieldRoots := make([][]byte, 3)
for i := 0; i < len(fieldRoots); i++ {
fieldRoots[i] = make([]byte, 32)
@@ -42,17 +45,32 @@ func Eth1Root(eth1Data *ethpb.Eth1Data) ([32]byte, error) {
if len(eth1Data.DepositRoot) > 0 {
depRoot := bytesutil.ToBytes32(eth1Data.DepositRoot)
fieldRoots[0] = depRoot[:]
enc = append(enc, depRoot[:]...)
}
eth1DataCountBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(eth1DataCountBuf, eth1Data.DepositCount)
eth1CountRoot := bytesutil.ToBytes32(eth1DataCountBuf)
fieldRoots[1] = eth1CountRoot[:]
enc = append(enc, eth1CountRoot[:]...)
if len(eth1Data.BlockHash) > 0 {
blockHash := bytesutil.ToBytes32(eth1Data.BlockHash)
fieldRoots[2] = blockHash[:]
enc = append(enc, blockHash[:]...)
}
if featureconfig.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil {
return found.([32]byte), nil
}
}
}
return bitwiseMerkleize(fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots)))
root, err := bitwiseMerkleize(fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots)))
if err != nil {
return [32]byte{}, err
}
if featureconfig.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(enc), root, 32)
}
return root, nil
}
// Eth1DataVotesRoot computes the HashTreeRoot Merkleization of
@@ -60,13 +78,21 @@ func Eth1Root(eth1Data *ethpb.Eth1Data) ([32]byte, error) {
// Simple Serialize specification.
func Eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) {
eth1VotesRoots := make([][]byte, 0)
enc := make([]byte, len(eth1DataVotes)*32)
for i := 0; i < len(eth1DataVotes); i++ {
eth1, err := Eth1Root(eth1DataVotes[i])
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute eth1data merkleization")
}
copy(enc[(i*32):(i+1)*32], eth1[:])
eth1VotesRoots = append(eth1VotesRoots, eth1[:])
}
hashKey := hashutil.FastSum256(enc)
if featureconfig.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil {
return found.([32]byte), nil
}
}
eth1Chunks, err := pack(eth1VotesRoots)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not chunk eth1 votes roots")
@@ -82,5 +108,9 @@ func Eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) {
// We need to mix in the length of the slice.
eth1VotesRootBufRoot := make([]byte, 32)
copy(eth1VotesRootBufRoot, eth1VotesRootBuf.Bytes())
return mixInLength(eth1VotesRootsRoot, eth1VotesRootBufRoot), nil
root := mixInLength(eth1VotesRootsRoot, eth1VotesRootBufRoot)
if featureconfig.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32)
}
return root, nil
}
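Both Eth1Root and Eth1DataVotesRoot above key their cache on a hash of the serialized input and skip the Merkleization on a hit. A simplified, self-contained sketch of that content-addressed memoization (not the real cachedHasher; sha256.Sum256 stands in for hashutil.FastSum256, and the compute callback for the actual Merkleization):
package main

import (
	"crypto/sha256"
	"fmt"
)

// rootCache memoizes expensive root computations keyed by a hash of the input bytes.
type rootCache struct {
	entries map[[32]byte][32]byte
}

func (c *rootCache) root(enc []byte, compute func([]byte) [32]byte) [32]byte {
	key := sha256.Sum256(enc) // analogous to hashutil.FastSum256(enc)
	if r, ok := c.entries[key]; ok {
		return r // cache hit: reuse the previously computed root
	}
	r := compute(enc)
	c.entries[key] = r
	return r
}

func main() {
	c := &rootCache{entries: make(map[[32]byte][32]byte)}
	expensive := func(b []byte) [32]byte { return sha256.Sum256(append([]byte("root:"), b...)) }
	enc := []byte("serialized eth1 data votes")
	fmt.Printf("%x\n", c.root(enc, expensive))
	fmt.Printf("%x\n", c.root(enc, expensive)) // second call hits the cache
}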

View File

@@ -0,0 +1,26 @@
package stateutil
import "encoding/binary"
// HashFn describes a hash function and its associated bytes buffer
type HashFn struct {
f func(input []byte) [32]byte
bytesBuffer [64]byte
}
// Combi describes a method which merges two 32-byte arrays and hashes
// them.
func (h HashFn) Combi(a [32]byte, b [32]byte) [32]byte {
copy(h.bytesBuffer[:32], a[:])
copy(h.bytesBuffer[32:], b[:])
return h.f(h.bytesBuffer[:])
}
// MixIn describes a method where we add in the provided
// integer to the end of the byte array and hash it.
func (h HashFn) MixIn(a [32]byte, i uint64) [32]byte {
copy(h.bytesBuffer[:32], a[:])
copy(h.bytesBuffer[32:], make([]byte, 32, 32))
binary.LittleEndian.PutUint64(h.bytesBuffer[32:], i)
return h.f(h.bytesBuffer[:])
}
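A minimal usage sketch of the new HashFn type, assuming it sits in a _test file of the stateutil package and that the standard library sha256.Sum256 stands in for hashutil.CustomSHA256Hasher:
package stateutil

import (
	"crypto/sha256"
	"fmt"
)

func ExampleHashFn() {
	h := HashFn{f: sha256.Sum256}
	left := sha256.Sum256([]byte("left leaf"))
	right := sha256.Sum256([]byte("right leaf"))
	parent := h.Combi(left, right) // hash(left || right), i.e. a Merkle parent node
	withLen := h.MixIn(parent, 2)  // mix the list length (2) into the root
	fmt.Printf("%x\n%x\n", parent, withLen)
}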

View File

@@ -6,7 +6,6 @@ import (
"github.com/minio/sha256-simd"
"github.com/pkg/errors"
"github.com/protolambda/zssz/htr"
"github.com/protolambda/zssz/merkle"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/shared/hashutil"
@@ -47,11 +46,13 @@ func bitwiseMerkleize(chunks [][]byte, count uint64, limit uint64) ([32]byte, er
if count > limit {
return [32]byte{}, errors.New("merkleizing list that is too large, over limit")
}
hasher := htr.HashFn(hashutil.CustomSHA256Hasher())
hashFn := &HashFn{
f: hashutil.CustomSHA256Hasher(),
}
leafIndexer := func(i uint64) []byte {
return chunks[i]
}
return merkle.Merkleize(hasher, count, limit, leafIndexer), nil
return merkle.Merkleize(hashFn.f, count, limit, leafIndexer), nil
}
// bitwiseMerkleizeArrays is used when a set of 32-byte root chunks are provided.
@@ -59,11 +60,13 @@ func bitwiseMerkleizeArrays(chunks [][32]byte, count uint64, limit uint64) ([32]
if count > limit {
return [32]byte{}, errors.New("merkleizing list that is too large, over limit")
}
hasher := htr.HashFn(hashutil.CustomSHA256Hasher())
hashFn := &HashFn{
f: hashutil.CustomSHA256Hasher(),
}
leafIndexer := func(i uint64) []byte {
return chunks[i][:]
}
return merkle.Merkleize(hasher, count, limit, leafIndexer), nil
return merkle.Merkleize(hashFn.f, count, limit, leafIndexer), nil
}
func pack(serializedItems [][]byte) ([][]byte, error) {

View File

@@ -28,7 +28,7 @@ const fetchRequestsBuffer = 8 // number of pending fetch requests
var (
errNoPeersAvailable = errors.New("no peers available, waiting for reconnect")
errCtxIsDone = errors.New("fetcher's context is done, reinitialize")
errFetcherCtxIsDone = errors.New("fetcher's context is done, reinitialize")
errStartSlotIsTooHigh = errors.New("start slot is bigger than highest finalized slot")
)
@@ -42,14 +42,14 @@ type blocksFetcherConfig struct {
// On an incoming request, the requested block range is evenly divided
// among available peers (for fair network load distribution).
type blocksFetcher struct {
ctx context.Context
cancel context.CancelFunc
headFetcher blockchain.HeadFetcher
p2p p2p.P2P
rateLimiter *leakybucket.Collector
requests chan *fetchRequestParams // incoming fetch requests from downstream clients
receivedFetchResponses chan *fetchRequestResponse // responses from peers are forwarded to downstream clients
quit chan struct{} // termination notifier
ctx context.Context
cancel context.CancelFunc
headFetcher blockchain.HeadFetcher
p2p p2p.P2P
rateLimiter *leakybucket.Collector
fetchRequests chan *fetchRequestParams
fetchResponses chan *fetchRequestResponse
quit chan struct{} // termination notifier
}
// fetchRequestParams holds parameters necessary to schedule a fetch request.
@@ -77,14 +77,14 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
false /* deleteEmptyBuckets */)
return &blocksFetcher{
ctx: ctx,
cancel: cancel,
headFetcher: cfg.headFetcher,
p2p: cfg.p2p,
rateLimiter: rateLimiter,
requests: make(chan *fetchRequestParams, fetchRequestsBuffer),
receivedFetchResponses: make(chan *fetchRequestResponse),
quit: make(chan struct{}),
ctx: ctx,
cancel: cancel,
headFetcher: cfg.headFetcher,
p2p: cfg.p2p,
rateLimiter: rateLimiter,
fetchRequests: make(chan *fetchRequestParams, fetchRequestsBuffer),
fetchResponses: make(chan *fetchRequestResponse),
quit: make(chan struct{}),
}
}
@@ -92,7 +92,7 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
func (f *blocksFetcher) start() error {
select {
case <-f.ctx.Done():
return errCtxIsDone
return errFetcherCtxIsDone
default:
go f.loop()
return nil
@@ -107,21 +107,32 @@ func (f *blocksFetcher) stop() {
// requestResponses exposes a channel into which fetcher pushes generated request responses.
func (f *blocksFetcher) requestResponses() <-chan *fetchRequestResponse {
return f.receivedFetchResponses
return f.fetchResponses
}
// loop is the main fetcher loop; it listens for incoming requests/cancellations and forwards outgoing responses.
func (f *blocksFetcher) loop() {
defer close(f.receivedFetchResponses)
defer close(f.quit)
// Wait for all loop's goroutines to finish, and safely release resources.
wg := &sync.WaitGroup{}
defer func() {
wg.Wait()
close(f.fetchResponses)
}()
for {
select {
case req := <-f.requests:
go f.handleRequest(req.ctx, req.start, req.count)
case <-f.ctx.Done():
log.Debug("Context closed, exiting goroutine")
log.Debug("Context closed, exiting goroutine (blocks fetcher)")
return
case req := <-f.fetchRequests:
wg.Add(1)
go func() {
defer wg.Done()
f.handleRequest(req.ctx, req.start, req.count)
}()
}
}
}
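For context on the refactor above: responses are produced by per-request goroutines, so the fetchResponses channel may only be closed after every worker has finished. A generic, self-contained sketch of that WaitGroup-guarded close (not Prysm code):
package main

import (
	"fmt"
	"sync"
)

func main() {
	results := make(chan int)
	wg := &sync.WaitGroup{}

	go func() {
		defer func() {
			wg.Wait()      // wait for all in-flight workers
			close(results) // only then is it safe to close
		}()
		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func(n int) {
				defer wg.Done() // runs after the send below completes
				results <- n * n
			}(i)
		}
	}()

	for r := range results {
		fmt.Println(r)
	}
}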
@@ -130,9 +141,9 @@ func (f *blocksFetcher) loop() {
func (f *blocksFetcher) scheduleRequest(ctx context.Context, start, count uint64) error {
select {
case <-f.ctx.Done():
return errCtxIsDone
return errFetcherCtxIsDone
default:
f.requests <- &fetchRequestParams{
f.fetchRequests <- &fetchRequestParams{
ctx: ctx,
start: start,
count: count,
@@ -146,12 +157,18 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start, count uint64)
ctx, span := trace.StartSpan(ctx, "initialsync.handleRequest")
defer span.End()
if ctx.Err() != nil {
f.receivedFetchResponses <- &fetchRequestResponse{
start: start,
count: count,
err: ctx.Err(),
// sendResponse ensures that a response is not sent to a closed channel once the context is done.
sendResponse := func(ctx context.Context, response *fetchRequestResponse) {
if ctx.Err() != nil {
log.WithError(ctx.Err()).Debug("Can not send fetch request response")
return
}
f.fetchResponses <- response
}
if ctx.Err() != nil {
sendResponse(ctx, nil)
return
}
@@ -173,31 +190,31 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start, count uint64)
highestFinalizedSlot := helpers.StartSlot(finalizedEpoch + 1)
if start > highestFinalizedSlot {
log.WithError(errStartSlotIsTooHigh).Debug("Block fetch request failed")
f.receivedFetchResponses <- &fetchRequestResponse{
sendResponse(ctx, &fetchRequestResponse{
start: start,
count: count,
err: errStartSlotIsTooHigh,
}
})
return
}
resp, err := f.collectPeerResponses(ctx, root, finalizedEpoch, start, 1, count, peers)
if err != nil {
log.WithError(err).Debug("Block fetch request failed")
f.receivedFetchResponses <- &fetchRequestResponse{
sendResponse(ctx, &fetchRequestResponse{
start: start,
count: count,
err: err,
}
})
return
}
f.receivedFetchResponses <- &fetchRequestResponse{
sendResponse(ctx, &fetchRequestResponse{
start: start,
count: count,
blocks: resp,
peers: peers,
}
})
}
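The sendResponse helper above drops a response once the request context has expired rather than pushing into a channel nobody will drain. The same guarantee is often written as a select on ctx.Done(); a generic, self-contained sketch of that variant (not Prysm code, names are illustrative):
package main

import (
	"context"
	"fmt"
)

// send gives up on delivering a message once ctx is done, instead of
// blocking forever on a channel nobody is reading anymore.
func send(ctx context.Context, out chan<- string, msg string) {
	select {
	case <-ctx.Done():
		fmt.Println("dropping response:", ctx.Err())
	case out <- msg:
	}
}

func main() {
	out := make(chan string) // unbuffered, like a response channel with one reader
	ctx, cancel := context.WithCancel(context.Background())

	go send(ctx, out, "first")
	fmt.Println("got:", <-out) // delivered while the context is alive

	cancel()
	send(ctx, out, "second") // no reader and ctx is done: dropped instead of deadlocking
}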
// collectPeerResponses orchestrates block fetching from the available peers.

View File

@@ -44,13 +44,13 @@ func TestBlocksFetcherInitStartStop(t *testing.T) {
select {
case <-fetcher.requestResponses():
default:
t.Error("receivedFetchResponses channel is leaked")
t.Error("fetchResponses channel is leaked")
}
})
t.Run("re-starting of stopped fetcher", func(t *testing.T) {
if err := fetcher.start(); err == nil {
t.Errorf("expected error not returned: %v", errCtxIsDone)
t.Errorf("expected error not returned: %v", errFetcherCtxIsDone)
}
})
@@ -405,6 +405,20 @@ func TestBlocksFetcherRoundRobin(t *testing.T) {
}
}
func TestBlocksFetcherScheduleRequest(t *testing.T) {
t.Run("context cancellation", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
headFetcher: nil,
p2p: nil,
})
cancel()
if err := fetcher.scheduleRequest(ctx, 1, blockBatchSize); err == nil {
t.Errorf("expected error: %v", errFetcherCtxIsDone)
}
})
}
func TestBlocksFetcherHandleRequest(t *testing.T) {
chainConfig := struct {
expectedBlockSlots []uint64
@@ -427,43 +441,58 @@ func TestBlocksFetcherHandleRequest(t *testing.T) {
hook := logTest.NewGlobal()
mc, p2p, beaconDB := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fetcher := newBlocksFetcher(
ctx,
&blocksFetcherConfig{
defer dbtest.TeardownDB(t, beaconDB)
t.Run("context cancellation", func(t *testing.T) {
hook.Reset()
ctx, cancel := context.WithCancel(context.Background())
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
headFetcher: mc,
p2p: p2p,
})
requestCtx, _ := context.WithTimeout(context.Background(), 2*time.Second)
go fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchSize /* count */)
cancel()
fetcher.handleRequest(ctx, 1, blockBatchSize)
testutil.AssertLogsContain(t, hook, "Can not send fetch request response")
testutil.AssertLogsContain(t, hook, "context canceled")
})
var blocks []*eth.SignedBeaconBlock
select {
case <-ctx.Done():
t.Error(ctx.Err())
case resp := <-fetcher.requestResponses():
if resp.err != nil {
t.Error(resp.err)
} else {
blocks = resp.blocks
t.Run("receive blocks", func(t *testing.T) {
hook.Reset()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
headFetcher: mc,
p2p: p2p,
})
requestCtx, _ := context.WithTimeout(context.Background(), 2*time.Second)
go fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchSize /* count */)
var blocks []*eth.SignedBeaconBlock
select {
case <-ctx.Done():
t.Error(ctx.Err())
case resp := <-fetcher.requestResponses():
if resp.err != nil {
t.Error(resp.err)
} else {
blocks = resp.blocks
}
}
}
if len(blocks) != blockBatchSize {
t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchSize, len(blocks))
}
testutil.AssertLogsContain(t, hook, "Received blocks")
if len(blocks) != blockBatchSize {
t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchSize, len(blocks))
}
testutil.AssertLogsContain(t, hook, "Received blocks")
var receivedBlockSlots []uint64
for _, blk := range blocks {
receivedBlockSlots = append(receivedBlockSlots, blk.Block.Slot)
}
if missing := sliceutil.NotUint64(sliceutil.IntersectionUint64(chainConfig.expectedBlockSlots, receivedBlockSlots), chainConfig.expectedBlockSlots); len(missing) > 0 {
t.Errorf("Missing blocks at slots %v", missing)
}
dbtest.TeardownDB(t, beaconDB)
var receivedBlockSlots []uint64
for _, blk := range blocks {
receivedBlockSlots = append(receivedBlockSlots, blk.Block.Slot)
}
if missing := sliceutil.NotUint64(sliceutil.IntersectionUint64(chainConfig.expectedBlockSlots, receivedBlockSlots), chainConfig.expectedBlockSlots); len(missing) > 0 {
t.Errorf("Missing blocks at slots %v", missing)
}
})
}
func TestBlocksFetcherRequestBeaconBlocksByRangeRequest(t *testing.T) {

prysm.sh (new executable file, 131 lines)
View File

@@ -0,0 +1,131 @@
#!/bin/bash
set -eu
# Use this script to download the latest Prysm release binary.
# Usage: ./prysm.sh PROCESS FLAGS
# PROCESS can be one of beacon-chain or validator.
# FLAGS are the flags or arguments passed to the PROCESS.
# Downloaded binaries are saved to ./dist.
# Use USE_PRYSM_VERSION to specify a specific release version.
# Example: USE_PRYSM_VERSION=v0.3.3 ./prysm.sh beacon-chain
function color() {
# Usage: color "31;5" "string"
# Some valid values for color:
# - 5 blink, 1 strong, 4 underlined
# - fg: 31 red, 32 green, 33 yellow, 34 blue, 35 purple, 36 cyan, 37 white
# - bg: 40 black, 41 red, 44 blue, 45 purple
printf '\033[%sm%s\033[0m\n' "$@"
}
# `readlink -f` that works on OSX too.
function get_realpath() {
if [ "$(uname -s)" == "Darwin" ]; then
local queue="$1"
if [[ "${queue}" != /* ]] ; then
# Make sure we start with an absolute path.
queue="${PWD}/${queue}"
fi
local current=""
while [ -n "${queue}" ]; do
# Removing a trailing /.
queue="${queue#/}"
# Pull the first path segment off of queue.
local segment="${queue%%/*}"
# If this is the last segment.
if [[ "${queue}" != */* ]] ; then
segment="${queue}"
queue=""
else
# Remove that first segment.
queue="${queue#*/}"
fi
local link="${current}/${segment}"
if [ -h "${link}" ] ; then
link="$(readlink "${link}")"
queue="${link}/${queue}"
if [[ "${link}" == /* ]] ; then
current=""
fi
else
current="${link}"
fi
done
echo "${current}"
else
readlink -f "$1"
fi
}
# Complain if no arguments were provided.
if [ "$#" -lt 1 ]; then
color "31" "Usage: ./prysm.sh PROCESS FLAGS."
color "31" "PROCESS can be beacon-chain or validator."
exit 1
fi
readonly wrapper_dir="$(dirname "$(get_realpath "${BASH_SOURCE[0]}")")/dist"
arch=$(uname -m)
arch=${arch/x86_64/amd64}
arch=${arch/aarch64/arm64}
readonly os_arch_suffix="$(uname -s | tr '[:upper:]' '[:lower:]')-$arch"
mkdir -p $wrapper_dir
function get_prysm_version() {
if [[ -n ${USE_PRYSM_VERSION:-} ]]; then
readonly reason="specified in \$USE_PRYSM_VERSION"
readonly prysm_version="${USE_PRYSM_VERSION}"
else
# Find the latest Prysm version available for download.
readonly reason="automatically selected latest available version"
prysm_version=$(curl -s https://api.github.com/repos/prysmaticlabs/prysm/releases/latest | grep "tag_name" | cut -d : -f 2,3 | tr -d \" | tr -d , | tr -d [:space:])
readonly prysm_version
fi
}
get_prysm_version
color "37" "Latest Prysm version is $prysm_version."
BEACON_CHAIN_REAL="${wrapper_dir}/beacon-chain-${prysm_version}-${os_arch_suffix}"
VALIDATOR_REAL="${wrapper_dir}/validator-${prysm_version}-${os_arch_suffix}"
if [[ ! -x $BEACON_CHAIN_REAL ]]; then
color "34" "Downloading beacon chain@${prysm_version} to ${BEACON_CHAIN_REAL} (${reason})"
curl -L "https://github.com/prysmaticlabs/prysm/releases/download/${prysm_version}/beacon-chain-${prysm_version}-${os_arch_suffix}" -o $BEACON_CHAIN_REAL
chmod +x $BEACON_CHAIN_REAL
else
color "37" "Beacon chain is up to date."
fi
if [[ ! -x $VALIDATOR_REAL ]]; then
color "34" "Downloading validator@${prysm_version} to ${VALIDATOR_REAL} (${reason})"
curl -L "https://github.com/prysmaticlabs/prysm/releases/download/${prysm_version}/validator-${prysm_version}-${os_arch_suffix}" -o $VALIDATOR_REAL
chmod +x $VALIDATOR_REAL
else
color "37" "Validator is up to date."
fi
case $1 in
beacon-chain)
readonly process=$BEACON_CHAIN_REAL
;;
validator)
readonly process=$VALIDATOR_REAL
;;
*)
color "31" "Usage: ./prysm.sh PROCESS FLAGS."
color "31" "PROCESS can be beacon-chain or validator."
;;
esac
color "36" "Starting Prysm $1 ${@:2}"
exec -a "$0" "${process}" "${@:2}"

View File

@@ -1,5 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
# gazelle:resolve go github.com/herumi/bls-eth-go-binary/bls @herumi_bls_eth_go_binary//:go_default_library
go_library(
name = "go_default_library",
srcs = ["bls.go"],
@@ -11,8 +13,8 @@ go_library(
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_herumi_bls_eth_go_binary//bls:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@herumi_bls_eth_go_binary//:go_default_library",
],
)

View File

@@ -36,6 +36,6 @@ go_test(
"//shared/bytesutil:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_ghodss_yaml//:go_default_library",
"@com_github_herumi_bls_eth_go_binary//bls:go_default_library",
"@herumi_bls_eth_go_binary//:go_default_library",
],
)

View File

@@ -134,7 +134,7 @@ func (h *HandlerT) StartCPUProfile(file string) error {
}
h.cpuW = f
h.cpuFile = file
log.Info("CPU profiling started", "dump", h.cpuFile)
log.Info("CPU profiling started", " dump ", h.cpuFile)
return nil
}
@@ -146,7 +146,7 @@ func (h *HandlerT) StopCPUProfile() error {
if h.cpuW == nil {
return errors.New("CPU profiling not in progress")
}
log.Info("Done writing CPU profile", "dump", h.cpuFile)
log.Info("Done writing CPU profile", " dump ", h.cpuFile)
if err := h.cpuW.Close(); err != nil {
return err
}

View File

@@ -53,6 +53,9 @@ type Flags struct {
// as the chain head. UNSAFE, use with caution.
DisableForkChoice bool
// BroadcastSlashings enables p2p broadcasting of proposer or attester slashing.
BroadcastSlashings bool
// Cache toggles.
EnableSSZCache bool // EnableSSZCache see https://github.com/prysmaticlabs/prysm/pull/4558.
EnableEth1DataVoteCache bool // EnableEth1DataVoteCache; see https://github.com/prysmaticlabs/prysm/issues/3106.
@@ -170,10 +173,7 @@ func ConfigureBeaconChain(ctx *cli.Context) {
func ConfigureValidator(ctx *cli.Context) {
complainOnDeprecatedFlags(ctx)
cfg := &Flags{}
if ctx.GlobalBool(minimalConfigFlag.Name) {
log.Warn("Using minimal config")
cfg.MinimalConfig = true
}
cfg = configureConfig(ctx, cfg)
if ctx.GlobalBool(protectProposerFlag.Name) {
log.Warn("Enabled validator proposal slashing protection.")
cfg.ProtectProposer = true

View File

@@ -5,6 +5,10 @@ import (
)
var (
broadcastSlashingFlag = cli.BoolFlag{
Name: "broadcast-slashing",
Usage: "Broadcast slashings from slashing pool.",
}
noCustomConfigFlag = cli.BoolFlag{
Name: "no-custom-config",
Usage: "Run the beacon chain with the real parameters from phase 0.",
@@ -293,6 +297,7 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
checkHeadState,
enableNoiseHandshake,
dontPruneStateStartUp,
broadcastSlashingFlag,
}...)
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"chain_data.go",
"historical_data_retrieval.go",
"metrics.go",
"receivers.go",
"service.go",
"submit.go",
@@ -20,6 +21,8 @@ go_library(
"@com_github_grpc_ecosystem_go_grpc_middleware//tracing/opentracing:go_default_library",
"@com_github_grpc_ecosystem_go_grpc_prometheus//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//plugin/ocgrpc:go_default_library",

View File

@@ -0,0 +1,13 @@
package beaconclient
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
slasherNumAttestationsReceived = promauto.NewCounter(prometheus.CounterOpts{
Name: "slasher_attestations_received_total",
Help: "The # of attestations received by slasher",
})
)
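This counter is bumped from receivers.go whenever a batch of attestations is saved. As a reminder of how promauto metrics are updated and exposed, here is a standalone sketch; the metric name, port, and handler wiring are illustrative and not how Prysm registers its metrics endpoint:
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// received is registered with the default Prometheus registry by promauto.
var received = promauto.NewCounter(prometheus.CounterOpts{
	Name: "example_attestations_received_total",
	Help: "The number of attestations received.",
})

func main() {
	received.Add(3) // e.g. a batch of three attestations was saved
	received.Inc()  // a single attestation
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":2112", nil) // serves the counter at /metrics
}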

View File

@@ -3,8 +3,10 @@ package beaconclient
import (
"context"
"io"
"time"
ptypes "github.com/gogo/protobuf/types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -51,6 +53,8 @@ func (bs *Service) receiveAttestations(ctx context.Context) {
log.WithError(err).Error("Failed to retrieve attestations stream")
return
}
go bs.collectReceivedAttestations(ctx)
for {
res, err := stream.Recv()
// If the stream is closed, we stop the loop.
@@ -66,15 +70,43 @@ func (bs *Service) receiveAttestations(ctx context.Context) {
log.WithError(err).Error("Could not receive attestations from beacon node")
return
}
log.WithFields(logrus.Fields{
"slot": res.Data.Slot,
"indices": res.AttestingIndices,
}).Debug("Received attestation from beacon node")
if err := bs.slasherDB.SaveIndexedAttestation(ctx, res); err != nil {
log.WithError(err).Error("Could not save indexed attestation")
continue
}
// We send the received attestation over the attestation feed.
bs.attestationFeed.Send(res)
bs.receivedAttestationsBuffer <- res
}
}
func (bs *Service) collectReceivedAttestations(ctx context.Context) {
ctx, span := trace.StartSpan(ctx, "beaconclient.collectReceivedAttestations")
defer span.End()
var atts []*ethpb.IndexedAttestation
ticker := time.NewTicker(2 * time.Second)
for {
select {
case <-ticker.C:
if len(atts) > 0 {
bs.collectedAttestationsBuffer <- atts
atts = []*ethpb.IndexedAttestation{}
}
case att := <-bs.receivedAttestationsBuffer:
atts = append(atts, att)
case collectedAtts := <-bs.collectedAttestationsBuffer:
if err := bs.slasherDB.SaveIndexedAttestations(ctx, collectedAtts); err != nil {
log.WithError(err).Error("Could not save indexed attestation")
continue
}
log.Infof("%d attestations for slot %d saved to slasher DB", len(collectedAtts), collectedAtts[0].Data.Slot)
slasherNumAttestationsReceived.Add(float64(len(collectedAtts)))
// After saving, we send the received attestation over the attestation feed.
for _, att := range collectedAtts {
log.WithFields(logrus.Fields{
"slot": att.Data.Slot,
"indices": att.AttestingIndices,
}).Debug("Sending attestation to detection service")
bs.attestationFeed.Send(att)
}
case <-ctx.Done():
return
}
}
}
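collectReceivedAttestations accumulates incoming attestations and flushes them on a two-second ticker, so SaveIndexedAttestations performs one bulk write per batch instead of one write per attestation. A generic, self-contained sketch of that ticker-driven batching loop (not Prysm code; names and durations are illustrative):
package main

import (
	"fmt"
	"time"
)

// batch collects items from in and flushes them as a slice on every tick.
func batch(in <-chan int, flushEvery time.Duration, done <-chan struct{}) {
	var pending []int
	ticker := time.NewTicker(flushEvery)
	defer ticker.Stop()
	for {
		select {
		case item := <-in:
			pending = append(pending, item)
		case <-ticker.C:
			if len(pending) > 0 {
				fmt.Printf("flushing %d items: %v\n", len(pending), pending)
				pending = nil
			}
		case <-done:
			return
		}
	}
}

func main() {
	in := make(chan int)
	done := make(chan struct{})
	go batch(in, 50*time.Millisecond, done)
	for i := 1; i <= 5; i++ {
		in <- i
	}
	time.Sleep(120 * time.Millisecond) // allow at least one flush
	close(done)
}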

View File

@@ -3,12 +3,14 @@ package beaconclient
import (
"context"
"testing"
"time"
ptypes "github.com/gogo/protobuf/types"
"github.com/golang/mock/gomock"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/mock"
testDB "github.com/prysmaticlabs/prysm/slasher/db/testing"
)
func TestService_ReceiveBlocks(t *testing.T) {
@@ -42,8 +44,10 @@ func TestService_ReceiveAttestations(t *testing.T) {
client := mock.NewMockBeaconChainClient(ctrl)
bs := Service{
beaconClient: client,
blockFeed: new(event.Feed),
beaconClient: client,
blockFeed: new(event.Feed),
receivedAttestationsBuffer: make(chan *ethpb.IndexedAttestation, 1),
collectedAttestationsBuffer: make(chan []*ethpb.IndexedAttestation, 1),
}
stream := mock.NewMockBeaconChain_StreamIndexedAttestationsClient(ctrl)
ctx, cancel := context.WithCancel(context.Background())
@@ -65,3 +69,52 @@ func TestService_ReceiveAttestations(t *testing.T) {
})
bs.receiveAttestations(ctx)
}
func TestService_ReceiveAttestations_Batched(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconChainClient(ctrl)
bs := Service{
beaconClient: client,
blockFeed: new(event.Feed),
slasherDB: testDB.SetupSlasherDB(t, false),
attestationFeed: new(event.Feed),
receivedAttestationsBuffer: make(chan *ethpb.IndexedAttestation, 1),
collectedAttestationsBuffer: make(chan []*ethpb.IndexedAttestation, 1),
}
stream := mock.NewMockBeaconChain_StreamIndexedAttestationsClient(ctrl)
ctx, cancel := context.WithCancel(context.Background())
att := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Slot: 5,
Target: &ethpb.Checkpoint{
Epoch: 5,
},
},
Signature: []byte{1, 2},
}
client.EXPECT().StreamIndexedAttestations(
gomock.Any(),
&ptypes.Empty{},
).Return(stream, nil)
stream.EXPECT().Context().Return(ctx).AnyTimes()
stream.EXPECT().Recv().Return(
att,
nil,
).Do(func() {
time.Sleep(2 * time.Second)
cancel()
})
go bs.receiveAttestations(ctx)
bs.receivedAttestationsBuffer <- att
att.Data.Target.Epoch = 6
bs.receivedAttestationsBuffer <- att
att.Data.Target.Epoch = 8
bs.receivedAttestationsBuffer <- att
atts := <-bs.collectedAttestationsBuffer
if len(atts) != 3 {
t.Fatalf("Expected %d received attestations to be batched", len(atts))
}
}

View File

@@ -39,21 +39,23 @@ type ChainFetcher interface {
// Service struct for the beaconclient service of the slasher.
type Service struct {
ctx context.Context
cancel context.CancelFunc
cert string
conn *grpc.ClientConn
provider string
beaconClient ethpb.BeaconChainClient
slasherDB db.Database
nodeClient ethpb.NodeClient
clientFeed *event.Feed
blockFeed *event.Feed
attestationFeed *event.Feed
proposerSlashingsChan chan *ethpb.ProposerSlashing
attesterSlashingsChan chan *ethpb.AttesterSlashing
attesterSlashingsFeed *event.Feed
proposerSlashingsFeed *event.Feed
ctx context.Context
cancel context.CancelFunc
cert string
conn *grpc.ClientConn
provider string
beaconClient ethpb.BeaconChainClient
slasherDB db.Database
nodeClient ethpb.NodeClient
clientFeed *event.Feed
blockFeed *event.Feed
attestationFeed *event.Feed
proposerSlashingsChan chan *ethpb.ProposerSlashing
attesterSlashingsChan chan *ethpb.AttesterSlashing
attesterSlashingsFeed *event.Feed
proposerSlashingsFeed *event.Feed
receivedAttestationsBuffer chan *ethpb.IndexedAttestation
collectedAttestationsBuffer chan []*ethpb.IndexedAttestation
}
// Config options for the beaconclient service.
@@ -69,18 +71,20 @@ type Config struct {
func NewBeaconClientService(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
return &Service{
cert: cfg.BeaconCert,
ctx: ctx,
cancel: cancel,
provider: cfg.BeaconProvider,
blockFeed: new(event.Feed),
clientFeed: new(event.Feed),
attestationFeed: new(event.Feed),
slasherDB: cfg.SlasherDB,
proposerSlashingsChan: make(chan *ethpb.ProposerSlashing, 1),
attesterSlashingsChan: make(chan *ethpb.AttesterSlashing, 1),
attesterSlashingsFeed: cfg.AttesterSlashingsFeed,
proposerSlashingsFeed: cfg.ProposerSlashingsFeed,
cert: cfg.BeaconCert,
ctx: ctx,
cancel: cancel,
provider: cfg.BeaconProvider,
blockFeed: new(event.Feed),
clientFeed: new(event.Feed),
attestationFeed: new(event.Feed),
slasherDB: cfg.SlasherDB,
proposerSlashingsChan: make(chan *ethpb.ProposerSlashing, 1),
attesterSlashingsChan: make(chan *ethpb.AttesterSlashing, 1),
attesterSlashingsFeed: cfg.AttesterSlashingsFeed,
proposerSlashingsFeed: cfg.ProposerSlashingsFeed,
receivedAttestationsBuffer: make(chan *ethpb.IndexedAttestation, 1),
collectedAttestationsBuffer: make(chan []*ethpb.IndexedAttestation, 1),
}
}

slasher/cache/BUILD.bazel (new vendored file, 14 lines)
View File

@@ -0,0 +1,14 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["span_cache.go"],
importpath = "github.com/prysmaticlabs/prysm/slasher/cache",
visibility = ["//slasher:__subpackages__"],
deps = [
"//slasher/detection/attestations/types:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
],
)

slasher/cache/span_cache.go (new vendored file, 72 lines)
View File

@@ -0,0 +1,72 @@
package cache
import (
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/slasher/detection/attestations/types"
)
var (
// epochSpansCacheSize defines the max number of epoch spans the cache can hold.
epochSpansCacheSize = 256
// Metrics for the span cache.
epochSpansCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "epoch_spans_cache_hit",
Help: "The total number of cache hits on the epoch spans cache.",
})
epochSpansCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "epoch_spans_cache_miss",
Help: "The total number of cache misses on the epoch spans cache.",
})
)
// EpochSpansCache is used to store the spans needed on a per-epoch basis for slashing detection.
type EpochSpansCache struct {
cache *lru.Cache
}
// NewEpochSpansCache initializes the map and underlying cache.
func NewEpochSpansCache(size int, onEvicted func(key interface{}, value interface{})) (*EpochSpansCache, error) {
if size != 0 {
epochSpansCacheSize = size
}
cache, err := lru.NewWithEvict(epochSpansCacheSize, onEvicted)
if err != nil {
return nil, err
}
return &EpochSpansCache{cache: cache}, nil
}
// Get returns an ok bool and the cached value for the requested epoch key, if any.
func (c *EpochSpansCache) Get(epoch uint64) (map[uint64]types.Span, bool) {
item, exists := c.cache.Get(epoch)
if exists && item != nil {
epochSpansCacheHit.Inc()
return item.(map[uint64]types.Span), true
}
epochSpansCacheMiss.Inc()
return make(map[uint64]types.Span), false
}
// Set the response in the cache.
func (c *EpochSpansCache) Set(epoch uint64, epochSpans map[uint64]types.Span) {
_ = c.cache.Add(epoch, epochSpans)
}
// Delete removes an epoch from the cache and returns whether it existed.
// It runs the onEvicted callback before removal.
func (c *EpochSpansCache) Delete(epoch uint64) bool {
return c.cache.Remove(epoch)
}
// Has returns true if the key exists in the cache.
func (c *EpochSpansCache) Has(epoch uint64) bool {
return c.cache.Contains(epoch)
}
// Clear removes all keys from the cache.
func (c *EpochSpansCache) Clear() {
c.cache.Purge()
}
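EpochSpansCache is a thin wrapper over hashicorp/golang-lru with an eviction callback, which is what lets persistSpanMapsOnEviction (further below in the kv package) flush evicted span maps to the DB before they are dropped. A self-contained sketch of that wiring, using plain uint16 values instead of types.Span and an illustrative callback:
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	onEvict := func(key interface{}, value interface{}) {
		// In slasher this is where the evicted span map would be written to the DB.
		fmt.Printf("evicted epoch %v with %d spans\n", key, len(value.(map[uint64]uint16)))
	}
	cache, err := lru.NewWithEvict(2, onEvict) // keep only 2 epochs in memory
	if err != nil {
		panic(err)
	}
	cache.Add(uint64(1), map[uint64]uint16{0: 10, 1: 12})
	cache.Add(uint64(2), map[uint64]uint16{0: 3})
	cache.Add(uint64(3), map[uint64]uint16{5: 7}) // evicts epoch 1, firing onEvict
	if v, ok := cache.Get(uint64(2)); ok {
		fmt.Println("epoch 2 still cached:", v)
	}
}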

View File

@@ -62,7 +62,7 @@ type WriteAccessDatabase interface {
// MinMaxSpan related methods.
SaveEpochSpansMap(ctx context.Context, epoch uint64, spanMap map[uint64]detectionTypes.Span) error
SaveValidatorEpochSpans(ctx context.Context, validatorIdx uint64, epoch uint64, spans detectionTypes.Span) error
SaveValidatorEpochSpan(ctx context.Context, validatorIdx uint64, epoch uint64, spans detectionTypes.Span) error
SaveCachedSpansMaps(ctx context.Context) error
DeleteEpochSpans(ctx context.Context, validatorIdx uint64) error
DeleteValidatorSpanByEpoch(ctx context.Context, validatorIdx uint64, epoch uint64) error

View File

@@ -19,12 +19,14 @@ go_library(
"//shared/bytesutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//slasher/cache:go_default_library",
"//slasher/db/types:go_default_library",
"//slasher/detection/attestations/types:go_default_library",
"@com_github_boltdb_bolt//:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",

View File

@@ -38,7 +38,7 @@ func unmarshalAttSlashings(encoded [][]byte) ([]*ethpb.AttesterSlashing, error)
// AttesterSlashings accepts a status and returns all slashings with this status.
// It returns an empty []*ethpb.AttesterSlashing if no slashing has been found with this status.
func (db *Store) AttesterSlashings(ctx context.Context, status types.SlashingStatus) ([]*ethpb.AttesterSlashing, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.AttesterSlashings")
ctx, span := trace.StartSpan(ctx, "slasherDB.AttesterSlashings")
defer span.End()
encoded := make([][]byte, 0)
err := db.view(func(tx *bolt.Tx) error {
@@ -59,7 +59,7 @@ func (db *Store) AttesterSlashings(ctx context.Context, status types.SlashingSta
// DeleteAttesterSlashing deletes an attester slashing proof from db.
func (db *Store) DeleteAttesterSlashing(ctx context.Context, attesterSlashing *ethpb.AttesterSlashing) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.DeleteAttesterSlashing")
ctx, span := trace.StartSpan(ctx, "slasherDB.DeleteAttesterSlashing")
defer span.End()
root, err := hashutil.HashProto(attesterSlashing)
if err != nil {
@@ -80,7 +80,7 @@ func (db *Store) DeleteAttesterSlashing(ctx context.Context, attesterSlashing *e
// HasAttesterSlashing returns true and slashing status if a slashing is found in the db.
func (db *Store) HasAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) (bool, types.SlashingStatus, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.HasAttesterSlashing")
ctx, span := trace.StartSpan(ctx, "slasherDB.HasAttesterSlashing")
defer span.End()
var status types.SlashingStatus
var found bool
@@ -103,7 +103,7 @@ func (db *Store) HasAttesterSlashing(ctx context.Context, slashing *ethpb.Attest
// SaveAttesterSlashing accepts a slashing proof and its status and writes it to disk.
func (db *Store) SaveAttesterSlashing(ctx context.Context, status types.SlashingStatus, slashing *ethpb.AttesterSlashing) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveAttesterSlashing")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveAttesterSlashing")
defer span.End()
enc, err := proto.Marshal(slashing)
if err != nil {
@@ -120,7 +120,7 @@ func (db *Store) SaveAttesterSlashing(ctx context.Context, status types.Slashing
// SaveAttesterSlashings accepts a slice of slashing proof and its status and writes it to disk.
func (db *Store) SaveAttesterSlashings(ctx context.Context, status types.SlashingStatus, slashings []*ethpb.AttesterSlashing) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveAttesterSlashings")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveAttesterSlashings")
defer span.End()
enc := make([][]byte, len(slashings))
key := make([][]byte, len(slashings))
@@ -148,7 +148,7 @@ func (db *Store) SaveAttesterSlashings(ctx context.Context, status types.Slashin
// GetLatestEpochDetected returns the latest detected epoch from db.
func (db *Store) GetLatestEpochDetected(ctx context.Context) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.GetLatestEpochDetected")
ctx, span := trace.StartSpan(ctx, "slasherDB.GetLatestEpochDetected")
defer span.End()
var epoch uint64
err := db.view(func(tx *bolt.Tx) error {
@@ -166,7 +166,7 @@ func (db *Store) GetLatestEpochDetected(ctx context.Context) (uint64, error) {
// SetLatestEpochDetected sets the latest slashing detected epoch in db.
func (db *Store) SetLatestEpochDetected(ctx context.Context, epoch uint64) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SetLatestEpochDetected")
ctx, span := trace.StartSpan(ctx, "slasherDB.SetLatestEpochDetected")
defer span.End()
return db.update(func(tx *bolt.Tx) error {
b := tx.Bucket(slashingBucket)

View File

@@ -14,7 +14,7 @@ import (
)
func unmarshalBlockHeader(ctx context.Context, enc []byte) (*ethpb.SignedBeaconBlockHeader, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.unmarshalBlockHeader")
ctx, span := trace.StartSpan(ctx, "slasherDB.unmarshalBlockHeader")
defer span.End()
protoBlockHeader := &ethpb.SignedBeaconBlockHeader{}
err := proto.Unmarshal(enc, protoBlockHeader)
@@ -27,7 +27,7 @@ func unmarshalBlockHeader(ctx context.Context, enc []byte) (*ethpb.SignedBeaconB
// BlockHeaders accepts an epoch and validator id and returns the corresponding block header array.
// Returns nil if the block header for those values does not exist.
func (db *Store) BlockHeaders(ctx context.Context, epoch uint64, validatorID uint64) ([]*ethpb.SignedBeaconBlockHeader, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.BlockHeaders")
ctx, span := trace.StartSpan(ctx, "slasherDB.BlockHeaders")
defer span.End()
var blockHeaders []*ethpb.SignedBeaconBlockHeader
err := db.view(func(tx *bolt.Tx) error {
@@ -47,7 +47,7 @@ func (db *Store) BlockHeaders(ctx context.Context, epoch uint64, validatorID uin
// HasBlockHeader accepts an epoch and validator id and returns true if the block header exists.
func (db *Store) HasBlockHeader(ctx context.Context, epoch uint64, validatorID uint64) bool {
ctx, span := trace.StartSpan(ctx, "SlasherDB.HasBlockHeader")
ctx, span := trace.StartSpan(ctx, "slasherDB.HasBlockHeader")
defer span.End()
prefix := encodeEpochValidatorID(epoch, validatorID)
var hasBlockHeader bool
@@ -67,7 +67,7 @@ func (db *Store) HasBlockHeader(ctx context.Context, epoch uint64, validatorID u
// SaveBlockHeader accepts a block header and writes it to disk.
func (db *Store) SaveBlockHeader(ctx context.Context, epoch uint64, validatorID uint64, blockHeader *ethpb.SignedBeaconBlockHeader) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveBlockHeader")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveBlockHeader")
defer span.End()
key := encodeEpochValidatorIDSig(epoch, validatorID, blockHeader.Signature)
enc, err := proto.Marshal(blockHeader)
@@ -96,7 +96,7 @@ func (db *Store) SaveBlockHeader(ctx context.Context, epoch uint64, validatorID
// DeleteBlockHeader deletes a block header using the epoch and validator id.
func (db *Store) DeleteBlockHeader(ctx context.Context, epoch uint64, validatorID uint64, blockHeader *ethpb.SignedBeaconBlockHeader) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.DeleteBlockHeader")
ctx, span := trace.StartSpan(ctx, "slasherDB.DeleteBlockHeader")
defer span.End()
key := encodeEpochValidatorIDSig(epoch, validatorID, blockHeader.Signature)
return db.update(func(tx *bolt.Tx) error {
@@ -110,7 +110,7 @@ func (db *Store) DeleteBlockHeader(ctx context.Context, epoch uint64, validatorI
// PruneBlockHistory leaves only records younger than the history size.
func (db *Store) PruneBlockHistory(ctx context.Context, currentEpoch uint64, pruningEpochAge uint64) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.pruneBlockHistory")
ctx, span := trace.StartSpan(ctx, "slasherDB.pruneBlockHistory")
defer span.End()
pruneTill := int64(currentEpoch) - int64(pruningEpochAge)
if pruneTill <= 0 {

View File

@@ -12,7 +12,7 @@ import (
// ChainHead retrieves the persisted chain head from the database accordingly.
func (db *Store) ChainHead(ctx context.Context) (*ethpb.ChainHead, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.ChainHead")
ctx, span := trace.StartSpan(ctx, "slasherDB.ChainHead")
defer span.End()
var res *ethpb.ChainHead
if err := db.update(func(tx *bolt.Tx) error {
@@ -31,7 +31,7 @@ func (db *Store) ChainHead(ctx context.Context) (*ethpb.ChainHead, error) {
// SaveChainHead accepts a beacon chain head object and persists it to the DB.
func (db *Store) SaveChainHead(ctx context.Context, head *ethpb.ChainHead) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveChainHead")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveChainHead")
defer span.End()
enc, err := proto.Marshal(head)
if err != nil {

View File

@@ -13,7 +13,7 @@ import (
)
func unmarshalIndexedAttestation(ctx context.Context, enc []byte) (*ethpb.IndexedAttestation, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.unmarshalIndexedAttestation")
ctx, span := trace.StartSpan(ctx, "slasherDB.unmarshalIndexedAttestation")
defer span.End()
protoIdxAtt := &ethpb.IndexedAttestation{}
err := proto.Unmarshal(enc, protoIdxAtt)
@@ -27,7 +27,7 @@ func unmarshalIndexedAttestation(ctx context.Context, enc []byte) (*ethpb.Indexe
// indexed attestations.
// Returns nil if the indexed attestation does not exist with that target epoch.
func (db *Store) IndexedAttestationsForTarget(ctx context.Context, targetEpoch uint64) ([]*ethpb.IndexedAttestation, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.IndexedAttestationsForTarget")
ctx, span := trace.StartSpan(ctx, "slasherDB.IndexedAttestationsForTarget")
defer span.End()
var idxAtts []*ethpb.IndexedAttestation
key := bytesutil.Bytes8(targetEpoch)
@@ -48,7 +48,7 @@ func (db *Store) IndexedAttestationsForTarget(ctx context.Context, targetEpoch u
// IndexedAttestationsWithPrefix accepts a target epoch and signature bytes to find all attestations with the requested prefix.
// Returns nil if the indexed attestation does not exist with that target epoch.
func (db *Store) IndexedAttestationsWithPrefix(ctx context.Context, targetEpoch uint64, sigBytes []byte) ([]*ethpb.IndexedAttestation, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.IndexedAttestationsWithPrefix")
ctx, span := trace.StartSpan(ctx, "slasherDB.IndexedAttestationsWithPrefix")
defer span.End()
var idxAtts []*ethpb.IndexedAttestation
key := encodeEpochSig(targetEpoch, sigBytes[:])
@@ -68,7 +68,7 @@ func (db *Store) IndexedAttestationsWithPrefix(ctx context.Context, targetEpoch
// HasIndexedAttestation accepts an attestation and returns true if it exists in the DB.
func (db *Store) HasIndexedAttestation(ctx context.Context, att *ethpb.IndexedAttestation) (bool, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.HasIndexedAttestation")
ctx, span := trace.StartSpan(ctx, "slasherDB.HasIndexedAttestation")
defer span.End()
key := encodeEpochSig(att.Data.Target.Epoch, att.Signature)
var hasAttestation bool
@@ -88,7 +88,7 @@ func (db *Store) HasIndexedAttestation(ctx context.Context, att *ethpb.IndexedAt
// SaveIndexedAttestation accepts an indexed attestation and writes it to the DB.
func (db *Store) SaveIndexedAttestation(ctx context.Context, idxAttestation *ethpb.IndexedAttestation) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveIndexedAttestation")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveIndexedAttestation")
defer span.End()
key := encodeEpochSig(idxAttestation.Data.Target.Epoch, idxAttestation.Signature)
enc, err := proto.Marshal(idxAttestation)
@@ -113,7 +113,7 @@ func (db *Store) SaveIndexedAttestation(ctx context.Context, idxAttestation *eth
// SaveIndexedAttestations accepts multiple indexed attestations and writes them to the DB.
func (db *Store) SaveIndexedAttestations(ctx context.Context, idxAttestations []*ethpb.IndexedAttestation) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveIndexedAttestations")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveIndexedAttestations")
defer span.End()
keys := make([][]byte, len(idxAttestations))
marshaledAtts := make([][]byte, len(idxAttestations))
@@ -145,7 +145,7 @@ func (db *Store) SaveIndexedAttestations(ctx context.Context, idxAttestations []
// DeleteIndexedAttestation deletes an indexed attestation using the slot and its root as keys in their respective buckets.
func (db *Store) DeleteIndexedAttestation(ctx context.Context, idxAttestation *ethpb.IndexedAttestation) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.DeleteIndexedAttestation")
ctx, span := trace.StartSpan(ctx, "slasherDB.DeleteIndexedAttestation")
defer span.End()
key := encodeEpochSig(idxAttestation.Data.Target.Epoch, idxAttestation.Signature)
return db.update(func(tx *bolt.Tx) error {
@@ -163,7 +163,7 @@ func (db *Store) DeleteIndexedAttestation(ctx context.Context, idxAttestation *e
// PruneAttHistory removes all attestations from the DB older than the pruning epoch age.
func (db *Store) PruneAttHistory(ctx context.Context, currentEpoch uint64, pruningEpochAge uint64) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.pruneAttHistory")
ctx, span := trace.StartSpan(ctx, "slasherDB.pruneAttHistory")
defer span.End()
pruneFromEpoch := int64(currentEpoch) - int64(pruningEpochAge)
if pruneFromEpoch <= 0 {
@@ -186,7 +186,7 @@ func (db *Store) PruneAttHistory(ctx context.Context, currentEpoch uint64, pruni
// LatestIndexedAttestationsTargetEpoch returns the latest target epoch in the db.
// It returns 0 if there are no indexed attestations in the db.
func (db *Store) LatestIndexedAttestationsTargetEpoch(ctx context.Context) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.LatestIndexedAttestationsTargetEpoch")
ctx, span := trace.StartSpan(ctx, "slasherDB.LatestIndexedAttestationsTargetEpoch")
defer span.End()
var lt uint64
err := db.view(func(tx *bolt.Tx) error {

View File

@@ -6,8 +6,8 @@ import (
"time"
"github.com/boltdb/bolt"
"github.com/dgraph-io/ristretto"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/slasher/cache"
)
var databaseFileName = "slasher.db"
@@ -17,7 +17,7 @@ var databaseFileName = "slasher.db"
type Store struct {
db *bolt.DB
databasePath string
spanCache *ristretto.Cache
spanCache *cache.EpochSpansCache
spanCacheEnabled bool
}
@@ -25,8 +25,7 @@ type Store struct {
type Config struct {
// SpanCacheEnabled uses span cache to detect surround slashing.
SpanCacheEnabled bool
CacheItems int64
MaxCacheSize int64
SpanCacheSize int
}
// Close closes the underlying boltdb database.
@@ -34,7 +33,7 @@ func (db *Store) Close() error {
return db.db.Close()
}
// ClearSpanCache clears the MinMaxSpans cache.
// ClearSpanCache clears the spans cache.
func (db *Store) ClearSpanCache() {
db.spanCache.Clear()
}
@@ -86,21 +85,10 @@ func NewKVStore(dirPath string, cfg *Config) (*Store, error) {
}
return nil, err
}
if cfg.CacheItems == 0 {
cfg.CacheItems = 10 * cachedSpanerEpochs
}
if cfg.MaxCacheSize == 0 {
cfg.MaxCacheSize = cachedSpanerEpochs
}
kv := &Store{db: boltDB, databasePath: datafile, spanCacheEnabled: cfg.SpanCacheEnabled}
spanCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: cfg.CacheItems, // number of keys to track frequency of (10M).
MaxCost: cfg.MaxCacheSize, // maximum cost of cache.
BufferItems: 64, // number of keys per Get buffer.
OnEvict: persistSpanMapsOnEviction(kv),
})
spanCache, err := cache.NewEpochSpansCache(cfg.SpanCacheSize, persistSpanMapsOnEviction(kv))
if err != nil {
return nil, errors.Wrap(err, "failed to start span cache")
return nil, errors.Wrap(err, "could not create new cache")
}
kv.spanCache = spanCache
@@ -120,6 +108,7 @@ func NewKVStore(dirPath string, cfg *Config) (*Store, error) {
}); err != nil {
return nil, err
}
return kv, err
}

View File

@@ -23,7 +23,7 @@ func setupDB(t testing.TB, ctx *cli.Context) *Store {
if err := os.RemoveAll(p); err != nil {
t.Fatalf("Failed to remove directory: %v", err)
}
cfg := &Config{CacheItems: 0, MaxCacheSize: 0, SpanCacheEnabled: ctx.GlobalBool(flags.UseSpanCacheFlag.Name)}
cfg := &Config{SpanCacheEnabled: ctx.GlobalBool(flags.UseSpanCacheFlag.Name)}
db, err := NewKVStore(p, cfg)
if err != nil {
t.Fatalf("Failed to instantiate DB: %v", err)
@@ -31,7 +31,7 @@ func setupDB(t testing.TB, ctx *cli.Context) *Store {
return db
}
func setupDBDiffCacheSize(t testing.TB, cacheItems int64, maxCacheSize int64) *Store {
func setupDBDiffCacheSize(t testing.TB, cacheSize int) *Store {
randPath, err := rand.Int(rand.Reader, big.NewInt(1000000))
if err != nil {
t.Fatalf("Could not generate random file path: %v", err)
@@ -40,7 +40,7 @@ func setupDBDiffCacheSize(t testing.TB, cacheItems int64, maxCacheSize int64) *S
if err := os.RemoveAll(p); err != nil {
t.Fatalf("Failed to remove directory: %v", err)
}
cfg := &Config{CacheItems: cacheItems, MaxCacheSize: maxCacheSize, SpanCacheEnabled: true}
cfg := &Config{SpanCacheEnabled: true, SpanCacheSize: cacheSize}
newDB, err := NewKVStore(p, cfg)
if err != nil {
t.Fatalf("Failed to instantiate DB: %v", err)

View File

@@ -14,7 +14,7 @@ import (
)
func unmarshalProposerSlashing(ctx context.Context, enc []byte) (*ethpb.ProposerSlashing, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.unmarshalProposerSlashing")
ctx, span := trace.StartSpan(ctx, "slasherDB.unmarshalProposerSlashing")
defer span.End()
protoSlashing := &ethpb.ProposerSlashing{}
if err := proto.Unmarshal(enc, protoSlashing); err != nil {
@@ -24,7 +24,7 @@ func unmarshalProposerSlashing(ctx context.Context, enc []byte) (*ethpb.Proposer
}
func unmarshalProposerSlashingArray(ctx context.Context, encoded [][]byte) ([]*ethpb.ProposerSlashing, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.unmarshalProposerSlashingArray")
ctx, span := trace.StartSpan(ctx, "slasherDB.unmarshalProposerSlashingArray")
defer span.End()
proposerSlashings := make([]*ethpb.ProposerSlashing, len(encoded))
for i, enc := range encoded {
@@ -39,7 +39,7 @@ func unmarshalProposerSlashingArray(ctx context.Context, encoded [][]byte) ([]*e
// ProposalSlashingsByStatus returns all the proposal slashing proofs with a certain status.
func (db *Store) ProposalSlashingsByStatus(ctx context.Context, status types.SlashingStatus) ([]*ethpb.ProposerSlashing, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.ProposalSlashingsByStatus")
ctx, span := trace.StartSpan(ctx, "slasherDB.ProposalSlashingsByStatus")
defer span.End()
encoded := make([][]byte, 0)
err := db.view(func(tx *bolt.Tx) error {
@@ -60,7 +60,7 @@ func (db *Store) ProposalSlashingsByStatus(ctx context.Context, status types.Sla
// DeleteProposerSlashing deletes a proposer slashing proof.
func (db *Store) DeleteProposerSlashing(ctx context.Context, slashing *ethpb.ProposerSlashing) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.DeleteProposerSlashing")
ctx, span := trace.StartSpan(ctx, "slasherDB.DeleteProposerSlashing")
defer span.End()
root, err := hashutil.HashProto(slashing)
if err != nil {
@@ -79,7 +79,7 @@ func (db *Store) DeleteProposerSlashing(ctx context.Context, slashing *ethpb.Pro
// HasProposerSlashing returns the slashing key if it is found in db.
func (db *Store) HasProposerSlashing(ctx context.Context, slashing *ethpb.ProposerSlashing) (bool, types.SlashingStatus, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.HasProposerSlashing")
ctx, span := trace.StartSpan(ctx, "slasherDB.HasProposerSlashing")
defer span.End()
var status types.SlashingStatus
var found bool
@@ -103,7 +103,7 @@ func (db *Store) HasProposerSlashing(ctx context.Context, slashing *ethpb.Propos
// SaveProposerSlashing accepts a proposer slashing and its status header and writes it to disk.
func (db *Store) SaveProposerSlashing(ctx context.Context, status types.SlashingStatus, slashing *ethpb.ProposerSlashing) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveProposerSlashing")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveProposerSlashing")
defer span.End()
enc, err := proto.Marshal(slashing)
if err != nil {
@@ -120,7 +120,7 @@ func (db *Store) SaveProposerSlashing(ctx context.Context, status types.Slashing
// SaveProposerSlashings accepts a slice of slashing proof and its status and writes it to disk.
func (db *Store) SaveProposerSlashings(ctx context.Context, status types.SlashingStatus, slashings []*ethpb.ProposerSlashing) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveProposerSlashings")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveProposerSlashings")
defer span.End()
encSlashings := make([][]byte, len(slashings))
keys := make([][]byte, len(slashings))

View File

@@ -2,64 +2,78 @@ package kv
import (
"context"
"fmt"
"reflect"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/slasher/detection/attestations/types"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// Tracks the highest observed epoch from the validator span maps
// Tracks the highest and lowest observed epochs from the validator span maps
// used for attester slashing detection. These values are purely used
// as cache keys and only need to be maintained in memory.
var highestObservedEpoch uint64
var lowestObservedEpoch = params.BeaconConfig().FarFutureEpoch
func cacheTypeMismatchError(value interface{}) error {
return fmt.Errorf("cache contains a value of type: %v "+
"while expected to contain only values of type : map[uint64]types.Span", reflect.TypeOf(value))
}
var (
slasherLowestObservedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "slasher_lowest_observed_epoch",
Help: "The lowest epoch number seen by slasher",
})
slasherHighestObservedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "slasher_highest_observed_epoch",
Help: "The highest epoch number seen by slasher",
})
epochSpansCacheEvictions = promauto.NewCounter(prometheus.CounterOpts{
Name: "epoch_spans_cache_evictions_total",
Help: "The number of cache evictions seen by slasher",
})
)
// This function defines a function which triggers upon a span map being
// evicted from the cache. It allows us to persist the span map by the epoch value
// to the database itself in the validatorsMinMaxSpanBucket.
func persistSpanMapsOnEviction(db *Store) func(uint64, uint64, interface{}, int64) {
func persistSpanMapsOnEviction(db *Store) func(key interface{}, value interface{}) {
// We use a closure here so we can access the database itself
// on the eviction of a span map from the cache. The function has the signature
// required by the ristretto cache OnEvict method.
// See https://godoc.org/github.com/dgraph-io/ristretto#Config.
return func(epoch uint64, _ uint64, value interface{}, cost int64) {
log.Tracef("evicting span map for epoch: %d", epoch)
return func(key interface{}, value interface{}) {
log.Tracef("Evicting span map for epoch: %d", key)
err := db.update(func(tx *bolt.Tx) error {
epoch, keyOK := key.(uint64)
spanMap, valueOK := value.(map[uint64]types.Span)
if !keyOK || !valueOK {
return errors.New("could not cast key and value into needed types")
}
bucket := tx.Bucket(validatorsMinMaxSpanBucket)
epochBucket, err := bucket.CreateBucketIfNotExists(bytesutil.Bytes8(epoch))
if err != nil {
return err
}
spanMap, ok := value.(map[uint64]types.Span)
if !ok {
return cacheTypeMismatchError(value)
}
for k, v := range spanMap {
err = epochBucket.Put(bytesutil.Bytes8(k), marshalSpan(v))
if err != nil {
if err = epochBucket.Put(bytesutil.Bytes8(k), marshalSpan(v)); err != nil {
return err
}
}
epochSpansCacheEvictions.Inc()
return nil
})
if err != nil {
log.Errorf("failed to save span map to db on cache eviction: %v", err)
log.Errorf("Failed to save span map to db on cache eviction: %v", err)
}
}
}
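The new eviction hook takes a plain (key, value) pair instead of ristretto's (key, conflict, value, cost) signature, which matches an LRU cache with an eviction callback. A minimal, self-contained sketch of the same persist-on-eviction idea, assuming a hashicorp/golang-lru cache (the cache library, size, and stand-in types below are assumptions, not taken from this diff):

package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

// spanMap stands in for the validator-index => span map that the real code
// persists into validatorsMinMaxSpanBucket when an epoch is evicted.
type spanMap map[uint64][2]uint16

func main() {
    // persistOnEvict plays the role of persistSpanMapsOnEviction's closure:
    // it receives the evicted (key, value) pair and would write it to the DB.
    persistOnEvict := func(key interface{}, value interface{}) {
        epoch, keyOK := key.(uint64)
        spans, valueOK := value.(spanMap)
        if !keyOK || !valueOK {
            fmt.Println("could not cast key and value into needed types")
            return
        }
        fmt.Printf("persisting %d spans for epoch %d\n", len(spans), epoch)
    }

    cache, err := lru.NewWithEvict(2, persistOnEvict)
    if err != nil {
        panic(err)
    }
    cache.Add(uint64(1), spanMap{0: {3, 0}})
    cache.Add(uint64(2), spanMap{0: {2, 0}})
    // Adding a third epoch exceeds the cache size of 2, evicting epoch 1 and
    // triggering persistOnEvict.
    cache.Add(uint64(3), spanMap{0: {1, 0}})
}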
// Unmarshal a span map from an encoded, flattened array.
func unmarshalSpan(ctx context.Context, enc []byte) (types.Span, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.unmarshalSpan")
ctx, span := trace.StartSpan(ctx, "slasherDB.unmarshalSpan")
defer span.End()
r := types.Span{}
if len(enc) != spannerEncodedLength {
@@ -89,19 +103,15 @@ func marshalSpan(span types.Span) []byte {
// enabled and the epoch key exists. Returns nil if the span map
// for this epoch does not exist.
func (db *Store) EpochSpansMap(ctx context.Context, epoch uint64) (map[uint64]types.Span, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.EpochSpansMap")
ctx, span := trace.StartSpan(ctx, "slasherDB.EpochSpansMap")
defer span.End()
if db.spanCacheEnabled {
v, ok := db.spanCache.Get(epoch)
spanMap := make(map[uint64]types.Span)
spanMap, ok := db.spanCache.Get(epoch)
if ok {
spanMap, ok = v.(map[uint64]types.Span)
if !ok {
return nil, cacheTypeMismatchError(v)
}
return spanMap, nil
}
}
var err error
var spanMap map[uint64]types.Span
err = db.view(func(tx *bolt.Tx) error {
@@ -134,25 +144,23 @@ func (db *Store) EpochSpansMap(ctx context.Context, epoch uint64) (map[uint64]ty
// when caching is enabled.
// Returns error if the spans for this validator index and epoch does not exist.
func (db *Store) EpochSpanByValidatorIndex(ctx context.Context, validatorIdx uint64, epoch uint64) (types.Span, error) {
ctx, span := trace.StartSpan(ctx, "SlasherDB.EpochSpanByValidatorIndex")
ctx, span := trace.StartSpan(ctx, "slasherDB.EpochSpanByValidatorIndex")
defer span.End()
var err error
if db.spanCacheEnabled {
v, ok := db.spanCache.Get(epoch)
spanMap := make(map[uint64]types.Span)
if ok {
spanMap, ok = v.(map[uint64]types.Span)
if !ok {
return types.Span{}, cacheTypeMismatchError(v)
}
spans, ok := spanMap[validatorIdx]
if ok {
return spans, nil
}
setObservedEpochs(epoch)
spanMap, err := db.findOrLoadEpochInCache(ctx, epoch)
if err != nil {
return types.Span{}, err
}
spans, ok := spanMap[validatorIdx]
if ok {
return spans, nil
}
return types.Span{}, nil
}
var spans types.Span
err = db.view(func(tx *bolt.Tx) error {
err := db.view(func(tx *bolt.Tx) error {
b := tx.Bucket(validatorsMinMaxSpanBucket)
epochBucket := b.Bucket(bytesutil.Bytes8(epoch))
if epochBucket == nil {
@@ -173,38 +181,29 @@ func (db *Store) EpochSpanByValidatorIndex(ctx context.Context, validatorIdx uin
return spans, err
}
// SaveValidatorEpochSpans accepts validator index epoch and spans returns.
// SaveValidatorEpochSpan accepts a validator index, an epoch, and a span to save.
// It reads the epoch spans from the cache, updates them, and saves them back to the cache
// if caching is enabled.
// Returns an error if the spans for this validator index and epoch do not exist.
func (db *Store) SaveValidatorEpochSpans(
func (db *Store) SaveValidatorEpochSpan(
ctx context.Context,
validatorIdx uint64,
epoch uint64,
spans types.Span,
span types.Span,
) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveValidatorEpochSpans")
defer span.End()
defer span.End()
ctx, traceSpan := trace.StartSpan(ctx, "slasherDB.SaveValidatorEpochSpan")
defer traceSpan.End()
if db.spanCacheEnabled {
if epoch > highestObservedEpoch {
highestObservedEpoch = epoch
}
v, ok := db.spanCache.Get(epoch)
spanMap := make(map[uint64]types.Span)
if ok {
spanMap, ok = v.(map[uint64]types.Span)
if !ok {
return cacheTypeMismatchError(v)
}
}
spanMap[validatorIdx] = spans
saved := db.spanCache.Set(epoch, spanMap, 1)
if !saved {
return fmt.Errorf("failed to save span map to cache")
setObservedEpochs(epoch)
spanMap, err := db.findOrLoadEpochInCache(ctx, epoch)
if err != nil {
return err
}
spanMap[validatorIdx] = span
db.spanCache.Set(epoch, spanMap)
return nil
}
return db.update(func(tx *bolt.Tx) error {
b := tx.Bucket(validatorsMinMaxSpanBucket)
epochBucket, err := b.CreateBucketIfNotExists(bytesutil.Bytes8(epoch))
@@ -212,27 +211,23 @@ func (db *Store) SaveValidatorEpochSpans(
return err
}
key := bytesutil.Bytes8(validatorIdx)
value := marshalSpan(spans)
value := marshalSpan(span)
return epochBucket.Put(key, value)
})
}
// SaveEpochSpansMap accepts an epoch and a span map of validator index => span and writes it to disk.
// saves the spans to cache if caching is enabled. The key in the cache is the highest
// epoch seen by slasher and the value is the span map itself.
// saves the spans to cache if caching is enabled. The key in the cache is the
// epoch and the value is the span map itself.
func (db *Store) SaveEpochSpansMap(ctx context.Context, epoch uint64, spanMap map[uint64]types.Span) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveEpochSpansMap")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveEpochSpansMap")
defer span.End()
if db.spanCacheEnabled {
if epoch > highestObservedEpoch {
highestObservedEpoch = epoch
}
saved := db.spanCache.Set(epoch, spanMap, 1)
if !saved {
return fmt.Errorf("failed to save span map to cache")
}
setObservedEpochs(epoch)
db.spanCache.Set(epoch, spanMap)
return nil
}
return db.update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(validatorsMinMaxSpanBucket)
valBucket, err := bucket.CreateBucketIfNotExists(bytesutil.Bytes8(epoch))
@@ -256,38 +251,34 @@ func (db *Store) enableSpanCache(enable bool) {
// SaveCachedSpansMaps saves all span maps that are currently
// in memory into the DB. If no span maps are in the DB or the cache is disabled, it returns nil.
func (db *Store) SaveCachedSpansMaps(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.SaveCachedSpansMaps")
ctx, span := trace.StartSpan(ctx, "slasherDB.SaveCachedSpansMaps")
defer span.End()
if db.spanCacheEnabled {
db.enableSpanCache(false)
defer db.enableSpanCache(true)
for epoch := uint64(0); epoch <= highestObservedEpoch; epoch++ {
v, ok := db.spanCache.Get(epoch)
for epoch := lowestObservedEpoch; epoch <= highestObservedEpoch; epoch++ {
spanMap, ok := db.spanCache.Get(epoch)
if ok {
spanMap, ok := v.(map[uint64]types.Span)
if !ok {
return cacheTypeMismatchError(v)
}
if err := db.SaveEpochSpansMap(ctx, epoch, spanMap); err != nil {
return errors.Wrap(err, "failed to save span maps from cache")
}
}
}
log.Debugf("Epochs %d to %d have been saved", lowestObservedEpoch, highestObservedEpoch)
// Reset the observed epochs after saving to the DB.
lowestObservedEpoch = params.BeaconConfig().FarFutureEpoch
highestObservedEpoch = 0
}
return nil
}
// DeleteEpochSpans deletes an epoch's validator span map, using the epoch index as the bucket key.
func (db *Store) DeleteEpochSpans(ctx context.Context, epoch uint64) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.DeleteEpochSpans")
ctx, span := trace.StartSpan(ctx, "slasherDB.DeleteEpochSpans")
defer span.End()
if db.spanCacheEnabled {
_, ok := db.spanCache.Get(epoch)
if ok {
db.spanCache.Del(epoch)
return nil
}
_ = db.spanCache.Delete(epoch)
return nil
}
return db.update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(validatorsMinMaxSpanBucket)
@@ -300,24 +291,17 @@ func (db *Store) DeleteEpochSpans(ctx context.Context, epoch uint64) error {
// deletes spans from cache if caching is enabled.
// using a validator index as bucket key.
func (db *Store) DeleteValidatorSpanByEpoch(ctx context.Context, validatorIdx uint64, epoch uint64) error {
ctx, span := trace.StartSpan(ctx, "SlasherDB.DeleteValidatorSpanByEpoch")
ctx, span := trace.StartSpan(ctx, "slasherDB.DeleteValidatorSpanByEpoch")
defer span.End()
if db.spanCacheEnabled {
v, ok := db.spanCache.Get(epoch)
spanMap := make(map[uint64][2]uint16)
spanMap, ok := db.spanCache.Get(epoch)
if ok {
spanMap, ok = v.(map[uint64][2]uint16)
if !ok {
return cacheTypeMismatchError(v)
}
delete(spanMap, validatorIdx)
db.spanCache.Set(epoch, spanMap)
return nil
}
delete(spanMap, validatorIdx)
saved := db.spanCache.Set(epoch, spanMap, 1)
if !saved {
return errors.New("failed to save span map to cache")
}
return nil
}
return db.update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(validatorsMinMaxSpanBucket)
e := bytesutil.Bytes8(epoch)
@@ -326,3 +310,34 @@ func (db *Store) DeleteValidatorSpanByEpoch(ctx context.Context, validatorIdx ui
return epochBucket.Delete(v)
})
}
// findOrLoadEpochInCache checks if the requested epoch is in the cache, and if not, loads it from the DB.
func (db *Store) findOrLoadEpochInCache(ctx context.Context, epoch uint64) (map[uint64]types.Span, error) {
ctx, span := trace.StartSpan(ctx, "slasherDB.findOrLoadEpochInCache")
defer span.End()
spanMap, epochFound := db.spanCache.Get(epoch)
if epochFound {
return spanMap, nil
}
db.enableSpanCache(false)
defer db.enableSpanCache(true)
// If the epoch we want isn't in the cache, load it in.
spanForEpoch, err := db.EpochSpansMap(ctx, epoch)
if err != nil {
return make(map[uint64]types.Span), errors.Wrap(err, "failed to get span map for epoch")
}
db.spanCache.Set(epoch, spanForEpoch)
return spanForEpoch, nil
}
func setObservedEpochs(epoch uint64) {
if epoch > highestObservedEpoch {
slasherHighestObservedEpoch.Set(float64(epoch))
highestObservedEpoch = epoch
}
if epoch < lowestObservedEpoch {
slasherLowestObservedEpoch.Set(float64(epoch))
lowestObservedEpoch = epoch
}
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"flag"
"reflect"
"strings"
"testing"
"time"
@@ -96,31 +95,6 @@ func TestStore_SaveSpans(t *testing.T) {
}
}
func TestStore_WrongTypeInCache(t *testing.T) {
app := cli.NewApp()
set := flag.NewFlagSet("test", 0)
set.Bool(flags.UseSpanCacheFlag.Name, true, "enable span map cache")
db := setupDB(t, cli.NewContext(app, set, nil))
defer teardownDB(t, db)
ctx := context.Background()
for _, tt := range spanTests {
db.spanCache.Set(tt.epoch, []byte{0, 0}, 1)
// wait for value to pass through cache buffers
time.Sleep(time.Millisecond * 10)
_, err := db.EpochSpansMap(ctx, tt.epoch)
if err == nil || !strings.Contains(err.Error(), "cache contains a value of type") {
t.Fatalf("expected error type in cache : %v", err)
}
_, err = db.EpochSpanByValidatorIndex(ctx, 1, tt.epoch)
if err == nil || !strings.Contains(err.Error(), "cache contains a value of type") {
t.Fatalf("expected error type in cache : %v", err)
}
}
}
func TestStore_SaveCachedSpans(t *testing.T) {
app := cli.NewApp()
set := flag.NewFlagSet("test", 0)
@@ -190,7 +164,7 @@ func TestStore_DeleteEpochSpans(t *testing.T) {
}
}
func TestValidatorSpanMap_DeleteWithCache(t *testing.T) {
func TestValidatorSpanMap_DeletesOnCacheSavesToDB(t *testing.T) {
app := cli.NewApp()
set := flag.NewFlagSet("test", 0)
set.Bool(flags.UseSpanCacheFlag.Name, true, "enable span map cache")
@@ -204,34 +178,36 @@ func TestValidatorSpanMap_DeleteWithCache(t *testing.T) {
t.Fatalf("Save validator span map failed: %v", err)
}
}
// wait for value to pass through cache buffers
// Wait for value to pass through cache buffers.
time.Sleep(time.Millisecond * 10)
for _, tt := range spanTests {
sm, err := db.EpochSpansMap(ctx, tt.epoch)
spanMap, err := db.EpochSpansMap(ctx, tt.epoch)
if err != nil {
t.Fatalf("Failed to get validator span map: %v", err)
}
if sm == nil || !reflect.DeepEqual(sm, tt.spanMap) {
t.Fatalf("Get should return validator span map: %v got: %v", tt.spanMap, sm)
if spanMap == nil || !reflect.DeepEqual(spanMap, tt.spanMap) {
t.Fatalf("Get should return validator span map: %v got: %v", tt.spanMap, spanMap)
}
err = db.DeleteEpochSpans(ctx, tt.epoch)
if err != nil {
if err = db.DeleteEpochSpans(ctx, tt.epoch); err != nil {
t.Fatalf("Delete validator span map error: %v", err)
}
// wait for value to pass through cache buffers
// Wait for value to pass through cache buffers.
db.enableSpanCache(false)
time.Sleep(time.Millisecond * 10)
sm, err = db.EpochSpansMap(ctx, tt.epoch)
spanMap, err = db.EpochSpansMap(ctx, tt.epoch)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(sm, map[uint64]types.Span{}) {
t.Errorf("Expected validator span map to be deleted, received: %v", sm)
db.enableSpanCache(true)
if !reflect.DeepEqual(spanMap, tt.spanMap) {
t.Errorf("Expected validator span map to be deleted, received: %v", spanMap)
}
}
}
func TestValidatorSpanMap_SaveOnEvict(t *testing.T) {
db := setupDBDiffCacheSize(t, 5, 5)
db := setupDBDiffCacheSize(t, 5)
defer teardownDB(t, db)
ctx := context.Background()
@@ -279,8 +255,7 @@ func TestValidatorSpanMap_SaveCachedSpansMaps(t *testing.T) {
}
// wait for value to pass through cache buffers
time.Sleep(time.Millisecond * 10)
err := db.SaveCachedSpansMaps(ctx)
if err != nil {
if err := db.SaveCachedSpansMaps(ctx); err != nil {
t.Errorf("Failed to save cached span maps to db: %v", err)
}
db.spanCache.Clear()

View File

@@ -23,7 +23,7 @@ func SetupSlasherDB(t testing.TB, spanCacheEnabled bool) *kv.Store {
if err := os.RemoveAll(p); err != nil {
t.Fatalf("Failed to remove directory: %v", err)
}
cfg := &kv.Config{CacheItems: 0, MaxCacheSize: 0, SpanCacheEnabled: spanCacheEnabled}
cfg := &kv.Config{SpanCacheEnabled: spanCacheEnabled}
db, err := slasherDB.NewDB(p, cfg)
if err != nil {
t.Fatalf("Failed to instantiate DB: %v", err)
@@ -41,7 +41,7 @@ func SetupSlasherDBDiffCacheSize(t testing.TB, cacheItems int64, maxCacheSize in
if err := os.RemoveAll(p); err != nil {
t.Fatalf("Failed to remove directory: %v", err)
}
cfg := &kv.Config{CacheItems: cacheItems, MaxCacheSize: maxCacheSize, SpanCacheEnabled: true}
cfg := &kv.Config{SpanCacheEnabled: true}
newDB, err := slasherDB.NewDB(p, cfg)
if err != nil {
t.Fatalf("Failed to instantiate DB: %v", err)

View File

@@ -5,21 +5,24 @@ go_library(
srcs = [
"detect.go",
"listeners.go",
"metrics.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/slasher/detection",
visibility = ["//slasher:__subpackages__"],
deps = [
"//shared/event:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/sliceutil:go_default_library",
"//slasher/beaconclient:go_default_library",
"//slasher/db:go_default_library",
"//slasher/db/types:go_default_library",
"//slasher/detection/attestations:go_default_library",
"//slasher/detection/attestations/iface:go_default_library",
"//slasher/detection/attestations/types:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
@@ -37,6 +40,7 @@ go_test(
"//shared/event:go_default_library",
"//shared/testutil:go_default_library",
"//slasher/db/testing:go_default_library",
"//slasher/db/types:go_default_library",
"//slasher/detection/attestations:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -9,10 +9,13 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/slasher/detection/attestations",
visibility = ["//slasher:__subpackages__"],
deps = [
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//slasher/db:go_default_library",
"//slasher/detection/attestations/iface:go_default_library",
"//slasher/detection/attestations/types:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
@@ -23,13 +26,8 @@ go_test(
srcs = ["spanner_test.go"],
embed = [":go_default_library"],
deps = [
"//shared/cmd:go_default_library",
"//shared/sliceutil:go_default_library",
"//slasher/db:go_default_library",
"//slasher/db/kv:go_default_library",
"//slasher/db/testing:go_default_library",
"//slasher/detection/attestations/types:go_default_library",
"//slasher/flags:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_urfave_cli//:go_default_library",
],
)

View File

@@ -10,11 +10,10 @@ import (
// SpanDetector defines an interface for Spanners to follow to allow mocks.
type SpanDetector interface {
// Read functions.
DetectSlashingForValidator(
DetectSlashingsForAttestation(
ctx context.Context,
validatorIdx uint64,
attData *ethpb.AttestationData,
) (*types.DetectionResult, error)
att *ethpb.IndexedAttestation,
) ([]*types.DetectionResult, error)
// Write functions.
UpdateSpans(ctx context.Context, att *ethpb.IndexedAttestation) error

View File

@@ -20,42 +20,45 @@ type MockSpanDetector struct {
lock sync.RWMutex
}
// DetectSlashingForValidator mocks a detected slashing, if the sent attestation data
// DetectSlashingsForAttestation mocks a detected slashing. If the sent attestation data
// has a source epoch of 0, nothing will be detected. If the sent attestation data has a target
// epoch equal to or greater than 6, it will "detect" a surrounded vote for the target epoch + 1.
// If the target epoch is greater than 12, it will "detect" a surrounding vote for target epoch - 1.
// Lastly, if it has a target epoch less than 6, it will "detect" a double vote for the target epoch.
func (s *MockSpanDetector) DetectSlashingForValidator(
func (s *MockSpanDetector) DetectSlashingsForAttestation(
ctx context.Context,
validatorIdx uint64,
attData *ethpb.AttestationData,
) (*types.DetectionResult, error) {
att *ethpb.IndexedAttestation,
) ([]*types.DetectionResult, error) {
var detections []*types.DetectionResult
switch {
// If the source epoch is 0, don't find a slashing.
case attData.Source.Epoch == 0:
case att.Data.Source.Epoch == 0:
return nil, nil
// If the target epoch is > 12, it will "detect" a surrounded saved attestation.
case attData.Target.Epoch > 12:
return &types.DetectionResult{
case att.Data.Target.Epoch > 12:
detections = append(detections, &types.DetectionResult{
Kind: types.SurroundVote,
SlashableEpoch: attData.Target.Epoch - 1,
SlashableEpoch: att.Data.Target.Epoch - 1,
SigBytes: [2]byte{1, 2},
}, nil
})
return detections, nil
// If the target epoch is >= 6 < 12, it will "detect" a surrounding saved attestation.
case attData.Target.Epoch >= 6:
return &types.DetectionResult{
case att.Data.Target.Epoch >= 6:
detections = append(detections, &types.DetectionResult{
Kind: types.SurroundVote,
SlashableEpoch: attData.Target.Epoch + 1,
SlashableEpoch: att.Data.Target.Epoch + 1,
SigBytes: [2]byte{1, 2},
}, nil
})
return detections, nil
// If the target epoch is less than 6, it will "detect" a double vote.
default:
return &types.DetectionResult{
detections = append(detections, &types.DetectionResult{
Kind: types.DoubleVote,
SlashableEpoch: attData.Target.Epoch,
SlashableEpoch: att.Data.Target.Epoch,
SigBytes: [2]byte{1, 2},
}, nil
})
}
return detections, nil
}
// SpanForEpochByValidator returns the specific min-max span for a

View File

@@ -4,7 +4,10 @@ import (
"context"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
db "github.com/prysmaticlabs/prysm/slasher/db"
"github.com/prysmaticlabs/prysm/slasher/detection/attestations/iface"
@@ -12,6 +15,22 @@ import (
"go.opencensus.io/trace"
)
var (
latestMinSpanDistanceObserved = promauto.NewGauge(prometheus.GaugeOpts{
Name: "latest_min_span_distance_observed",
Help: "The latest distance between target - source observed for min spans",
})
latestMaxSpanDistanceObserved = promauto.NewGauge(prometheus.GaugeOpts{
Name: "latest_max_span_distance_observed",
Help: "The latest distance between target - source observed for max spans",
})
)
// We look back 128 epochs when updating min/max spans
// for incoming attestations.
// TODO(#5040): Remove lookback and handle min spans properly.
const epochLookback = 128
var _ = iface.SpanDetector(&SpanDetector{})
// SpanDetector defines a struct which can detect slashable
@@ -30,18 +49,17 @@ func NewSpanDetector(db db.Database) *SpanDetector {
}
}
// DetectSlashingForValidator uses a validator index and its corresponding
// DetectSlashingsForAttestation uses a validator index and its corresponding
// min-max spans during an epoch to detect an epoch in which the validator
// committed a slashable attestation.
func (s *SpanDetector) DetectSlashingForValidator(
func (s *SpanDetector) DetectSlashingsForAttestation(
ctx context.Context,
validatorIdx uint64,
attData *ethpb.AttestationData,
) (*types.DetectionResult, error) {
ctx, traceSpan := trace.StartSpan(ctx, "detection.DetectSlashingForValidator")
att *ethpb.IndexedAttestation,
) ([]*types.DetectionResult, error) {
ctx, traceSpan := trace.StartSpan(ctx, "spanner.DetectSlashingsForAttestation")
defer traceSpan.End()
sourceEpoch := attData.Source.Epoch
targetEpoch := attData.Target.Epoch
sourceEpoch := att.Data.Source.Epoch
targetEpoch := att.Data.Target.Epoch
if (targetEpoch - sourceEpoch) > params.BeaconConfig().WeakSubjectivityPeriod {
return nil, fmt.Errorf(
"attestation span was greater than weak subjectivity period %d, received: %d",
@@ -49,132 +67,171 @@ func (s *SpanDetector) DetectSlashingForValidator(
targetEpoch-sourceEpoch,
)
}
spanMap, err := s.slasherDB.EpochSpansMap(ctx, sourceEpoch)
if err != nil {
return nil, err
}
targetSpanMap, err := s.slasherDB.EpochSpansMap(ctx, targetEpoch)
if err != nil {
return nil, err
}
var detections []*types.DetectionResult
distance := uint16(targetEpoch - sourceEpoch)
sp, err := s.slasherDB.EpochSpanByValidatorIndex(ctx, validatorIdx, sourceEpoch)
if err != nil {
return nil, err
}
minSpan := sp.MinSpan
if minSpan > 0 && minSpan < distance {
slashableEpoch := sourceEpoch + uint64(minSpan)
span, err := s.slasherDB.EpochSpanByValidatorIndex(ctx, validatorIdx, slashableEpoch)
if err != nil {
return nil, err
for _, idx := range att.AttestingIndices {
span := spanMap[idx]
minSpan := span.MinSpan
if minSpan > 0 && minSpan < distance {
slashableEpoch := sourceEpoch + uint64(minSpan)
targetSpan, err := s.slasherDB.EpochSpanByValidatorIndex(ctx, idx, slashableEpoch)
if err != nil {
return nil, err
}
detections = append(detections, &types.DetectionResult{
Kind: types.SurroundVote,
SlashableEpoch: slashableEpoch,
SigBytes: targetSpan.SigBytes,
})
continue
}
return &types.DetectionResult{
Kind: types.SurroundVote,
SlashableEpoch: sourceEpoch + uint64(minSpan),
SigBytes: span.SigBytes,
}, nil
}
maxSpan := sp.MaxSpan
if maxSpan > distance {
slashableEpoch := sourceEpoch + uint64(maxSpan)
span, err := s.slasherDB.EpochSpanByValidatorIndex(ctx, validatorIdx, slashableEpoch)
if err != nil {
return nil, err
maxSpan := span.MaxSpan
if maxSpan > distance {
slashableEpoch := sourceEpoch + uint64(maxSpan)
targetSpan, err := s.slasherDB.EpochSpanByValidatorIndex(ctx, idx, slashableEpoch)
if err != nil {
return nil, err
}
detections = append(detections, &types.DetectionResult{
Kind: types.SurroundVote,
SlashableEpoch: slashableEpoch,
SigBytes: targetSpan.SigBytes,
})
continue
}
targetSpan := targetSpanMap[idx]
// Check if the validator has attested for this epoch or not.
if targetSpan.HasAttested {
detections = append(detections, &types.DetectionResult{
Kind: types.DoubleVote,
SlashableEpoch: targetEpoch,
SigBytes: targetSpan.SigBytes,
})
continue
}
return &types.DetectionResult{
Kind: types.SurroundVote,
SlashableEpoch: slashableEpoch,
SigBytes: span.SigBytes,
}, nil
}
sp, err = s.slasherDB.EpochSpanByValidatorIndex(ctx, validatorIdx, targetEpoch)
if err != nil {
return nil, err
}
// Check if the validator has attested for this epoch or not.
if sp.HasAttested {
return &types.DetectionResult{
Kind: types.DoubleVote,
SlashableEpoch: targetEpoch,
SigBytes: sp.SigBytes,
}, nil
// Clear out any duplicate results.
keys := make(map[[32]byte]bool)
var detectionList []*types.DetectionResult
for _, dd := range detections {
hash := hashutil.Hash(dd.Marshal())
if _, value := keys[hash]; !value {
keys[hash] = true
detectionList = append(detectionList, dd)
}
}
return nil, nil
return detectionList, nil
}
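For readers new to the min-/max-span bookkeeping this loop implements, a short worked example (all numbers are illustrative, not taken from this diff):

package main

import "fmt"

// An earlier attestation had source=4, target=10, so updateMinSpan stored
// 10-e for epochs e < 4 and updateMaxSpan stored 10-e for epochs 4 < e < 10.
func main() {
    minSpanAtEpoch2 := uint64(10 - 2) // 8
    maxSpanAtEpoch6 := uint64(10 - 6) // 4

    // New attestation (source=2, target=12): distance 10. minSpan is non-zero
    // and 8 < 10, so the new vote surrounds the earlier one.
    fmt.Println("surround vote, slashable epoch:", 2+minSpanAtEpoch2) // 10, the earlier target

    // New attestation (source=6, target=8): distance 2. maxSpan 4 > 2, so the
    // new vote is surrounded by the earlier one.
    fmt.Println("surrounded vote, slashable epoch:", 6+maxSpanAtEpoch6) // 10
}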
// UpdateSpans given an indexed attestation for all of its attesting indices.
func (s *SpanDetector) UpdateSpans(ctx context.Context, att *ethpb.IndexedAttestation) error {
ctx, span := trace.StartSpan(ctx, "detection.UpdateSpans")
ctx, span := trace.StartSpan(ctx, "spanner.UpdateSpans")
defer span.End()
source := att.Data.Source.Epoch
target := att.Data.Target.Epoch
for i := 0; i < len(att.AttestingIndices); i++ {
valIdx := att.AttestingIndices[i]
// Save the signature for the received attestation so we can have more detail to find it in the DB.
err := s.saveSigBytes(ctx, att, valIdx)
if err != nil {
return err
}
// Update min and max spans.
err = s.updateMinSpan(ctx, source, target, valIdx)
if err != nil {
return err
}
err = s.updateMaxSpan(ctx, source, target, valIdx)
if err != nil {
return err
}
// Save the signature for the received attestation so we can have more detail to find it in the DB.
if err := s.saveSigBytes(ctx, att); err != nil {
return err
}
// Update min and max spans.
if err := s.updateMinSpan(ctx, att); err != nil {
return err
}
if err := s.updateMaxSpan(ctx, att); err != nil {
return err
}
return nil
}
// saveSigBytes saves the first 2 bytes of the signature for the attestation whose spans we are updating.
// Later used to help us find the violating attestation in the DB.
func (s *SpanDetector) saveSigBytes(ctx context.Context, att *ethpb.IndexedAttestation, valIdx uint64) error {
func (s *SpanDetector) saveSigBytes(ctx context.Context, att *ethpb.IndexedAttestation) error {
ctx, traceSpan := trace.StartSpan(ctx, "spanner.saveSigBytes")
defer traceSpan.End()
target := att.Data.Target.Epoch
sp, err := s.slasherDB.EpochSpanByValidatorIndex(ctx, valIdx, target)
spanMap, err := s.slasherDB.EpochSpansMap(ctx, target)
if err != nil {
return err
}
// If the validator has already attested for this target epoch,
// then we do not need to update the values of the span sig bytes.
if sp.HasAttested {
return nil
}
// We loop through the indices, instead of constantly locking/unlocking the cache for equivalent accesses.
for _, idx := range att.AttestingIndices {
span := spanMap[idx]
// If the validator has already attested for this target epoch,
// then we do not need to update the values of the span sig bytes.
if span.HasAttested {
return nil
}
sigBytes := [2]byte{0, 0}
if len(att.Signature) > 1 {
sigBytes = [2]byte{att.Signature[0], att.Signature[1]}
sigBytes := [2]byte{0, 0}
if len(att.Signature) > 1 {
sigBytes = [2]byte{att.Signature[0], att.Signature[1]}
}
// Save the signature bytes into the span for this epoch.
span.HasAttested = true
span.SigBytes = sigBytes
spanMap[idx] = span
}
// Save the signature bytes into the span for this epoch.
sp.HasAttested = true
sp.SigBytes = sigBytes
return s.slasherDB.SaveValidatorEpochSpans(ctx, valIdx, target, sp)
return s.slasherDB.SaveEpochSpansMap(ctx, target, spanMap)
}
// Updates a min span for a validator index given a source and target epoch
// for an attestation produced by the validator. Used for catching surrounding votes.
func (s *SpanDetector) updateMinSpan(ctx context.Context, source uint64, target uint64, valIdx uint64) error {
func (s *SpanDetector) updateMinSpan(ctx context.Context, att *ethpb.IndexedAttestation) error {
ctx, traceSpan := trace.StartSpan(ctx, "spanner.updateMinSpan")
defer traceSpan.End()
source := att.Data.Source.Epoch
target := att.Data.Target.Epoch
if source < 1 {
return nil
}
for epochInt := int64(source - 1); epochInt >= 0; epochInt-- {
epoch := uint64(epochInt)
span, err := s.slasherDB.EpochSpanByValidatorIndex(ctx, valIdx, epoch)
valIndices := make([]uint64, len(att.AttestingIndices))
copy(valIndices, att.AttestingIndices)
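// Note: source and epochLookback are uint64, so source-epochLookback wraps
// around when source < epochLookback; the int(...) <= 0 check below relies on
// that wrapped value becoming a negative int (on 64-bit builds) and clamps the
// lower bound to 0.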
lowestEpoch := source - epochLookback
if int(lowestEpoch) <= 0 {
lowestEpoch = 0
}
latestMinSpanDistanceObserved.Set(float64(att.Data.Target.Epoch - att.Data.Source.Epoch))
for epoch := source - 1; epoch >= lowestEpoch; epoch-- {
spanMap, err := s.slasherDB.EpochSpansMap(ctx, epoch)
if err != nil {
return err
}
newMinSpan := uint16(target - epoch)
if span.MinSpan == 0 || span.MinSpan > newMinSpan {
span = types.Span{
MinSpan: newMinSpan,
MaxSpan: span.MaxSpan,
SigBytes: span.SigBytes,
HasAttested: span.HasAttested,
indices := valIndices[:0]
for _, idx := range valIndices {
span := spanMap[idx]
newMinSpan := uint16(target - epoch)
if span.MinSpan == 0 || span.MinSpan > newMinSpan {
span = types.Span{
MinSpan: newMinSpan,
MaxSpan: span.MaxSpan,
SigBytes: span.SigBytes,
HasAttested: span.HasAttested,
}
spanMap[idx] = span
indices = append(indices, idx)
}
err := s.slasherDB.SaveValidatorEpochSpans(ctx, valIdx, epoch, span)
if err != nil {
return err
}
} else {
}
if err := s.slasherDB.SaveEpochSpansMap(ctx, epoch, spanMap); err != nil {
return err
}
if len(indices) == 0 {
break
}
if epoch == 0 {
break
}
}
@@ -183,25 +240,38 @@ func (s *SpanDetector) updateMinSpan(ctx context.Context, source uint64, target
// Updates a max span for a validator index given a source and target epoch
// for an attestation produced by the validator. Used for catching surrounded votes.
func (s *SpanDetector) updateMaxSpan(ctx context.Context, source uint64, target uint64, valIdx uint64) error {
func (s *SpanDetector) updateMaxSpan(ctx context.Context, att *ethpb.IndexedAttestation) error {
ctx, traceSpan := trace.StartSpan(ctx, "spanner.updateMaxSpan")
defer traceSpan.End()
source := att.Data.Source.Epoch
target := att.Data.Target.Epoch
latestMaxSpanDistanceObserved.Set(float64(target - source))
valIndices := make([]uint64, len(att.AttestingIndices))
copy(valIndices, att.AttestingIndices)
for epoch := source + 1; epoch < target; epoch++ {
span, err := s.slasherDB.EpochSpanByValidatorIndex(ctx, valIdx, epoch)
spanMap, err := s.slasherDB.EpochSpansMap(ctx, epoch)
if err != nil {
return err
}
newMaxSpan := uint16(target - epoch)
if newMaxSpan > span.MaxSpan {
span = types.Span{
MinSpan: span.MinSpan,
MaxSpan: newMaxSpan,
SigBytes: span.SigBytes,
HasAttested: span.HasAttested,
indices := valIndices[:0]
for _, idx := range valIndices {
span := spanMap[idx]
newMaxSpan := uint16(target - epoch)
if newMaxSpan > span.MaxSpan {
span = types.Span{
MinSpan: span.MinSpan,
MaxSpan: newMaxSpan,
SigBytes: span.SigBytes,
HasAttested: span.HasAttested,
}
spanMap[idx] = span
indices = append(indices, idx)
}
err := s.slasherDB.SaveValidatorEpochSpans(ctx, valIdx, epoch, span)
if err != nil {
return err
}
} else {
}
if err := s.slasherDB.SaveEpochSpansMap(ctx, epoch, spanMap); err != nil {
return err
}
if len(indices) == 0 {
break
}
}

View File

@@ -2,24 +2,15 @@ package attestations
import (
"context"
"flag"
"path"
"reflect"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"github.com/prysmaticlabs/prysm/slasher/db"
"github.com/prysmaticlabs/prysm/slasher/db/kv"
testDB "github.com/prysmaticlabs/prysm/slasher/db/testing"
"github.com/prysmaticlabs/prysm/slasher/detection/attestations/types"
"github.com/prysmaticlabs/prysm/slasher/flags"
"github.com/urfave/cli"
)
const slasherDBName = "slasherdata"
func TestSpanDetector_DetectSlashingForValidator_Double(t *testing.T) {
func TestSpanDetector_DetectSlashingsForAttestation_Double(t *testing.T) {
type testStruct struct {
name string
att *ethpb.IndexedAttestation
@@ -154,7 +145,7 @@ func TestSpanDetector_DetectSlashingForValidator_Double(t *testing.T) {
BeaconBlockRoot: []byte("bad block root"),
},
},
slashCount: 3,
slashCount: 1,
},
{
name: "att with different target, should not detect possible double",
@@ -220,60 +211,49 @@ func TestSpanDetector_DetectSlashingForValidator_Double(t *testing.T) {
BeaconBlockRoot: []byte("good block root"),
},
},
slashCount: 2,
slashCount: 1,
},
}
app := cli.NewApp()
set := flag.NewFlagSet("test", 0)
cliCtx := cli.NewContext(app, set, nil)
baseDir := cliCtx.GlobalString(cmd.DataDirFlag.Name)
dbPath := path.Join(baseDir, slasherDBName)
cfg := &kv.Config{SpanCacheEnabled: cliCtx.GlobalBool(flags.UseSpanCacheFlag.Name)}
d, err := db.NewDB(dbPath, cfg)
ctx := context.Background()
if err != nil {
t.Fatalf("Failed to init slasherDB: %v", err)
}
defer d.ClearDB()
defer d.Close()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := testDB.SetupSlasherDB(t, false)
defer testDB.TeardownSlasherDB(t, db)
ctx := context.Background()
sd := &SpanDetector{
slasherDB: d,
slasherDB: db,
}
if err := sd.UpdateSpans(ctx, tt.att); err != nil {
t.Fatal(err)
}
slashTotal := uint64(0)
for _, valIdx := range sliceutil.IntersectionUint64(tt.att.AttestingIndices, tt.incomingAtt.AttestingIndices) {
res, err := sd.DetectSlashingForValidator(ctx, valIdx, tt.incomingAtt.Data)
if err != nil {
t.Fatal(err)
}
var want *types.DetectionResult
if tt.slashCount > 0 {
slashTotal++
want = &types.DetectionResult{
res, err := sd.DetectSlashingsForAttestation(ctx, tt.incomingAtt)
if err != nil {
t.Fatal(err)
}
var want []*types.DetectionResult
if tt.slashCount > 0 {
want = []*types.DetectionResult{
{
Kind: types.DoubleVote,
SlashableEpoch: tt.incomingAtt.Data.Target.Epoch,
SigBytes: [2]byte{1, 2},
}
}
if !reflect.DeepEqual(res, want) {
t.Errorf("Wanted: %v, received %v", want, res)
},
}
}
if slashTotal != tt.slashCount {
t.Fatalf("Unexpected amount of slashings found, received %d, expected %d", slashTotal, tt.slashCount)
if !reflect.DeepEqual(res, want) {
t.Errorf("Wanted: %v, received %v", want, res)
}
if uint64(len(res)) != tt.slashCount {
t.Fatalf("Unexpected amount of slashings found, received %db, expected %d", len(res), tt.slashCount)
}
})
}
}
func TestSpanDetector_DetectSlashingForValidator_Surround(t *testing.T) {
func TestSpanDetector_DetectSlashingsForAttestation_Surround(t *testing.T) {
type testStruct struct {
name string
sourceEpoch uint64
@@ -465,12 +445,13 @@ func TestSpanDetector_DetectSlashingForValidator_Surround(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
slasherDB := setupSlasherDB(t)
defer teardownSlasherDB(t, slasherDB)
sd := &SpanDetector{
slasherDB: slasherDB,
}
db := testDB.SetupSlasherDB(t, false)
ctx := context.Background()
defer testDB.TeardownSlasherDB(t, db)
sd := &SpanDetector{
slasherDB: db,
}
// We only care about validator index 0 for these tests for simplicity.
validatorIndex := uint64(0)
for k, v := range tt.spansByEpochForValidator {
@@ -480,21 +461,23 @@ func TestSpanDetector_DetectSlashingForValidator_Surround(t *testing.T) {
MaxSpan: v[1],
},
}
err := sd.slasherDB.SaveEpochSpansMap(ctx, k, span)
if err != nil {
if err := sd.slasherDB.SaveEpochSpansMap(ctx, k, span); err != nil {
t.Fatalf("Failed to save to slasherDB: %v", err)
}
}
}
attData := &ethpb.AttestationData{
Source: &ethpb.Checkpoint{
Epoch: tt.sourceEpoch,
},
Target: &ethpb.Checkpoint{
Epoch: tt.targetEpoch,
att := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{
Epoch: tt.sourceEpoch,
},
Target: &ethpb.Checkpoint{
Epoch: tt.targetEpoch,
},
},
AttestingIndices: []uint64{0},
}
res, err := sd.DetectSlashingForValidator(ctx, validatorIndex, attData)
res, err := sd.DetectSlashingsForAttestation(ctx, att)
if err != nil {
t.Fatal(err)
}
@@ -502,9 +485,11 @@ func TestSpanDetector_DetectSlashingForValidator_Surround(t *testing.T) {
t.Fatalf("Did not want validator to be slashed but found slashable offense: %v", res)
}
if tt.shouldSlash {
want := &types.DetectionResult{
Kind: types.SurroundVote,
SlashableEpoch: tt.slashableEpoch,
want := []*types.DetectionResult{
{
Kind: types.SurroundVote,
SlashableEpoch: tt.slashableEpoch,
},
}
if !reflect.DeepEqual(res, want) {
t.Errorf("Wanted: %v, received %v", want, res)
@@ -514,7 +499,7 @@ func TestSpanDetector_DetectSlashingForValidator_Surround(t *testing.T) {
}
}
func TestSpanDetector_DetectSlashingForValidator_MultipleValidators(t *testing.T) {
func TestSpanDetector_DetectSlashingsForAttestation_MultipleValidators(t *testing.T) {
type testStruct struct {
name string
sourceEpochs []uint64
@@ -583,12 +568,14 @@ func TestSpanDetector_DetectSlashingForValidator_MultipleValidators(t *testing.T
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
slasherDB := setupSlasherDB(t)
defer teardownSlasherDB(t, slasherDB)
sd := &SpanDetector{
slasherDB: slasherDB,
}
db := testDB.SetupSlasherDB(t, false)
ctx := context.Background()
defer db.ClearDB()
defer db.Close()
sd := &SpanDetector{
slasherDB: db,
}
for i := 0; i < len(tt.spansByEpoch); i++ {
epoch := uint64(i)
err := sd.slasherDB.SaveEpochSpansMap(ctx, epoch, tt.spansByEpoch[epoch])
@@ -597,15 +584,18 @@ func TestSpanDetector_DetectSlashingForValidator_MultipleValidators(t *testing.T
}
}
for valIdx := uint64(0); valIdx < uint64(len(tt.shouldSlash)); valIdx++ {
attData := &ethpb.AttestationData{
Source: &ethpb.Checkpoint{
Epoch: tt.sourceEpochs[valIdx],
},
Target: &ethpb.Checkpoint{
Epoch: tt.targetEpochs[valIdx],
att := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{
Epoch: tt.sourceEpochs[valIdx],
},
Target: &ethpb.Checkpoint{
Epoch: tt.targetEpochs[valIdx],
},
},
AttestingIndices: []uint64{valIdx},
}
res, err := sd.DetectSlashingForValidator(ctx, valIdx, attData)
res, err := sd.DetectSlashingsForAttestation(ctx, att)
if err != nil {
t.Fatal(err)
}
@@ -613,9 +603,11 @@ func TestSpanDetector_DetectSlashingForValidator_MultipleValidators(t *testing.T
t.Fatalf("Did not want validator to be slashed but found slashable offense: %v", res)
}
if tt.shouldSlash[valIdx] {
want := &types.DetectionResult{
Kind: types.SurroundVote,
SlashableEpoch: tt.slashableEpochs[valIdx],
want := []*types.DetectionResult{
{
Kind: types.SurroundVote,
SlashableEpoch: tt.slashableEpochs[valIdx],
},
}
if !reflect.DeepEqual(res, want) {
t.Errorf("Wanted: %v, received %v", want, res)
@@ -735,12 +727,14 @@ func TestNewSpanDetector_UpdateSpans(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
slasherDB := setupSlasherDB(t)
defer teardownSlasherDB(t, slasherDB)
sd := &SpanDetector{
slasherDB: slasherDB,
}
db := testDB.SetupSlasherDB(t, false)
ctx := context.Background()
defer db.ClearDB()
defer db.Close()
sd := &SpanDetector{
slasherDB: db,
}
if err := sd.UpdateSpans(ctx, tt.att); err != nil {
t.Fatal(err)
}
@@ -753,30 +747,6 @@ func TestNewSpanDetector_UpdateSpans(t *testing.T) {
t.Errorf("Wanted and received:\n%v \n%v", tt.want, sm)
}
}
})
}
}
func setupSlasherDB(t *testing.T) *kv.Store {
app := cli.NewApp()
set := flag.NewFlagSet("test", 0)
cliCtx := cli.NewContext(app, set, nil)
baseDir := cliCtx.GlobalString(cmd.DataDirFlag.Name)
dbPath := path.Join(baseDir, slasherDBName)
cfg := &kv.Config{SpanCacheEnabled: cliCtx.GlobalBool(flags.UseSpanCacheFlag.Name)}
slasherDB, err := db.NewDB(dbPath, cfg)
if err != nil {
t.Fatalf("Failed to init slasher db: %v", err)
}
return slasherDB
}
func teardownSlasherDB(t *testing.T, slasherDB *kv.Store) {
if err := slasherDB.ClearDB(); err != nil {
t.Fatal(err)
}
if err := slasherDB.Close(); err != nil {
t.Fatal(err)
}
}

View File

@@ -5,4 +5,5 @@ go_library(
srcs = ["types.go"],
importpath = "github.com/prysmaticlabs/prysm/slasher/detection/attestations/types",
visibility = ["//visibility:public"],
deps = ["//shared/bytesutil:go_default_library"],
)

View File

@@ -1,5 +1,7 @@
package types
import "github.com/prysmaticlabs/prysm/shared/bytesutil"
// DetectionKind defines an enum type that
// gives us information on the type of slashable offense
// found when analyzing validator min-max spans.
@@ -27,6 +29,16 @@ type DetectionResult struct {
SigBytes [2]byte
}
// Marshal the result into bytes, used for removing duplicates.
func (result *DetectionResult) Marshal() []byte {
numBytes := bytesutil.ToBytes(result.SlashableEpoch, 8)
var resultBytes []byte
resultBytes = append(resultBytes, uint8(result.Kind))
resultBytes = append(resultBytes, result.SigBytes[:]...)
resultBytes = append(resultBytes, numBytes...)
return resultBytes
}
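Marshal exists so duplicate detection results can be collapsed by hashing their byte form, as done in DetectSlashingsForAttestation above. A small standalone sketch of that pattern (the helper name and package are hypothetical; hashutil and types are the packages used in this diff):

package dedupexample

import (
    "github.com/prysmaticlabs/prysm/shared/hashutil"
    "github.com/prysmaticlabs/prysm/slasher/detection/attestations/types"
)

// Dedup drops detection results that marshal to identical bytes, mirroring
// the keys-map loop in the span detector.
func Dedup(results []*types.DetectionResult) []*types.DetectionResult {
    seen := make(map[[32]byte]bool)
    deduped := make([]*types.DetectionResult, 0, len(results))
    for _, r := range results {
        h := hashutil.Hash(r.Marshal())
        if !seen[h] {
            seen[h] = true
            deduped = append(deduped, r)
        }
    }
    return deduped
}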
// Span defines the structure used for detecting surround and double votes.
type Span struct {
MinSpan uint16

View File

@@ -6,27 +6,29 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
status "github.com/prysmaticlabs/prysm/slasher/db/types"
"github.com/prysmaticlabs/prysm/slasher/detection/attestations/types"
"go.opencensus.io/trace"
)
func (ds *Service) detectAttesterSlashings(
ctx context.Context,
att *ethpb.IndexedAttestation,
) ([]*ethpb.AttesterSlashing, error) {
slashings := make([]*ethpb.AttesterSlashing, 0)
for i := 0; i < len(att.AttestingIndices); i++ {
valIdx := att.AttestingIndices[i]
result, err := ds.minMaxSpanDetector.DetectSlashingForValidator(ctx, valIdx, att.Data)
if err != nil {
return nil, err
}
// If the response is nil, there was no slashing detected.
if result == nil {
continue
}
ctx, span := trace.StartSpan(ctx, "detection.detectAttesterSlashings")
defer span.End()
results, err := ds.minMaxSpanDetector.DetectSlashingsForAttestation(ctx, att)
if err != nil {
return nil, err
}
// If there are no detection results, no slashing was detected.
if len(results) == 0 {
return nil, nil
}
var slashings []*ethpb.AttesterSlashing
for _, result := range results {
var slashing *ethpb.AttesterSlashing
switch result.Kind {
case types.DoubleVote:
@@ -40,24 +42,14 @@ func (ds *Service) detectAttesterSlashings(
return nil, errors.Wrap(err, "could not detect surround votes on attestation")
}
}
slashings = append(slashings, slashing)
}
// Clear out any duplicate slashings.
keys := make(map[[32]byte]bool)
var slashingList []*ethpb.AttesterSlashing
for _, ss := range slashings {
hash, err := hashutil.HashProto(ss)
if err != nil {
return nil, err
}
if _, value := keys[hash]; !value {
keys[hash] = true
slashingList = append(slashingList, ss)
if slashing != nil {
slashings = append(slashings, slashing)
}
}
return slashingList, nil
if err = ds.slasherDB.SaveAttesterSlashings(ctx, status.Active, slashings); err != nil {
return nil, err
}
return slashings, nil
}
// detectDoubleVote cross references the passed in attestation with the bloom filter maintained
@@ -67,6 +59,8 @@ func (ds *Service) detectDoubleVote(
incomingAtt *ethpb.IndexedAttestation,
detectionResult *types.DetectionResult,
) (*ethpb.AttesterSlashing, error) {
ctx, span := trace.StartSpan(ctx, "detection.detectDoubleVote")
defer span.End()
if detectionResult == nil || detectionResult.Kind != types.DoubleVote {
return nil, nil
}
@@ -85,14 +79,14 @@ func (ds *Service) detectDoubleVote(
}
if isDoubleVote(incomingAtt, att) {
doubleVotesDetected.Inc()
return &ethpb.AttesterSlashing{
Attestation_1: incomingAtt,
Attestation_2: att,
}, nil
}
}
return nil, errors.New("unexpected false positive in double vote detection")
return nil, nil
}
// detectSurroundVotes cross references the passed in attestation with the requested validator's
@@ -102,6 +96,8 @@ func (ds *Service) detectSurroundVotes(
incomingAtt *ethpb.IndexedAttestation,
detectionResult *types.DetectionResult,
) (*ethpb.AttesterSlashing, error) {
ctx, span := trace.StartSpan(ctx, "detection.detectSurroundVotes")
defer span.End()
if detectionResult == nil || detectionResult.Kind != types.SurroundVote {
return nil, nil
}
@@ -122,11 +118,13 @@ func (ds *Service) detectSurroundVotes(
// Slashings must be submitted as the incoming attestation surrounding the saved attestation.
// So we swap the order if needed.
if isSurrounding(incomingAtt, att) {
surroundingVotesDetected.Inc()
return &ethpb.AttesterSlashing{
Attestation_1: incomingAtt,
Attestation_2: att,
}, nil
} else if isSurrounded(incomingAtt, att) {
surroundedVotesDetected.Inc()
return &ethpb.AttesterSlashing{
Attestation_1: att,
Attestation_2: incomingAtt,

View File

@@ -6,6 +6,7 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
testDB "github.com/prysmaticlabs/prysm/slasher/db/testing"
status "github.com/prysmaticlabs/prysm/slasher/db/types"
"github.com/prysmaticlabs/prysm/slasher/detection/attestations"
)
@@ -78,6 +79,7 @@ func TestDetect_detectAttesterSlashings_Surround(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := testDB.SetupSlasherDB(t, false)
defer testDB.TeardownSlasherDB(t, db)
ctx := context.Background()
ds := Service{
ctx: ctx,
@@ -95,7 +97,10 @@ func TestDetect_detectAttesterSlashings_Surround(t *testing.T) {
if len(slashings) != tt.slashingsFound {
t.Fatalf("Unexpected amount of slashings found, received %d, expected %d", len(slashings), tt.slashingsFound)
}
attsl, err := db.AttesterSlashings(ctx, status.Active)
if err != nil {
t.Fatal(err)
}
if len(attsl) != tt.slashingsFound {
t.Fatalf("Did not save expected slashings to db, found %d, expected %d", len(attsl), tt.slashingsFound)
}
for _, ss := range slashings {
slashingAtt1 := ss.Attestation_1
slashingAtt2 := ss.Attestation_2
@@ -109,7 +114,7 @@ func TestDetect_detectAttesterSlashings_Surround(t *testing.T) {
)
}
}
testDB.TeardownSlasherDB(t, db)
})
}
}
@@ -188,6 +193,7 @@ func TestDetect_detectAttesterSlashings_Double(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := testDB.SetupSlasherDB(t, false)
defer testDB.TeardownSlasherDB(t, db)
ctx := context.Background()
ds := Service{
ctx: ctx,
@@ -202,6 +208,10 @@ func TestDetect_detectAttesterSlashings_Double(t *testing.T) {
if err != nil {
t.Fatal(err)
}
attsl, err := db.AttesterSlashings(ctx, status.Active)
if err != nil {
t.Fatal(err)
}
if len(attsl) != tt.slashingsFound {
t.Fatalf("Did not save expected slashings to db, found %d, expected %d", len(attsl), tt.slashingsFound)
}
if len(slashings) != tt.slashingsFound {
t.Fatalf("Unexpected amount of slashings found, received %d, expected %d", len(slashings), tt.slashingsFound)
}
@@ -217,7 +227,7 @@ func TestDetect_detectAttesterSlashings_Double(t *testing.T) {
)
}
}
testDB.TeardownSlasherDB(t, db)
})
}
}

View File

@@ -65,9 +65,12 @@ func TestService_DetectIncomingAttestations(t *testing.T) {
att := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Slot: 1,
Target: &ethpb.Checkpoint{
Source: &ethpb.Checkpoint{
Epoch: 0,
},
Target: &ethpb.Checkpoint{
Epoch: 1,
},
},
}
exitRoutine := make(chan bool)

View File

@@ -0,0 +1,25 @@
package detection
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
doubleProposalsDetected = promauto.NewCounter(prometheus.CounterOpts{
Name: "double_proposals_detected_total",
Help: "The # of double propose slashable events detected",
})
doubleVotesDetected = promauto.NewCounter(prometheus.CounterOpts{
Name: "double_votes_detected_total",
Help: "The # of double vote slashable events detected",
})
surroundingVotesDetected = promauto.NewCounter(prometheus.CounterOpts{
Name: "surrounding_votes_detected_total",
Help: "The # of surrounding slashable events detected",
})
surroundedVotesDetected = promauto.NewCounter(prometheus.CounterOpts{
Name: "surrounded_votes_detected_total",
Help: "The # of surrounded slashable events detected",
})
)

View File

@@ -83,7 +83,7 @@ func (ds *Service) Start() {
// The detection service runs detection on all historical
// chain data since genesis.
go ds.detectHistoricalChainData(ds.ctx)
// TODO(#5030): Re-enable after issue is resolved.
// We subscribe to incoming blocks from the beacon node via
// our gRPC client to keep detecting slashable offenses.
@@ -141,16 +141,21 @@ func (ds *Service) detectHistoricalChainData(ctx context.Context) {
}
func (ds *Service) submitAttesterSlashings(ctx context.Context, slashings []*ethpb.AttesterSlashing, epoch uint64) {
ctx, span := trace.StartSpan(ctx, "detection.submitAttesterSlashings")
defer span.End()
var slashedIndices []uint64
for i := 0; i < len(slashings); i++ {
slashableIndices := sliceutil.IntersectionUint64(slashings[i].Attestation_1.AttestingIndices, slashings[i].Attestation_2.AttestingIndices)
slashedIndices = append(slashedIndices, slashableIndices...)
ds.attesterSlashingsFeed.Send(slashings[i])
}
if len(slashings) > 0 {
log.WithFields(logrus.Fields{
"targetEpoch": epoch,
"indices": slashedIndices,
}).Infof("Found %d attester slashings! Submitting to beacon node.", len(slashings))
}).Infof("Found %d attester slashings! Submitting to beacon node", len(slashings))
}
for i := 0; i < len(slashings); i++ {
slash := slashings[i]
if slash != nil && slash.Attestation_1 != nil && slash.Attestation_2 != nil {
slashableIndices := sliceutil.IntersectionUint64(slashings[i].Attestation_1.AttestingIndices, slashings[i].Attestation_2.AttestingIndices)
slashedIndices = append(slashedIndices, slashableIndices...)
ds.attesterSlashingsFeed.Send(slashings[i])
}
}
}

View File

@@ -10,6 +10,7 @@ go_library(
"//shared/cmd:go_default_library",
"//shared/debug:go_default_library",
"//shared/event:go_default_library",
"//shared/prometheus:go_default_library",
"//shared/tracing:go_default_library",
"//slasher/beaconclient:go_default_library",
"//slasher/db:go_default_library",

View File

@@ -2,6 +2,7 @@ package node
import (
"context"
"fmt"
"os"
"os/signal"
"path"
@@ -12,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/debug"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/prometheus"
"github.com/prysmaticlabs/prysm/shared/tracing"
"github.com/prysmaticlabs/prysm/slasher/beaconclient"
"github.com/prysmaticlabs/prysm/slasher/db"
@@ -60,6 +62,9 @@ func NewSlasherNode(ctx *cli.Context) (*SlasherNode, error) {
services: registry,
stop: make(chan struct{}),
}
if err := slasher.registerPrometheusService(ctx); err != nil {
return nil, err
}
if err := slasher.startDB(ctx); err != nil {
return nil, err
@@ -117,6 +122,15 @@ func (s *SlasherNode) Close() {
close(s.stop)
}
func (s *SlasherNode) registerPrometheusService(ctx *cli.Context) error {
service := prometheus.NewPrometheusService(
fmt.Sprintf(":%d", ctx.GlobalInt64(cmd.MonitoringPortFlag.Name)),
s.services,
)
logrus.AddHook(prometheus.NewLogrusCollector())
return s.services.RegisterService(service)
}
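With the Prometheus service registered, the slasher serves metrics over HTTP on the monitoring port. A tiny sketch of checking that endpoint (the port value is a placeholder, and the /metrics path assumes the shared Prometheus service's standard handler):

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
)

func main() {
    // Use whatever --monitoring-port was set to; 8080 is only a placeholder here.
    resp, err := http.Get("http://localhost:8080/metrics")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }
    fmt.Printf("status %d, %d bytes of metrics\n", resp.StatusCode, len(body))
}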
func (s *SlasherNode) startDB(ctx *cli.Context) error {
baseDir := ctx.GlobalString(cmd.DataDirFlag.Name)
clearDB := ctx.GlobalBool(cmd.ClearDB.Name)

third_party/herumi/BUILD.bazel vendored Normal file
View File

third_party/herumi/bls.BUILD vendored Normal file
View File

@@ -0,0 +1,24 @@
package(default_visibility = ["//visibility:public"])
cc_library(
name = "bls_c384_256",
srcs = [
"src/bls_c384_256.cpp",
],
deps = [
"@herumi_mcl//:bn",
],
hdrs = [
"include/bls/bls.h",
"src/bls_c_impl.hpp",
"src/qcoeff-bn254.hpp",
],
includes = [
"include",
],
copts = [
"-DBLS_SWAP_G",
"-DBLS_ETH",
"-std=c++03",
],
)

View File

@@ -0,0 +1,119 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
OPTS = [
"-DMCL_USE_VINT",
"-DMCL_DONT_USE_OPENSSL",
"-DMCL_LLVM_BMI2=0",
"-DMCL_USE_LLVM=1",
"-DMCL_VINT_FIXED_BUFFER",
"-DMCL_SIZEOF_UNIT=8",
"-DMCL_MAX_BIT_SIZE=384",
"-DCYBOZU_DONT_USE_EXCEPTION",
"-DCYBOZU_DONT_USE_STRING",
"-DBLS_SWAP_G",
"-DBLS_ETH",
]
genrule(
name = "base64_ll",
outs = ["src/base64.ll"], # llvm assembly language file.
tools = [
"@herumi_mcl//:src_gen",
],
cmd = "touch func.list && $(location @herumi_mcl//:src_gen) -u 64 -f func.list > $@",
)
genrule(
name = "base64_o",
srcs = [
"src/base64.ll",
],
outs = ["base64.o"],
cmd = "external/llvm_toolchain/bin/clang++ -c -o $@ $(location src/base64.ll)",
tools = ["@llvm_toolchain//:clang"],
)
cc_library(
name = "lib",
srcs = [
"@herumi_mcl//:src/fp.cpp",
"@herumi_bls//:src/bls_c384_256.cpp",
"@herumi_bls//:src/bls_c_impl.hpp",
":base64_o",
],
deps = ["@herumi_mcl//:bn"],
includes = [
"bls/include",
],
hdrs = [
"bls/include/bls/bls.h",
"bls/include/bls/bls384_256.h",
"bls/include/mcl/bn.h",
"bls/include/mcl/bn_c384_256.h",
"@herumi_mcl//:include/mcl/curve_type.h",
],
copts = OPTS + [
"-std=c++03",
],
)
cc_library(
name = "precompiled",
srcs = select({
"@io_bazel_rules_go//go/platform:android_arm": [
"bls/lib/android/armeabi-v7a/libbls384_256.a",
],
"@io_bazel_rules_go//go/platform:linux_arm64": [
"bls/lib/android/arm64-v8a/libbls384_256.a",
],
"@io_bazel_rules_go//go/platform:android_arm64": [
"bls/lib/android/arm64-v8a/libbls384_256.a",
],
"@io_bazel_rules_go//go/platform:darwin_amd64": [
"bls/lib/darwin/amd64/libbls384_256.a",
],
"@io_bazel_rules_go//go/platform:linux_amd64": [
"bls/lib/linux/amd64/libbls384_256.a",
],
"@io_bazel_rules_go//go/platform:windows_amd64": [
"bls/lib/windows/amd64/libbls384_256.a",
],
"//conditions:default": [],
}),
hdrs = [
"bls/include/bls/bls.h",
"bls/include/bls/bls384_256.h",
"bls/include/mcl/bn.h",
"bls/include/mcl/bn_c384_256.h",
"bls/include/mcl/curve_type.h",
],
includes = [
"bls/include",
],
deprecation = "Using precompiled BLS archives. To build BLS from source with llvm, use --config=llvm.",
)
config_setting(
name = "llvm_compiler_enabled",
define_values = {
"compiler": "llvm",
},
)
go_library(
name = "go_default_library",
importpath = "github.com/herumi/bls-eth-go-binary/bls",
srcs = [
"bls/bls.go",
"bls/callback.go",
"bls/cast.go",
"bls/mcl.go",
],
cdeps = select({
":llvm_compiler_enabled": [":lib"],
"//conditions:default": [":precompiled"],
}),
copts = OPTS,
cgo = True,
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,49 @@
diff --git a/bls/bls.go b/bls/bls.go
index bc3b607..f6fa95f 100644
--- a/bls/bls.go
+++ b/bls/bls.go
@@ -157,7 +157,7 @@ type SecretKey struct {
// Serialize --
func (sec *SecretKey) Serialize() []byte {
- buf := make([]byte, 2048)
+ buf := make([]byte, 32)
// #nosec
n := C.blsSecretKeySerialize(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &sec.v)
if n == 0 {
@@ -354,7 +354,7 @@ func (keys PublicKeys) JSON() string {
// Serialize --
func (pub *PublicKey) Serialize() []byte {
- buf := make([]byte, 2048)
+ buf := make([]byte, 48)
// #nosec
n := C.blsPublicKeySerialize(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &pub.v)
if n == 0 {
@@ -452,7 +452,7 @@ type Sign struct {
// Serialize --
func (sig *Sign) Serialize() []byte {
- buf := make([]byte, 2048)
+ buf := make([]byte, 96)
// #nosec
n := C.blsSignatureSerialize(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &sig.v)
if n == 0 {
@@ -665,7 +665,7 @@ func (sig *Sign) VerifyHashWithDomain(pub *PublicKey, hashWithDomain []byte) boo
// SerializeUncompressed --
func (pub *PublicKey) SerializeUncompressed() []byte {
- buf := make([]byte, 2048)
+ buf := make([]byte, 96)
// #nosec
n := C.blsPublicKeySerializeUncompressed(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &pub.v)
if n == 0 {
@@ -676,7 +676,7 @@ func (pub *PublicKey) SerializeUncompressed() []byte {
// SerializeUncompressed --
func (sig *Sign) SerializeUncompressed() []byte {
- buf := make([]byte, 2048)
+ buf := make([]byte, 192)
// #nosec
n := C.blsSignatureSerializeUncompressed(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &sig.v)
if n == 0 {
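The patch replaces the blanket 2048-byte buffers with the exact BLS12-381 encoding sizes used by eth2 (public keys in G1, signatures in G2). A tiny reference sketch (package and helper names are hypothetical) capturing those sizes:

package blssizes

import "fmt"

// Sizes the patched Serialize methods allocate.
const (
    SecretKeyLen             = 32  // scalar
    PublicKeyLenCompressed   = 48  // G1 point, compressed
    SignatureLenCompressed   = 96  // G2 point, compressed
    PublicKeyLenUncompressed = 96
    SignatureLenUncompressed = 192
)

// CheckLen is a hypothetical helper a caller could use to sanity-check the
// output of the patched Serialize methods.
func CheckLen(name string, b []byte, want int) error {
    if len(b) != want {
        return fmt.Errorf("%s: got %d bytes, want %d", name, len(b), want)
    }
    return nil
}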

third_party/herumi/herumi.bzl vendored Normal file
View File

@@ -0,0 +1,50 @@
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
"""
Herumi's BLS library for go depends on
- herumi/mcl
- herumi/bls
- herumi/bls-eth-go-binary
"""
def bls_dependencies():
# TODO(4804): Update herumi_bls_eth_go_binary and herumi_bls to latest supporting v0.10.0.
_maybe(
http_archive,
name = "herumi_bls_eth_go_binary",
strip_prefix = "bls-eth-go-binary-147ed25f233ed0b211e711ed8271606540c58064",
urls = [
"https://github.com/herumi/bls-eth-go-binary/archive/147ed25f233ed0b211e711ed8271606540c58064.tar.gz",
],
sha256 = "bbd04f3354f12982e4ef32c62eb13ceb183303ada1ee69e2869553ed35134321",
build_file = "@prysm//third_party/herumi:bls_eth_go_binary.BUILD",
# TODO(4804): Delete this patch after updating this archive to commit 381c62473c28af84f424cfb1521c97e48289174a or later.
patches = [
"@prysm//third_party/herumi:bls_eth_go_binary_serialization_alloc_fix.patch", # Integrates changes from PR #5.
],
patch_args = ["-p1"],
)
_maybe(
http_archive,
name = "herumi_mcl",
strip_prefix = "mcl-1b043ade54bf7e30b8edc29eb01410746ba92d3d",
urls = [
"https://github.com/herumi/mcl/archive/1b043ade54bf7e30b8edc29eb01410746ba92d3d.tar.gz",
],
sha256 = "306bf22b747db174390bbe43de503131b0b5b75bbe586d44f3465c16bda8d28a",
build_file = "@prysm//third_party/herumi:mcl.BUILD",
)
_maybe(
http_archive,
name = "herumi_bls",
strip_prefix = "bls-b0e010004293a7ffd2a626edc2062950abd09938",
urls = [
"https://github.com/herumi/bls/archive/b0e010004293a7ffd2a626edc2062950abd09938.tar.gz",
],
sha256 = "c7300970c8a639cbbe7465d10f412d6c6ab162b15f2e184b191c9763c2241da4",
build_file = "@prysm//third_party/herumi:bls.BUILD",
)
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)

third_party/herumi/mcl.BUILD vendored Normal file

@@ -0,0 +1,80 @@
package(default_visibility = ["//visibility:public"])

MCL_OPTS = [
    "-DMCL_USE_VINT",
    "-DMCL_DONT_USE_OPENSSL",
    "-DMCL_LLVM_BMI2=0",
    "-DMCL_USE_LLVM=1",
    "-DMCL_VINT_FIXED_BUFFER",
    "-DMCL_SIZEOF_UNIT=8",
    "-DMCL_MAX_BIT_SIZE=384",
    "-DCYBOZU_DONT_USE_EXCEPTION",
    "-DCYBOZU_DONT_USE_STRING",
    "-std=c++03",
]

cc_library(
    name = "fp",
    srcs = [
        "src/fp.cpp",
        "src/asm/x86-64.s",
    ],
    includes = [
        "include",
    ],
    hdrs = glob([
        "src/xbyak/*.h",
        "include/cybozu/*.hpp",
    ]) + [
        "include/mcl/op.hpp",
        "include/mcl/gmp_util.hpp",
        "include/mcl/vint.hpp",
        "include/mcl/randgen.hpp",
        "include/mcl/array.hpp",
        "include/mcl/util.hpp",
        "include/mcl/fp_tower.hpp",
        "include/mcl/fp.hpp",
        "include/mcl/conversion.hpp",
        "src/low_func.hpp",
        "src/fp_generator.hpp",
        "src/proto.hpp",
        "src/low_func_llvm.hpp",
    ],
    copts = MCL_OPTS,
)

cc_library(
    name = "bn",
    srcs = [
        "src/bn_c384_256.cpp",
    ],
    deps = [":fp"],
    hdrs = [
        "include/mcl/bn.h",
        "include/mcl/curve_type.h",
        "include/mcl/impl/bn_c_impl.hpp",
        "include/mcl/bls12_381.hpp",
        "include/mcl/bn_c384_256.h",
        "include/mcl/ec.hpp",
        "include/mcl/mapto_wb19.hpp",
        "include/mcl/ecparam.hpp",
        "include/mcl/lagrange.hpp",
        "include/mcl/bn.hpp",
        "include/mcl/operator.hpp",
    ],
    includes = ["include"],
    copts = MCL_OPTS,
)

# src_gen is a tool that generates LLVM assembly language files.
cc_binary(
    name = "src_gen",
    srcs = [
        "src/gen.cpp",
        "src/llvm_gen.hpp",
    ] + glob([
        "include/cybozu/*.hpp",
        "include/mcl/*.hpp",
    ]),
    includes = ["include"],
)


@@ -40,14 +40,8 @@ platform(
"@bazel_tools//platforms:linux",
"@bazel_tools//tools/cpp:clang",
],
remote_execution_properties = """
properties: {
name: "container-image"
value:"docker://gcr.io/prysmaticlabs/rbe-worker@sha256:d7407d58cee310e7ab788bf4256bba704344630621d8507f3c9cf253c7fc664f"
}
properties {
name: "OSFamily"
value: "Linux"
}
""",
exec_properties = {
"container-image": "docker://gcr.io/prysmaticlabs/rbe-worker@sha256:d7407d58cee310e7ab788bf4256bba704344630621d8507f3c9cf253c7fc664f",
"OSFamily": "Linux",
},
)


@@ -23,7 +23,7 @@ toolchain(
exec_compatible_with = [
"@bazel_tools//platforms:x86_64",
"@bazel_tools//platforms:linux",
"@bazel_tools//tools/cpp:clang",
"@bazel_tools//tools/cpp:gcc",
],
target_compatible_with = [
"@bazel_tools//platforms:linux",
@@ -38,16 +38,10 @@ platform(
constraint_values = [
"@bazel_tools//platforms:x86_64",
"@bazel_tools//platforms:linux",
"@bazel_tools//tools/cpp:clang",
"@bazel_tools//tools/cpp:gcc",
],
remote_execution_properties = """
properties: {
name: "container-image"
value:"docker://gcr.io/prysmaticlabs/rbe-worker@sha256:d7407d58cee310e7ab788bf4256bba704344630621d8507f3c9cf253c7fc664f"
}
properties {
name: "OSFamily"
value: "Linux"
}
""",
exec_properties = {
"container-image": "docker://gcr.io/prysmaticlabs/rbe-worker@sha256:d7407d58cee310e7ab788bf4256bba704344630621d8507f3c9cf253c7fc664f",
"OSFamily": "Linux",
},
)


@@ -48,6 +48,12 @@ def _rbe_toolchains_generator():
config_repos = [
"prysm_toolchains",
],
use_legacy_platform_definition = False,
exec_compatible_with = [
"@bazel_tools//platforms:x86_64",
"@bazel_tools//platforms:linux",
"@bazel_tools//tools/cpp:clang",
],
)
rbe_autoconfig(
@@ -64,6 +70,12 @@ def _rbe_toolchains_generator():
config_repos = [
"prysm_toolchains",
],
use_legacy_platform_definition = False,
exec_compatible_with = [
"@bazel_tools//platforms:x86_64",
"@bazel_tools//platforms:linux",
"@bazel_tools//tools/cpp:gcc",
],
)
def _generated_rbe_toolchains():
@@ -77,6 +89,12 @@ def _generated_rbe_toolchains():
toolchain_config_spec_name = "clang",
toolchain_config_suite_spec = _TOOLCHAIN_CONFIG_SUITE_SPEC,
use_checked_in_confs = "Force",
use_legacy_platform_definition = False,
exec_compatible_with = [
"@bazel_tools//platforms:x86_64",
"@bazel_tools//platforms:linux",
"@bazel_tools//tools/cpp:clang",
],
)
def rbe_toolchains_config():

tools/ssz.bzl Normal file

@@ -0,0 +1,70 @@
load(
    "@io_bazel_rules_go//go:def.bzl",
    "GoLibrary",
)

def _ssz_go_proto_library_impl(ctx):
    go_proto = ctx.attr.go_proto
    generated_pb_go_files = go_proto[OutputGroupInfo].go_generated_srcs

    # Run the sszgen tool on the generated pb.go files.
    package_path = generated_pb_go_files.to_list()[0].dirname
    output = ctx.outputs.out
    args = [
        "--output=%s" % output.path,
        "--path=%s" % package_path,
    ]
    if len(ctx.attr.objs) > 0:
        args += ["--objs=%s" % ",".join(ctx.attr.objs)]
    ctx.actions.run(
        executable = ctx.executable.sszgen,
        progress_message = "Generating ssz marshal and unmarshal functions",
        inputs = generated_pb_go_files,
        arguments = args,
        outputs = [output],
    )

"""
A rule that uses the pb.go files generated by a go_proto_library target to generate SSZ marshal
and unmarshal functions as pointer receivers on the specified objects. To use this rule, provide a
go_proto_library target and list the structs to generate methods for in the "objs" field. Lastly,
include your new target as a source for the go_library that embeds the go_proto_library.

Example:

    go_proto_library(
        name = "example_go_proto",
        ...
    )

    ssz_gen_marshal(
        name = "ssz_generated_sources",
        go_proto = ":example_go_proto",
        objs = [  # Omit this field to generate for all structs in the package.
            "AddressBook",
            "Person",
        ],
    )

    go_library(
        name = "go_default_library",
        srcs = [":ssz_generated_sources"],
        embed = [":example_go_proto"],
        deps = SSZ_DEPS,
    )
"""

ssz_gen_marshal = rule(
    implementation = _ssz_go_proto_library_impl,
    attrs = {
        "go_proto": attr.label(providers = [GoLibrary]),
        "sszgen": attr.label(
            default = Label("@com_github_ferranbt_fastssz//sszgen:sszgen"),
            executable = True,
            cfg = "host",
        ),
        "objs": attr.string_list(),
    },
    outputs = {"out": "generated.ssz.go"},
)

SSZ_DEPS = ["@com_github_ferranbt_fastssz//:go_default_library"]
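The docstring's example wires the generated file into a go_library; at the Go level the result is ordinary pointer-receiver methods on the listed structs. The sketch below shows how calling them might look, assuming fastssz's usual MarshalSSZ/UnmarshalSSZ method names; the import path, the pb alias, and the Person fields are hypothetical and only the Person/AddressBook names come from the docstring example.

package main

import (
	"log"

	pb "example.com/project/proto" // hypothetical package that embeds example_go_proto
)

func main() {
	p := &pb.Person{Name: "Alice"} // illustrative struct and field only

	// MarshalSSZ / UnmarshalSSZ are the methods sszgen is expected to emit.
	enc, err := p.MarshalSSZ()
	if err != nil {
		log.Fatal(err)
	}

	decoded := &pb.Person{}
	if err := decoded.UnmarshalSSZ(enc); err != nil {
		log.Fatal(err)
	}
	log.Printf("round-tripped %d SSZ bytes", len(enc))
}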


@@ -16,6 +16,8 @@ import (
"github.com/gogo/protobuf/proto"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
@@ -49,6 +51,18 @@ type validator struct {
domainDataCache *ristretto.Cache
}
var validatorStatusesGaugeVec = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "validator",
Name: "statuses",
Help: "validator statuses: 0 UNKNOWN, 1 DEPOSITED, 2 PENDING, 3 ACTIVE, 4 EXITING, 5 SLASHING, 6 EXITED",
},
[]string{
// Validator pubkey.
"pubkey",
},
)
// Done cleans up the validator.
func (v *validator) Done() {
v.ticker.Done()
@@ -175,6 +189,10 @@ func (v *validator) checkAndLogValidatorStatus(validatorStatuses []*ethpb.Valida
"pubKey": fmt.Sprintf("%#x", bytesutil.Trunc(status.PublicKey[:])),
"status": status.Status.Status.String(),
})
if v.emitAccountMetrics {
fmtKey := fmt.Sprintf("%#x", status.PublicKey[:])
validatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(status.Status.Status))
}
if status.Status.Status == ethpb.ValidatorStatus_ACTIVE {
activatedKeys = append(activatedKeys, status.PublicKey)
continue
@@ -269,6 +287,11 @@ func (v *validator) UpdateDuties(ctx context.Context, slot uint64) error {
"status": duty.Status,
}
if v.emitAccountMetrics {
fmtKey := fmt.Sprintf("%#x", duty.PublicKey[:])
validatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(duty.Status))
}
if duty.Status == ethpb.ValidatorStatus_ACTIVE {
if duty.ProposerSlot > 0 {
lFields["proposerSlot"] = duty.ProposerSlot
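With the gauge keyed by pubkey, the value written for a given validator can be read back directly in tests. Below is a minimal sketch under stated assumptions: testutil.ToFloat64 from the standard prometheus/client_golang testutil package is not part of this diff, and the test reuses the same fmt.Sprintf("%#x", ...) key format the validator code uses when setting the metric.

import (
	"fmt"
	"testing"

	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestValidatorStatusGauge(t *testing.T) {
	var pubKey [48]byte
	pubKey[0] = 0xab
	fmtKey := fmt.Sprintf("%#x", pubKey[:])

	// 3 corresponds to ACTIVE per the help string of validatorStatusesGaugeVec.
	validatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(3)

	if got := testutil.ToFloat64(validatorStatusesGaugeVec.WithLabelValues(fmtKey)); got != 3 {
		t.Fatalf("expected gauge value 3 for %s, got %v", fmtKey, got)
	}
}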


@@ -24,7 +24,7 @@ var (
},
[]string{
// validator pubkey
"pkey",
"pubkey",
},
)
validatorAggFailVec = promauto.NewCounterVec(
@@ -34,7 +34,7 @@ var (
},
[]string{
// validator pubkey
"pkey",
"pubkey",
},
)
)


@@ -30,7 +30,7 @@ var (
},
[]string{
// validator pubkey
"pkey",
"pubkey",
},
)
validatorAttestFailVec = promauto.NewCounterVec(
@@ -40,7 +40,7 @@ var (
},
[]string{
// validator pubkey
"pkey",
"pubkey",
},
)
)


@@ -20,7 +20,7 @@ var validatorBalancesGaugeVec = promauto.NewGaugeVec(
},
[]string{
// validator pubkey
"pkey",
"pubkey",
},
)


@@ -29,7 +29,7 @@ var (
},
[]string{
// validator pubkey
"pkey",
"pubkey",
},
)
validatorProposeFailVec = promauto.NewCounterVec(
@@ -39,7 +39,7 @@ var (
},
[]string{
// validator pubkey
"pkey",
"pubkey",
},
)
)
@@ -194,18 +194,31 @@ func (v *validator) signBlock(ctx context.Context, pubKey [48]byte, epoch uint64
if err != nil {
return nil, errors.Wrap(err, "could not get domain data")
}
root, err := ssz.HashTreeRoot(b)
if err != nil {
return nil, errors.Wrap(err, "could not get signing root")
}
var sig *bls.Signature
if protectingKeymanager, supported := v.keyManager.(keymanager.ProtectingKeyManager); supported {
sig, err = protectingKeymanager.SignProposal(pubKey, domain.SignatureDomain, b)
bodyRoot, err := ssz.HashTreeRoot(b.Body)
if err != nil {
return nil, errors.Wrap(err, "could not get signing root")
}
blockHeader := &ethpb.BeaconBlockHeader{
Slot: b.Slot,
StateRoot: b.StateRoot,
ParentRoot: b.ParentRoot,
BodyRoot: bodyRoot[:],
}
sig, err = protectingKeymanager.SignProposal(pubKey, domain.SignatureDomain, blockHeader)
if err != nil {
return nil, errors.Wrap(err, "could not sign block proposal")
}
} else {
sig, err = v.keyManager.Sign(pubKey, root, domain.SignatureDomain)
}
if err != nil {
return nil, errors.Wrap(err, "could not get signing root")
blockRoot, err := ssz.HashTreeRoot(b)
if err != nil {
return nil, errors.Wrap(err, "could not get signing root")
}
sig, err = v.keyManager.Sign(pubKey, blockRoot, domain.SignatureDomain)
if err != nil {
return nil, errors.Wrap(err, "could not sign block proposal")
}
}
return sig.Marshal(), nil
}


@@ -27,7 +27,7 @@ type KeyManager interface {
// ProtectingKeyManager provides access to a keymanager that protects its clients from slashing events.
type ProtectingKeyManager interface {
// SignProposal signs a block proposal for the validator to broadcast.
SignProposal(pubKey [48]byte, domain uint64, data *ethpb.BeaconBlock) (*bls.Signature, error)
SignProposal(pubKey [48]byte, domain uint64, data *ethpb.BeaconBlockHeader) (*bls.Signature, error)
// SignAttestation signs an attestation for the validator to broadcast.
SignAttestation(pubKey [48]byte, domain uint64, data *ethpb.AttestationData) (*bls.Signature, error)
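After this change a protecting keymanager signs the fixed-size BeaconBlockHeader (with the block body reduced to its hash tree root, as signBlock now does) rather than the full block. The sketch below shows one way a wrapper could satisfy the new SignProposal signature; the loggingKeyManager type and its inner field are hypothetical, and only the SignProposal signature and the KeyManager.Sign / ssz.HashTreeRoot calls come from the code shown above.

// loggingKeyManager is an illustrative wrapper living alongside the interfaces above; it is not part of the diff.
type loggingKeyManager struct {
	inner KeyManager
}

func (m *loggingKeyManager) SignProposal(pubKey [48]byte, domain uint64, header *ethpb.BeaconBlockHeader) (*bls.Signature, error) {
	// A real protecting implementation would compare header.Slot and the header
	// roots against previously signed proposals before agreeing to sign.
	root, err := ssz.HashTreeRoot(header)
	if err != nil {
		return nil, err
	}
	return m.inner.Sign(pubKey, root, domain)
}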


@@ -22,7 +22,6 @@ go_library(
"//shared/cmd:go_default_library",
"//shared/debug:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
"//shared/prometheus:go_default_library",
"//shared/tracing:go_default_library",
"//shared/version:go_default_library",

Some files were not shown because too many files have changed in this diff.