Compare commits

659 Commits

Author SHA1 Message Date
terence tsao
ed7ad4525e Method to retrieve block slot via block root (#5084)
* blockRootSlot

* Tests

* Gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-12 16:04:24 -05:00
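
#5084 above adds a way to resolve a block's slot from its root. A minimal sketch of that kind of lookup, assuming a hypothetical `blockGetter` interface and a simplified block type; these names are illustrative, not the actual Prysm API:

```go
package stategen

import (
	"context"
	"errors"
)

// SignedBeaconBlock is a simplified stand-in for the real block type.
type SignedBeaconBlock struct {
	Slot uint64
}

// blockGetter is a hypothetical narrow view of the beacon DB, for illustration only.
type blockGetter interface {
	Block(ctx context.Context, blockRoot [32]byte) (*SignedBeaconBlock, error)
}

// blockRootSlot returns the slot of the block stored under blockRoot.
func blockRootSlot(ctx context.Context, db blockGetter, blockRoot [32]byte) (uint64, error) {
	blk, err := db.Block(ctx, blockRoot)
	if err != nil {
		return 0, err
	}
	if blk == nil {
		return 0, errors.New("no block found for root")
	}
	return blk.Slot, nil
}
```
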
terence tsao
7fcc07fb45 Save hot state (#5083)
* loadEpochBoundaryRoot
* Tests
* Span
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into save-hot-state
* Starting test
* Tests
* Merge refs/heads/master into save-hot-state
* Merge branch 'master' into save-hot-state
* Use copy
* Merge branch 'save-hot-state' of https://github.com/prysmaticlabs/prysm into save-hot-state
* Merge refs/heads/master into save-hot-state
2020-03-12 20:48:07 +00:00
shayzluf
f937713fe9 Broadcast slashing (#5073)
* add flag
* broadcast slashings
* Merge branch 'master' of github.com:prysmaticlabs/prysm into broadcast_slashing

# Conflicts:
#	beacon-chain/rpc/beacon/slashings_test.go
* fix tests
* goimports
* goimports
* Merge branch 'master' into broadcast_slashing
* Merge branch 'master' into broadcast_slashing
* Merge branch 'master' into broadcast_slashing
2020-03-12 20:29:23 +00:00
terence tsao
359e0abe1d Load epoch boundary root (#5079)
* loadEpochBoundaryRoot

* Tests

* Span

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-12 15:00:37 -05:00
tzapu
0704ba685a Return statuses on duties (#5069)
* try to return something for everything
* default to unknown
* debug
* moar debug
* move else to outer check
* working
* reorder imports
* cleanup
* fix TestGetDuties_NextEpoch_CantFindValidatorIdx
* Merge branch 'master' into return-statuses-on-duties
* Update validator/client/validator.go
* Merge branch 'master' into return-statuses-on-duties
* Merge branch 'master' into return-statuses-on-duties
2020-03-12 19:07:37 +00:00
shayzluf
0f95b797af Save slashings to slasher DB (#5081)
* fix tests add error type handling

* Update slasher/detection/detect_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>

* goimports

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-03-12 22:08:58 +05:30
terence tsao
43722e45f4 Save cold state (#5077) 2020-03-12 05:58:06 -07:00
terence tsao
ff4ed413a3 State migration from hot to cold (archived) (#5076)
* Starting

* Test

* Tests

* comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-11 21:27:16 -05:00
Raul Jordan
f1a42eb589 Verify Slashing Signatures Before Putting Into Blocks (#5071)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* verify slashing
* added in test for pending att slashing
* tests starting to apss
* sig failed verify regression test
* tests passing for ops pool
* Update beacon-chain/operations/slashings/service.go
* Merge refs/heads/master into verify-slash-sig
* verify on insert
* tests starting to pass
* all code paths fixed
* imports
* fix build
* fix rpc errors
* Merge refs/heads/master into verify-slash-sig
2020-03-12 01:16:55 +00:00
terence tsao
a90ffaba49 Archived point retrieval and recovery (#5075) 2020-03-11 17:38:30 -07:00
Raul Jordan
663d919b6f Include Bazel Genrule for Fast SSZ (#5070)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* included new ssz bzl rule
* Merge branch 'master' into add-in-starlark-rule
* Update tools/ssz.bzl

Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
2020-03-11 19:50:22 +00:00
Victor Farazdagi
7b30845c01 fixes races in blocks fetcher (#5068) 2020-03-11 14:21:41 +03:00
Victor Farazdagi
46eb228379 fixes data race in state.Slot (#5067)
* fixes data race in state/getters
2020-03-11 09:11:07 +00:00
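
The data-race fix in #5067 above is the usual read-lock-around-getter pattern; a minimal sketch with illustrative type and field names:

```go
package state

import "sync"

// BeaconState is a simplified stand-in for the real state object.
type BeaconState struct {
	lock sync.RWMutex
	slot uint64
}

// Slot reads the slot under a read lock so concurrent readers
// don't race with writers.
func (b *BeaconState) Slot() uint64 {
	b.lock.RLock()
	defer b.lock.RUnlock()
	return b.slot
}

// SetSlot updates the slot under the write lock.
func (b *BeaconState) SetSlot(s uint64) {
	b.lock.Lock()
	defer b.lock.Unlock()
	b.slot = s
}
```
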
Raul Jordan
8d3fc1ad3e Add in Slasher Metrics (#5060)
* added in slasher metrics
* Merge branch 'master' into slasher-metrics
* add in prom bolt metrics for slasher
* Merge branch 'slasher-metrics' of github.com:prysmaticlabs/prysm into slasher-metrics
* imports
* include all metrics
* no dup bolt collector
* Update slasher/detection/attestations/spanner.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* naming best practices for prom, thx Terence
* Merge branch 'slasher-metrics' of github.com:prysmaticlabs/prysm into slasher-metrics
2020-03-10 19:41:55 +00:00
Nishant Das
93195b762b Improve HTR of State (#5058)
* add cache
* Update beacon-chain/state/stateutil/blocks.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/stateutil/blocks.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/stateutil/hash_function.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Merge branch 'master' into improveHTR
* add back string casting
* fix imports
2020-03-10 16:26:54 +00:00
Jim McDonald
f0abf0d7d5 Reduce frequency of 'eth1 client not syncing' messages (#5057) 2020-03-10 09:51:41 -05:00
Nishant Das
9d27449212 Discovery Fixes (#5050)
* connect to dv5 bootnodes

* fix test

* change polling period

* ignore

* Update beacon-chain/p2p/service.go

* Update beacon-chain/p2p/service_test.go

* fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-09 19:53:37 -07:00
Preston Van Loon
edb6590764 Build herumi's BLS from source (#5055)
* Build herumi from source. Working so far on linux_amd64 for compile, but tests fail to initialize the curve appropriately

* Add copts to go_default_library

* llvm toolchain, still WIP

* Fixes, make llvm a config flag

* fix gazelle resolution

* comment

* comment

* update herumi to the v0.9.4 version

* Apply @nisdas patch from https://github.com/herumi/bls-eth-go-binary/pull/5
2020-03-09 21:22:41 -05:00
Raul Jordan
e77cf724b8 Better Nil Check in Slasher (#5053)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* some nil checks in slasher
2020-03-09 21:21:39 +00:00
Ivan Martinez
b633dfe880 Change detection and updating in Slasher to per attestation (#5043)
* Change span updates to update multiple validators at once

* Change detection to perform on multiple validators at once

* Fix minspan issue

* Fix indices

* Fix test

* Remove logs

* Remove more logs

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/detect.go

* nil check

* fix unused import

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-09 13:14:19 -05:00
Ivan Martinez
8334aac111 Batch saving of attestations from stream for slasher (#5041)
* Batch saving of attestations from stream for slasher

* Progress on test

* Fixes

* Fix test

* Rename

* Modify logs and timing

* Change

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-09 12:49:40 -05:00
Preston Van Loon
4c1e2ba196 Add prysm.sh script (#5042)
* Add prysm.sh script

* Add dist to gitignore

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-09 12:19:53 -05:00
terence tsao
25c13663d2 Add hot state by slot retrieval (#5052)
* Update replay conditions

* loadHotStateBySlot

* Tests and gaz

* Tests
2020-03-09 11:22:45 -05:00
Jim McDonald
0c3af32274 Use BeaconBlockHeader in place of BeaconBlock (#5049) 2020-03-09 21:08:30 +08:00
shayzluf
01cb01a8f2 On eviction test fix (#5046) 2020-03-09 01:35:39 -04:00
Raul Jordan
0c9e99e04a Aggregate Attestations Before Streaming to Slasher (#5029)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* aggregate before streaming
* Merge branch 'agg-idx-atts' of github.com:prysmaticlabs/prysm into agg-idx-atts
* collect atts and increase buffer size
* fix test for func
* Merge refs/heads/master into agg-idx-atts
* Update beacon-chain/rpc/beacon/attestations.go
* Merge refs/heads/master into agg-idx-atts
* naming
* Merge branch 'agg-idx-atts' of github.com:prysmaticlabs/prysm into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* comment terence feedback
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Fix tests
2020-03-08 21:39:54 +00:00
Ivan Martinez
d4cd51f23e Change slasher cache to LRU cache (#5037)
* Change cache to LRU cache

* fixes

* Reduce db usage

* Fix function name

* Merge issues

* Save on eviction

* Fixes

* Fix

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-08 17:11:59 -04:00
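
#5037 above swaps the slasher cache for an LRU and saves entries on eviction. A sketch of that shape using github.com/hashicorp/golang-lru; the key/value types and the `persist` hook are assumptions, not the actual slasher code:

```go
package cache

import (
	lru "github.com/hashicorp/golang-lru"
)

// spanMap is a stand-in for the per-validator span data the slasher keeps.
type spanMap map[uint64][2]uint16

// newSpanCache builds an LRU whose eviction callback flushes the evicted
// entry to storage, so nothing is lost when the cache fills up.
// persist is a hypothetical hook standing in for the real slasher DB write.
func newSpanCache(size int, persist func(epoch uint64, spans spanMap)) (*lru.Cache, error) {
	return lru.NewWithEvict(size, func(key interface{}, value interface{}) {
		epoch, ok1 := key.(uint64)
		spans, ok2 := value.(spanMap)
		if ok1 && ok2 {
			persist(epoch, spans)
		}
	})
}
```

Entries would then be written with `c.Add(epoch, spans)` and read back with `c.Get(epoch)`; anything pushed out by newer entries goes through `persist` instead of being dropped.
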
terence tsao
962fe8552d Compute state up to slot (#5035) 2020-03-08 21:41:24 +01:00
Raul Jordan
eddaea869b Prepare Slasher for Production (#5020)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* add a bit more better logging
* Empty db fix
* Improve logs
* Fix small issues in spanner, improvements
* Change costs back to 1 for now
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Change the cache back to 0
* Cleanup
* Merge branch 'master' into cleanup-slasher
* lint
* added in better spans
* log
* rem spanner in super intensive operation
* Merge branch 'master' into cleanup-slasher
* add todo
* Merge branch 'cleanup-slasher' of github.com:prysmaticlabs/prysm into cleanup-slasher
* Merge branch 'master' into cleanup-slasher
* Apply suggestions from code review
* no logrus
* Merge branch 'master' into cleanup-slasher
* Merge branch 'cleanup-slasher' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Remove spammy logs
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* gaz
* Rename func
* Add back needed code
* Add todo
* Add span to cache func
2020-03-08 17:56:43 +00:00
Nishant Das
300d072456 Add Config Change for Validator (#5038)
* add config for validator
* gaz
* Merge refs/heads/master into configureValidator
* Merge refs/heads/master into configureValidator
2020-03-08 06:45:36 +00:00
Nishant Das
ac1c92e241 Add Prometheus Service for Slasher (#5039)
* add prometheus service
* Update slasher/node/node.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Merge refs/heads/master into addPromServiceSlasher
2020-03-08 06:35:37 +00:00
terence tsao
2452c7403b Load hot state by root (#5034)
* Add loadHotStateByRoot

* Touchup loadHotStateByRoot

* Tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-08 14:24:57 +08:00
Preston Van Loon
b97e22107c Update rbe_autoconf (#5036)
* Update rbe_autoconf
* Update timestamps
2020-03-07 21:18:16 +00:00
Preston Van Loon
98faf95943 Define -c opt for release builds (#5033)
* define -c opt for release builds
* Merge branch 'master' into c-opt
2020-03-07 05:50:26 +00:00
Preston Van Loon
af28862e94 Add sha256 to external dependency librdkafka (#5032)
* Add sha256 to external dependency librdkafka
2020-03-07 05:31:07 +00:00
Jim McDonald
b133eb6c4a Warn rather than fail on incorrect keystore password (#5024)
* Warn on failure to decrypt a keystore validator

* Update test

* Update tools

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-06 23:05:48 -06:00
Nishant Das
345ec1bf8c Fix Custom Delay Flag (#5026)
* fix flag
* Merge refs/heads/master into fixFlag
* Merge refs/heads/master into fixFlag
* Merge refs/heads/master into fixFlag
* Merge refs/heads/master into fixFlag
* Merge refs/heads/master into fixFlag
* fix config
* Merge branch 'fixFlag' of https://github.com/prysmaticlabs/geth-sharding into fixFlag
2020-03-07 03:52:40 +00:00
Nishant Das
d1fea430d6 change limit (#5027)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-06 17:26:08 -06:00
terence tsao
054b15bc45 Add SlotsPerArchivedPoint flag and a check (#5023)
* Flag

* Service

* Tests

* Tests and comments

* Lint

* Add to usages

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-06 17:06:01 -06:00
Preston Van Loon
6a2955d43c Update bazel.sh (#5028)
* Add google auth creds as environment variable for CI. Add a comment why this script is helpful
* Add google auth creds as environment variable for CI. Add a comment why this script is helpful
2020-03-06 17:43:06 +00:00
Jim McDonald
0ecd83afbb Avoid crash due to invalid index (#5025)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-06 09:38:43 -06:00
Nishant Das
069f2c5fb6 Asynchronous Dials To Peers (#5021)
* make dial non-blocking

* add sleep

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-06 22:57:47 +08:00
Raul Jordan
acb15a1f04 Report Validator Status in Duties (#5017)
* fix up status reporting
* Merge refs/heads/master into properly-report-status
* Merge refs/heads/master into properly-report-status
* Merge refs/heads/master into properly-report-status
2020-03-06 06:18:14 +00:00
Preston Van Loon
e2af70f692 Run buildifer, remove duplicated WORKSPACE entries (#5018)
* Buildifier, add release config
* Merge branch 'master' into bazel-stuff
* Merge refs/heads/master into bazel-stuff
* Merge refs/heads/master into bazel-stuff
* revert gnostic
* Set kafka for CI tests only
* add bazel.sh script
* set home
2020-03-06 04:42:27 +00:00
Raul Jordan
15b5ec89b2 Cross-compile OSX: Remove dropbox link, add sha256 check (#5019)
* Remove dropbox link, add sha256 check

* Use docker's S3 link instead of dropbox

* Update image sha, workspace

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-05 16:32:52 -06:00
Victor Farazdagi
b4aaa610a1 Fixes race condition at genesis (#5016)
* fixes race condition at genesis
2020-03-06 00:20:38 +03:00
Raul Jordan
6158a648cd Plugging in spanner db (#5009)
* rem slasher proto

* with cache

* delete old code

* moving to bytes.go fix traces

* moving to bytes.go fix traces

* raul feedback

* raul feedback

* begin

* add eviction test

* ivan feedback

* ivan feedback

* test is running

* some comment improvements

* test included for bytes and bool

* import

* cleanup

* tests pass

* fill in all fields in test

* gaz

* fix integration

* gaz + goimports

* fix service.go

* remove sleep

* cleanup

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-05 12:11:54 -06:00
prylabs-bulldozer[bot]
e2a6f5a6ea First cut at multi-arch cross compiling toolchain (#4945)
* PRYSM-2849: first cut at multi-arch cross-compiling toolchain. Currently supports arm64 and amd64 via a Docker cross-compiler image
* picky linter
* some readme cleanup
* remove arm 8.2 revision for arm64 builds (cortex a72 is ARMv8.0-A)
* remove arm32 toolchain from multiarch dockerfile
* remove extraneous WORKSPACE entries
* add docker remote execution configs for amd64 and arm64
* add osx bazelrc configs
* working osx toolchain
* update readme
* cleanup for amd, arm and osx cross before beginning windows
* initial stab at mingw windows cross
* add docker target for windows_amd64 and update readme for cross-compiling
* little more cleanup for readability
* Check in generated RBE. Still tweaking config but linux amd64 -> linux amd64 on RBE works OK. Cross compile does not work properly in RBE yet.
* fix
* update image
* Making some progress
* delete artifacts
* Working build
* Add remote config
* remove some things I added to README
* Tidy
* Update readme
* remove 2 commented lines
* buildifier
* Merge pull request #1 from prysmaticlabs/cross-compile-with-suburbandad

Cross compile with suburbandad
* Merge branch 'master' into clang-cross-compile
* buildifier on generated stuff
* Merge branch 'master' into clang-cross-compile
* Merge branch 'master' into clang-cross-compile
* Merge branch 'master' into clang-cross-compile
2020-03-05 16:59:56 +00:00
Raul Jordan
aebc883a0d Methods to retrieves last saved state and block for stategen pkg (#5005)
* Added last saved block and state

* Genesis tests

* Gaz

* Added state tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-05 10:22:20 -06:00
Raul Jordan
f3dc113dba add multi-options (#5012) 2020-03-05 08:49:26 -06:00
prylabs-bulldozer[bot]
5961aaf588 Windows friendly stdin reads for passwords (#5010)
* cast os.stdin filehandle since on windows syscall.Stdin is not int
* import ordering
* Merge branch 'master' into prysm-5008-windows-stdin-not-int
2020-03-05 07:31:11 +00:00
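
The fix described in #5010 above ("cast os.stdin filehandle since on windows syscall.Stdin is not int") is commonly written with golang.org/x/crypto/ssh/terminal; a sketch of that cast, with an illustrative helper name:

```go
package main

import (
	"fmt"
	"syscall"

	"golang.org/x/crypto/ssh/terminal"
)

// readPassword reads a password without echo. The int() cast matters:
// on Linux/macOS syscall.Stdin is already an int, but on Windows it is a
// syscall.Handle, so passing it uncast does not compile there.
func readPassword(prompt string) (string, error) {
	fmt.Print(prompt)
	pw, err := terminal.ReadPassword(int(syscall.Stdin))
	fmt.Println()
	if err != nil {
		return "", err
	}
	return string(pw), nil
}

func main() {
	pw, err := readPassword("Password: ")
	if err != nil {
		panic(err)
	}
	fmt.Println("read", len(pw), "characters")
}
```
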
prylabs-bulldozer[bot]
e635e5b205 Feature flag to gate prune state upon start up (#5011)
* Added feature flag to gate prune state upon start up
2020-03-05 06:24:59 +00:00
Raul Jordan
66991f0efe Spanner db (#4997)
* rem slasher proto

* with cache

* delete old code

* moving to bytes.go fix traces

* moving to bytes.go fix traces

* raul feedback

* raul feedback

* add eviction test

* ivan feedback

* ivan feedback

* some comment improvements

* test included for bytes and bool

* import

* cleanup

* tests pass

* fill in all fields in test

* gaz

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-04 23:35:14 -06:00
prylabs-bulldozer[bot]
e339b07ac7 Remove unused DB functions and proto from Slasher (#4996)
* Remove unused DB functions
* goimports
* Fix bug and improve tests
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-remove-old-db
2020-03-04 22:29:36 +00:00
prylabs-bulldozer[bot]
139f51efaa LRU cache for state gen (#5004)
* Add hot state cache
* Gaz
* Test
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into state-gen-lru-cache
* Merge refs/heads/master into state-gen-lru-cache
2020-03-04 22:09:21 +00:00
prylabs-bulldozer[bot]
a43a40c1c9 Create .bazelversion (#5003)
* Create .bazelversion
2020-03-04 21:49:22 +00:00
Raul Jordan
0bdd0dba67 Add warning if shell expansion characters make it in to the path (#5001) 2020-03-04 12:34:23 -06:00
Victor Farazdagi
239efe7410 init sync: adds blocks fetcher service (#4978)
* init sync: adds blocks fetcher service

* init-sync: rework ctx handling

* init-sync: fix long lines

* removes redundant method

* adds buffer to requests channel

* adds jaeger spans

* fixes overly long comment line
2020-03-04 20:19:09 +03:00
terence tsao
e5da756c47 Add error definitions for state gen pkg (#5000)
* Add error file
* Fmt
* Merge branch 'master' into error-file
2020-03-04 16:50:08 +00:00
terence tsao
a612557fe7 Add log file (#4999) 2020-03-04 10:30:52 -06:00
Raul Jordan
26582cbf2e Stub Slasher RPC Methods (#4995)
* rem slasher proto
* Remove unneeded protos
* Rework api proto
* Add back proto
* regen slashing proto
* Merge branch 'rem-rpc' of github.com:prysmaticlabs/prysm into rem-rpc
* Fix comments
* Merge branch 'rem-rpc' of https://github.com/prysmaticlabs/Prysm into rem-rpc
2020-03-03 22:09:35 +00:00
Raul Jordan
d68636bc7f Remove Deprecated Slasher Code (#4994)
* remove old code
* Clear out service from bazel
2020-03-03 19:40:09 +00:00
terence tsao
699e7efc61 Add epoch boundary root map (#4993)
* Add to struct

* Add implementations

* Tests
2020-03-03 13:07:34 -06:00
Ivan Martinez
ba6b8c9321 Update outdated spec function names and comments (#4992)
* Update outdated spec function names and comments
* VerifyMerkleBranch
* Remove error handle
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slash-spec-refresh
* Merge branch 'master' into slash-spec-refresh
2020-03-03 18:29:41 +00:00
Ivan Martinez
cc5fc0af1a Plug-in double voting detection into detection service (#4960)
* Add double vote detection to spanner
* Add documentation
* Update slasher/detection/attestations/spanner.go
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-spanner-double
* Merge branch 'slasher-spanner-double' of https://github.com/0xKiwi/Prysm into slasher-spanner-double
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-spanner-double
* Gazelle
* Add double vote detection func
* Implement double voting detection
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-implement-double
* Merge branch 'master' into slasher-implement-double
* Merge branch 'slasher-implement-double' of https://github.com/0xKiwi/Prysm into slasher-implement-double
* Fix typo
* Remove filter, replace with slot + committee index
* Change bloom filter to 2 sig bytes
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-change-filter
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-implement-double
* Merge branch 'slasher-change-filter' of https://github.com/0xKiwi/Prysm into slasher-implement-double
* Change detection to use prefix
* Fix runtime
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-implement-double
* Fix bug and comments
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-implement-double
* Fix flaky test
* Merge branch 'master' into slasher-implement-double
* Improve logs
* Merge branch 'slasher-implement-double' of https://github.com/0xKiwi/Prysm into slasher-implement-double
* Add ok check
* Fix test
* Merge branch 'master' into slasher-implement-double
2020-03-03 18:08:21 +00:00
Nishant Das
0093218e41 Add Noise Support To Prysm (#4991)
* add dep
* add feature config
* Merge branch 'master' into addNoiseSupport
* gaz and victor's review
* Merge branch 'addNoiseSupport' of https://github.com/prysmaticlabs/geth-sharding into addNoiseSupport
2020-03-03 17:45:51 +00:00
tzapu
c09ae21ab0 show full public key in metrics (#4988)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-03 10:36:49 -06:00
Jim McDonald
4c43616647 Add ProtectingKeymanager interface and calls (#4982)
* Add ProtectedKeymanager interface and calls

* Rename interface

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-03 10:21:58 -06:00
shayzluf
59575bcac9 fuzz core/blocks package (#4907)
* fuzz core/blocks package

* gaz goimports

* fix test

* terence feedback

* terence feedback

* add error to domain. halfway through

* adding error to domain

* goimports

* added error handling to test

* error instead of continue

* terence and nishant feedback

* domain error handling

* domain error handling

* handle nil validator in ReadOnlyValidator creation

* goimports

* [4]byte domain type

* [4]byte domain type

* [4]byte domain type fix tests

* fix tests

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-03 19:02:14 +05:30
Ivan Martinez
703ce63c12 Change span representation to a struct in Slasher (#4981)
* Remove filter, replace with slot + committee index

* Change bloom filter to 2 sig bytes

* Fix comment

* Fix line
2020-03-03 15:28:23 +05:30
Ivan Martinez
69845cad77 Fix stalling bug in slashing pool (#4985)
* Fix bug in slashing pool

* Remove unneeded logs

* remove line

* Remove val

* Fix tests

* Add regression test

* Add another regression test
2020-03-03 00:52:35 -05:00
terence tsao
a07e604eea Better logs for forking (#4966)
* Move `updateHead` to ReceiveAttestationNoPubsub and better forking messages

* Typo

* Import

* f

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-02 09:51:26 -08:00
Jim McDonald
044d72064f Pre-allocate slices when reporting validator performance (#4979) 2020-03-02 23:00:23 +08:00
Jim McDonald
5cb51263b0 Fix crash when reporting validator metrics (#4968)
* Fix crash when reporting validator metrics
* Merge branch 'master' into validator-reporting-crash
* Merge branch 'master' into validator-reporting-crash
2020-03-02 08:51:07 +00:00
terence tsao
d9d4a9954e Archived point index DB methods (#4977) 2020-03-02 08:55:38 +01:00
Nishant Das
3989b65667 Add Flag for Checking HeadState (#4974)
* gate feature
* imports
* add flag
* Merge branch 'master' into gateFeature
2020-03-02 06:06:20 +00:00
terence tsao
9fe2cdd5ca State summary DB methods (#4971)
* Define proto
* Regen
* Delete slasher.pb.go
* Gaz
* Merge branch 'state-summary-proto' of https://github.com/prysmaticlabs/prysm into state-summary-proto
* Revert "Delete slasher.pb.go"

This reverts commit 19bfa21cd3.
* Add state_summary.go
* Test
* Gaz
* Interfaces
* pass through
* Merge refs/heads/master into state-summary-db
* Merge refs/heads/master into state-summary-db
2020-03-02 04:27:55 +00:00
terence tsao
cb163d8910 Define state summary proto (#4967)
* Define proto

* Regen

* Delete slasher.pb.go

* Gaz

* Revert "Delete slasher.pb.go"

This reverts commit 19bfa21cd3.

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-01 23:10:52 -05:00
Nishant Das
cd6e06f01e add helpers (#4972) 2020-03-01 16:22:49 +01:00
Preston Van Loon
af5cc31565 Use correct image name for validator debug image (#4963)
* Use correct image name for validator debug image
* Merge refs/heads/master into fix-validator-debug-image
2020-02-28 01:22:32 +00:00
terence tsao
5a5cdc1b02 Removed (#4962) 2020-02-27 17:06:25 -08:00
Ivan Martinez
31b1e6a7a8 Add double vote detection to spanner (#4954)
* Add double vote detection to spanner

* Add documentation

* Update slasher/detection/attestations/spanner.go

* Gazelle

* Fix filter output
2020-02-27 17:21:05 -05:00
Preston Van Loon
05a5bad476 Migrate SubmitAggregateAndProof (#4951)
* Remove unused services, mark everything as deprecated, regen pb.go
* remove some code from cluster pk manager, gazelle
* goimports
* remove mocks
* Update WORKSPACE, deprecate old method, stub new method
* Move implementation to ethereumapis definition
* gofmt
* Add TODO for #4952
* Merge branch 'master' into migrate-submitaggregateandproof
* Update validator client to use new submit aggregate and proof method
* Merge branch 'migrate-submitaggregateandproof' of github.com:prysmaticlabs/prysm into migrate-submitaggregateandproof
* gaz
* rename
* rename
* Merge refs/heads/master into migrate-submitaggregateandproof
* Merge refs/heads/master into migrate-submitaggregateandproof
* Merge refs/heads/master into migrate-submitaggregateandproof
* Merge refs/heads/master into migrate-submitaggregateandproof
* Merge refs/heads/master into migrate-submitaggregateandproof
* fix tests
* Merge branch 'migrate-submitaggregateandproof' of github.com:prysmaticlabs/prysm into migrate-submitaggregateandproof
2020-02-27 20:23:35 +00:00
terence tsao
2fef9d3e5e Move blockchain service metrics package (#4959)
* Moved metrics package
2020-02-27 19:38:22 +00:00
Raul Jordan
14b3181e67 Plug-In Attester Slashing Detection Into Slasher Runtime (#4937)
* more spanner additions

* implement iface

* begin implement

* wrapped up spanner functions

* rem interface

* added in necessary comments

* comments on enums

* begin adding tests

* plug in surround vote detection

* saved indexed db implementation

* finally plugin slashing for historical data

* Small fixes

* add in all gazelle

* save incoming new functions

* resolve todo

* fix broken test channel item

* tests passing when fixing certain arguments and setups

* Add comment and change unimplemented

* find surround

* added in gazelle

* gazz

* feedback from shay

* fixed up naming

* Update

* Add tests for detectSurroundVotes

* Remove logs

* Fix slasher test

* formatting

* Remove unneeded condition

* Test indices better

* fixing broken build

* pass tests

* skip tests

* imports

* Update slasher/detection/attestations/attestations_test.go

* Update slasher/beaconclient/historical_data_retrieval_test.go

* Address comments

* Rename function

* Add comment for future optimization

* Fix comment

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-02-27 12:22:39 -05:00
Ivan Martinez
e7b94123ce Add interface for spanner and MockSpanner (#4956)
* Add interface for spanner

* Add MockSpanner for future testing

* Add comment

* gazelle
2020-02-26 22:48:02 -06:00
Ivan Martinez
76aad0f444 Add simple bloom filter implementation for double vote detection (#4948)
* Add simple bloom filter implementation for detecting similarity of 1 key

* Change hash to keccak

* Fix receiver name

* Fix bug

* Fix comments and organize test

* Add comment detailing hash functions

* Add bloom to test names
2020-02-26 19:37:36 -05:00
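
#4948 above mentions a simple bloom filter keyed by a Keccak hash with a couple of significant bytes. A toy sketch of the idea using golang.org/x/crypto/sha3; the filter size and number of probe bits are assumptions, not the slasher's actual parameters:

```go
package bloom

import (
	"encoding/binary"

	"golang.org/x/crypto/sha3"
)

// filter is a tiny fixed-size bloom filter (256 bits).
type filter [32]byte

// positions derives a handful of bit positions for key from a single
// Keccak-256 digest, slicing the digest into uint16 chunks.
func positions(key []byte) []uint16 {
	h := sha3.NewLegacyKeccak256()
	h.Write(key)
	d := h.Sum(nil)
	out := make([]uint16, 0, 4)
	for i := 0; i < 8; i += 2 {
		out = append(out, binary.LittleEndian.Uint16(d[i:i+2])%256)
	}
	return out
}

// Add sets the bits for key.
func (f *filter) Add(key []byte) {
	for _, p := range positions(key) {
		f[p/8] |= 1 << (p % 8)
	}
}

// MaybeContains reports whether key may have been added; false positives
// are possible, false negatives are not.
func (f *filter) MaybeContains(key []byte) bool {
	for _, p := range positions(key) {
		if f[p/8]&(1<<(p%8)) == 0 {
			return false
		}
	}
	return true
}
```
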
terence tsao
2c1c41d1d6 Move stategen package under /state (#4950)
* Move state gen to state
* Merge branch 'master' into move-state-gen
* Merge refs/heads/master into move-state-gen
2020-02-26 22:09:14 +00:00
Preston Van Loon
921a44d9fd Clean up unused / deprecated protobuf definitions (#4949)
* Remove unused services, mark everything as deprecated, regen pb.go
* remove some code from cluster pk manager, gazelle
* goimports
* remove mocks
* Merge branch 'master' into remove-deprecated-proto-defs
2020-02-26 21:15:36 +00:00
Raul Jordan
22bbed0059 Stream Indexed Attestations RPC Implementation (#4941)
* stream indexed attestations impl
* mock regen
* test for stream indexed
* atts test
* no bls
* gaz
* Merge refs/heads/master into implement-stream-indexed
* use feed for atts instead
* remove unused imports
* Merge refs/heads/master into implement-stream-indexed
* fix tests in beacon
* properly use pointers
* imports
* import
2020-02-26 20:14:22 +00:00
terence tsao
b1231f3ddf Replay blocks and generate state without sig verification (#4943)
* New file

* Add transition_stategen.go

* Update ProcessBlockForStateRoot

* Feature flags

* Fixed tests

* Gaz

* Make them private

* E2e flags

* Gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-26 10:40:33 -06:00
Ivan Martinez
c2b30cf801 Prepare spanner for double vote detection and fix a few bugs (#4940)
* Rename vars for clarity

* Change spanner to take target epoch as key

* Fix tests, add multiple val test

* Fixes

* Change the spanner to take in att on detect

* Add back proto diagram tests

* Remove unneeded comments
2020-02-25 21:35:34 -06:00
Preston Van Loon
b647ca5dd2 Release --initial-sync-cache-state (#4938)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-24 19:20:00 -06:00
Preston Van Loon
c0f1a1d674 Validator: cache domain data calls (#4914)
* Use a domain data cache to reduce the number of calls per epoch

* fix fakevalidator

* Refactor to use a feature flag, use proto.clone, move interceptor to its own file

* gofmt

* fix comment

* tune cache slightly

* log if error on domain data

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-24 13:02:45 -08:00
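
#4914 above caches domain-data responses so the validator doesn't repeat the same gRPC call within an epoch. A minimal memoization sketch; the fetcher signature and key format are illustrative, and the real change sits behind a feature flag:

```go
package client

import (
	"context"
	"fmt"
	"sync"
)

// domainFetcher stands in for the gRPC DomainData call to the beacon node.
type domainFetcher func(ctx context.Context, epoch uint64, domain [4]byte) ([]byte, error)

// domainCache memoizes domain data per (epoch, domain type), since the value
// only changes at epoch boundaries.
type domainCache struct {
	mu    sync.RWMutex
	data  map[string][]byte
	fetch domainFetcher
}

func newDomainCache(fetch domainFetcher) *domainCache {
	return &domainCache{data: make(map[string][]byte), fetch: fetch}
}

func (c *domainCache) get(ctx context.Context, epoch uint64, domain [4]byte) ([]byte, error) {
	key := fmt.Sprintf("%d-%x", epoch, domain)
	c.mu.RLock()
	if d, ok := c.data[key]; ok {
		c.mu.RUnlock()
		return d, nil
	}
	c.mu.RUnlock()
	d, err := c.fetch(ctx, epoch, domain) // cache miss: one RPC per epoch/domain pair
	if err != nil {
		return nil, err
	}
	c.mu.Lock()
	c.data[key] = d
	c.mu.Unlock()
	return d, nil
}
```
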
Preston Van Loon
855f5d2986 Add a configurable flag for gRPC retries (#4926)
* Add a configurable flag for gRPC retries
* Merge refs/heads/master into configurable-retry
* Merge refs/heads/master into configurable-retry
* Merge refs/heads/master into configurable-retry
* Merge refs/heads/master into configurable-retry
* Merge refs/heads/master into configurable-retry
* add in flag to usage
2020-02-24 18:00:22 +00:00
Jim McDonald
5f0ed8388e Use --deposit-contract with default value (#4925)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-24 11:24:05 -06:00
Jim McDonald
a951c4f6ab Add Ethereum 1 block->timestamp cache (#4924)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-24 10:53:35 -06:00
terence tsao
0470d37072 Consider missing validator count for performance metric (#4928)
* Consider missing validator count
* Use validator count reported
* Merge branch 'master' into missing-validators
* Merge refs/heads/master into missing-validators
2020-02-24 16:28:22 +00:00
terence tsao
15b649d760 Fix aggregated attestation pool growing large in size (#4932)
* Add metrics

* Use it

* Use it

* Fixed exp time and tests

* Update on save too

* Expose getters

* One epoch purge time

* Fixed a timing issue

* Clean up

* Gazelle

* Interface

* Prune every epoch

* Aggregate twice per slot

* Revert attsToBeAggregated

* Delete expired atts

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-24 10:18:34 -06:00
terence tsao
2e56a59473 attestations in pool count metrics (#4930)
* Add metrics
* Use it
* Use it
* Expose getters
2020-02-23 22:30:52 +00:00
Raul Jordan
6fe86a3b30 Define an Efficient Spanner Struct Implementation for Slasher (#4920)
* more spanner additions

* implement iface

* begin implement

* wrapped up spanner functions

* rem interface

* added in necessary comments

* comments on enums

* begin adding tests

* test for detection

* add all detection tests

* moar tests

* tests for deleting pass

* add test for update spans

* tests for updating

* include tracing utils

* gaz

* add mutexes

* ivan feedback
2020-02-22 08:57:24 -06:00
Nishant Das
83945ca54b Shift Stateutils to State Package (#4921)
* shift over
* new changes
* imports
* Merge branch 'master' into shiftUtils
2020-02-21 16:52:21 +00:00
Nishant Das
47bb927029 Fix Fork Copying (#4922)
* add fix and reg test

* goimports
2020-02-21 08:49:42 -05:00
garyschulte
597b21c40a fix missing metrics label on attestation fail (#4917)
Co-authored-by: garyschulteog <30323939+garyschulteog@users.noreply.github.com>
2020-02-20 21:28:55 -06:00
Raul Jordan
39aa791dcc Add Slashings Into Blocks from Pool (#4902)
* tests pass

* fix broken test

* addressed feedback

* Update beacon-chain/rpc/validator/proposer_test.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

* Update beacon-chain/rpc/validator/proposer_test.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
2020-02-20 15:10:51 -06:00
Ivan Martinez
90ed37a655 Cleanup detection code (#4915) 2020-02-20 08:56:37 -06:00
Raul Jordan
d143187b7e Request All Indexed Attestations Since Genesis in Slasher on Startup (#4894)
* include fixes

* rev

* logrus

* tests for query sync status and chain head

* begin tests for indexed atts

* test passing for requesting historical atts

* Update slasher/beaconclient/chain_data_test.go

* Update slasher/beaconclient/historical_data_retrieval.go

* lint

* fixed up wanted vs received

* fix mock

* gazelle

* fix broken build

* tests pass

* dep

* gaz

* add dep

* tests pass

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-19 16:26:14 -06:00
Preston Van Loon
3735e6b8af Add a clarifying comment from #4909 (#4911)
* Add a clarifying comment from #4909
* Merge refs/heads/master into clarify
2020-02-19 21:10:56 +00:00
Jim McDonald
deb76f1c15 Fix double period in span name (#4910) 2020-02-19 14:52:44 -06:00
Jim McDonald
6baffd4ccb Infostream (#4760)
* Add validators stream

* Ignore unknown keys rather than error on them

* Reduce accesses to common structures

* Ensure correct information returned for deposited validators

* Short-term cache for remote deposit data

* Name epoch duration for clarity

* Break out duplicated logic in to a single function

* Add capacities for slices and maps where appropriate

* Break out functions; add tests

* Allow stream errors not related to context

Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-19 11:09:23 -06:00
terence tsao
731cc0bd44 Handling pending atts if they don't have the state (#4904) 2020-02-19 06:46:30 -08:00
Nishant Das
641ad51dd4 check db for justified state (#4905) 2020-02-19 22:18:44 +08:00
Nishant Das
8e55c81bd5 Delete States More Efficiently (#4909)
* only sync at end of method

* chunk roots

* very fast iteration

* delete correctly
2020-02-19 19:36:37 +08:00
Preston Van Loon
f737267e54 gRPC retry requests (#4908)
* gRPC retry requests

* with 5 retries default
2020-02-19 18:42:17 +08:00
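
#4908 above adds transparent gRPC retries with a default of 5 attempts. A sketch using the go-grpc-middleware retry interceptor, assuming that is the mechanism used; the endpoint and backoff values are placeholders:

```go
package main

import (
	"time"

	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"google.golang.org/grpc"
)

// dialBeaconNode opens a gRPC connection that transparently retries failed
// unary calls instead of surfacing the first transient error.
func dialBeaconNode(endpoint string, maxRetries uint) (*grpc.ClientConn, error) {
	return grpc.Dial(
		endpoint,
		grpc.WithInsecure(), // local, unauthenticated connection for illustration only
		grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(
			grpc_retry.WithMax(maxRetries),
			grpc_retry.WithBackoff(grpc_retry.BackoffLinear(500*time.Millisecond)),
		)),
	)
}
```

A call such as `dialBeaconNode("localhost:4000", 5)` would then retry each failed unary RPC up to five times before returning the error.
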
Nishant Das
44856f9500 Add Unsafe Sync (#4906)
* add unsafe flag

* imports

* Use finalized epoch

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-02-19 00:25:07 -08:00
Nishant Das
4389e9d3c9 Add mempool feature flag (#4824) (#4903)
* Add mempool feature flag

* gate put too

* fix

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-19 14:29:49 +08:00
Nishant Das
655f57e3f2 Bound Initial Sync Cache Size (#4844)
* bound initial sync

* fix lint

* Revert "Better block attestation inclusion (#4838)"

This reverts commit 090d9627fe.

* add memory pool

* more fixes

* revert changes

* add hack

* revert hack

* push halving

* bring back hack

* increase cache size

* more fixes

* more changes

* new fixes

* add test

* add reverse test

* more tests and clean up

* add helper

* more cleanup and tests

* fix test

* remove code

* set gc percent flag

* lint

* lint

* Fix comment formatting

* Fix some formatting

* inverse if statement

* remove debug log

* Apply suggestions from code review

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>

* Update beacon-chain/state/getters.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>

* Update beacon-chain/db/kv/state.go

* integrate state generator

* gaz

* fixes

* terence's review

* reduce bound further

* fix test

* separate into new files

* gaz

* mod build file

* add test

* revert changes

* fix test

* Update beacon-chain/core/helpers/slot_epoch.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* handle edge case

* add back test

* fix test again

* handle edge case

* Update beacon-chain/blockchain/init_sync_process_block.go

* Update beacon-chain/blockchain/init_sync_process_block.go

* Update beacon-chain/stategen/service_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update beacon-chain/blockchain/init_sync_process_block.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update beacon-chain/stategen/service.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update beacon-chain/stategen/service.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* raul's review

* raul's review

* fix refs

* terence's review

* one more fix

* Update beacon-chain/blockchain/init_sync_process_block.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-02-18 15:10:54 -06:00
terence tsao
c0d4cabdb7 Check seeds (#4901) 2020-02-18 12:05:36 -06:00
terence tsao
0e37b4926a Export LoadBlocks and ReplayBlocks (#4898)
* Define StateGenerator

* Gaz

* Delete interface.go

* Update BUILD.bazel

Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-18 13:54:56 +08:00
shayzluf
25308ef9fa fuzzing core/state package without skip slot cache (#4883)
* fuzzing core/state package

* named error msg

* err comment

* terence feedback

* preston feedback

* preston feedback

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-18 10:38:15 +05:30
Preston Van Loon
40afef8b9e Only set gc percent if the flag is set (#4899)
* only set gc percent if the flag is set
2020-02-18 01:37:35 +00:00
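
#4899 above only applies the GC percentage when the operator actually set it, since blindly passing a zero default through would effectively disable garbage collection. A sketch with an illustrative flag name, not the actual Prysm flag:

```go
package main

import (
	"flag"
	"runtime/debug"
)

// gcPercent mirrors the idea of a GC tuning flag; name and default are illustrative.
var gcPercent = flag.Int("gc-percent", 0, "GOGC percentage; 0 means leave the runtime default")

func applyGCPercent() {
	flag.Parse()
	// Guard against the zero default: debug.SetGCPercent(0) would turn off
	// automatic garbage collection, so only apply an explicit non-zero value.
	if *gcPercent > 0 {
		debug.SetGCPercent(*gcPercent)
	}
}

func main() {
	applyGCPercent()
}
```
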
Nishant Das
c7d0ced5d1 Utilise a Flag to Toggle With the GC (#4897)
* set flag
* Merge refs/heads/master into setGC
2020-02-18 00:37:37 +00:00
Nishant Das
3d12322103 Use Memory Pool for Randao Mixes (#4896)
* add mem pool

* use mem pool

* Update shared/memorypool/memorypool.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update shared/memorypool/memorypool.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-02-18 08:25:39 +08:00
Raul Jordan
b4881e3cd5 Save Attestations In Initial Sync if Archive Enabled (#4895)
* receive block enable archive
* add to initial sync func
* Merge branch 'master' into archive-save-atts
2020-02-17 22:42:21 +00:00
Preston Van Loon
d1eaa8e09e Define debug images for Prysm beacon chain and validator binaries (#4893)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-17 14:13:34 -08:00
Raul Jordan
5db8c5ad0c Implement ListIndexedAttestations Endpoint in Prysm (#4892)
* update patch and workspace

* stub methods

* implementation of indexed attestations list

* include latest ethereumapis

* update request type

* compute committee pure function

* use compute committee helper

* add test into list indexed attestations

* regenerate mock

* imports and out of range check

* test passing for archived epoch

* add comment

* comment

* better comment on func

* throw in continue instead
2020-02-17 15:57:13 -06:00
terence tsao
d7db8b1f5d Replay block same slots different root edge case (#4889) 2020-02-17 12:33:00 -07:00
terence tsao
6b8ec26c56 Handle nil head block in cache (#4888)
* Nil check

* Fixed tests

* Covered rest of the codebase

* Race tests

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-17 12:21:42 -06:00
terence tsao
9b2aa66667 Fix replay block edge cases and tests (#4881)
* Covered 2 edge cases and tests

* Consider process epoch
2020-02-17 13:10:23 +08:00
terence tsao
b9c140c17d Hot/cold state management: Replay blocks and gen state (#4877)
* Starting stategen

* Replay implementations

* Replay tests

* Gazelle

* Fixed tests

* Dont have to verify sig
2020-02-17 07:28:20 +08:00
Aranha
8885d715f2 fixed panic: runtime error: integer divide by zero #4777 (#4823) 2020-02-16 15:13:03 -07:00
Preston Van Loon
0a2763b380 Check attestation bitlist length in aggregation to prevent panic (#4876)
* Check attestation bitlist length in aggregation to prevent panic

* Add case for overlap too

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-16 10:18:48 -07:00
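
#4876 above guards aggregation against mismatched bitlist lengths, which would otherwise index past the shorter bitfield and panic. A simplified sketch over raw byte slices; real attestations use a length-encoded bitlist type, so this is illustrative only:

```go
package aggregation

import "errors"

// ErrBitlistLengthMismatch mirrors the failure mode being guarded against:
// OR-ing two aggregation bitfields of different lengths.
var ErrBitlistLengthMismatch = errors.New("aggregation bitlists have different lengths")

// mergeAggregationBits ORs two raw bitfields after checking their lengths.
func mergeAggregationBits(a, b []byte) ([]byte, error) {
	if len(a) != len(b) {
		return nil, ErrBitlistLengthMismatch
	}
	out := make([]byte, len(a))
	for i := range a {
		out[i] = a[i] | b[i]
	}
	return out, nil
}
```
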
Raul Jordan
3fcb4e8a12 Update README for Patching Ethereum APIs (#4871)
* edit readme
* Merge branch 'master' into readme-patch
* Merge branch 'master' into readme-patch
2020-02-16 17:07:06 +00:00
Preston Van Loon
db68c8a57b Enable attestation cache flag by default, deprecate feature flag (#4873)
* Enable attester flag by default
2020-02-16 01:07:14 +00:00
SuburbanDad
7899dc115e prevent additional array OOB errors for validator balances (#4872)
Co-authored-by: garyschulte <garyschulte@gmail.com>
2020-02-15 15:47:45 -07:00
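
#4872 above is a plain bounds check before indexing the balances slice; a minimal sketch:

```go
package rpc

import "fmt"

// balanceAt returns the balance at index idx, guarding the slice access so a
// stale or invalid validator index can't panic the node with an out-of-range read.
func balanceAt(balances []uint64, idx uint64) (uint64, error) {
	if idx >= uint64(len(balances)) {
		return 0, fmt.Errorf("validator index %d out of range (have %d balances)", idx, len(balances))
	}
	return balances[idx], nil
}
```
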
terence tsao
456ac5f9a3 Better head object coupling for chain service (#4869)
* Done
* Fixed lock
* Fixed all the tests
* Comments
* Fixed more tests
* Merge branch 'master' into better-head-obj
* Fixed more tests
* Merge branch 'better-head-obj' of git+ssh://github.com/prysmaticlabs/prysm into better-head-obj
* Prestons feedback & fixed test
* Nishant's feedback
* Participation edge case
* Gaz
* Merge branch 'master' into better-head-obj
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into better-head-obj
* Raul's feedback
* Merge branch 'better-head-obj' of git+ssh://github.com/prysmaticlabs/prysm into better-head-obj
2020-02-15 18:57:49 +00:00
terence tsao
92a91476ef Better log (#4870) 2020-02-15 04:09:26 -08:00
Raul Jordan
868c8f5dd4 Detection Service Creation (#4867)
* visibility added

* register in node

* fixed up imports

* include detection listeners for feed

* subscribe to blocks and todos

* tests passing

* todos

* pkg comment
2020-02-14 13:03:25 -06:00
Raul Jordan
38fed735b2 Send Slashing Objects to Beacon Node via RPC (#4866)
* submit slashing objects

* tests complete
2020-02-14 11:11:14 -07:00
terence tsao
4a446329b2 Prevent balance from going out of bounds (#4865)
* Prevent balance from going out of bounds
* Prevent balance from going out of bounds
* Merge branch 'master' into fix-balance
2020-02-14 16:59:20 +00:00
Ivan Martinez
6b40fa01ec Add detection package for slashing detection functions (#4861)
* Move detection to its own package

* Fix renames

* More fixes

* Revert "Fix renames"

This reverts commit 3200f89a1b.

* Fix

* Fix renames again

* Fix another rename

* Fix comment

* unused

* add comment

* gazelle

* Add spans

* Unexport helper functions

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-14 10:46:55 -06:00
Nishant Das
214121b0ab Don't Initialize Zeroed Out State (#4863)
* dont initialize map with empty registry
* check zeroed pointer
* check zeroed pointer
* imports
* gaz
* better check
* gaz
* fix it finally
* finally fix it
* gaz
2020-02-14 12:19:50 +00:00
Raul Jordan
b99779fe94 Implementing Slasher Node Runtime (#4856)
* include slasher node
* slasher node runtime added
* added in register for beacon client
* streaming blocks fixed up
* all subs working
* gazelle
* handle errors
* Merge branch 'master' into slasher-node
* Update slasher/node/BUILD.bazel
* x up slasher test
* Merge refs/heads/master into slasher-node
* Merge refs/heads/master into slasher-node
* add in force clear into usage
* Merge refs/heads/master into slasher-node
* usage
* Merge refs/heads/master into slasher-node
* Fix streamblocks test
* Merge refs/heads/master into slasher-node
* Fix docker image compile
* Merge branch 'slasher-node' of https://github.com/prysmaticlabs/Prysm into slasher-node
2020-02-14 07:09:54 +00:00
Nishant Das
b263efefeb Copy Checkpoint Root Properly (#4862)
* change to custom hashing
* Merge branch 'master' into minorOpt
* goimports
* Merge branch 'minorOpt' of https://github.com/prysmaticlabs/geth-sharding into minorOpt
* gaz
* pad to 32 bytes
* one more case
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into minorOpt
* one more case
* more cases
* some more cases
* do it better
2020-02-14 06:35:16 +00:00
Nishant Das
ecfd7bdfa1 Change to Custom Hashing for BlockHeaders (#4860)
* change to custom hashing
* Merge branch 'master' into minorOpt
* goimports
* Merge branch 'minorOpt' of https://github.com/prysmaticlabs/geth-sharding into minorOpt
* gaz
* pad to 32 bytes
2020-02-14 05:19:20 +00:00
Raul Jordan
549b0f66fa Include Slashing Submission Endpoints + Slashing Pool in Beacon Node (#4858)
* add to workspace

* impl

* include tests for func

* fix broken build

* test passing, found 2 bugs

* add errors package

* added in mockgen

* we check for insertion into the pool based on attester slashings

* test passing

* proper test

* Update beacon-chain/rpc/beacon/slashings.go

* Update beacon-chain/rpc/beacon/slashings_test.go
2020-02-13 22:20:45 -06:00
garyschulteog
27ec40f269 Remove remaining instances of proto.clone() (#4806)
* prysm-4757 remove proto.Clone() in favor of existing getters.Copy* methods
* prysm-4757 added a bunch of copy methods, and broke some tests
* squash commits
 fix tests and getter implementations
 remove usage of CopySignedBeaconBlock from ReceiveBlockNoVerify
* correctly copy Deposit proof and remove proto.clone() again
* Merge branch 'master' into prysm-4757-no-proto-clone
* Merge branch 'master' into prysm-4757-no-proto-clone
* Fix for comments, inline possible function calls
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into prysm-4757-no-proto-clone
* Merge branch 'master' into prysm-4757-no-proto-clone
* updated with feedback from review
* Merge branch 'master' into prysm-4757-no-proto-clone
* Merge branch 'master' into prysm-4757-no-proto-clone
* Merge branch 'master' into prysm-4757-no-proto-clone
2020-02-14 01:03:51 +00:00
terence tsao
bb60b2f523 Add balances to voting summary log (#4857) 2020-02-13 14:52:35 -08:00
Preston Van Loon
4072eb711f Beacon State: More consistent nil return for state (#4854)
* More consistent nil return for state
* Merge refs/heads/master into nil-state
* Add a check for encode(nil)
* Merge branch 'nil-state' of github.com:prysmaticlabs/prysm into nil-state
* fix test, thanks @rauljordan
* fix tests
* gofmt
2020-02-13 20:34:50 +00:00
Ivan Martinez
2473680759 Add spans to Slasher DB functions (#4855)
* Add interface and move slashing types to /types package

* Add spans for all DB functions

* Fix packages

* Fix func call
2020-02-13 13:51:30 -06:00
Ivan Martinez
c44a30672e Change slasher DB structure to mirror the beacon chain's (#4848)
* Add interface and move slashing types to /types package

* WIP restructure to match beacon chain DB

* Fix build

* Fix comment

* Fix comments

* fix comments for sure

* Use wrapper function for evict

* Remove unused

* Update slasher/db/kv/kv.go

* Update slasher/db/testing/BUILD.bazel

* Update slasher/db/types/BUILD.bazel

* Update slasher/db/types/types.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-13 10:19:46 -06:00
Nishant Das
db21f98053 Change Positioning of Warning Log (#4850)
* change logging

* change positioning of log

* fix status test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-13 00:39:15 -08:00
Nishant Das
b7adf55336 Revert "Check HeadState First (#4830)" (#4851)
This reverts commit 601f93a0a1.
2020-02-12 23:33:45 -08:00
Preston Van Loon
f06dfd6108 Secure lock when accessing the map only (#4849)
* Secure lock when accessing the map

* wrong lock

* Remove some deadlocks
2020-02-12 20:33:14 -06:00
Preston Van Loon
bb4c8ba83e Create backups without freelist. (#4847)
* Create backups without freelist. This is a bit slower, but more accurate
* Merge refs/heads/master into better-backups
2020-02-12 22:01:07 +00:00
terence tsao
16fef1c658 Better attesting summary reporting (#4845) 2020-02-12 13:38:19 -08:00
Preston Van Loon
090d9627fe Better block attestation inclusion (#4838)
* Fill blocks with unaggregated atts when possible, don't delete from pool until the block is submitted

* Add back invalid atts removal

* Don't delete attestations unless they can be aggregated

* comment and test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-12 11:54:07 -08:00
Nishant Das
c7698cda1c Add Pending Attestation Lock fix (#4840)
* add pending lock fix

* use easier locking
2020-02-12 08:58:40 -08:00
Nishant Das
a11f1804a2 lock fix (#4839) 2020-02-11 23:33:15 -08:00
Preston Van Loon
0882908d2c Improve attestation cache check from O(n) to O(1) access time (#4837)
* use O(1) method to access the blocks
* Merge branch 'master' into better-has-aggregated-attestation
2020-02-11 21:57:42 +00:00
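
#4837 above replaces a linear scan of the attestation cache with a keyed lookup. A sketch of the O(1) structure, keying on a 32-byte attestation data root; the names are illustrative:

```go
package attestations

import "sync"

// pool indexes seen aggregated attestations by their data root so membership
// checks are a single map lookup rather than a scan over every stored attestation.
type pool struct {
	mu   sync.RWMutex
	seen map[[32]byte]bool
}

func newPool() *pool {
	return &pool{seen: make(map[[32]byte]bool)}
}

// mark records an attestation data root in O(1).
func (p *pool) mark(root [32]byte) {
	p.mu.Lock()
	p.seen[root] = true
	p.mu.Unlock()
}

// hasAggregatedAttestation answers membership in O(1) instead of an O(n) walk.
func (p *pool) hasAggregatedAttestation(root [32]byte) bool {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.seen[root]
}
```
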
Raul Jordan
a7325315a8 Include Beacon Client Package in Slasher (#4835)
* begin beacon client

* adding in the proper receivers

* include all parts of the beacon client

* all comments included

* visibility and package comment
2020-02-11 15:35:31 -06:00
Raul Jordan
c3785e03ba Add Lock to Processing Pending Attestations (#4833)
* add lock to pending atts
* Merge branch 'master' into add-att-lock
* changed to rlock
* broken build
* Merge refs/heads/master into add-att-lock
* refactor to prevent deadlock
2020-02-11 20:01:33 +00:00
Raul Jordan
297247d915 Add Paginated Attestation Pool to Prysm (#4827)
* added pagination to atts

* tests pass for atts

* add mock

* fix

* add to val

* fix

* add in proper mock

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-11 13:18:30 -06:00
Nishant Das
601f93a0a1 Check HeadState First (#4830)
* easy optimization
* Merge refs/heads/master into easyOptimization
* cache miss
2020-02-11 16:22:51 +00:00
Nishant Das
8c90e38770 Fix Goodbye RPC handler (#4831)
* fix goodbye messages

* fix test

* fix test
2020-02-11 09:08:01 -06:00
Preston Van Loon
661e48f549 Revert state copy PR #4811 (#4825)
* Revert "Add mempool feature flag (#4824)"

This reverts commit b3f2a330dc.
* Revert "Optimize Copying of Fields (#4811)"

This reverts commit 4f654d30ac.
2020-02-11 01:47:31 +00:00
Preston Van Loon
b3f2a330dc Add mempool feature flag (#4824)
* Add mempool feature flag

* gate put too

* fix

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-10 16:20:48 -06:00
Preston Van Loon
5c14cd64c5 nil check (#4822)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-10 14:01:37 -08:00
terence tsao
56fcca69d7 Optimize Hasblock (#4821)
* Expose node

* Comments

* Extra line

* Add has block

* Test

* Usages

* Fixed tests
2020-02-10 15:48:28 -06:00
shayzluf
02b6d7706f Slasher committees cache (#4812)
* add committees cache
* committees cache usage
* fix test
* fix log
* goimports
* Merge branch 'master' of github.com:prysmaticlabs/prysm into slasher_committees_cache

# Conflicts:
#	slasher/service/data_update.go
* fix imports
* fix comment
* fix comment
* Merge refs/heads/master into slasher_committees_cache
* Merge refs/heads/master into slasher_committees_cache
* Update slasher/cache/BUILD.bazel

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Merge refs/heads/master into slasher_committees_cache
* Merge refs/heads/master into slasher_committees_cache
* Merge refs/heads/master into slasher_committees_cache
* added in the service context
* baz
* Merge refs/heads/master into slasher_committees_cache
* Merge refs/heads/master into slasher_committees_cache
2020-02-10 20:09:14 +00:00
Preston Van Loon
0ed8246e28 Release flag to aggregate attestations in fork choice. (#4820)
* Release flag to aggregate attestations in fork choice.
* Merge refs/heads/master into aggregate-atts-in-fc
* fix test
* Merge branch 'aggregate-atts-in-fc' of github.com:prysmaticlabs/prysm into aggregate-atts-in-fc
* Merge refs/heads/master into aggregate-atts-in-fc
2020-02-10 19:57:30 +00:00
Jim McDonald
dfe52e1752 Add command to display private keys from keystore (#4793)
* Add command to display private keys from keystore

* Update output format

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-10 13:47:54 -06:00
terence tsao
52524d5acc Expose fork choice node (#4819)
* Expose node

* Comments

* Extra line

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-10 13:09:12 -06:00
Preston Van Loon
4598344918 Clear initial sync state caches after round robin sync (#4817)
* Clear initial sync state caches after round robin sync

* fix test mock

* lint

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-10 10:39:18 -08:00
terence tsao
1a5c5153be Increase size to 10 (#4818) 2020-02-10 10:14:31 -08:00
terence tsao
6c00f5fff7 Update attester wait time (#4791)
* Update attester submit strategy

* Tests

* Gaz

* Fixed rest of the tests

* Updated design to use feed

* Use roughtime for Now

* gaz

* Gaz

* Send block processed after fork choice

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-02-10 10:59:55 -06:00
Nishant Das
4f654d30ac Optimize Copying of Fields (#4811)
* add new changes

* memory pool

* add test

* final optimization

* preston's review
2020-02-10 23:05:58 +08:00
Ivan Martinez
18fbdd53b9 Slasher proto and function renames (#4797)
* Rename elements for clarity
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-renames
* Fix test
* Rename more functions
* Cleanup
* Fix logs
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into slasher-renames
* Reorganize and clean up logs
* Address comments
* Add comments
2020-02-10 05:57:20 +00:00
terence tsao
5be4fee810 Removed spans for fork choice helpers (#4808)
* Removed spans
2020-02-09 23:32:08 +00:00
terence tsao
8a02003d4b Feature flag to disable head update on attestation basis (#4802) 2020-02-09 11:44:17 -08:00
terence tsao
bdcd06a708 Handle head state for init sync cache state (#4800)
* Don't save nil head state
* Update head
* Don't update head on new att
* Handle initial sync state in DB can be nil
* Don't update head if it's nil
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into handle-init-sync-cache-state
* Merge refs/heads/master into handle-init-sync-cache-state
* Update beacon-chain/blockchain/head.go
2020-02-09 16:12:06 +00:00
Preston Van Loon
7e0d0502aa Prevent panic on wrong interface conversion (#4803)
* Prevent panic on wrong interface conversion
* remove import
2020-02-09 08:41:50 +00:00
terence tsao
f14ff34797 Delete block attestations from the pool (#4798) 2020-02-08 21:30:45 -08:00
terence tsao
16a0c9f463 Don't save nil head state (#4799)
* Don't save nil head state
* Update head
2020-02-09 05:08:21 +00:00
Preston Van Loon
70cb06d50f Report unhealthy if we think we are out of sync (#4796)
* Report unhealthy if we think we are out of sync
* gofmt
* Merge refs/heads/master into out-of-sync-unhealthy
2020-02-08 18:50:14 +00:00
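#4796 makes the node report itself unhealthy while it believes it is behind. A minimal sketch of such a health endpoint, assuming a hypothetical Syncer interface rather than Prysm's actual sync service:

```go
// Sketch of a health check that returns 503 while the node is out of sync (#4796).
package main

import (
	"fmt"
	"net/http"
)

type Syncer interface {
	Syncing() bool
}

type staticSyncer struct{ syncing bool }

func (s staticSyncer) Syncing() bool { return s.syncing }

func healthzHandler(s Syncer) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if s.Syncing() {
			// 503 tells pollers and orchestrators the node is not ready.
			http.Error(w, "node is syncing", http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	}
}

func main() {
	http.HandleFunc("/healthz", healthzHandler(staticSyncer{syncing: true}))
	fmt.Println("healthz handler registered; start an http.Server to serve it")
}
```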
Jim McDonald
0725e2dba7 Downgrade log entry (#4795) 2020-02-08 10:05:13 -08:00
terence tsao
031b51e294 Update head on per attestation and minor refactor clean ups (#4786)
* head.go
* Tests
* Tests
* Merge branch 'master' into call-head
* Merge refs/heads/master into call-head
* Merge refs/heads/master into call-head
* Merge refs/heads/master into call-head
* Merge branch 'master' into call-head
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into call-head
* Conflict
2020-02-08 02:05:43 +00:00
Preston Van Loon
015c8c4cd2 Use helper to aggregate attestations in pool (#4794)
* Aggregate attestations in pool
* test
* clarify test
* fix test
2020-02-08 01:03:50 +00:00
Nishant Das
efd27c7b2b Fix Dynamic Topic Subscriptions (#4767)
* check for sync status

* check for chainstart

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-08 06:49:17 +08:00
Nishant Das
f16a71f178 Revert "Update Slices More Efficiently" (#4790)
* Revert "Update Slices More Efficiently (#4789)"

This reverts commit 669e1ea787.
2020-02-07 16:58:36 +00:00
Nishant Das
669e1ea787 Update Slices More Efficiently (#4789)
* better cached states

* lint

* jim's review
2020-02-07 09:06:39 -06:00
Jim McDonald
7ba2c897ad Add location option for wallet keymanager (#4788)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-06 23:03:33 -06:00
Preston Van Loon
34178aff2a Strict verify attestations in pubsub (#4782)
* Verify attestations before putting them into the pool
* use existing method
* Validate aggregated ones too
* Revert "Validate aggregated ones too"

This reverts commit a55646d131.
* Merge branch 'master' of github.com:prysmaticlabs/prysm into verify-all-atts
* Add feature flag
* The remaining shared reference fields with conditional copy on write
* Merge branch 'master' into better-copy-2
* Merge branch 'better-copy-2' of github.com:prysmaticlabs/prysm into verify-all-atts
* gaz
* fix build, put into validate
* lint
* Merge branch 'master' of github.com:prysmaticlabs/prysm into verify-all-atts
* why does goland do this to me
* revert unrelated change
* fix tests
* Update shared/featureconfig/config.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Merge refs/heads/master into verify-all-atts
* Update beacon-chain/blockchain/testing/mock.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* gofmt
2020-02-07 03:21:55 +00:00
shayzluf
4df74a3b9d Slashing operations pool (#4726)
* first iteration

* with initial tests

* comment fix

* fix lint

* Start fixing other tests

* Cleanup

* Finish att slashing tests

* Finish up tests for proposer

* Fix docs

* Fix tests

* Fix pending att slashings to not include duplicates

* Fix max list

* Add test to make sure no duplicate slashings

* Address comments, improve insertion

* Fix error

* Update beacon-chain/operations/slashings/service.go

* Update beacon-chain/operations/slashings/service.go

* Update beacon-chain/operations/slashings/service.go

* Update beacon-chain/operations/slashings/service.go

* include a helper function for deduplication, fix some comments

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-07 10:32:51 +08:00
Preston Van Loon
f6dfaef537 BeaconState: remaining shared reference fields with conditional copy on write (#4785)
* The remaining shared reference fields with conditional copy on write
* Merge branch 'master' into better-copy-2
2020-02-06 21:55:29 +00:00
Raul Jordan
a9d144ad1f Stream Blocks Functionality for RPC (#4771)
* stream blocks functionality included
* necessary tests for stream blocks and notifier
* gazelle and tests passing
* gazelle and tests passing
* Merge branch 'master' into stream-block
* Update beacon-chain/core/feed/block/events.go
* Merge refs/heads/master into stream-block
* Merge refs/heads/master into stream-block
* Merge refs/heads/master into stream-block
* Merge refs/heads/master into stream-block
* naming
* build
* Merge refs/heads/master into stream-block
* Merge refs/heads/master into stream-block
* Merge refs/heads/master into stream-block
* fix up tests
* Merge branch 'stream-block' of github.com:prysmaticlabs/prysm into stream-block
* Merge refs/heads/master into stream-block
* shay comment
* Merge refs/heads/master into stream-block
* Merge branch 'stream-block' of github.com:prysmaticlabs/prysm into stream-block
* Merge refs/heads/master into stream-block
2020-02-06 20:14:38 +00:00
terence tsao
9cf30002d4 Rlock for computing head (#4784)
* Add lock
* Space
2020-02-06 19:59:50 +00:00
AgentJ-WR
e2faa391c3 Fix(Genesis): Api genesis block now returns properly (#4736)
* begin state service
* begin on the state trie idea
* created beacon state structure
* add in the full clone getter
* return by value instead
* add all setters
* new state setters are being completed
* arrays roots exposed
*  close to finishing all these headerssss
* functionality complete
* added in proto benchmark test
* test for compatibility
* add test for compat
* comments fixed
* Merge branch 'master' into state-service
* add clone
* add clone
* remove underlying copies
* make it immutable
* integrate it into chainservice
* revert
* wrap up comments for package
* address all comments and godocs
* address all comments
* Merge branch 'master' into state-service
* clone the pending attestation properly
* Merge branch 'state-service' of github.com:prysmaticlabs/prysm into state-service
* properly clone remaining items
* tests pass fixed bug
* begin using it instead of head state
* prevent nil pointer exceptions
* Merge branch 'state-service' into use-state-in-runtime
* begin using new struct in db
* integrated new type into db package
* add proper nil checks
* using new state in archiver
* refactored much of core
* editing all the precompute functions
* done with most core refactor
* fixed up some bugs in the clone comparisons
* Merge branch 'state-service' into use-state-in-runtime
* append current epoch atts
* merged master
* add missing setters
* add new setters
* fix other core methods
* fix up transition
* main service and forkchoice
* fix rpc
* integrated to powchain
* some more changes
* fix build
* improve processing of deposits
* fix error
* prevent panic
* comment
* fix process att
* gaz
* fix up att process
* resolve existing review comments
* Merge branch 'master' into use-state-in-runtime
* resolve another batch of gh comments
* resolve broken cpt state
* revise testutil to use the new state
* begin updating the state transition func to pass in more compartmentalized args
* finish editing transition function to return errors
* block operations pretty much done with refactor
* state transition fully refactored
* got epoch processing completed
* fix build in fork choice
* fixing more of the build
* fix up broken sync package
* it builds nowww it buildssss
* revert registry changes
* Recompute on Read (#4627)

* compute on read

* fix up eth1 data votes

* looking into slashings bug introduced in core/

* able to advance more slots

* add logging

* can now sync with testnet yay

* remove the leaves algorithm and other merkle imports

* expose initialize unsafe funcs

* Update beacon-chain/db/kv/state.go

* lint

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* include master
* More Optimizations for New State (#4641)

* map optimization

* more optimizations

* use a custom hasher

* comment

* block operations optimizations

* Update beacon-chain/state/types.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* fixed up various operations to use the validator index map access

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* archiver tests pass
* fixing cache tests
* cache tests passing
* edited validator tests
* powchain tests passing
* halfway thru sync tests
* more sync test fixes
* add in tests for state/
* working through rpc tests
* assignments tests passed
* almost done with rpc/beacon tests
* resolved painful validator test
* fixed up even more tests
* resolve tests
* fix build
* reduce a randao mixes copy
* fixes under //beacon-chain/blockchain/...
* build //beacon-chain/core/...
* fixes
* Runtime Optimizations (#4648)

* parallelize shuffling

* clean up

* lint

* fix build

* use callback to read from registry

* fix array roots and size map

* new improvements

* reduce hash allocs

* improved shuffling

* terence's review

* use different method

* raul's comment

* new array roots

* remove clone in pre-compute

* Update beacon-chain/state/types.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* raul's review

* lint

* fix build issues

* fix visibility

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* Merge branch 'use-state-in-runtime' of https://github.com/prysmaticlabs/geth-sharding into resolve-tests
* fix visibility
* build works for all
* fix blockchain test
* fix a few tests
* fix more tests
* resolve conf
* sync
* update validator in slashing
* Merge branch 'resolve-tests' of github.com:prysmaticlabs/prysm into resolve-tests
* archiver passing
* Merge branch 'resolve-tests' of github.com:prysmaticlabs/prysm into resolve-tests
* fixed rpc/validator
* progress on core tests
* Merge branch 'resolve-tests' of github.com:prysmaticlabs/prysm into resolve-tests
* resolve broken rpc tests
* Merge branch 'resolve-tests' of github.com:prysmaticlabs/prysm into resolve-tests
* blockchain tests passed
* fix up some tests in core
* Merge branch 'master' of github.com:prysmaticlabs/prysm into resolve-tests
* fix message diff
* remove unnecessary save
* Merge branch 'master' of github.com:prysmaticlabs/prysm into resolve-tests
* Save validator after slashing
* Update validators one by one
* another update
* fix everything
* Merge branch 'resolve-tests' of https://github.com/prysmaticlabs/geth-sharding into resolve-tests
* fix more precompute tests
* fix blocks tests
* Merge branch 'resolve-tests' of github.com:prysmaticlabs/prysm into resolve-tests
* more elegant fix
* more helper fixes
* Merge branch 'resolve-tests' of https://github.com/prysmaticlabs/geth-sharding into resolve-tests
* change back ?
* fix test
* fix skip slot
* fix test
* reset caches
* fix testutil
* raceoff fixed
* passing
* Retrieve cached state in the beginning
* lint
* Merge branch 'master' of github.com:prysmaticlabs/prysm into resolve-tests
* Fixed tests part 1
* Fixed rest of the tests
* Merge branch 'master' into optimize-process-att
* Minor changes to avoid copying, small refactor to reduce duplicated code
* Merge branch 'master' into resolve-tests
* Handle att req for slot 0
* New beacon state: Only populate merkle layers as needed, copy merkle layers on copy/clone. (#4689)

* Only populate merkle layers as needed, copy merkle layers on copy/clone.

* use custom copy

* Make maps of correct size

* slightly fast, doesn't wait for lock

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'resolve-tests' of github.com:prysmaticlabs/prysm into resolve-tests
* Merge branch 'master' into resolve-tests
* Target root can't be 0x00
* Merge refs/heads/master into resolve-tests
* Merge branch 'resolve-tests' of github.com:prysmaticlabs/prysm into resolve-tests
* Don't use cache for current slot (may not be the right fix)
* Merge branch 'resolve-tests' of git+ssh://github.com/prysmaticlabs/prysm into resolve-tests
* Merge branch 'resolve-tests' of github.com:prysmaticlabs/prysm into resolve-tests
* fixed up tests
* Remove some copy for init sync. Not sure if it is safe enough for runtime though... testing...
* Align with prev logic for process slots cachedState.Slot() < slot
* Fix Initial Sync Flag (#4692)

* fixes

* fix up some test failures due to lack of nil checks

* fix up some test failures due to lack of nil checks

* fix up imports

* revert some changes

* imports

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* Conflict
* Conflict
* resolve confs
* resolving further conflicts
* Better skip slot cache (#4694)

* Return copy of skip slot cache state, disable skip slot cache on sync

* fix
* Fix pruning
* Merge refs/heads/master into resolve-tests
* Merge refs/heads/master into resolve-tests
* copy on write method
* gaz
* fix confs
* Merge refs/heads/resolve-tests into copy-on-write
* fix tests
* Merge branch 'copy-on-write' of github.com:prysmaticlabs/prysm into copy-on-write
* fix up issues with broken tests
* Merge refs/heads/resolve-tests into copy-on-write
* Merge branch 'master' of github.com:prysmaticlabs/prysm into copy-on-write
* remove extra update
* remove debugging lines
* gofmt
* Merge refs/heads/master into copy-on-write
* Merge refs/heads/master into copy-on-write
* Merge refs/heads/master into copy-on-write
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Passing 1 to SetEndSlot
* Better way to get genesis block
* Merge branch 'master' of github.com:prysmaticlabs/prysm into bug/ApiGenesisBlock
* Checking for nil genBlk
* Testing conditional order
* Reverting conditions, and no idea why build fails
* Merge branch 'master' into bug/ApiGenesisBlock
* Saving to genesis block root in tests
* Merge branch 'bug/ApiGenesisBlock' of github.com:AgentJ-WR/prysm into bug/ApiGenesisBlock
* Saving root
* Merge branch 'master' of github.com:prysmaticlabs/prysm into bug/ApiGenesisBlock
* Adding regression test
* Updating regression test
* Merge branch 'master' into bug/ApiGenesisBlock
* Merge branch 'master' into bug/ApiGenesisBlock
* Merge branch 'master' into bug/ApiGenesisBlock
* Merge branch 'master' into bug/ApiGenesisBlock
2020-02-06 19:23:39 +00:00
terence tsao
5b83dffbe4 Use proto array forkchoice as default (#4778)
* Starting

* Removing feature flag

* Minor touchups service.go

* Conflict

* Started fixing test

* Init fork choice store for tests
2020-02-06 13:03:26 -06:00
terence tsao
b8383da468 Add forkchoiceAggregateAttestations to flag list (#4780)
* Added
* Merge branch 'master' into fix-flag
2020-02-06 18:09:51 +00:00
Preston Van Loon
c7fb28d42e Faster BLS publickey.Copy (#4770)
* Use balancesLength and randaoMixesLength to save copy on read
* use a cheaper copy for BLS publickey.Copy()
* Merge branch 'master' into bls-better-copy
* Merge refs/heads/master into bls-better-copy
* Merge refs/heads/master into bls-better-copy
* Merge refs/heads/master into bls-better-copy
* Merge refs/heads/master into bls-better-copy
* Merge branch 'master' of github.com:prysmaticlabs/prysm into bls-better-copy
* quick test
* Merge refs/heads/master into bls-better-copy
2020-02-06 17:35:38 +00:00
Preston Van Loon
3a9c8eb8b1 Log a warning if attempting to save a nil state (#4779)
* Log a warning if attempting to save a nil state
* Log a warning if attempting to save a nil state
2020-02-06 17:22:44 +00:00
Preston Van Loon
a9f1de354b Use balancesLength and randaoMixesLength to save copy on read (#4769)
* Use balancesLength and randaoMixesLength to save copy on read

* Update beacon-chain/state/getters.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
2020-02-06 10:06:27 -06:00
terence tsao
69c3d9dec2 Save attestation to DB gated by archival flag (#4776)
* Gated by flag
* Gaz
* Merge branch 'master' into archival-saves-att
2020-02-06 15:14:52 +00:00
Nishant Das
b99ae2cbe4 Remove All Batch DB Calls (#4775)
* remove all batch calls
2020-02-06 08:23:06 +00:00
Preston Van Loon
bfa103317e Disable forkchoice pre-processing of attestations (#4774)
* Disable forkchoice pre-processing of attestations
* space
* gaz
2020-02-06 07:53:08 +00:00
Nishant Das
dc1432f8d8 Attestation Verification Improvements (#4753)
* add fixes for sig verify
* minor fix
* Merge branch 'master' into fixSigVerify
* fmt
* Merge branch 'fixSigVerify' of https://github.com/prysmaticlabs/geth-sharding into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* use custom att copy
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* Merge refs/heads/master into fixSigVerify
* finally fixed all this
* Merge branch 'fixSigVerify' of https://github.com/prysmaticlabs/geth-sharding into fixSigVerify
2020-02-06 05:46:25 +00:00
terence tsao
85b379c08c Fix block tree cosmetic bugs (#4768)
* Fixed
* Merge branch 'master' into check-ready
2020-02-06 04:49:12 +00:00
Preston Van Loon
91b8760632 Beacon State: Track field references (#4751)
* track field references
* gofmt, RLock
* Merge refs/heads/master into randao-ref-tracking
* Merge refs/heads/master into randao-ref-tracking
* Merge refs/heads/master into randao-ref-tracking
* Merge refs/heads/master into randao-ref-tracking
* Merge refs/heads/master into randao-ref-tracking
* Merge refs/heads/master into randao-ref-tracking
* Merge refs/heads/master into randao-ref-tracking
* cleanup comments
* Merge branch 'randao-ref-tracking' of github.com:prysmaticlabs/prysm into randao-ref-tracking
* Add a test for finalizer
* Merge refs/heads/master into randao-ref-tracking
* Merge refs/heads/master into randao-ref-tracking
* maybe fix data race
* maybe fix data race
* temp comment out test file to find which one fails race test
* its definitely something with the checkpoint state cache
* its definitely something with the checkpoint state cache
* its definitely something with the checkpoint state cache
* Merge refs/heads/master into randao-ref-tracking
* This should fix it
* Merge branch 'randao-ref-tracking' of github.com:prysmaticlabs/prysm into randao-ref-tracking
* gaz
* Merge refs/heads/master into randao-ref-tracking
* Merge refs/heads/master into randao-ref-tracking
* turn off race detection, i dont understand why is broken
* Merge branch 'randao-ref-tracking' of github.com:prysmaticlabs/prysm into randao-ref-tracking
* feedback
* @nisdas feedback
* Revert "@nisdas feedback"

This reverts commit 6129cf84e6.
* Merge refs/heads/master into randao-ref-tracking
2020-02-06 04:07:23 +00:00
Preston Van Loon
27e7be6279 Add & use FinalizedCheckpointEpoch() to state (#4766)
* Add and use FinalizedCheckpointEpoch() to state
* comment
2020-02-06 03:26:15 +00:00
Ivan Martinez
ebd4541dcd Add test for GetBlock (#4765)
* Add a test for GetBlock
* Fix formatting
* Fix graffiti
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into test-getblock
* Merge branch 'master' into test-getblock
2020-02-05 23:56:38 +00:00
Raul Jordan
32b5b8fa69 Include Latest Ethereum APIs Definitions in Prysm (#4759)
* include latest ethereumapis and implement streams
* add comments and error unimpl
* Merge branch 'master' into blocks-stream
* add strm
* Merge refs/heads/master into blocks-stream
* Merge refs/heads/master into blocks-stream
* Merge refs/heads/master into blocks-stream
* add in mocks
* Merge branch 'blocks-stream' of github.com:prysmaticlabs/prysm into blocks-stream
* use right mock
* gaz
* ptypes
* gaz
* gomock dep
* Merge refs/heads/master into blocks-stream
2020-02-05 23:43:36 +00:00
terence tsao
c6e3d67241 Block tree enhancements (#4764)
* Add votes, correct conversion and green for head

* Starting testing

* Fix for run time
2020-02-05 17:30:55 -06:00
Raul Jordan
cb33deab36 batch to update (#4763) 2020-02-05 13:01:56 -08:00
terence tsao
113ac460c0 Update higherThanFinalized in the loop (#4761)
* Update higherThanFinalized in the loop
* Addded a test
* Fixed test
* Merge branch 'master' into update-finalized
2020-02-05 20:13:38 +00:00
Preston Van Loon
c496170c33 Do not attempt to save a nil state (#4758) 2020-02-05 11:52:14 -08:00
Jim McDonald
945edb6c8f Sync to highest possible head given the peers available (#4570)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-05 12:44:32 -06:00
garyschulte
24a5a9c112 add prometheus metrics for validator accounts (#4724)
* add prometheus metrics for validator accounts:
  * gauge for balances
  * counters for attestations and failures
  * counters for aggregations and failures
  * counters for proposals and failures

put validator account metrics behind flag

* run gazelle to reorganize deps

* fix typo

Co-authored-by: garyschulteog <30323939+garyschulteog@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-05 11:49:27 -06:00
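#4724 puts per-account gauges and failure counters behind a flag. The sketch below uses the standard Prometheus client to show the shape of that wiring; the metric names and flag name are illustrative, not the ones added in the PR:

```go
// Sketch of flag-gated validator account metrics (#4724).
package main

import (
	"flag"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	enableAccountMetrics = flag.Bool("enable-account-metrics", false, "Expose per-validator account metrics")

	balanceGauge = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "validator_balance_gwei", Help: "Validator balance in gwei"},
		[]string{"pubkey"},
	)
	attestFailures = prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "validator_failed_attestations_total", Help: "Failed attestation submissions"},
		[]string{"pubkey"},
	)
)

func main() {
	flag.Parse()
	if *enableAccountMetrics {
		prometheus.MustRegister(balanceGauge, attestFailures)
		balanceGauge.WithLabelValues("0xabc").Set(32e9)
	}
	http.Handle("/metrics", promhttp.Handler())
	// http.ListenAndServe(":8080", nil) // sketch only; server not started here
}
```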
terence tsao
0180051b5e Update head slot metric after compute head (#4754)
* Update head slot after compute head
* Merge branch 'master' into properly-update-head-metric
* Merge refs/heads/master into properly-update-head-metric
2020-02-05 17:24:05 +00:00
terence tsao
ce79d8e295 Insert initial sync missing blocks to fork choice store (#4750)
* Upon start up, don't insert head to proto array node DAG as index 0
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into fix-save-head-root
* Add HasNode
* Add fillInForkChoiceMissingBlocks
* Comments
* Test
* Fixed test
* Merge branch 'master' into proto-array-process-missing-block
* Merge branch 'master' into proto-array-process-missing-block
* Merge branch 'master' into proto-array-process-missing-block
2020-02-05 17:05:51 +00:00
Raul Jordan
9579a5520b Update All Libp2p Dependencies (#4746)
* all libp2p deps added
* Merge branch 'master' into update-libp2p
* add in dep
* kad
* fix p2p test
* p2p
* Merge refs/heads/master into update-libp2p
* Merge refs/heads/master into update-libp2p
* Merge refs/heads/master into update-libp2p
* Merge refs/heads/master into update-libp2p
2020-02-05 16:37:33 +00:00
Preston Van Loon
9958afe79d RPC: Use the proper db access level, use head root from head fetcher (#4752)
* Use the proper db access level, use head root from head fetcher
* Reuse head root
2020-02-05 08:50:07 +00:00
terence tsao
8ad174ffd8 Upon start up, don't insert head to proto array node DAG as index 0 (#4749) 2020-02-05 11:11:51 +08:00
Preston Van Loon
68b6a7c172 Render graphviz graph in page (#4748) 2020-02-04 17:14:43 -08:00
terence tsao
8c5c7352b1 Update blockchain metrics (#4747)
* Make new metrics the canonical one
* Both threads share same metric set
* Comments
* Fix vis
2020-02-04 22:57:36 +00:00
Raul Jordan
b705ab0239 Update dependencies from renovate (#4745)
* add in proto deps

* more deps

* more deps

* some revert

* add machinery

* downgrade k8s api

* gaz

* thrift
2020-02-04 14:13:40 -06:00
Ivan Martinez
923d5fc903 Cleanup slasher codebase (#4698)
* First wave of changes
* More changes
* More renames, changes
* Fix errors
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Fix errors, more cleaning
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Merge branch 'master' into cleanup-slasher
* fix err
* Merge branch 'cleanup-slasher' of https://github.com/0xKiwi/Prysm into cleanup-slasher
* Fix strings
* More cleanup
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Fix interface
* Fix
* Merge branch 'master' into cleanup-slasher
* Merge branch 'master' into cleanup-slasher
* Merge branch 'master' into cleanup-slasher
* Address comments
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Merge branch 'cleanup-slasher' of https://github.com/0xKiwi/Prysm into cleanup-slasher
2020-02-04 18:48:51 +00:00
terence tsao
9c1a294bf7 Misc fork choice improvements (#4744)
* Added missing spec implementation
* Use it
* Rename
* Merge branch 'master' into update-justified
2020-02-04 17:35:06 +00:00
Raul Jordan
061960c9e2 Resolve Miscellaneous Bugs in Beacon Node (#4743)
* add in nil check for head block

* fix logic

* unused import
2020-02-04 11:21:02 -06:00
terence tsao
80248cd296 Fix pre state of target block does not exist error (#4740)
* Save state even w/ initial sync cache state flag

* Tested

Co-authored-by: Nishant Das <nish1993@hotmail.com>
2020-02-04 20:19:09 +08:00
Nishant Das
95b6cca399 add lock (#4739) 2020-02-04 16:31:31 +08:00
Raul Jordan
1478882b41 Add Endpoint to Return Current Chain Config Parameters (#4595)
* add patch and endpoint
* formatting
* Merge branch 'master' into chain-config-endpoint
* Merge branch 'master' into chain-config-endpoint
* include beacon config
* config params
* Merge branch 'chain-config-endpoint' of github.com:prysmaticlabs/prysm into chain-config-endpoint
* include tests
* resolve confs
* use patch
* Merge branch 'master' into chain-config-endpoint
* passing tests
* Merge branch 'chain-config-endpoint' of github.com:prysmaticlabs/prysm into chain-config-endpoint
* Merge branch 'master' into chain-config-endpoint
* Merge branch 'master' into chain-config-endpoint
* Merge refs/heads/master into chain-config-endpoint
2020-02-04 05:28:35 +00:00
Preston Van Loon
7c4950832c Increase BLS pubkey cache to 100k from 10k (#4737)
* Increase BLS pubkey cache to 100k from 10k
2020-02-04 05:00:20 +00:00
Preston Van Loon
ce0b55d13e Ensure all fields are dirty on initialization (#4735) 2020-02-03 20:38:13 -06:00
Preston Van Loon
e63119b254 Better state locking (#4733)
* Rearrange lock a bit

* better locking without deadlock

* reorder lock
2020-02-03 17:19:22 -06:00
terence tsao
8492273fa7 Better log for Requesting block for pending attestation... (#4731)
* Better log
* Use debug
* Merge branch 'master' into req-blk-log
* Merge branch 'master' into req-blk-log
2020-02-03 21:11:21 +00:00
Preston Van Loon
5b4025efcd SSZ state cache: Only use cached value when flag is on (#4732)
* Only use cached value when flag is on
2020-02-03 20:49:07 +00:00
terence tsao
c69385e71d Clean up verify attestation and better error log (#4729) 2020-02-03 11:46:26 -08:00
terence tsao
cdfa969ced Insert block to fork choice after saving the block to DB (#4728)
* Let's try this
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm
* Better place to insert block to fork choice store
* Fmt
* Revert a few changes
* Revert a few changes
* Comments
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into insert-blk-forkchoice
* Merge refs/heads/master into insert-blk-forkchoice
2020-02-03 17:41:36 +00:00
shayzluf
a1dc4ddc40 Add get slashing endpoints (#4674)
* update go pbs
* protos
* merge
* pbs
* implement first version
* add slashing status endpoints and test
* Merge branch 'master' of github.com:prysmaticlabs/prysm into get_slashings
* add tests
* gaz and goimports
* gaz
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Merge refs/heads/master into get_slashings
* Update proto/slashing/slashing.proto
* Update proto/slashing/slashing.proto
* Update proto/slashing/slashing.proto
* Merge refs/heads/master into get_slashings
2020-02-03 17:31:54 +00:00
Jim McDonald
648584b356 Add wallet keymanager (#4687)
* Add wallet keymanager

* Read keymanageropts from file if not JSON

Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-02-03 11:13:58 -06:00
Nishant Das
fb7a75d2c3 Release Proposer Index Cache (#4717)
* release cache

* gaz

* fix all tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-02-03 10:23:04 -06:00
Jim McDonald
00a6361c66 Relegate some p2p messages (#4725)
* Relegate some p2p messages
2020-02-03 11:27:20 +00:00
Preston Van Loon
6213c94a14 Fork choice: only update head if the new block is a higher block slot (#4722)
* only update head IF the new block is a higher block slot
* Merge refs/heads/master into forkchoice-by-highest-blockslot
2020-02-03 01:40:20 +00:00
Preston Van Loon
397b7d807a Pubsub: Ignore block already in database (#4721)
* Ignore block already in database
* Merge refs/heads/master into skip-block-already-in-db
2020-02-03 00:37:50 +00:00
Preston Van Loon
05876d6250 Only update committee cache if it doesn't have that key already (#4719)
* Only update committee cache if it doesn't have that key already
2020-02-03 00:24:54 +00:00
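#4719 avoids overwriting committee cache entries that already exist. A small sketch of the check-before-write pattern, with illustrative types:

```go
// Sketch of "only insert when the key is missing" for a committee cache (#4719).
package main

import (
	"fmt"
	"sync"
)

type committeeCache struct {
	mu    sync.Mutex
	items map[uint64][]uint64 // key (e.g. seed/epoch) -> shuffled validator indices
}

// AddIfMissing skips the insert when the key is already cached, so concurrent
// callers do not redo shuffling work or churn the cache.
func (c *committeeCache) AddIfMissing(key uint64, committee []uint64) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if _, ok := c.items[key]; ok {
		return false
	}
	c.items[key] = committee
	return true
}

func main() {
	c := &committeeCache{items: make(map[uint64][]uint64)}
	fmt.Println(c.AddIfMissing(1, []uint64{4, 2, 7})) // true: inserted
	fmt.Println(c.AddIfMissing(1, []uint64{9, 9, 9})) // false: already cached
}
```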
Preston Van Loon
bd334c4192 Minor fixes (#4716)
* copy state in cache, ensure pre state exists before attempting to process attestation in fork choice
* copy state in cache, ensure pre state exists before attempting to process attestation in fork choice
* fix test
2020-02-02 05:23:05 +00:00
Preston Van Loon
4f38333e54 Copy head state to ensure it is never mutated (#4715)
* Copy head state to ensure it is never mutated
* Merge refs/heads/master into copy-head-state
* fix tests
* Merge branch 'copy-head-state' of github.com:prysmaticlabs/prysm into copy-head-state
2020-02-02 03:19:51 +00:00
Nishant Das
d6bd389d5c Custom Copy of Pending Attestations (#4711)
* custom copy
* lint
* Merge refs/heads/master into customCopy
* preston's review
* Merge branch 'customCopy' of https://github.com/prysmaticlabs/geth-sharding into customCopy
* Merge refs/heads/master into customCopy
* Merge refs/heads/master into customCopy
* nil check
* Merge refs/heads/master into customCopy
* Merge branch 'customCopy' of https://github.com/prysmaticlabs/geth-sharding into customCopy
* Merge refs/heads/master into customCopy
* fixed test
* Merge branch 'customCopy' of https://github.com/prysmaticlabs/geth-sharding into customCopy
2020-02-02 02:56:53 +00:00
Preston Van Loon
962be9b4f8 Propagate blocks again after we process it in pending blocks queue (#4714)
* propagate blocks again after we process it in pending blocks queue
* Merge refs/heads/master into reprop-block
2020-02-02 02:22:59 +00:00
terence tsao
f77049ae74 Handle attestations with missing block (#4705)
* Fmt
* Starting
* Cont
* Store aggregate attestation is better
* Conflict
* Done
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into process-pending-atts
* Comment
* Better logs
* Better logs
* Fix existing tests
* Update metric names
* Preston's feedback
* Broadcast atts once it's valid
* Gazelle
* Test for validating atts and pruning
* Tests
* Removed debug log
* Conflict
* Feedback
* Merge refs/heads/master into process-pending-atts
* Merge refs/heads/master into process-pending-atts
2020-02-02 01:42:29 +00:00
Preston Van Loon
f432f7851e BeaconState: Use copy on write for validator index map (#4713)
* Use copy on write for validator index map
* Merge refs/heads/master into copy-on-write-2
2020-02-02 01:21:50 +00:00
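#4713 shares the validator index map between state copies and only clones it on the first write. A simplified copy-on-write sketch follows; the real code tracks shared references more carefully than this boolean flag, so treat it as an illustration of the idea only:

```go
// Illustrative copy-on-write for a shared validator index map (#4713).
package main

import "fmt"

type state struct {
	valIdxMap map[[48]byte]uint64
	mapShared bool // true while the map may be shared with another state copy
}

func (s *state) Copy() *state {
	s.mapShared = true
	return &state{valIdxMap: s.valIdxMap, mapShared: true}
}

func (s *state) SetIndex(pub [48]byte, idx uint64) {
	if s.mapShared { // first write after a copy: clone before mutating
		clone := make(map[[48]byte]uint64, len(s.valIdxMap))
		for k, v := range s.valIdxMap {
			clone[k] = v
		}
		s.valIdxMap = clone
		s.mapShared = false
	}
	s.valIdxMap[pub] = idx
}

func main() {
	a := &state{valIdxMap: map[[48]byte]uint64{}}
	var pk [48]byte
	a.SetIndex(pk, 1)
	b := a.Copy()
	b.SetIndex(pk, 2)                             // clones; a is untouched
	fmt.Println(a.valIdxMap[pk], b.valIdxMap[pk]) // 1 2
}
```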
Preston Van Loon
b7e6012628 Better eth1data equals (#4712)
* Better eth1data equals
* Merge branch 'master' into better-eth1-data-equal
2020-02-02 01:03:21 +00:00
Preston Van Loon
79434fc2d1 Use sync.pool for keccak256 and sha256 (#4710)
* Use sync.pool for keccak

* Add sha256 too

* custom hasher use pool too

* reset custom hasher

* fix?

* Add comment that customHasher should only be used in cases of more than 5 usages
2020-02-01 16:37:37 -08:00
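#4710 pools hashers instead of allocating one per call. A minimal sync.Pool sketch for SHA-256; the same pattern applies to a Keccak-256 hasher, and the five-usage guidance from the last bullet is about amortizing the pool overhead on hot paths:

```go
// Sketch of reusing hashers via sync.Pool (#4710).
package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
	"sync"
)

var sha256Pool = sync.Pool{
	New: func() interface{} { return sha256.New() },
}

func hash256(data []byte) [32]byte {
	h := sha256Pool.Get().(hash.Hash)
	defer func() {
		h.Reset() // return a clean hasher to the pool
		sha256Pool.Put(h)
	}()
	var out [32]byte
	h.Write(data)
	h.Sum(out[:0]) // appends the digest into out's backing array
	return out
}

func main() {
	fmt.Printf("%x\n", hash256([]byte("hello")))
}
```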
Preston Van Loon
069ec1726b Pending blocks queue: Better locking priority (#4709)
* Better locking priority, use correct lock on validating pending blocks
2020-02-01 22:47:51 +00:00
Preston Van Loon
2a79c572a5 Pruning old states: Use a warning level log instead of fatal (#4707)
* Use a warning level log instead of fatal
2020-02-01 20:51:20 +00:00
Preston Van Loon
c2fbb40909 Beacon state: copy on write for certain large fields (#4699)
* begin state service

* begin on the state trie idea

* created beacon state structure

* add in the full clone getter

* return by value instead

* add all setters

* new state setters are being completed

* arrays roots exposed

*  close to finishing all these headerssss

* functionality complete

* added in proto benchmark test

* test for compatibility

* add test for compat

* comments fixed

* add clone

* add clone

* remove underlying copies

* make it immutable

* integrate it into chainservice

* revert

* wrap up comments for package

* address all comments and godocs

* address all comments

* clone the pending attestation properly

* properly clone remaining items

* tests pass fixed bug

* begin using it instead of head state

* prevent nil pointer exceptions

* begin using new struct in db

* integrated new type into db package

* add proper nil checks

* using new state in archiver

* refactored much of core

* editing all the precompute functions

* done with most core refactor

* fixed up some bugs in the clone comparisons

* append current epoch atts

* add missing setters

* add new setters

* fix other core methods

* fix up transition

* main service and forkchoice

* fix rpc

* integrated to powchain

* some more changes

* fix build

* improve processing of deposits

* fix error

* prevent panic

* comment

* fix process att

* gaz

* fix up att process

* resolve existing review comments

* resolve another batch of gh comments

* resolve broken cpt state

* revise testutil to use the new state

* begin updating the state transition func to pass in more compartmentalized args

* finish editing transition function to return errors

* block operations pretty much done with refactor

* state transition fully refactored

* got epoch processing completed

* fix build in fork choice

* fixing more of the build

* fix up broken sync package

* it builds nowww it buildssss

* revert registry changes

* Recompute on Read (#4627)

* compute on read

* fix up eth1 data votes

* looking into slashings bug introduced in core/

* able to advance more slots

* add logging

* can now sync with testnet yay

* remove the leaves algorithm and other merkle imports

* expose initialize unsafe funcs

* Update beacon-chain/db/kv/state.go

* lint

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* More Optimizations for New State (#4641)

* map optimization

* more optimizations

* use a custom hasher

* comment

* block operations optimizations

* Update beacon-chain/state/types.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* fixed up various operations to use the validator index map access

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* archiver tests pass

* fixing cache tests

* cache tests passing

* edited validator tests

* powchain tests passing

* halfway thru sync tests

* more sync test fixes

* add in tests for state/

* working through rpc tests

* assignments tests passed

* almost done with rpc/beacon tests

* resolved painful validator test

* fixed up even more tests

* resolve tests

* fix build

* reduce a randao mixes copy

* fixes under //beacon-chain/blockchain/...

* build //beacon-chain/core/...

* fixes

* Runtime Optimizations (#4648)

* parallelize shuffling

* clean up

* lint

* fix build

* use callback to read from registry

* fix array roots and size map

* new improvements

* reduce hash allocs

* improved shuffling

* terence's review

* use different method

* raul's comment

* new array roots

* remove clone in pre-compute

* Update beacon-chain/state/types.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* raul's review

* lint

* fix build issues

* fix visibility

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* fix visibility

* build works for all

* fix blockchain test

* fix a few tests

* fix more tests

* update validator in slashing

* archiver passing

* fixed rpc/validator

* progress on core tests

* resolve broken rpc tests

* blockchain tests passed

* fix up some tests in core

* fix message diff

* remove unnecessary save

* Save validator after slashing

* Update validators one by one

* another update

* fix everything

* fix more precompute tests

* fix blocks tests

* more elegant fix

* more helper fixes

* change back ?

* fix test

* fix skip slot

* fix test

* reset caches

* fix testutil

* raceoff fixed

* passing

* Retrieve cached state in the beginning

* lint

* Fixed tests part 1

* Fixed rest of the tests

* Minor changes to avoid copying, small refactor to reduce duplicated code

* Handle att req for slot 0

* New beacon state: Only populate merkle layers as needed, copy merkle layers on copy/clone. (#4689)

* Only populate merkle layers as needed, copy merkle layers on copy/clone.

* use custom copy

* Make maps of correct size

* slightly fast, doesn't wait for lock

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Target root can't be 0x00

* Don't use cache for current slot (may not be the right fix)

* fixed up tests

* Remove some copy for init sync. Not sure if it is safe enough for runtime though... testing...

* Align with prev logic for process slots cachedState.Slot() < slot

* Fix Initial Sync Flag (#4692)

* fixes

* fix up some test failures due to lack of nil checks

* fix up some test failures due to lack of nil checks

* fix up imports

* revert some changes

* imports

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* resolving further conflicts

* Better skip slot cache (#4694)

* Return copy of skip slot cache state, disable skip slot cache on sync

* fix

* Fix pruning

* copy on write method

* gaz

* fix tests

* fix up issues with broken tests

* remove extra update

* remove debugging lines

* gofmt

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-31 23:23:34 -08:00
Preston Van Loon
d32493d43b Ensure exits are not for an already exited validator (#4701)
* ensure exits are not for an already exited validator
2020-02-01 01:18:36 +00:00
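#4701 rejects voluntary exits for validators that already have an exit epoch set. A tiny sketch of that guard, with illustrative field names and a placeholder constant standing in for the spec's FAR_FUTURE_EPOCH:

```go
// Sketch of rejecting an exit for an already-exited validator (#4701).
package main

import (
	"errors"
	"fmt"
)

const farFutureEpoch = ^uint64(0) // placeholder for FAR_FUTURE_EPOCH

type validator struct {
	ExitEpoch uint64
}

func verifyExit(v *validator) error {
	if v.ExitEpoch != farFutureEpoch {
		return errors.New("validator has already submitted an exit")
	}
	return nil
}

func main() {
	fmt.Println(verifyExit(&validator{ExitEpoch: farFutureEpoch})) // <nil>
	fmt.Println(verifyExit(&validator{ExitEpoch: 1234}))           // error
}
```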
terence tsao
0b2b77c5b0 Remove validate_beacon_attestation (#4700) 2020-01-31 15:35:13 -08:00
terence tsao
d8c26590ca Prune dangling states in DB upon start up (#4697)
* Add pruneGarbageState and test
* Comments
* Update beacon-chain/blockchain/service.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/blockchain/service.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/blockchain/service.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Fixed test
* Merge refs/heads/master into prune-start-up
* Fixed test
2020-01-31 23:15:35 +00:00
Raul Jordan
cc741ed8af Ensure New State Type Tests Pass in Prysm (#4646)
* begin state service

* begin on the state trie idea

* created beacon state structure

* add in the full clone getter

* return by value instead

* add all setters

* new state setters are being completed

* arrays roots exposed

*  close to finishing all these headerssss

* functionality complete

* added in proto benchmark test

* test for compatibility

* add test for compat

* comments fixed

* add clone

* add clone

* remove underlying copies

* make it immutable

* integrate it into chainservice

* revert

* wrap up comments for package

* address all comments and godocs

* address all comments

* clone the pending attestation properly

* properly clone remaining items

* tests pass fixed bug

* begin using it instead of head state

* prevent nil pointer exceptions

* begin using new struct in db

* integrated new type into db package

* add proper nil checks

* using new state in archiver

* refactored much of core

* editing all the precompute functions

* done with most core refactor

* fixed up some bugs in the clone comparisons

* append current epoch atts

* add missing setters

* add new setters

* fix other core methods

* fix up transition

* main service and forkchoice

* fix rpc

* integrated to powchain

* some more changes

* fix build

* improve processing of deposits

* fix error

* prevent panic

* comment

* fix process att

* gaz

* fix up att process

* resolve existing review comments

* resolve another batch of gh comments

* resolve broken cpt state

* revise testutil to use the new state

* begin updating the state transition func to pass in more compartmentalized args

* finish editing transition function to return errors

* block operations pretty much done with refactor

* state transition fully refactored

* got epoch processing completed

* fix build in fork choice

* fixing more of the build

* fix up broken sync package

* it builds nowww it buildssss

* revert registry changes

* Recompute on Read (#4627)

* compute on read

* fix up eth1 data votes

* looking into slashings bug introduced in core/

* able to advance more slots

* add logging

* can now sync with testnet yay

* remove the leaves algorithm and other merkle imports

* expose initialize unsafe funcs

* Update beacon-chain/db/kv/state.go

* lint

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* More Optimizations for New State (#4641)

* map optimization

* more optimizations

* use a custom hasher

* comment

* block operations optimizations

* Update beacon-chain/state/types.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* fixed up various operations to use the validator index map access

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* archiver tests pass

* fixing cache tests

* cache tests passing

* edited validator tests

* powchain tests passing

* halfway thru sync tests

* more sync test fixes

* add in tests for state/

* working through rpc tests

* assignments tests passed

* almost done with rpc/beacon tests

* resolved painful validator test

* fixed up even more tests

* resolve tests

* fix build

* reduce a randao mixes copy

* fixes under //beacon-chain/blockchain/...

* build //beacon-chain/core/...

* fixes

* Runtime Optimizations (#4648)

* parallelize shuffling

* clean up

* lint

* fix build

* use callback to read from registry

* fix array roots and size map

* new improvements

* reduce hash allocs

* improved shuffling

* terence's review

* use different method

* raul's comment

* new array roots

* remove clone in pre-compute

* Update beacon-chain/state/types.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* raul's review

* lint

* fix build issues

* fix visibility

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* fix visibility

* build works for all

* fix blockchain test

* fix a few tests

* fix more tests

* update validator in slashing

* archiver passing

* fixed rpc/validator

* progress on core tests

* resolve broken rpc tests

* blockchain tests passed

* fix up some tests in core

* fix message diff

* remove unnecessary save

* Save validator after slashing

* Update validators one by one

* another update

* fix everything

* fix more precompute tests

* fix blocks tests

* more elegant fix

* more helper fixes

* change back ?

* fix test

* fix skip slot

* fix test

* reset caches

* fix testutil

* raceoff fixed

* passing

* Retrieve cached state in the beginning

* lint

* Fixed tests part 1

* Fixed rest of the tests

* Minor changes to avoid copying, small refactor to reduce duplicated code

* Handle att req for slot 0

* New beacon state: Only populate merkle layers as needed, copy merkle layers on copy/clone. (#4689)

* Only populate merkle layers as needed, copy merkle layers on copy/clone.

* use custom copy

* Make maps of correct size

* slightly fast, doesn't wait for lock

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Target root can't be 0x00

* Don't use cache for current slot (may not be the right fix)

* fixed up tests

* Remove some copy for init sync. Not sure if it is safe enough for runtime though... testing...

* Align with prev logic for process slots cachedState.Slot() < slot

* Fix Initial Sync Flag (#4692)

* fixes

* fix up some test failures due to lack of nil checks

* fix up some test failures due to lack of nil checks

* fix up imports

* revert some changes

* imports

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* resolving further conflicts

* Better skip slot cache (#4694)

* Return copy of skip slot cache state, disable skip slot cache on sync

* fix

* Fix pruning

* fix up issues with broken tests

Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-31 12:57:01 -08:00
Raul Jordan
f97ac5f0d7 Remove Already Exited Validators From Queue (#4695)
* added regression test

* fixed the regression test units

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-30 17:57:42 -08:00
Ivan Martinez
85a38e6053 Make E2E more consistent, change log file setup (#4696)
* Make E2E more consistent, change log file setup
* Merge branch 'master' into fix-e2e-sync
2020-01-31 00:16:36 +00:00
terence tsao
7f07ad831e Update seen for attestation pool (#4669)
* Use `contain` instead of `overlap`
* Tests
* Update seen
* Merge branch 'master' into use-contain
* Use OR to track all of the bits we have seen so far
* Added one more test case
* gofmt
* Conflict
* Picked up Preston's changes
* Merge branch 'use-contain' of git+ssh://github.com/prysmaticlabs/prysm into use-contain
2020-01-30 23:30:37 +00:00
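#4669 keeps a running OR of every aggregation bitfield seen per attestation data root and treats a new attestation as already-seen only when its bits are fully contained in that set. The sketch below illustrates the containment-versus-overlap distinction, with a plain uint64 standing in for the real bitlist type:

```go
// Illustrative seen-bits bookkeeping (#4669): OR everything seen, test containment.
package main

import "fmt"

type seenTracker struct {
	seen map[[32]byte]uint64 // data root -> OR of all aggregation bits seen so far
}

// Observe reports whether every bit in `bits` was already seen for this root,
// and folds the new bits into the running set either way.
func (t *seenTracker) Observe(root [32]byte, bits uint64) bool {
	prev := t.seen[root]
	contained := prev&bits == bits // containment, not mere overlap
	t.seen[root] = prev | bits
	return contained
}

func main() {
	t := &seenTracker{seen: make(map[[32]byte]uint64)}
	var root [32]byte
	fmt.Println(t.Observe(root, 0b0011)) // false: new bits
	fmt.Println(t.Observe(root, 0b0001)) // true: subset of what we've seen
	fmt.Println(t.Observe(root, 0b0101)) // false: overlaps but is not contained
}
```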
Jim McDonald
ad7d9ab1da Validator status updates (#4675)
* Update ValidatorStatus to match Ethereum APIs
* Tidy up status calculation
* Merge branch 'master' into validator-status-updates
* Merge branch 'master' into validator-status-updates
* Update beacon-chain/rpc/beacon/config.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update test names
2020-01-30 19:46:37 +00:00
terence tsao
e452b950d0 Better caching of attestation pre state (#4688) 2020-01-30 11:06:20 -08:00
Ivan Martinez
2e2cec3a61 Make E2E more resilient, check balance and participation every epoch (#4679) 2020-01-29 16:14:10 -08:00
Ivan Martinez
c80ffc640f Fix flag bug (#4690) 2020-01-29 16:16:37 -06:00
Dmitri Tsumak
f6b4637a91 Update Eth1FollowDistance to 16 for minimal config (#4566)
* Update Eth1FollowDistance to 16 for minimal config
* Merge branch 'master' into fix-minimal-config
* Merge branch 'master' into fix-minimal-config
* Merge branch 'master' into fix-minimal-config
* Merge branch 'master' into fix-minimal-config
* Merge branch 'master' into fix-minimal-config
* Fix tests after minimal config changes
2020-01-29 17:56:21 +00:00
Ivan Martinez
3e9bf58d81 Fix validator assignments on slot 0 (#4682)
* Fix validator acting upon first slot
* Change log to debug
* Fix roles at slot 0
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into fix-val
* Add regression test
* Formatting
* Add slot ticker regression test
2020-01-29 04:30:30 +00:00
terence tsao
a22c97739e Remove prune state (#4680)
* Rm prune state
* Merge branch 'master' into rm-prune-state
2020-01-29 03:53:57 +00:00
shayzluf
ade61717a4 Slasher data update from archive (#4563)
* first version

* cli context

* fix service

* starting change to ccache

* ristretto cache

* added test

* test on evict

* remove evict test

* test onevict

* comment for exported flag

* update all span maps on load

* fix setup db

* span cache added to help flags

* start save cache on exit

* save cache to db before close

* comment fix

* fix flags

* setup db new

* data update from archive node

* gaz

* slashing detection on old attestations

* un-export

* rename

* nishant feedback

* workspace cr

* lint fix

* fix calls

* start db

* fix test

* Update slasher/db/db.go

Co-Authored-By: Nishant Das <nishdas93@gmail.com>

* add flag

* fix fail to start beacon client

* mock beacon service

* fix imports

* gaz

* goimports

* add clear db flag

* print finalized epoch

* better msg

* Update slasher/db/attester_slashings.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* raul feedback

* raul feedback

* raul feedback

* raul feedback

* raul feedback

* add detection in runtime

* fix tests

* raul feedbacks

* raul feedback

* raul feedback

* goimports

* Update beacon-chain/blockchain/process_attestation_helpers.go

* Update beacon-chain/blockchain/receive_block.go

* Update beacon-chain/core/blocks/block_operations_test.go

* Update beacon-chain/core/blocks/block_operations.go

* Update beacon-chain/core/epoch/epoch_processing.go

* Update beacon-chain/sync/validate_aggregate_proof_test.go

* Update shared/testutil/block.go

* Update slasher/service/data_update.go

* Update tools/blocktree/main.go

* Update slasher/service/service.go

* Update beacon-chain/core/epoch/precompute/attestation_test.go

* Update beacon-chain/core/helpers/committee_test.go

* Update beacon-chain/core/state/transition_test.go

* Update beacon-chain/rpc/aggregator/server_test.go

* Update beacon-chain/sync/validate_aggregate_proof.go

* Update beacon-chain/rpc/validator/proposer_test.go

* Update beacon-chain/blockchain/forkchoice/process_attestation.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/db/indexed_attestations.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/service/data_update.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* terence feedback

* terence feedback

* goimports

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-01-29 07:14:51 +05:30
Ivan Martinez
9149c2e4f4 Replace no-genesis-delay with custom-genesis-delay (#4678)
* Change NoGenesisDelay to CustomGenesisDelay
* Implement flag
* gazelle
* Fix docs
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into custom-genesis-delay
* Fix
* Gazelle
* Add case to fix bad math
* Merge branch 'master' into custom-genesis-delay
2020-01-28 21:07:43 +00:00
terence tsao
07ba594023 Fix initial sync cache state (#4677) 2020-01-28 12:43:54 -08:00
Ivan Martinez
ad01bfbcde Add sync test to E2E (#4654)
* Complete evaluator for chain consensus

* Add sync e2e test

* Cleanup

* Rename

* Add tad more offset for correct head

* Change offset to middle of slot

* Change head block root to head epoch

* comment

* Fix eth1

* Address comments

* Gazelle

* Change to use file

* Change to use reader

* Use fil
2020-01-28 13:16:00 -06:00
Raul Jordan
439a84fcb9 Clear Run Error in Powchain Service Upon Reconnect (#4671)
* clear the run err on reconnect
* Merge refs/heads/master into clear-err-on-reconnect
* nishant feedback
2020-01-28 04:04:38 +00:00
terence tsao
e2be2a21d0 Part 2 of block chain service refactor - move process attestation (#4672) 2020-01-27 18:04:43 -08:00
terence tsao
eaf7ae3774 Part 1 of block chain service refactor - move process block (#4670) 2020-01-27 13:48:16 -08:00
terence tsao
1fa301c79c Update node count based on insertion (#4653)
* Update node count based on insertion

* Update nodes.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-27 15:24:58 -06:00
Preston Van Loon
1c759f6404 Disable more fork choice options with flag on (#4665)
* Disable updating latest votes if disable fork choice
* do not recompute the block tree cache if the fork choice is not being used
2020-01-27 09:11:28 +00:00
Preston Van Loon
4960acb285 Fork choice: Ensure lengths are the same before checking overlap (#4663)
* Ensure lengths are the same before checking overlap
2020-01-27 07:18:02 +00:00
Ivan Martinez
127f05d531 Allow easy plugin of featureflags into E2E (#4659)
* Enable easy plugin of featureflags into E2E

* Gazelle

* Fix text

* Fix whitespace
2020-01-26 21:42:10 -05:00
Preston Van Loon
2f02a2baa3 Actually wire up exits (#4661)
* Actually wire up exits
* Merge branch 'master' into exit-fixes
2020-01-27 01:49:37 +00:00
terence tsao
d4bea51482 Proto array fork choice tree handler (#4658) 2020-01-26 12:25:33 -08:00
Nishant Das
4ea5661f8f Clear Pre-Genesis Objects (#4656)
* remove pre-genesis data
* Merge branch 'master' into clearUnusedObjects
* lint
* Merge branch 'clearUnusedObjects' of https://github.com/prysmaticlabs/geth-sharding into clearUnusedObjects
* fix build
* gaz
* faulty mock
* Update beacon-chain/blockchain/service.go
2020-01-26 17:50:40 +00:00
terence tsao
5eece9a507 Integrate proto array forkchoice to run time (#4649)
* Run time

* Fixed pruning

* Fixed test

* Fixed test

* Process attestations during init sync

* Raul's feedback

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-25 14:22:25 -06:00
Nishant Das
417480ffa8 fix bug (#4650) 2020-01-25 09:19:50 -08:00
Jim McDonald
dd5a3fe80d Update docs for keymanager (#4651) 2020-01-25 06:25:58 -08:00
Ivan Martinez
fa2acb3632 Improve E2E to be more consistent with timing, and allow for custom flags (#4620)
* Add committees helper and benchmark; results show 62ms for 8k validators, which was previously 4 minutes

* Add regression test with same data

* fix epoch conversion

* lint

* undo and lint

* Begin work on adding mainnet config benchmark

* Try more to get mainnet e2e

* Try to fix delay

* Get past chainstart on e2e

* Try to fix flaky

* Get demo config working

* Remove unneeded changes

* Change how flags are enabled

* Lower shard count

* Temp skip

* Fix e2e

* Fix testing to run until last epoch

* Fix

* Add ending time log and remove att cache flag

* Fix ordering

* Reenable flag

* Change ports from default

* Add no log for if there are no err logs

* Add block evaluator

* Try to improve evaluators

* Progress on attestation evaluator

* Remove attestation evaluator

* Fix e2e

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-01-25 15:39:56 +08:00
Ivan Martinez
1562d3252b Allow ListBeaconCommittees API to return previous epoch (#4647)
* Allow committees API to request previous epoch

* Fix error log

* Fix previous epoch test
2020-01-24 19:54:18 -06:00
Preston Van Loon
10341cbf7f Add flags to cluster pk manager (#4645)
* Add flags to cluster pk manager
* Merge branch 'master' into cluster-pk-mgr
2020-01-24 19:34:24 +00:00
terence tsao
0f730b5887 Part 10 of proto array fork choice - Add Store (#4644) 2020-01-24 10:58:19 -08:00
terence tsao
b313b46f79 Part 9 of proto array fork choice - get head (#4643) 2020-01-24 10:15:01 -08:00
Jim McDonald
a78defcd26 Move to keymanager/keymanageropts command line parameters (#4590)
* Move to keymanager/keymanageropts command line parameters

* Add help for individual keymanagers

* gazelle

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-01-24 11:21:31 -06:00
terence tsao
86f6a44da6 Part 8 of proto array fork choice - prune (#4642)
* Docs
* Interface definitions
* Fmt and gazelle
* Rename interface to interfaces
* Define all the type for protoarray
* Gaz
* Add error types
* Add compute delta helper
* Compute delta tests
* Gaz
* Add checking if nodes viable
* Test for viable head
* Test for non leaf node can lead to viable head
* Conflict
* Extra space
* Remove fmt print
* Add updateBestChildAndDescendant
* Tests
* Merge branch 'master' into proto-array-forkchoice-6
* Conflict
* Merge branch 'proto-array-forkchoice-6' of git+ssh://github.com/prysmaticlabs/prysm into proto-array-forkchoice-6
* Add applyScoreChanges
* More test
* Rename score to weight
* Conflict
* Add insert function
* Test
* Merge refs/heads/master into proto-array-forkchoice-8
* Merge refs/heads/master into proto-array-forkchoice-8
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into proto-array-forkchoice-8
* Merge branch 'proto-array-forkchoice-8' of git+ssh://github.com/prysmaticlabs/prysm into proto-array-forkchoice-8
* Add prune method
* Tests
* Fixed long line
2020-01-24 15:51:11 +00:00
terence tsao
d978c19a41 Part 6 of proto array fork choice - update weight (#4636) 2020-01-23 20:32:27 -08:00
Preston Van Loon
588773cd0c Remove pubkey to validator ID map from validator (#4634)
* Remove map from validator
* remove failure check test
* fix
* Merge refs/heads/master into validator-fix
* Merge refs/heads/master into validator-fix
* Merge refs/heads/master into validator-fix
* Add error log if validator not found in committee
* Merge refs/heads/master into validator-fix
2020-01-24 01:50:07 +00:00
Preston Van Loon
62a5931843 Use a client side rate limit to reduce chance of getting banned (#4637)
* Use a client side rate limit to reduce chance of getting banned

* fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-23 17:33:43 -08:00
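A minimal sketch of the client-side throttling idea in the commit above, assuming the golang.org/x/time/rate package; the function name, rate, and burst values are illustrative and not Prysm's actual code — only the "burst of 10x" note comes from the commit.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// requestBlocks throttles outgoing requests so the remote peer's own limiter
// is less likely to flag us as abusive and disconnect.
func requestBlocks(ctx context.Context, limiter *rate.Limiter, count int) error {
	for i := 0; i < count; i++ {
		// Wait blocks until the limiter grants a token or the context expires.
		if err := limiter.Wait(ctx); err != nil {
			return err
		}
		fmt.Println("sending block-by-range request", i)
	}
	return nil
}

func main() {
	// Roughly 10 requests per second with a burst of 100 (10x), echoing the
	// "set burst to x10" bullet above; the exact numbers are illustrative.
	limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 100)
	if err := requestBlocks(context.Background(), limiter, 5); err != nil {
		fmt.Println("request loop aborted:", err)
	}
}
```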
Preston Van Loon
6d6e8be10a Disable kafka build by default (#4638)
* Disable kafka build by default
2020-01-23 23:44:09 +00:00
terence tsao
144dcc3a69 Part 5 of proto array fork choice - update best child and descendant (#4629) 2020-01-23 14:23:45 -08:00
Jim McDonald
0f27343364 Fix deposit inclusion slot calculation (#4635) 2020-01-23 15:48:51 -05:00
terence tsao
3388ab74cf Part 4 of proto array fork choice - check nodes viable (#4625)
* Docs

* Interface definitions

* Fmt and gazelle

* Rename interface to interfaces

* Define all the type for protoarray

* Gaz

* Add error types

* Add compute delta helper

* Compute delta tests

* Gaz

* Add checking if nodes viable

* Test for viable head

* Test for non leaf node can lead to viable head

* Extra space

* Remove fmt print

* Update beacon-chain/forkchoice/protoarray/nodes.go

Co-Authored-By: Nishant Das <nishdas93@gmail.com>

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Nishant Das <nish1993@hotmail.com>
2020-01-23 08:33:39 -08:00
Andre Miras
663557e44e Fixes broken links to docs.prylabs.network (#4628) 2020-01-23 08:14:35 -06:00
shayzluf
5df77848bb Fix go pbs (#4626)
* fix issues

* fix go pbs

* added services

* added services

* remove unused files

* bring back used files

* bring back db proto files

* gaz

* gaz and bring back faucet

* gaz and bring back rpc

* gaz and bring back rpc

* gaz and bring back rpc

* go imports

* remove unused
2020-01-23 16:03:11 +05:30
Ivan Martinez
ed3ab828a1 Implement attester protection into validator client (#4598)
* Add flag for attester protection

* Remove flags

* Add attestation history DB functions to validator client

* Fix comments

* Update interface to new funcs

* Fix test

* Add flags

* Implement most of attester protection

* Fix tests

* Add test for pruning

* Add more test cases for prunes

* Remove todo comment

* Fix comments

* Rename functions

* Fix logs

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-01-23 00:52:01 -05:00
Nishant Das
ee9b9e69dc Fix Faucet (#4624)
* fix faucet

* preston's review
2020-01-23 12:13:27 +08:00
Nishant Das
460250251d add flag (#4622) 2020-01-23 11:25:10 +08:00
Preston Van Loon
4aa7ebc2b7 Wire voluntary exits pool (#4613)
* Hookup voluntary exits pool
* Merge refs/heads/master into wire-voluntary-exits
* fix tests
* Merge branch 'wire-voluntary-exits' of github.com:prysmaticlabs/prysm into wire-voluntary-exits
* Merge refs/heads/master into wire-voluntary-exits
* gofmt
* Merge branch 'wire-voluntary-exits' of github.com:prysmaticlabs/prysm into wire-voluntary-exits
* gofmt
* gaz
* Merge refs/heads/master into wire-voluntary-exits
2020-01-22 22:27:44 +00:00
Tim Myers
a1e3c2d47c Add --p2p-host-dns flag to specify p2p external DNS (#4608)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-01-22 16:07:22 -06:00
Preston Van Loon
cc58b5aca6 Refactor block operations for validating exits slightly (#4612)
* Refactor block operations for validating exits slightly so that we don't have to advance state in a pubsub validator

* current slot

* remove duplicated validation for exits

* nil request check

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-22 13:49:38 -08:00
Jim McDonald
9a395530b7 Tidy up error logging (#4609)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-01-22 15:12:49 -06:00
terence tsao
5cc6de9e67 Part 3 of proto array fork choice - compute delta helper (#4617)
* Docs

* Interface definitions

* Fmt and gazelle

* Rename interface to interfaces

* Define all the type for protoarray

* Gaz

* Add error types

* Add compute delta helper

* Compute delta tests

* Gaz

* Fix formatting and comments

* Apply suggestions from code review

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-01-22 14:19:52 -06:00
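A rough sketch of the "compute delta" step described in the proto-array fork choice commits above: each changed vote subtracts the validator's old balance from its previous target node and adds the new balance to its new target. The vote struct, balance slices, and index map below are illustrative types, not Prysm's actual ones.

```go
package main

import "fmt"

// vote tracks where a validator voted last time and where it votes now.
type vote struct {
	currentRoot [32]byte
	nextRoot    [32]byte
}

// computeDeltas builds one weight delta per tree node from the changed votes.
func computeDeltas(indices map[[32]byte]int, votes []vote, oldBal, newBal []uint64) []int64 {
	deltas := make([]int64, len(indices))
	for i := range votes {
		oldB, newB := uint64(0), uint64(0)
		if i < len(oldBal) {
			oldB = oldBal[i]
		}
		if i < len(newBal) {
			newB = newBal[i]
		}
		if votes[i].currentRoot == votes[i].nextRoot && oldB == newB {
			continue // nothing changed for this validator
		}
		if idx, ok := indices[votes[i].currentRoot]; ok {
			deltas[idx] -= int64(oldB) // remove weight from the old target
		}
		if idx, ok := indices[votes[i].nextRoot]; ok {
			deltas[idx] += int64(newB) // add weight to the new target
		}
		votes[i].currentRoot = votes[i].nextRoot // mark the vote as applied
	}
	return deltas
}

func main() {
	a, b := [32]byte{1}, [32]byte{2}
	indices := map[[32]byte]int{a: 0, b: 1}
	votes := []vote{{currentRoot: a, nextRoot: b}}
	fmt.Println(computeDeltas(indices, votes, []uint64{32}, []uint64{32})) // [-32 32]
}
```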
terence tsao
c041403a50 Part 2 of proto array fork choice - proto array types (#4616)
* Docs

* Interface definitions

* Fmt and gazelle

* Rename interface to interfaces

* Define all the type for protoarray

* Gaz
2020-01-22 13:12:41 -05:00
terence tsao
8d889f169e Part 1 of proto array fork choice - docs and interfaces (#4615)
* Docs

* Interface definitions

* Fmt and gazelle

* Rename interface to interfaces
2020-01-22 10:50:16 -06:00
shayzluf
b030771174 Slasher span cache (#4388)
* first version

* cli context

* fix service

* starting change to ccache

* ristretto cache

* added test

* test on evict

* remove evict test

* test onevict

* comment for exported flag

* update all span maps on load

* fix setup db

* span cache added to help flags

* start save cache on exit

* save cache to db before close

* comment fix

* fix flags

* setup db new

* nishant feedback

* workspace cr

* lint fix

* fix calls

* start db

* fix test

* Update slasher/db/db.go

Co-Authored-By: Nishant Das <nishdas93@gmail.com>

* add flag

* nishant feedback

* export Config

* fix imports

* fix imports

* fix imports

* Update slasher/service/service.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/service/service.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/service/service.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/service/service.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* remove mod print

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-01-21 23:39:21 -06:00
Raul Jordan
abe679e90e Create New Beacon State Data Structure (#4602)
* begin state service

* begin on the state trie idea

* created beacon state structure

* add in the full clone getter

* return by value instead

* add all setters

* new state setters are being completed

* arrays roots exposed

*  close to finishing all these headerssss

* functionality complete

* added in proto benchmark test

* test for compatibility

* add test for compat

* comments fixed

* add clone

* add clone

* remove underlying copies

* make it immutable

* integrate it into chainservice

* revert

* wrap up comments for package

* address all comments and godocs

* address all comments

* clone the pending attestation properly

* properly clone remaining items

* tests pass fixed bug

* prevent nil pointer exceptions

* fixed up some bugs in the clone comparisons

Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-21 22:36:12 -06:00
Preston Van Loon
bfda29f2ad Implement voluntary exits pool (#4610) 2020-01-21 15:29:04 -08:00
Nishant Das
e96c2f4949 use proper bound (#4607) 2020-01-21 07:45:06 -08:00
Nishant Das
a52f9d4549 Save Deposit Data at Every Interval (#4606)
* save only every 100 logs
2020-01-21 06:07:12 +00:00
terence tsao
29a7a587cf Fix old markdown links (#4603)
* Fix old MD links
* Revert
* Merge branch 'master' into clean-up-old-mds
2020-01-21 03:30:35 +00:00
Preston Van Loon
27254ad362 Use a better skip slots cache with a lock around it for identical parallel ProcessSlots requests (#4597)
* Use a better skip slots cache with a lock around it for common requests
* Merge refs/heads/master into better-skip-slots-cache
* add test
* Merge branch 'better-skip-slots-cache' of github.com:prysmaticlabs/prysm into better-skip-slots-cache
* Merge refs/heads/master into better-skip-slots-cache
* exit process slots if the context expired
* Revert "exit process slots if the context expired"

This reverts commit 1430d8ab19.
* ensure validation has a pubsub timeout
* Merge refs/heads/master into better-skip-slots-cache
* PR feedback
* Merge branch 'better-skip-slots-cache' of github.com:prysmaticlabs/prysm into better-skip-slots-cache
2020-01-21 02:19:42 +00:00
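A sketch of the "skip slots cache with a lock" idea from the commit above: identical parallel ProcessSlots requests for the same pre-state share one computation instead of each advancing the state independently. The types, key choice, and the stand-in slot-processing step are assumptions for illustration.

```go
package main

import (
	"fmt"
	"sync"
)

type beaconState struct{ slot uint64 } // placeholder for the real state

// skipSlotCache memoizes the result of advancing a state to a target slot and
// serializes concurrent identical requests so the work is done only once.
type skipSlotCache struct {
	mu    sync.Mutex
	locks map[[32]byte]*sync.Mutex
	memo  map[[32]byte]*beaconState
}

func newSkipSlotCache() *skipSlotCache {
	return &skipSlotCache{locks: map[[32]byte]*sync.Mutex{}, memo: map[[32]byte]*beaconState{}}
}

func (c *skipSlotCache) processSlots(root [32]byte, pre *beaconState, targetSlot uint64) *beaconState {
	c.mu.Lock()
	l, ok := c.locks[root]
	if !ok {
		l = &sync.Mutex{}
		c.locks[root] = l
	}
	c.mu.Unlock()

	l.Lock() // only one caller advances this particular pre-state at a time
	defer l.Unlock()

	c.mu.Lock()
	cached, ok := c.memo[root]
	c.mu.Unlock()
	if ok && cached.slot >= targetSlot {
		return cached // a concurrent caller already did the work
	}

	post := &beaconState{slot: targetSlot} // stand-in for real per-slot processing

	c.mu.Lock()
	c.memo[root] = post
	c.mu.Unlock()
	return post
}

func main() {
	c := newSkipSlotCache()
	root := [32]byte{0xaa}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(c.processSlots(root, &beaconState{slot: 10}, 64).slot)
		}()
	}
	wg.Wait()
}
```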
Raul Jordan
eb5e814eb4 Disable Fork Choice Feature Flag (#4574) 2020-01-20 17:45:37 -08:00
Celeste Ariana Seberras
0a8dbaaabc Updated doc portal links (#4599) 2020-01-20 15:41:02 -08:00
Andre Miras
e65d98925b Updates README.md to expose docker port 13000 (#4596)
Port 13000 also needs to be exposed to improve connectivity and
receive more peers, refs #4323.
Also updates the "Docker on Windows" instructions for consistency.
2020-01-20 14:30:40 -06:00
Nishant Das
781b7d6870 Don't Panic if 0 Peers are Left (#4594)
* log error
* Merge branch 'master' into dontReturnError
* return blocks
* change back
* Merge branch 'dontReturnError' of https://github.com/prysmaticlabs/geth-sharding into dontReturnError
* use a more static finalized epoch
* Update beacon-chain/sync/initial-sync/round_robin.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* Merge refs/heads/master into dontReturnError
* jim's review
* Merge branch 'dontReturnError' of https://github.com/prysmaticlabs/geth-sharding into dontReturnError
* Update beacon-chain/sync/initial-sync/round_robin.go
2020-01-20 17:12:28 +00:00
Jim McDonald
dc4c1ca2b7 Ensure initial sync is initialised (#4587)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-01-20 10:38:27 -06:00
Andre Miras
d72e18ba60 Removes trailing backslash, refs #4562 (#4592)
* Removes trailing backslash, refs #4562
* Merge branch 'master' into feature/minor_documentation_fix
2020-01-19 22:46:19 +00:00
Ivan Martinez
a4db560e55 Prepare validator DB for attester protection implementation (#4584)
* Add flag for attester protection
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into protecc-attester-db
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into protecc-attester-db
* Remove flags
* Add attestation history DB functions to validator client
* Fix comments
* Update interface to new funcs
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into protecc-attester-db
* Fix test
* Merge branch 'master' into protecc-attester-db
2020-01-19 22:05:48 +00:00
Preston Van Loon
e7ecd9329a Fix resync (#4585)
* reset synced to false
* comment
2020-01-19 03:29:08 +00:00
Nishant Das
3e7e447160 Make Status Requests Asynchronous (#4577)
* make rpc status requests async
* make whole block async
* fix nogo
* Update beacon-chain/sync/rpc_status.go
* Merge branch 'master' into makeAsync
* Merge refs/heads/master into makeAsync
2020-01-19 01:37:18 +00:00
Nishant Das
1b62e92159 Reset Status (#4576)
* reset status

* Update beacon-chain/powchain/service.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-01-19 09:24:19 +08:00
Preston Van Loon
aae27749d4 Release save deposits flag (#4581)
* release save deposits flag
* Merge refs/heads/master into release-save-deposits
2020-01-18 20:25:29 +00:00
Ivan Martinez
2ba81193b0 Add proto for attestation protection (#4579) 2020-01-18 14:46:33 -05:00
Preston Van Loon
68c1ca755d Don't mark peer as bad as part of this return. (#4575) 2020-01-18 12:46:12 +08:00
Preston Van Loon
ccfc650375 Better parent block request (#4572)
* Use a good peer instead of a random one, if we know about it
* Exit init sync if there is an issue
* Merge refs/heads/master into better-parent-block-processing
* Merge refs/heads/master into better-parent-block-processing
* Merge refs/heads/master into better-parent-block-processing
* Merge refs/heads/master into better-parent-block-processing
* Update pending_blocks_queue.go
2020-01-17 22:43:32 +00:00
terence tsao
3d3dccbdb4 Enabled proposer sig and randao verifications for init sync (#4573)
* Enabled proposer sig and randao verifications in init sync

* Comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-17 15:58:25 -06:00
Ivan Martinez
d04399ea96 Refactor generated benchmark files to allow for more general usage (#4436)
* Begin to refactor benchmark files to testutil

* Complete most of refactoring

* Fix file path

* gofmt

* Fix path

* Move generatego to tools/

* Move gen util to tools/benchmark-files-gen

* Add comments to pregen funcs

* Make function names consistent

* Update README

* Redo benchmarks with 16384 validators
2020-01-17 12:25:35 -05:00
Prince Sinha
0605118686 p2p: Added log for --p2p-host-ip (#4553)
* added log for external addr
* Merge branch 'master' into log-p2p-address
* Merge branch 'master' into log-p2p-address
* Merge branch 'master' into log-p2p-address
2020-01-17 11:07:37 +00:00
Jim McDonald
dab87ba252 Add --rpc-host option to beacon chain (#4571) 2020-01-16 20:18:26 -06:00
Raul Jordan
eb429ab719 Include Validator Index in GetDuties Response, Update EthereumAPIs (#4567)
* include new patch
* add patch and validator indices to duties resp
* test passing
* move call to validator index
* Merge branch 'master' into include-val-idx
* do not use wait groups anymore
* Merge branch 'include-val-idx' of github.com:prysmaticlabs/prysm into include-val-idx
* Update beacon-chain/rpc/validator/assignments_test.go
2020-01-16 22:37:51 +00:00
Raul Jordan
ed529965af Fix Up SSZ Cache Branch Recomputation (#4558)
* e2e ssz cache busting
* use ssz cache for e2e
* caching ensure
* fix up the cache even more
* gazelle
* formatting
* formatting
* add back cache for val registry
* sync
* fix up commented item
* add attestations
* Merge branch 'master' into e2e-ssz-cache
* Merge branch 'master' into e2e-ssz-cache
* Merge branch 'e2e-ssz-cache' of github.com:prysmaticlabs/prysm into e2e-ssz-cache
* formatting
* gaz
* Merge branch 'master' into e2e-ssz-cache
* resolve comments
* Merge branch 'master' into e2e-ssz-cache
* naming of test
* Merge refs/heads/master into e2e-ssz-cache
* Merge refs/heads/master into e2e-ssz-cache
2020-01-16 21:40:09 +00:00
Raul Jordan
c6343cac3a Enable RPCMaxPageSize via Beacon Node Flag (#4539)
* add new flag
* enforce max page size via flag
* ensure exists in flag group
* Merge refs/heads/master into custom-max-page
* conflict with master
* resolved broken tests
* Update beacon-chain/flags/config.go
* Merge refs/heads/master into custom-max-page
2020-01-16 21:19:43 +00:00
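A small sketch of how a configurable max page size can be enforced on a paginated RPC, as the commit above describes; the variable name, default value, flag wiring, and error wording are assumptions, not Prysm's actual implementation.

```go
package main

import "fmt"

// rpcMaxPageSize would be populated from the new beacon node flag; the
// default here is illustrative.
var rpcMaxPageSize = 500

// validatePageSize rejects oversized requests up front and substitutes the
// configured ceiling when the caller leaves the page size unset.
func validatePageSize(requested int) (int, error) {
	if requested > rpcMaxPageSize {
		return 0, fmt.Errorf("requested page size %d exceeds max of %d", requested, rpcMaxPageSize)
	}
	if requested <= 0 {
		return rpcMaxPageSize, nil
	}
	return requested, nil
}

func main() {
	fmt.Println(validatePageSize(1000))
	fmt.Println(validatePageSize(0))
}
```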
Jim McDonald
06bc80d314 Add bad peer count (#4537)
* Add bad peer count
* Merge branch 'master' into badpeercount
* Merge branch 'master' into badpeercount
* Merge branch 'master' into badpeercount
* Merge branch 'master' into badpeercount
2020-01-16 21:07:09 +00:00
Nishant Das
60cab2dc73 update archive (#4443)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-16 12:37:04 -08:00
Prince Sinha
63d692a833 Fix deposit block slot before genesis state (#4495)
* before genesis state commit
* Merge branch 'master' into deposit-block-slot
* Merge branch 'master' into deposit-block-slot
* depositBlockSlot test added
* Merge branch 'deposit-block-slot' of https://github.com/princesinha19/prysm into deposit-block-slot
* Merge branch 'master' into deposit-block-slot
* Merge branch 'master' into deposit-block-slot
* resolve conflict
* status test commit
* Merge branch 'master' into deposit-block-slot
* Merge branch 'master' into deposit-block-slot
2020-01-16 19:38:30 +00:00
Jim McDonald
d744aaa2cd Better resync checking and running (#4516)
* Separate out fallen behind/resync check
* Remove hard-coded resync interval
* Merge branch 'master' into resync
2020-01-16 16:57:38 +00:00
JoshSnider
91d5ffae5b Remove invalid init-sync-no-verify option (#4562)
* Remove invalid `init-sync-no-verify` option

`init-sync-no-verify` was removed from `beacon-chain` in 32245a9062
* Merge branch 'master' into patch-1
2020-01-16 16:01:44 +00:00
terence tsao
cb49544fe3 Efficiently add proposer indices to cache (#4548)
* Use UpdateProposerIndicesInCache
* Merge branch 'master' into improve-proposer-cache
* Merge branch 'master' into improve-proposer-cache
* Merge branch 'master' into improve-proposer-cache
* Merge branch 'master' into improve-proposer-cache
2020-01-16 15:03:49 +00:00
Nishant Das
11731c4afe Fix RPC Panic (#4564) 2020-01-16 06:47:55 -08:00
Jim McDonald
5349b00e19 Tidy up peer logging (#4536)
* Tidy up peer logging
* Merge branch 'master' into peerlogs
* Merge branch 'master' into peerlogs
* Merge branch 'master' into peerlogs
2020-01-16 09:43:10 +00:00
Nishant Das
2e5429c94e Fix Stuck Beacon Node (#4454)
* Revert "Revert #4392 (#4449)"

This reverts commit 67c380b197.
* bound start req
* Merge refs/heads/master into revert-4449-revert-4392
* fix test
* Merge branch 'revert-4449-revert-4392' of https://github.com/prysmaticlabs/geth-sharding into revert-4449-revert-4392
* Merge refs/heads/master into revert-4449-revert-4392
* add flag for deployment block
* Merge branch 'revert-4449-revert-4392' of https://github.com/prysmaticlabs/geth-sharding into revert-4449-revert-4392
* use constant and comments
* lint
* skip test for now
* Merge refs/heads/master into revert-4449-revert-4392
* Update shared/params/config.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/powchain/testing/mock.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* preston's review
* Merge branch 'revert-4449-revert-4392' of https://github.com/prysmaticlabs/geth-sharding into revert-4449-revert-4392
* Merge refs/heads/master into revert-4449-revert-4392
* add flag
* Merge branch 'revert-4449-revert-4392' of https://github.com/prysmaticlabs/geth-sharding into revert-4449-revert-4392
* Merge refs/heads/master into revert-4449-revert-4392
* use stateutils
* Merge branch 'revert-4449-revert-4392' of https://github.com/prysmaticlabs/geth-sharding into revert-4449-revert-4392
* Merge refs/heads/master into revert-4449-revert-4392
2020-01-16 01:46:15 +00:00
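The commit above mentions bounding the start of the request and adding a flag for the deposit contract's deployment block. A hedged sketch of that bounding idea follows; the constants, helper name, and batch size are illustrative assumptions, not the real values or Prysm's code.

```go
package main

import "fmt"

const (
	contractDeployBlock = 1_200_000 // illustrative deployment height, not the real one
	batchSize           = 1_000     // illustrative request size
)

// logScanRanges yields [start, end] eth1 block ranges for fetching deposit
// logs, clamping the start so a fresh node never scans below the deposit
// contract's deployment block.
func logScanRanges(requestedStart, head uint64) [][2]uint64 {
	start := requestedStart
	if start < contractDeployBlock {
		start = contractDeployBlock
	}
	var ranges [][2]uint64
	for s := start; s <= head; s += batchSize {
		e := s + batchSize - 1
		if e > head {
			e = head
		}
		ranges = append(ranges, [2]uint64{s, e})
	}
	return ranges
}

func main() {
	fmt.Println(logScanRanges(0, 1_202_500))
}
```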
Nishant Das
0a632064d4 Fix Powchain Status (#4560)
* reset status
* Merge branch 'master' into fixStatus
* Merge refs/heads/master into fixStatus
2020-01-16 01:34:27 +00:00
Preston Van Loon
129bc763ee Rate limiter for rpc beacon blocks (#4549)
* use rate limiter for rpc beacon blocks

* gofmt

* don't delete empty buckets

* disconnect bad peers

* tell peer they are being rate limited

* defer disconnect

* fix tests

* set burst to x10

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-15 17:19:06 -08:00
Ivan Martinez
452cadc286 Cleanup featureconfig, make naming consistent (#4557)
* Cleanup featureconfig, make naming consistent

* Fix rename

* Change test package name
2020-01-15 18:41:40 -05:00
Ivan Martinez
2a4c89827d Add double proposal protection to validator client (#4460)
* Add double proposal protection to client

* Add mock test cases for past proposals, and after pruning

* Fix error

* Add force clear db to val in e2e

* Fix val tests

* Move saving proposal history to after broadcasting block

* Add featureflag

* Goimports

* Unexport flag

* Add flag to tests

* gazelle

* Move conditionals
2020-01-15 17:23:39 -05:00
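A minimal sketch of the double-proposal guard the commit above adds: refuse to sign a block for a slot the validator already signed at, and record the slot only after the block is broadcast. The in-memory map stands in for the persisted proposal history; names and types are illustrative.

```go
package main

import (
	"errors"
	"fmt"
)

// proposalHistory is a stand-in for the validator's persisted proposal record.
type proposalHistory struct {
	signedSlots map[uint64]bool
}

// canSign refuses a second signature for a slot we already proposed at, which
// is what prevents a slashable double proposal.
func (h *proposalHistory) canSign(slot uint64) error {
	if h.signedSlots[slot] {
		return errors.New("already signed a block for this slot")
	}
	return nil
}

// markSigned records the slot after the block has been broadcast.
func (h *proposalHistory) markSigned(slot uint64) { h.signedSlots[slot] = true }

func main() {
	h := &proposalHistory{signedSlots: map[uint64]bool{}}
	fmt.Println(h.canSign(10)) // <nil>: safe to sign
	h.markSigned(10)
	fmt.Println(h.canSign(10)) // error: would be a double proposal
}
```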
terence tsao
5ab1efb537 Cached head root retrieve from DB on miss (#4552) 2020-01-14 21:02:02 -08:00
Preston Van Loon
d0793f00c5 Partially revert #4477 (#4550)
* Partially revert #4477
2020-01-15 00:29:02 +00:00
Preston Van Loon
d8d9f4482f p2p: Increment RPC metrics (#4547)
* Increment RPC metrics
* Merge refs/heads/master into rpc-metric
2020-01-14 17:02:50 +00:00
Nishant Das
4835ba7bdf Increment Metric at the Start of Validation (#4546)
* shift metric correctly
2020-01-14 16:49:15 +00:00
terence tsao
6ef1a712c2 OnBlockCacheFilteredTree (#4541) 2020-01-14 08:05:22 -08:00
Nishant Das
0bee1de486 Set Capacity for Slices (#4540)
* set capacities

* make it more accurate

* resolve it
2020-01-14 14:44:24 +08:00
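The commit above sets slice capacities up front. A tiny illustrative example of the pattern (the variable names are placeholders):

```go
package main

import "fmt"

func main() {
	validators := []string{"v0", "v1", "v2"}
	// Allocating with the final capacity known up front avoids repeated
	// re-allocation and copying as append grows the backing array.
	indices := make([]uint64, 0, len(validators))
	for i := range validators {
		indices = append(indices, uint64(i))
	}
	fmt.Println(indices)
}
```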
Raul Jordan
d2d4e7e35d Benchmark and Optimize ListValidatorBalances (#4530)
* add balances api bench
* rename
* fix flakey test with sharding
* Merge branch 'master' into optimize-api
* optimizing the reqs for pagination
* Merge branch 'optimize-api' of github.com:prysmaticlabs/prysm into optimize-api
* Merge branch 'master' into optimize-api
* wrap up tests
* Merge branch 'optimize-api' of github.com:prysmaticlabs/prysm into optimize-api
* nishant comment
* Update beacon-chain/rpc/beacon/validators.go
2020-01-14 05:40:20 +00:00
Nishant Das
6e0248429f Fix Activation Queue (#4535)
* change operator
* Merge branch 'master' into fixQueue
* Merge refs/heads/master into fixQueue
* Merge refs/heads/master into fixQueue
* Merge refs/heads/master into fixQueue
* add test and fix issue
* Merge branch 'fixQueue' of https://github.com/prysmaticlabs/geth-sharding into fixQueue
2020-01-14 04:35:51 +00:00
terence tsao
884d2a159d Cache proposer indices (#4528)
* Precompute and plug it into run time
* Run time fix
* Testing
* More logging to debug
* More logging to debug
* This should fix it
* Clean up debug logs
* Removed last bit of debug log
* Comments
* Tests
* Merge branch 'master' into cache-proposer-index
* Gaz
* Merge branch 'cache-proposer-index' of git+ssh://github.com/prysmaticlabs/prysm into cache-proposer-index
* Merge refs/heads/master into cache-proposer-index
* Merge refs/heads/master into cache-proposer-index
* Merge refs/heads/master into cache-proposer-index
2020-01-14 04:08:32 +00:00
Preston Van Loon
415af93ad8 Minor tweaks to GetAttestationData (#4533)
* Maybe bugfix

* Maybe bugfix

* make GetAttestationData cheaper

* clone head state getter return values

* Fix tests

* fix e2e and revert most changes 😩

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-13 19:28:08 -08:00
shayzluf
0b35743d2c Attester proposer slashing store (#4315)
* Merge branch 'master' of github.com:prysmaticlabs/prysm into update_validators

# Conflicts:
#	slasher/flags/flags.go
#	slasher/main.go
#	slasher/service/data_update.go
#	slasher/service/service.go
#	slasher/service/service_test.go

* proposal and attester store

* day to status

* comment change

* one bucket

* Merge branch 'master' of github.com:prysmaticlabs/prysm into attester_proposer_slashing_store
# Please enter a commit message to explain why this merge is necessary,
# especially if it merges an updated upstream into a topic branch.
#
# Lines starting with '#' will be ignored, and an empty message aborts
# the commit.

added comments

* comment

* typo fix

* raul review fix

* raul review fix full

* nishant feedback

* test fix

* fix tests and remove update gofmt goimports

* remove blank line in imports

* nishant fixes

* comment and fir delete proposer slashings

* avoid marshal twice

* remove space

* Update slasher/db/attester_slashings.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* terence feedback

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-01-14 07:43:25 +05:30
Preston Van Loon
62811e8f7c Only advance to the correct epoch (#4532) 2020-01-13 15:34:05 -08:00
Preston Van Loon
d4ae063ad7 Revert "Filter attestation with ProcessAttestationNoSignatureVerify" (#4529)
* Revert "Filter attestation with ProcessAttestationNoSignatureVerify (#4513)"

This reverts commit 22e01aa9f2.
2020-01-13 21:02:13 +00:00
Jim McDonald
3d24a85121 Tidy-up of BestFinalized (#4505)
* Tidy up BestFinalized
* Ensure no more than maxPeers returned
* Merge branch 'master' into bestfinalized
* Merge branch 'master' into bestfinalized
* Merge branch 'master' into bestfinalized
* Merge branch 'master' into bestfinalized
* Remove swap file
* Provide potential PIDs array with capacity
* Add test for trimming and ordering in BestFinalized
* Merge branch 'master' into bestfinalized
2020-01-13 18:12:10 +00:00
Prince Sinha
888e8925ee cli: Added flag for GRPC max msg size (#4524)
* added grpc max msg size flag
* Merge branch 'master' into grpc-cli-flag
* Merge branch 'master' into grpc-cli-flag
* Merge branch 'master' into grpc-cli-flag
* Merge branch 'master' into grpc-cli-flag
2020-01-13 17:29:43 +00:00
Preston Van Loon
18333293d0 Refactor database interface to prefer blockchain.HeadFetcher (#4523)
* start refactoring and deprecation round 1
* Merge branch 'master' of github.com:prysmaticlabs/prysm into single-source-of-truth-1
* Refactoring of database interface. Preferring limited access interface
* revert some changes from 008f992993
* Fix tests
* gofmt
* Merge branch 'master' into single-source-of-truth-1
* lint
* Merge refs/heads/master into single-source-of-truth-1
* Apply suggestions from code review

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Merge refs/heads/master into single-source-of-truth-1
* Merge refs/heads/master into single-source-of-truth-1
* Merge refs/heads/master into single-source-of-truth-1
* Clone head block to avoid mutation
2020-01-13 17:02:20 +00:00
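A sketch of the "prefer a limited-access interface" idea from the refactor above: callers that only need the chain head depend on a small fetcher interface rather than the full database, keeping a single source of truth and easing mocking. The method set and type names below are illustrative, not the exact Prysm interface.

```go
package main

import "fmt"

// signedBeaconBlock is a placeholder; the real type lives in the eth2 protos.
type signedBeaconBlock struct{ slot uint64 }

// headFetcher is a narrow, read-only view of the chain head.
type headFetcher interface {
	HeadSlot() uint64
	HeadBlock() *signedBeaconBlock
}

// chainService is the single writer that actually tracks the head.
type chainService struct{ head *signedBeaconBlock }

func (c *chainService) HeadSlot() uint64              { return c.head.slot }
func (c *chainService) HeadBlock() *signedBeaconBlock { return c.head }

// reportHead only needs read access, so it asks for the small interface.
func reportHead(f headFetcher) {
	fmt.Println("head slot:", f.HeadSlot())
}

func main() {
	reportHead(&chainService{head: &signedBeaconBlock{slot: 42}})
}
```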
Nishant Das
e286069b20 Check Block Before Processing it (#4527)
* fix panic
* Update beacon-chain/sync/pending_blocks_queue.go
2020-01-13 15:09:22 +00:00
Jim McDonald
de2f1fbf5c Ignore VI swapfiles (#4525) 2020-01-13 06:43:14 -08:00
Jim McDonald
44fa2c6371 Only one handshake at a time with active peers (#4519) 2020-01-13 18:15:09 +08:00
terence tsao
a8edfa42cc Cache filtered block tree (#4515)
* Cache filtered block tree
* Merge refs/heads/master into cache-filtered-tree
* Merge refs/heads/master into cache-filtered-tree
* Add locks
* Merge branch 'cache-filtered-tree' of git+ssh://github.com/prysmaticlabs/prysm into cache-filtered-tree
* Conflict
* Merge refs/heads/master into cache-filtered-tree
* Merge refs/heads/master into cache-filtered-tree
* Update shared/featureconfig/flags.go

Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Rlock
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into cache-filtered-tree
* Merge branch 'cache-filtered-tree' of git+ssh://github.com/prysmaticlabs/prysm into cache-filtered-tree
2020-01-13 04:12:50 +00:00
terence tsao
7edca61e44 #4506 take two (#4518)
* Samething, testing run time
* Check if the latest processed block root is the same
* Merge branch 'master' into improve-receive-block-reattempt
* Merge refs/heads/master into improve-receive-block-reattempt
* Merge refs/heads/master into improve-receive-block-reattempt
2020-01-12 23:55:28 +00:00
Preston Van Loon
1cb0edac00 PR #4502 take two (#4522)
* PR #4502 take two
2020-01-12 23:46:14 +00:00
Raul Jordan
88bce4af34 Revert "Deprecate new cache feature flag" (#4520)
* Revert "Deprecate new cache feature flag (#4502)"

This reverts commit 5287ddc114.
2020-01-12 23:15:08 +00:00
terence tsao
5287ddc114 Deprecate new cache feature flag (#4502)
* Starting to deprecate new cache flag
* All tests passing
* Fixed minimal test
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into deprecate-flag
* Fixed mainnet spec test
* Fixed a typo
* Merge refs/heads/master into deprecate-flag
* Merge refs/heads/master into deprecate-flag
* Merge refs/heads/master into deprecate-flag
2020-01-12 22:46:30 +00:00
terence tsao
22e01aa9f2 Filter attestation with ProcessAttestationNoSignatureVerify (#4513)
* Use no sig verify and comment

* Fixed all tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-12 16:32:33 -06:00
terence tsao
a79dab7919 Revert "ReceiveBlock: Only retrieve head block from DB if necessary (#4506)" (#4514)
This reverts commit 9a4bf6c1a2.
2020-01-12 14:08:25 -08:00
Preston Van Loon
9a4bf6c1a2 ReceiveBlock: Only retrieve head block from DB if necessary (#4506)
* Only retrieve head block from DB if necessary

* remove redundant comment

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-12 15:13:13 -06:00
Preston Van Loon
7992375a0e Check if we are already synced to the current epoch before querying all of our peers (#4504) 2020-01-11 18:31:30 -08:00
terence tsao
6c4bf22723 Fix up attestation pool (#4493)
* Update aggregated methods

* Update aggregated methods

* Use improved HasAttestation to check caches

* Add back some validations

* There's no need to save unaggregated att

* Fixed all the tests

* remove TODO for now

* Raul feedback

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-11 13:59:06 -08:00
Jim McDonald
ea12ffabba Log Ethereum 1 deposits before chainstart (#4499)
* Log Ethereum 1 deposits before chainstart
* Merge branch 'master' into logdeposits
2020-01-11 19:54:43 +00:00
Preston Van Loon
5077c009c3 Only advance slot when the request is for a future epoch (#4501) 2020-01-11 11:40:36 -08:00
Jim McDonald
7f1900e96c Remove unused function (#4496)
* Remove unused function
* Merge branch 'master' into rmunused
2020-01-11 11:20:05 +00:00
Jim McDonald
3c5d5bfc7b Use helper to calculate epoch (#4497) 2020-01-11 19:06:10 +08:00
Nishant Das
4e6c8c5b1a Batch Save Genesis Validators (#4494)
* save vals
* Merge branch 'master' into batchSaveGenesisValidators
2020-01-11 04:40:31 +00:00
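A hedged sketch of what a batch save like the one above can look like with a bolt-style store (assuming go.etcd.io/bbolt): every mapping is written inside one transaction instead of one transaction per validator. The bucket name, key layout, and file path are illustrative, not Prysm's schema.

```go
package main

import (
	"encoding/binary"
	"log"

	bolt "go.etcd.io/bbolt"
)

// saveValidatorIndices writes every pubkey->index mapping in a single
// transaction, which is the essence of a batch save.
func saveValidatorIndices(db *bolt.DB, pubKeys [][48]byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("validator-indices"))
		if err != nil {
			return err
		}
		for i, pk := range pubKeys {
			idx := make([]byte, 8)
			binary.LittleEndian.PutUint64(idx, uint64(i))
			if err := bkt.Put(pk[:], idx); err != nil {
				return err
			}
		}
		return nil
	})
}

func main() {
	db, err := bolt.Open("validators.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := saveValidatorIndices(db, [][48]byte{{1}, {2}}); err != nil {
		log.Fatal(err)
	}
}
```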
Preston Van Loon
7919074a6a Add a step filter for beacon DB to retrieve blocks (#4488)
* Add a step filter for beacon DB to retrieve blocks
* Add a step filter for beacon DB to retrieve blocks
* gofmt
* Merge branch 'master' into db-step-filter
* Merge refs/heads/master into db-step-filter
* Merge refs/heads/master into db-step-filter
* Merge refs/heads/master into db-step-filter
* Merge refs/heads/master into db-step-filter
* fix tests
* Merge branch 'db-step-filter' of github.com:prysmaticlabs/prysm into db-step-filter
* Merge refs/heads/master into db-step-filter
2020-01-11 01:32:45 +00:00
Nishant Das
f6eea8e1fa Optimize Archival Assignment Retrieval (#4480)
* optimize further
* remove func
* Merge branch 'master' into optimizeArchival
* Merge refs/heads/master into optimizeArchival
* raul's review
* Merge branch 'optimizeArchival' of https://github.com/prysmaticlabs/geth-sharding into optimizeArchival
* preston's review
2020-01-11 01:19:52 +00:00
terence tsao
45e6eccfb4 Add epoch filter for fork choice attestation (#4487)
* Filter target epoch
* Test
* Comment
* Merge branch 'master' into fix-target-epoch
* Merge refs/heads/master into fix-target-epoch
* Merge refs/heads/master into fix-target-epoch
* Merge refs/heads/master into fix-target-epoch
2020-01-10 23:51:49 +00:00
terence tsao
b6c6b9b776 Filter block tree verifies block root has state (#4490)
* Construct block tree ensures block root has state
* Merge refs/heads/master into filter-tree-has-state
2020-01-10 23:40:46 +00:00
Raul Jordan
1a9b0da9ae Batch Save Validator Indices (#4489)
* add bolt alloc fix
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* const
* add a batch save for indices to speed up sync
* Merge branch 'batch-save-indices' into bolt-alloc-fix
* fix up
* fix broken build
* Merge branch 'batch-save-indices' into bolt-alloc-fix
* Merge branch 'master' into batch-save-indices
* ensure it saves each
* Merge branch 'batch-save-indices' of github.com:prysmaticlabs/prysm into batch-save-indices
* Merge branch 'master' into batch-save-indices
* revert ssz cache
* Merge branch 'batch-save-indices' of github.com:prysmaticlabs/prysm into batch-save-indices
2020-01-10 23:27:01 +00:00
Raul Jordan
025be93492 Allocate More Resources to BoltDB (#4485)
* add bolt alloc fix
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* const
* Merge refs/heads/master into bolt-alloc-fix
2020-01-10 22:17:42 +00:00
Jim McDonald
22a3bf53ad Do not panic if initial sync fails (#4477)
* Do not panic if initial sync fails
* Only consider peers with non-0 finalized epoch
* Additional fixes
* Fix tests
* Merge branch 'master' into mvlog
* Merge branch 'master' into mvlog
2020-01-10 21:36:20 +00:00
terence tsao
37459ee765 Forkchoice att seen cache consider bitfield overlaps (#4483)
* Aggregate with previous aggregated attestations

* Update cache to bitfield as value

* Remove fmt print
2020-01-10 14:44:20 -06:00
terence tsao
e9d63e8dd3 Aggregate with previous aggregated attestations (#4478) 2020-01-10 10:56:28 -06:00
terence tsao
01b8a84e21 Check fork choice attestation's block and state in DB (#4475) 2020-01-10 06:25:43 -08:00
Preston Van Loon
9d8364bdfa only advance state in validate aggregate and proof if the epoch has changed between head state and attestation slot (#4474) 2020-01-09 19:11:02 -08:00
terence tsao
2b6a5aaaf9 Use db head info for request attestation (#4472) 2020-01-09 18:37:55 -08:00
Preston Van Loon
6aa92956f6 RPC: Use db.headBlock in getBlock (#4473)
* use headBlock in getBlock
* Merge branch 'master' into use-db-headblock
2020-01-10 02:00:36 +00:00
Preston Van Loon
eae2268dd1 DB: Prevent encoding a nil message (#4470)
* Prevent encoding a nil message
* Merge refs/heads/master into prevent-saving-nil-msg
2020-01-10 01:38:53 +00:00
Preston Van Loon
6de485c27e Use a longer deadline for processing pubsub messages (#4471)
* Use a longer deadline for processing pubsub messages
2020-01-10 01:27:20 +00:00
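A short sketch of the deadline idea above: run the whole pubsub message-validation path under one explicit, generous timeout so slow validations finish instead of being cut off. The handler, the simulated work, and the 10-second figure are assumptions for illustration.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// validateMessage stands in for a pubsub topic validator.
func validateMessage(ctx context.Context) error {
	select {
	case <-time.After(200 * time.Millisecond): // pretend validation work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// A longer deadline gives slow validations (for example ones that need a
	// state read) room to finish instead of being dropped mid-way.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	fmt.Println(validateMessage(ctx))
}
```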
Jim McDonald
3839f577ec Change database *Index() to use slice (#4466)
* Change database *Index() to use slice

* Remove underscore from helper name
2020-01-09 14:45:05 -08:00
Jim McDonald
fc38a0413e Fix gauge description (#4468) 2020-01-09 11:39:11 -08:00
terence tsao
0dd0e23155 Optimize ListBeaconCommittees to use committees cache (#4464) 2020-01-09 08:28:11 -08:00
Nishant Das
699e1c8a23 Optimize List Validator Assignments (#4456)
* optimize
* Merge refs/heads/master into listValidatorAssingments
* terence's and preston's review
* Merge branch 'listValidatorAssingments' of https://github.com/prysmaticlabs/geth-sharding into listValidatorAssingments
2020-01-09 05:07:24 +00:00
shayzluf
39b2570af5 Beacon node slasher client infrastructure (#4111) 2020-01-08 20:49:32 -08:00
Preston Van Loon
200bb5e42a Docker: Make root user the default (#4461)
* make root user the default
* Merge branch 'master' into root-user
* Merge refs/heads/master into root-user
* Merge refs/heads/master into root-user
2020-01-08 19:52:59 +00:00
Preston Van Loon
d249f78d79 Update tool README.md (#4463)
* Update README.md
* Merge refs/heads/master into prestonvanloon-patch-2
2020-01-08 19:44:34 +00:00
Nishant Das
e110f038bc Add Back Eth1 Block Delay (#4458)
* add delay
* Merge refs/heads/master into addBackDelay
2020-01-08 19:36:20 +00:00
Jim McDonald
5ee79dc4a8 Log fork version mismatches at debug (#4457)
* Log fork version mismatches at debug
* Merge branch 'master' into synclogerrors
* Merge branch 'master' into synclogerrors
* Merge branch 'master' into synclogerrors
2020-01-08 19:18:21 +00:00
Ivan Martinez
4ab0a91e51 Validator Slashing Protection DB (#4389)
* Begin adding DB to validator client

Begin adding ValidatorProposalHistory

Implement most of proposal history

Finish tests

Fix marking a proposal for the first time

Change proposalhistory to not using bit shifting

Add pb.go

Change after proto/slashing added

Finally fix protos

Fix most tests

Fix all tests for double proposal protection

Start initialiing DB in validator client

Add db to validator struct

Add DB to ProposeBlock

Fix test errors and begin mocking

Fix test formatting and pass test for validator protection!

Fix merge issues

Fix renames

Fix tests

* Fix tests

* Fix first startup on DB

* Fix nil check tests

* Fix E2E

* Fix e2e flag

* Fix comments

* Fix for comments

* Move proposal hepers to validator/client to keep DB clean

* Add clear-db flag to validator client

* Fix formatting

* Clear out unintended changes

* Fix build issues

* Fix build issues

* Gazelle

* Fix mock test

* Remove proposal history

* Add terminal confirmation to DB clearing

* Add interface for validatorDB, add context to DB functions

* Add force-clear-db flag

* Cleanup

* Update validator/node/node.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Change db to clear file, not whole folder

* Fix db test

* Fix teardown test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-01-08 13:16:17 -05:00
terence tsao
a69cb5c6e4 Committee cache fuzz tests (#4459)
* Starting TestCommitteeKeyFuzz_OK

* Added fuzz tests for committeees by epoch and active indices
2020-01-08 11:13:39 -06:00
terence tsao
624c42421d Add HasAggregatedAttestation getter for pool (#4451) 2020-01-08 06:59:49 -08:00
Jim McDonald
f3ae67a94b Fix bad update in #4453 (#4455) 2020-01-08 21:54:44 +08:00
Ivan Martinez
b30a7d1e19 Fix typos and inconsistencies (#4453)
* Fix typos and inconsistencies

* goimports

* Gazelle
2020-01-07 20:36:55 -06:00
Ivan Martinez
0d400faea2 Remove unused parameters and unused code (#4452)
* Remove unused parameters
* Remove unused deposit contract config
2020-01-07 23:45:29 +00:00
Preston Van Loon
67c380b197 Revert #4392 (#4449)
* revert #4392
2020-01-07 21:15:40 +00:00
terence tsao
89eedd2123 Efficient computation of epoch participation (#4430)
* Remove custody (#3986)

* Update proto fields

* Updated block operations

* Fixed all block operation tests

* Fixed tests part 1

* Fixed tests part 1

* All tests pass

* Clean up

* Skip spec test

* Fixed ssz test

* Skip ssz test

* Skip mainnet tests

* Update beacon-chain/operations/attestation.go

* Update beacon-chain/operations/attestation.go
* Decoy flip flop check (#3987)
* Bounce attack check (#3989)

* New store values

* Update process block

* Update process attestation

* Update tests

* Helper

* Fixed blockchain package tests

* Update beacon-chain/blockchain/forkchoice/process_block.go
* Conflict
* Unskip mainnet spec tests (#3998)

* Starting

* Fixed attestation mainnet test

* Unskip ssz static and block processing tests

* Fixed workspace

* fixed workspace

* fixed workspace

* Update beacon-chain/core/blocks/block_operations.go
* Unskip minimal spec tests (#3999)

* Starting

* Fixed attestation mainnet test

* Unskip ssz static and block processing tests

* Fixed workspace

* fixed workspace

* fixed workspace

* Update workspace

* Unskip all minimal spec tests

* Update workspace for general test
* Unskip test (#4001)
* Update minimal seconds per slot to 6 (#3978)
* Bounce attack tests (#3993)

* New store values

* Update process block

* Update process attestation

* Update tests

* Helper

* Fixed blockchain package tests

* Slots since epoch starts tests

* Update justified checkpt tests

* Conflict

* Fixed logic

* Update process_block.go

* Use helper
* Conflict
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.1
* Conflict
* Fixed failed tests
* Lower MinGenesisActiveValidatorCount to 16384 (#4100)
* Fork choice beacon block checks (#4107)

* Prevent future blocks check and test

* Removed old code
* Update aggregation proto (#4121)

* Update def
* Update spec test
* Conflict
* Update workspace
* patch
* Resolve conflict
* Patch
* Change workspace
* Update ethereumapis to a forked branch at commit 6eb1193e47
* Fixed all the tests
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into conflict
* fix patch
* Need to regenerate test data
* Merge branch 'master' into v0.9.2
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Enable snappy compression for all (#4157)

* enable snappy compression for all
* enable snappy compression for all
* enable snappy compression for all
* enable snappy compression for all
* Validate aggregate and proof subscriber (#4159)
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Conflict
* Update workspace
* Conflict
* Conflict
* Conflict
* Merge branch 'master' into v0.9.2
* Merge branch 'master' into v0.9.2
* Conflict
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Remove migrate to snappy  (#4205)
* Feature flag: Deprecate --prune-states, release to all (#4204)

* Deprecated prune-states, release to all

* imports

* remote unused import

* remove unused import

* Rm prune state test

* gaz
* Refactoring for dynamic pubsub subscriptions for non-aggregated attestations (#4189)

* checkpoint progress

* chkpt

* checkpoint progress

* put pipeline in its own file

* remove unused imports

* add test, it's failing though

* fix test

* remove head state issue

* add clear db flag to e2e

* add some more error handling, debug logging

* skip processing if chain has not started

* fix test

* wrap in go routine to see if anything breaks

* remove duplicated topic

* Add a regression test. Thanks @nisdas for finding the original problem. May it never happen again *fingers crossed*

* Comments

* gofmt

* comment out with TODO
* Sync with master
* Sync with master
* RPC servers use attestation pool (#4223)
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Refactor RPC to Fully Utilize Ethereum APIs (#4243)

* include attester as a file in the validator server

* remove old proposer server impl

* include new patch and properly sync changes

* align with public pbs

* ensure matches rpc def

* fix up status tests

* resolve all broken test files in the validator rpc package

* gazelle include

* fix up the duties implementation

* fixed up all get duties functions

* all tests pass

* utilize new ethereum apis

* amend validator client to use the new beacon node validator rpc client

* fix up most of validator items

* added in mock

* fix up test

* readd test

* add chain serv mock

* fix a few more validator methods

* all validator tests passingggg

* fix broken test

* resolve even more broken tests

* all tests passsssss

* fix lint

* try PR

* fix up test

* resolve broken other tests
* Sync with master
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Aggregate and proof subscriber (#4240)

* Added subscribers

* Fixed conflict

* Tests

* fix up patch

* Use upstream pb

* include latest patch

* Fmt

* Save state before head block
* skip tests (#4275)
* Delete block attestations from the pool (#4241)

* Added subscribers
* Clean up
* Fixed conflict
* Delete atts in pool in validate pipeline
* Moved it to subscriber
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into use-att-pool-3
* Test
* Fixed test
* Initial work on voluntary exit (#4207)

* Initial implementation of voluntary exit: RPC call

* Update for recent merges

* Break out validation logic for voluntary exits to core module

* RequestExit -> ProposeExit

* Decrease exit package visibility

* Move to operation feed

* Wrap errors
* Fix critical proposer selection bug #4259 (#4265)

* fix critical proposer selection bug #4259

* gofmt

* add 1 more validator to make it 5

* more tests

* Fixed archivedProposerIndex

* Fixed TestFilterAttestation_OK

* Refactor ComputeProposerIndex, add regression test for potential out of range panic

* handle case of nil validator

* Update validators_test.go
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Leftover merge files, oops
* gaz
* Merge refs/heads/master into v0.9.2
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into v0.9.2
* Fixes Duplicate Validator Bug (#4322)

* Update dict

* Test helper

* Regression test

* Comment

* Reset test cache
* Merge refs/heads/master into v0.9.2
* fixes after PR #4328
* Complete attestation pool for run time (#4286)

* Added subscribers

* Fixed conflict

* Delete atts in pool in validate pipeline

* Moved it to subscriber

* Test

* Fixed test

* New curl for forkchoice attestations

* Starting att pool service for fork choice

* Update pool interface

* Update pool interface

* Update sync and node

* Lint

* Gazelle

* Updated servers, filled in missing functionalities

* RPC working with 1 beacon node 64 validators

* Started writing tests. Yay

* Test to aggregate and save multiple fork choice atts

* Tests for BatchAttestations for fork choice

* Fixed exisiting tests

* Minor fixes

* Fmt

* Added batch saves

* Lint

* Mo tests yay

* Delete test

* Fmt

* Update interval

* Fixed aggregation broadcast

* Clean up based on design review comment

* Fixed setupBeaconChain

* Raul's feedback. s/error/err
* resolve conflicts
* Merge branch 'v0.9.2' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge refs/heads/master into v0.9.2
* Removed old protos and fixed tests (#4336)
* Merge refs/heads/master into v0.9.2
* Disallow duplicated indices and test (#4339)
* Explicit use of GENESIS_SLOT in fork choice (#4343)
* Update from 2 to 3 (#4345)
* Remove verify unaggregated attestation when aggregating (#4347)
* use slot ticker instead of run every (#4348)
* Add context check for unbounded loop work (#4346)
* Revert "Explicit use of GENESIS_SLOT in fork choice (#4343)" (#4349)

This reverts commit d3f6753c77.
* Refactor Powchain Service (#4306)

* add data structures

* generate proto

* add in new fields

* add comments

* add new mock state

* add new mock state

* add new methods

* some more changes

* check genesis time properly

* lint

* fix refs

* fix tests

* lint

* lint

* lint

* gaz

* fix lint

* raul's comments

* use one method

* fix test

* raul's comment

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Ensure best better-justification is stored for fork choice (#4342)

* Ensure best better-justification is stored. Minor refactor
* Tests
* Merge refs/heads/v0.9.2 into better-best-justified
* Merge refs/heads/v0.9.2 into better-best-justified
* Ensure that epoch of attestation slot matches the target epoch (#4341)

* Disallow duplicated indices and test
* Add slot to target epoch check to on_attestation
* Add slot to target epoch check to process_attestation
* Merge branch 'v0.9.2' of git+ssh://github.com/prysmaticlabs/prysm into no-dup-att-indices
* Fixed TestProcessAttestations_PrevEpochFFGDataMismatches
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Update beacon-chain/blockchain/forkchoice/process_attestation_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Filter viable branches in fork choice (#4355)
* Only activate upon finality (#4359)

* Updated functions
* Tests
* Merge branch 'v0.9.2' of git+ssh://github.com/prysmaticlabs/prysm into queue-fix-on-finality
* Comment
* Merge refs/heads/v0.9.2 into queue-fix-on-finality
* Fixed failing test from 4359 (#4360)

* Fixed
* Skip registry spec tests
* Wait for state to be initialized at least once before running slot ticker based on genesis time (#4364)
* Sync with master
* Fix checkpoint root to  use genesis block root (#4368)
* Return an error on nil head state in fork choice (#4369)

* Return error if nil head state

* Fixed tests. Saved childen blocks state

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* Update metrics every epoch (#4367)
* return empty slice if state is nil (#4365)
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge refs/heads/master into v0.9.2
* Pubsub: Broadcast attestations to committee based subnets (#4316)

* Working on un-aggregated pubsub topics

* update subscriber to call pool

* checkpointing

* fix

* untested message validation

* minor fixes

* rename slotsSinceGenesis to slotsSince

* some progress on a unit test, subscribe is not being called still...

* dont change topic

* need to set the data on the message

* restore topic

* fixes

* some helpful parameter changes for mainnet operations

* lint

* Terence feedback

* unskip e2e

* Unit test for validate committee index beacon attestation

* PR feedback

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into resolveConflicts
* remove condition
* Remove unused operation pool (#4361)
* Merge refs/heads/master into v0.9.2
* Aggregate attestations periodically  (#4376)
* Persist ETH1 Data to Disk (#4329)

* add data structures

* generate proto

* add in new fields

* add comments

* add new mock state

* add new mock state

* add new methods

* some more changes

* check genesis time properly

* lint

* fix refs

* fix tests

* lint

* lint

* lint

* gaz

* adding in new proto message

* remove outdated vars

* add new changes

* remove latest eth1data

* continue refactoring

* finally works

* lint

* fix test

* fix all tests

* fix all tests again

* fix build

* change back

* add full eth1 test

* fix logs and test

* add constant

* changes

* fix bug

* lint

* fix another bug

* change back

* Apply suggestions from code review

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* Fixed VerifyIndexedAttestation (#4382)
* rm signing root (#4381)

* rm signing root

* Fixed VerifyIndexedAttestation

* Check proposer slashed status inside ProcessBlockHeaderNoVerify

* Fixed TestUpdateJustified_CouldUpdateBest

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Remove Redundant Trie Generation (#4383)

* remove trie generation
* remove deposit hashes
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into v0.9.2
* fix build
* Conflict
* Implement StreamAttestations RPC Endpoint (#4390)

* started attestation stream

* stream attestations test

* on slot tick test passing

* imports

* gaz

* Update beacon-chain/rpc/beacon/attestations_test.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

Co-authored-by: shayzluf <thezluf@gmail.com>
* Fixed goimport (#4394)
* Use custom stateutil ssz for ssz HTR spec tests (#4396)

* Use custom stateutil ssz for ssz HTR spec tests

* gofmt
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge refs/heads/master into v0.9.2
* set mainnet to be the default for build and run (#4398)

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* gracefully handle deduplicated registration of topic validators (#4399)

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* SSZ: temporarily disable roots cache until cache issues can be resolved (#4407)

* temporarily disable roots cache until cache issues can be resolved

* Also use custom ssz for spectests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Remove process block attestations as separate routine (#4408)

* Removed old save/process block atts

* Fixed tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Save Deposit Cache to Disk (#4384)

* change to protos

* fix build

* glue everything together

* fix test

* raul's review

* preston's comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Fix activation queue sorting (#4409)

* Removed old save/process block atts

* Fixed tests

* Proper sorting by eligibility epoch then by indices

* Deleted old code
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge branch 'master' into v0.9.2
* Merge refs/heads/master into v0.9.2
* stop recursive lookup if context is cancelled (#4420)
* Fix proposal bug (#4419)
* Add Pending Deposits Safely (#4422)

* safely prune cache

* use proper method

* preston's,terence's reviews and comments

* revert change to build files

* use as feature config instead
* Release custom state ssz (#4421)

* Release custom state ssz, change all HTR of beacon state to use custom method

* typo

* use mainnet config

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Define framework
* Use participation fetcher
* Build
* Fixed all tests
* Lint
* Update initial sync save justified to align with v0.9.3 (#4432)
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* fix build
* don't blacklist on pubsub (#4435)
* Fix Flakey Slot Ticker Test (#4434)

* use interface instead for the slot ticker

* fixed up flakey tests

* add gen time

* get duties comment

* fix lifecycle test

* more fixes
* Fixed rest of the test
* Pass in correct chain service
* Pass in another chain service
* Run time
* Configurable min genesis delay (#4437)

* Configurable min genesis delay based on https://github.com/ethereum/eth2.0-specs/pull/1557

* remove feature flag for genesis delay

* fix

* demo config feedback
* Current -> Prev
* Tests
* patch readme
* save keys unencrypted for validators (#4439)
* Add new demo configuration targeting mainnet scale (#4397)

* Add new demo configuration targeting mainnet, with 1/10th of the deposit value

* reduce quotient by 1/10th. Use 1/10th mainnet values

* only change the inactivity quotient

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Save justified checkpoint state (#4433)

* Save justified checkpoint state

* Lint

* Feedback

* Fixed test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Update shared/testutil/deposits.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update proto/testing/ssz_regression_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/core/epoch/epoch_processing.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/kv/forkchoice.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/pool.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/pool.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/powchain/log_processing_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/service.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber_beacon_blocks_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber_beacon_blocks_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/proposer.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/prepare_forkchoice.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/powchain/log_processing_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/pool.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/powchain/log_processing_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/aggregator/server.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/cache/depositcache/pending_deposits.go
* Update beacon-chain/cache/depositcache/pending_deposits_test.go
* Update beacon-chain/rpc/validator/proposer.go
* Merge refs/heads/master into v0.9.2
* Update test
* Conflict
* Update beacon-chain/blockchain/chain_info.go
* Conflict
* Merge branch 'efficient-participation' of git+ssh://github.com/prysmaticlabs/prysm into efficient-participation
* Merge refs/heads/master into efficient-participation
2020-01-07 19:28:25 +00:00
Preston Van Loon
2182e1cdc9 Fix pk manager db (#4447)
* fix pk manager db
2020-01-07 19:19:40 +00:00
terence tsao
6d2a2ebadf Update run time to v0.9.3 (#4154)
* Remove custody (#3986)

* Update proto fields

* Updated block operations

* Fixed all block operation tests

* Fixed tests part 1

* Fixed tests part 1

* All tests pass

* Clean up

* Skip spec test

* Fixed ssz test

* Skip ssz test

* Skip mainnet tests

* Update beacon-chain/operations/attestation.go

* Update beacon-chain/operations/attestation.go
* Decoy flip flop check (#3987)
* Bounce attack check (#3989)

* New store values

* Update process block

* Update process attestation

* Update tests

* Helper

* Fixed blockchain package tests

* Update beacon-chain/blockchain/forkchoice/process_block.go
* Conflict
* Unskip mainnet spec tests (#3998)

* Starting

* Fixed attestation mainnet test

* Unskip ssz static and block processing tests

* Fixed workspace

* fixed workspace

* fixed workspace

* Update beacon-chain/core/blocks/block_operations.go
* Unskip minimal spec tests (#3999)

* Starting

* Fixed attestation mainnet test

* Unskip ssz static and block processing tests

* Fixed workspace

* fixed workspace

* fixed workspace

* Update workspace

* Unskip all minimal spec tests

* Update workspace for general test
* Unskip test (#4001)
* Update minimal seconds per slot to 6 (#3978)
* Bounce attack tests (#3993)

* New store values

* Update process block

* Update process attestation

* Update tests

* Helper

* Fixed blockchain package tests

* Slots since epoch starts tests

* Update justified checkpt tests

* Conflict

* Fixed logic

* Update process_block.go

* Use helper
* Conflict
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.1
* Conflict
* Fixed failed tests
* Lower MinGenesisActiveValidatorCount to 16384 (#4100)
* Fork choice beacon block checks (#4107)

* Prevent future blocks check and test

* Removed old code
* Update aggregation proto (#4121)

* Update def
* Update spec test
* Conflict
* Update workspace
* patch
* Resolve conflict
* Patch
* Change workspace
* Update ethereumapis to a forked branch at commit 6eb1193e47
* Fixed all the tests
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into conflict
* fix patch
* Need to regenerate test data
* Merge branch 'master' into v0.9.2
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Enable snappy compression for all (#4157)

* enable snappy compression for all
* enable snappy compression for all
* enable snappy compression for all
* enable snappy compression for all
* Validate aggregate and proof subscriber (#4159)
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Conflict
* Update workspace
* Conflict
* Conflict
* Conflict
* Merge branch 'master' into v0.9.2
* Merge branch 'master' into v0.9.2
* Conflict
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Remove migrate to snappy  (#4205)
* Feature flag: Deprecate --prune-states, release to all (#4204)

* Deprecated prune-states, release to all

* imports

* remove unused import

* remove unused import

* Rm prune state test

* gaz
* Refactoring for dynamic pubsub subscriptions for non-aggregated attestations (#4189)

* checkpoint progress

* chkpt

* checkpoint progress

* put pipeline in its own file

* remove unused imports

* add test, it's failing though

* fix test

* remove head state issue

* add clear db flag to e2e

* add some more error handling, debug logging

* skip processing if chain has not started

* fix test

* wrap in go routine to see if anything breaks

* remove duplicated topic

* Add a regression test. Thanks @nisdas for finding the original problem. May it never happen again *fingers crossed*

* Comments

* gofmt

* comment out with TODO
* Sync with master
* Sync with master
* RPC servers use attestation pool (#4223)
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Refactor RPC to Fully Utilize Ethereum APIs (#4243)

* include attester as a file in the validator server

* remove old proposer server impl

* include new patch and properly sync changes

* align with public pbs

* ensure matches rpc def

* fix up status tests

* resolve all broken test files in the validator rpc package

* gazelle include

* fix up the duties implementation

* fixed up all get duties functions

* all tests pass

* utilize new ethereum apis

* amend validator client to use the new beacon node validator rpc client

* fix up most of validator items

* added in mock

* fix up test

* readd test

* add chain serv mock

* fix a few more validator methods

* all validator tests passingggg

* fix broken test

* resolve even more broken tests

* all tests passsssss

* fix lint

* try PR

* fix up test

* resolve broken other tests
* Sync with master
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Aggregate and proof subscriber (#4240)

* Added subscribers

* Fixed conflict

* Tests

* fix up patch

* Use upstream pb

* include latest patch

* Fmt

* Save state before head block
* skip tests (#4275)
* Delete block attestations from the pool (#4241)

* Added subscribers
* Clean up
* Fixed conflict
* Delete atts in pool in validate pipeline
* Moved it to subscriber
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into use-att-pool-3
* Test
* Fixed test
* Initial work on voluntary exit (#4207)

* Initial implementation of voluntary exit: RPC call

* Update for recent merges

* Break out validation logic for voluntary exits to core module

* RequestExit -> ProposeExit

* Decrease exit package visibility

* Move to operation feed

* Wrap errors
* Fix critical proposer selection bug #4259 (#4265)

* fix critical proposer selection bug #4259

* gofmt

* add 1 more validator to make it 5

* more tests

* Fixed archivedProposerIndex

* Fixed TestFilterAttestation_OK

* Refactor ComputeProposerIndex, add regression test for potential out of range panic

* handle case of nil validator

* Update validators_test.go
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Leftover merge files, oops
* gaz
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into v0.9.2
* Fixes Duplicate Validator Bug (#4322)

* Update dict

* Test helper

* Regression test

* Comment

* Reset test cache
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* fixes after PR #4328
* Complete attestation pool for run time (#4286)

* Added subscribers

* Fixed conflict

* Delete atts in pool in validate pipeline

* Moved it to subscriber

* Test

* Fixed test

* New curl for forkchoice attestations

* Starting att pool service for fork choice

* Update pool interface

* Update pool interface

* Update sync and node

* Lint

* Gazelle

* Updated servers, filled in missing functionalities

* RPC working with 1 beacon node 64 validators

* Started writing tests. Yay

* Test to aggregate and save multiple fork choice atts

* Tests for BatchAttestations for fork choice

* Fixed existing tests

* Minor fixes

* Fmt

* Added batch saves

* Lint

* Mo tests yay

* Delete test

* Fmt

* Update interval

* Fixed aggregation broadcast

* Clean up based on design review comment

* Fixed setupBeaconChain

* Raul's feedback. s/error/err
* resolve conflicts
* Merge branch 'v0.9.2' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge refs/heads/master into v0.9.2
* Removed old protos and fixed tests (#4336)
* Merge refs/heads/master into v0.9.2
* Disallow duplicated indices and test (#4339)
* Explicit use of GENESIS_SLOT in fork choice (#4343)
* Update from 2 to 3 (#4345)
* Remove verify unaggregated attestation when aggregating (#4347)
* use slot ticker instead of run every (#4348)
* Add context check for unbounded loop work (#4346)
* Revert "Explicit use of GENESIS_SLOT in fork choice (#4343)" (#4349)

This reverts commit d3f6753c77.
* Refactor Powchain Service (#4306)

* add data structures

* generate proto

* add in new fields

* add comments

* add new mock state

* add new mock state

* add new methods

* some more changes

* check genesis time properly

* lint

* fix refs

* fix tests

* lint

* lint

* lint

* gaz

* fix lint

* raul's comments

* use one method

* fix test

* raul's comment

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Ensure best better-justification is stored for fork choice (#4342)

* Ensure best better-justification is stored. Minor refactor
* Tests
* Merge refs/heads/v0.9.2 into better-best-justified
* Merge refs/heads/v0.9.2 into better-best-justified
* Ensure that epoch of attestation slot matches the target epoch (#4341)

* Disallow duplicated indices and test
* Add slot to target epoch check to on_attestation
* Add slot to target epoch check to process_attestation
* Merge branch 'v0.9.2' of git+ssh://github.com/prysmaticlabs/prysm into no-dup-att-indices
* Fixed TestProcessAttestations_PrevEpochFFGDataMismatches
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Update beacon-chain/blockchain/forkchoice/process_attestation_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Filter viable branches in fork choice (#4355)
* Only activate upon finality (#4359)

* Updated functions
* Tests
* Merge branch 'v0.9.2' of git+ssh://github.com/prysmaticlabs/prysm into queue-fix-on-finality
* Comment
* Merge refs/heads/v0.9.2 into queue-fix-on-finality
* Fixed failing test from 4359 (#4360)

* Fixed
* Skip registry spec tests
* Wait for state to be initialized at least once before running slot ticker based on genesis time (#4364)
* Sync with master
* Fix checkpoint root to use genesis block root (#4368)
* Return an error on nil head state in fork choice (#4369)

* Return error if nil head state

* Fixed tests. Saved children blocks state

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* Update metrics every epoch (#4367)
* return empty slice if state is nil (#4365)
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge refs/heads/master into v0.9.2
* Pubsub: Broadcast attestations to committee based subnets (#4316)

* Working on un-aggregated pubsub topics

* update subscriber to call pool

* checkpointing

* fix

* untested message validation

* minor fixes

* rename slotsSinceGenesis to slotsSince

* some progress on a unit test, subscribe is not being called still...

* dont change topic

* need to set the data on the message

* restore topic

* fixes

* some helpful parameter changes for mainnet operations

* lint

* Terence feedback

* unskip e2e

* Unit test for validate committee index beacon attestation

* PR feedback

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into resolveConflicts
* remove condition
* Remove unused operation pool (#4361)
* Merge refs/heads/master into v0.9.2
* Aggregate attestations periodically  (#4376)
* Persist ETH1 Data to Disk (#4329)

* add data structures

* generate proto

* add in new fields

* add comments

* add new mock state

* add new mock state

* add new methods

* some more changes

* check genesis time properly

* lint

* fix refs

* fix tests

* lint

* lint

* lint

* gaz

* adding in new proto message

* remove outdated vars

* add new changes

* remove latest eth1data

* continue refactoring

* finally works

* lint

* fix test

* fix all tests

* fix all tests again

* fix build

* change back

* add full eth1 test

* fix logs and test

* add constant

* changes

* fix bug

* lint

* fix another bug

* change back

* Apply suggestions from code review

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* Fixed VerifyIndexedAttestation (#4382)
* rm signing root (#4381)

* rm signing root

* Fixed VerifyIndexedAttestation

* Check proposer slashed status inside ProcessBlockHeaderNoVerify

* Fixed TestUpdateJustified_CouldUpdateBest

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Remove Redundant Trie Generation (#4383)

* remove trie generation
* remove deposit hashes
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into v0.9.2
* fix build
* Conflict
* Implement StreamAttestations RPC Endpoint (#4390)

* started attestation stream

* stream attestations test

* on slot tick test passing

* imports

* gaz

* Update beacon-chain/rpc/beacon/attestations_test.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

Co-authored-by: shayzluf <thezluf@gmail.com>
* Fixed goimport (#4394)
* Use custom stateutil ssz for ssz HTR spec tests (#4396)

* Use custom stateutil ssz for ssz HTR spec tests

* gofmt
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge refs/heads/master into v0.9.2
* set mainnet to be the default for build and run (#4398)

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* gracefully handle deduplicated registration of topic validators (#4399)

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* SSZ: temporarily disable roots cache until cache issues can be resolved (#4407)

* temporarily disable roots cache until cache issues can be resolved

* Also use custom ssz for spectests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Remove process block attestations as separate routine (#4408)

* Removed old save/process block atts

* Fixed tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Save Deposit Cache to Disk (#4384)

* change to protos

* fix build

* glue everything together

* fix test

* raul's review

* preston's comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Fix activation queue sorting (#4409)

* Removed old save/process block atts

* Fixed tests

* Proper sorting by eligibility epoch then by indices

* Deleted old code
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge branch 'master' into v0.9.2
* Merge refs/heads/master into v0.9.2
* stop recursive lookup if context is cancelled (#4420)
* Fix proposal bug (#4419)
* Add Pending Deposits Safely (#4422)

* safely prune cache

* use proper method

* preston's,terence's reviews and comments

* revert change to build files

* use as feature config instead
* Release custom state ssz (#4421)

* Release custom state ssz, change all HTR of beacon state to use custom method

* typo

* use mainnet config

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Update initial sync save justified to align with v0.9.3 (#4432)
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* fix build
* don't blacklist on pubsub (#4435)
* Fix Flakey Slot Ticker Test (#4434)

* use interface instead for the slot ticker

* fixed up flakey tests

* add gen time

* get duties comment

* fix lifecycle test

* more fixes
* Configurable min genesis delay (#4437)

* Configurable min genesis delay based on https://github.com/ethereum/eth2.0-specs/pull/1557

* remove feature flag for genesis delay

* fix

* demo config feedback
* patch readme
* save keys unencrypted for validators (#4439)
* Add new demo configuration targeting mainnet scale (#4397)

* Add new demo configuration targeting mainnet, with 1/10th of the deposit value

* reduce quotient by 1/10th. Use 1/10th mainnet values

* only change the inactivity quotient

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Save justified checkpoint state (#4433)

* Save justified checkpoint state

* Lint

* Feedback

* Fixed test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Update shared/testutil/deposits.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update proto/testing/ssz_regression_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/core/epoch/epoch_processing.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/kv/forkchoice.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/pool.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/pool.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/powchain/log_processing_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/service.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber_beacon_blocks_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber_beacon_blocks_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/proposer.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/prepare_forkchoice.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/powchain/log_processing_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/pool.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/powchain/log_processing_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/aggregator/server.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/cache/depositcache/pending_deposits.go
* Update beacon-chain/cache/depositcache/pending_deposits_test.go
* Update beacon-chain/rpc/validator/proposer.go
* Merge refs/heads/master into v0.9.2
* Fix e2e genesis delay issues (#4442)

* fix e2e genesis delay issues

* register flag

* typo

* Update shared/featureconfig/config.go

Co-Authored-By: Nishant Das <nishdas93@gmail.com>

* Apply suggestions from code review

Co-Authored-By: Nishant Das <nishdas93@gmail.com>

* skip demo e2e

* fix validator

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
* Batch Eth1 RPC Calls (#4392)

* add new methods

* get it working

* optimize past deposit logs processing

* revert change

* fix all tests

* use mock

* lint

* lint

* check for nil

* stop panics

* Apply suggestions from code review

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Terence's Review

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-01-07 18:47:39 +00:00
Ivan Martinez
9aed0034ec Vastly improve E2E logs and add README (#4440)
* Improve E2E logs to help debugging
* Add README to E2E
* Remove newline logs
* Remove removedb
* Try releasing after killing process
* Fix validator output
* Fix e2e
* Solve eth1 issue by clearing eth1 db
* Whoops
* Fix log spacing
2020-01-07 17:00:51 +00:00
terence tsao
f764522cbe Log warn and cont if validator pub key does not exist in DB (#4429)
* log warn and cont
* assignment
* fixed
* Merge refs/heads/master into log-warn-cont
2020-01-06 21:16:36 +00:00
Ivan Martinez
c7ae03e1b2 E2E cleanup and fix ETH1 chain startup (#4431)
* E2E cleanup and fixes

* Fix build issue
2020-01-06 14:50:36 -06:00
Preston Van Loon
4efc0f5286 Require a state to exist to save justified checkpoint (#4423)
* Add validation to save justified checkpoint in db
* gofmt
2020-01-06 14:41:51 +00:00
Preston Van Loon
9052620453 Release feature --fast-assignments (#4416)
* Deprecated --fast-assignments
* gaz
* Merge branch 'master' of github.com:prysmaticlabs/prysm into release-fast-assignments
2020-01-06 02:57:52 +00:00
Preston Van Loon
0174397f6e Release --enable-bls-pubkey-cache (#4417)
* Release bls pubkey-cache
2020-01-05 20:08:49 +00:00
terence tsao
9f5caf8fea total and target balances metrics (#4414) 2020-01-05 11:12:48 -08:00
Nishant Das
3b3f2c78e2 unskip test (#4411) 2020-01-05 13:39:14 +08:00
Ivan Martinez
242e4bccbf Move confirmDelete from beacon-chain to shared/cmd (#4410)
* Move confirmDelete to shared/cmd as ConfirmAction

* Finish moving function to shared/cmd

* Pass in both text

* Fix for comments
2020-01-04 23:32:09 -05:00
terence tsao
59ab89c98a Validator caches index (#4406) 2020-01-04 11:50:16 -08:00
terence tsao
ac768207ac Safe delete states (#4401)
* Filter block roots by finalization and head
* Tests
* Comments
* Merge branch 'master' into safe-delete-states
* Fixed existing tests
* Merge branch 'safe-delete-states' of git+ssh://github.com/prysmaticlabs/prysm into safe-delete-states
* Merge refs/heads/master into safe-delete-states
* Merge refs/heads/master into safe-delete-states
* Merge refs/heads/master into safe-delete-states
* Merge refs/heads/master into safe-delete-states
2020-01-04 19:20:20 +00:00
terence tsao
77d41024dc Revert "Use poststate for calculating att votes (#4395)" (#4404) 2020-01-04 09:25:42 -08:00
Preston Van Loon
f03083f6c8 PK manager: don't panic on bad key (#4405)
* don't panic on bad key
2020-01-04 05:32:47 +00:00
Jim McDonald
5ff9ae2108 Validator keymanager refactor (#4340)
* Move to keymanager
* Move to keymanager
* Merge branch 'keymanager' of github.com:mcdee/prysm into keymanager
* Lint
* Fix visibility
* Bazel fix
* Merge remote-tracking branch 'upstream/master' into keymanager
* logrus->log
* Merge branch 'master' into keymanager
* Merge remote-tracking branch 'upstream/master' into keymanager
* Merge branch 'master' into keymanager
* Merge branch 'master' into keymanager
* Merge branch 'master' into keymanager
* Merge branch 'master' into keymanager
* Fix test after merge
* Merge branch 'master' into keymanager
* And again
2020-01-04 03:51:53 +00:00
terence tsao
5fa03edb29 Update committee cache prev epoch (#4402)
* Update cache base on input epoch, not state epoch
* Tests
* Fixed benchmarks
* Use epochs
* One more
2020-01-03 23:47:54 +00:00
Preston Van Loon
ebe4c9c971 Use a single lock for arrays cache (#4400)
* use a single lock for arrays cache
* Merge refs/heads/master into one-lock
2020-01-03 20:24:19 +00:00
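
A hedged sketch of the pattern this change describes, collapsing per-entry locks into one mutex that guards the whole cache. All names here (committeeCache, Committees, AddCommittees) are illustrative, not Prysm's actual types:

```go
package cache

import "sync"

// committeeCache is a hypothetical cache guarded by a single lock,
// illustrating the "one lock for the whole arrays cache" pattern.
type committeeCache struct {
	lock  sync.RWMutex
	items map[uint64][]uint64
}

func newCommitteeCache() *committeeCache {
	return &committeeCache{items: make(map[uint64][]uint64)}
}

// Committees reads under the shared read lock.
func (c *committeeCache) Committees(epoch uint64) ([]uint64, bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	v, ok := c.items[epoch]
	return v, ok
}

// AddCommittees writes under the shared write lock.
func (c *committeeCache) AddCommittees(epoch uint64, indices []uint64) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.items[epoch] = indices
}
```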
Preston Van Loon
6efe5ef496 Slot ticker: panic on zero genesis time given (#4366)
* panic on zero genesis time given

* fix test

* fix test

* fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-03 13:42:35 -06:00
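
One way to express the guard described above is to panic in the ticker constructor when the genesis time is the zero value. A minimal sketch under that assumption; GetSlotTicker and SlotTicker here are hypothetical names, not Prysm's exact API:

```go
package slotutil

import "time"

// SlotTicker is a hypothetical ticker that fires once per slot.
type SlotTicker struct {
	C <-chan uint64
}

// GetSlotTicker refuses a zero genesis time, since that would make every
// slot calculation meaningless, and fails loudly instead of ticking garbage.
func GetSlotTicker(genesisTime time.Time, secondsPerSlot uint64) *SlotTicker {
	if genesisTime.IsZero() {
		panic("zero genesis time given to slot ticker")
	}
	c := make(chan uint64)
	go func() {
		slotDuration := time.Duration(secondsPerSlot) * time.Second
		for slot := uint64(1); ; slot++ {
			nextTick := genesisTime.Add(time.Duration(slot) * slotDuration)
			time.Sleep(time.Until(nextTick))
			c <- slot
		}
	}()
	return &SlotTicker{C: c}
}
```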
Celeste Ariana Seberras
fbbf5514d1 Syncing gitbook information with README (#4323)
* Syncing gitbook information with README

Updated to match https://prysmaticlabs.gitbook.io/prysm/
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge branch 'master' into master
* Curl readded
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge branch 'master' into master
2020-01-03 19:05:16 +00:00
terence tsao
220af25bce Use poststate for calculating att votes (#4395)
* Use poststate for votes
* Merge branch 'master' into use-post-state
* Merge branch 'master' into use-post-state
2020-01-03 18:29:26 +00:00
Nishant Das
c9252c06c4 Change Skip Slot Cache Key (#4391)
* use different cache key

* add build

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-01-03 11:44:48 -06:00
Jim McDonald
2c565f5d59 Harden BLS (#4393) 2020-01-03 07:34:15 -08:00
Ivan Martinez
1cb58e859e Add protos for validator proposal slashing protection (#4387)
* Add protos for validator proposal protection
* Fix formatting
* Fix formatting
* Rename protos
* remove extra line
2020-01-03 02:41:31 +00:00
terence tsao
d26839c1f2 Add aggregator indices to logs (#4385)
* Add validator_log.go

* Use new logging scheme

* Go fmt

* Better name

* Tests

* Tests

* Add wg.done, moved logging before span end

* Add aggregator indices to submit attestation log

* Rename

* Fixed test

* Add proposer index

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-02 17:45:48 -06:00
terence tsao
2cb8430ad4 Enhance attester logging (#4380)
* Add validator_log.go
* Use new logging scheme
* Go fmt
* Better name
* Tests
* Tests
* Merge refs/heads/master into better-logging
* Merge branch 'master' of git+ssh://github.com/prysmaticlabs/prysm into better-logging
* Add wg.done, moved logging before span end
* Merge branch 'better-logging' of git+ssh://github.com/prysmaticlabs/prysm into better-logging
2020-01-02 17:04:07 +00:00
Nishant Das
03356fc7b5 Add Ability to Resync Node (#4279)
* add resyncing functionality

* add more validation to status message

* lint and build

* jim's review

* preston's review

* clean up

* remove log

* remove no sync

* change again

* change back

* remove spaces

* Update shared/slotutil/slottime.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Apply suggestions from code review

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* fix refs

* raul's review

* goimports

* goimports

* add counter

* removed condition

* change back

* gaz

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-01-02 16:09:28 +08:00
Preston Van Loon
bdc4045e23 Add eth1data deposit count metric (#4374) 2019-12-30 08:34:46 -08:00
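A deposit-count gauge of this kind can be exposed with the Prometheus client library. A sketch with a hypothetical metric name and helper, not necessarily the one added in #4374:

```go
package powchain

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// eth1DepositCount is a hypothetical gauge tracking the deposit count
// reported in the latest observed eth1 data.
var eth1DepositCount = promauto.NewGauge(prometheus.GaugeOpts{
	Name: "eth1data_deposit_count",
	Help: "Deposit count from the latest observed eth1 data",
})

// recordEth1Data would be called whenever new eth1 data is processed.
func recordEth1Data(depositCount uint64) {
	eth1DepositCount.Set(float64(depositCount))
}
```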
Preston Van Loon
dc1bd1ef62 Revert 4372 and 4373 (#4375)
* Revert "only add chain start deposits up to min genesis active validator count (#4373)"

This reverts commit 35380dd9bf.
* Revert "Return an error if the wrong number of deposits are provided for genesis state (#4372)"

This reverts commit 9674575892.
2019-12-30 01:21:08 +00:00
Preston Van Loon
35380dd9bf only add chain start deposits up to min genesis active validator count (#4373) 2019-12-29 16:27:17 -08:00
Preston Van Loon
9674575892 Return an error if the wrong number of deposits are provided for genesis state (#4372)
* Return an error if the wrong number of deposits are provided for genesis state
* add regression test
2019-12-29 20:10:23 +00:00
Nishant Das
b7d0d7cbb6 Shift Deposit Contract Tools (#4357)
* move tools
* Merge refs/heads/master into shiftTools
* Merge refs/heads/master into shiftTools
2019-12-27 00:41:43 +00:00
Nishant Das
28eadac172 Fix Deposit Log Processing (#4352)
* fix log processing
* Merge branch 'master' into fixLogs
* Merge refs/heads/master into fixLogs
2019-12-26 17:44:56 +00:00
Nishant Das
d5181496c4 Add Docker image for slasher (#4356)
* add docker image for slasher

* load docker rules

* change to c base image

* switch off pure builds
2019-12-26 10:53:27 -06:00
Nishant Das
b337a5720c Handle Pubsub Panics (#4350)
* handle panics
* lint
* gaz
* preston's review
2019-12-24 04:59:08 +00:00
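
Recovering from a panic inside a pubsub handler keeps one bad message from killing the subscription loop. A minimal sketch of that pattern; the handler wiring and names are hypothetical:

```go
package sync

import (
	"context"
	"log"
	"runtime/debug"
)

// safeHandle wraps a message handler so that a panic in the handler is
// recovered and logged instead of crashing the subscription goroutine.
func safeHandle(ctx context.Context, msg interface{}, handle func(context.Context, interface{}) error) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("panic in pubsub handler: %v\n%s", r, debug.Stack())
		}
	}()
	if err := handle(ctx, msg); err != nil {
		log.Printf("could not handle message: %v", err)
	}
}
```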
terence tsao
53b8eb57ee Fuzz ProcessFinalUpdates (#4308) 2019-12-23 10:27:16 -08:00
terence tsao
30b4b045f5 Add justified check points to chain info getters (#4335)
* Add justified checkpoint getters

* Use it for chainhead

* Mock

* Fixed tests

* Fixed TestServer_StreamChainHead_OnHeadUpdated

* Caught a run time bug. Fixed

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2019-12-22 00:14:23 -06:00
Ivan Martinez
ec1e7ae005 Remove proto/sharding and move slashing to own dir (#4332)
* Clean proto and move slasher proto to own folder

* Change package name to match files

* Fix typo

* Fix tests

* Undo out of scope changes

* Run gazelle

* Fix build.bazel

* goimports
2019-12-20 21:47:00 -06:00
Preston Van Loon
a949673e33 Pubsub ignore messages from yourself (#4337)
* ignore messages from myself
2019-12-20 19:37:25 +00:00
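
The check itself amounts to comparing the message sender against the local peer ID and skipping further work on a match. A hedged sketch under that assumption; validateMsg and its wiring are hypothetical:

```go
package sync

import (
	"context"

	"github.com/libp2p/go-libp2p-core/peer"
)

// validateMsg is a hypothetical pubsub validator that ignores messages we
// published ourselves and otherwise defers to the real validation logic.
func validateMsg(ctx context.Context, self, sender peer.ID, validate func(context.Context) bool) bool {
	if sender == self {
		// Our own messages were already validated before broadcast.
		return true
	}
	return validate(ctx)
}
```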
terence tsao
996f4c7f5a Clean up list beacon committees to not use head state (#4333) 2019-12-20 08:02:12 -08:00
Valentin Mihov
3915a6e15a Fix the links in the TOC (#4334)
The links for running the client were pointing to the wrong sections.
2019-12-20 18:44:03 +08:00
Preston Van Loon
961dd21554 Use libp2p gossipsub upstream validator framework (#4318)
* add reject all pubsub validator to stop automatic propagation of messages
* gaz
* Merge branch 'master' of github.com:prysmaticlabs/prysm into pubsub-validator
* refactor p2p validator pipeline
* add sanity check
* Merge branch 'pubsub-validator' of github.com:prysmaticlabs/prysm into pubsub-validator
* fixed up test
* rem
* gaz
* Merge refs/heads/master into pubsub-validator
* fix from self test
* ensure validator data is set
* resolve todo
* Merge refs/heads/master into pubsub-validator
* gaz
* Merge refs/heads/master into pubsub-validator
* Merge branch 'pubsub-validator' of github.com:prysmaticlabs/prysm into pubsub-validator
* Merge refs/heads/master into pubsub-validator
* remove all of the 'from self' logic. filed https://github.com/libp2p/go-libp2p-pubsub/issues/250
* Merge branch 'pubsub-validator' of github.com:prysmaticlabs/prysm into pubsub-validator
* gaz
* update comment
* Merge refs/heads/master into pubsub-validator
* rename "VaidatorData"
* Merge branch 'pubsub-validator' of github.com:prysmaticlabs/prysm into pubsub-validator
* refactor
* one more bit of refactoring
* Update beacon-chain/sync/validate_beacon_attestation.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* skip validation on self messages, add @nisdas feedback to increment failure counter
* Merge branch 'pubsub-validator' of github.com:prysmaticlabs/prysm into pubsub-validator
* remove flakey
2019-12-20 03:18:08 +00:00
terence tsao
2e4908e7c4 Optimize committee helpers (#4328) 2019-12-19 15:40:51 -08:00
Preston Van Loon
da637668a8 Minor fixes to create keys errors (#4330)
* minor fixes
2019-12-19 19:44:06 +00:00
Jim McDonald
20168ad729 More complete validator metrics (#4327)
* More complete validator metrics
* Merge branch 'master' into metrics
* Merge branch 'master' into metrics
2019-12-19 16:14:44 +00:00
Jim McDonald
0b07a9f227 Migrate periodic function to use RunEvery (#4324) 2019-12-19 07:02:10 -08:00
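RunEvery-style helpers wrap a ticker and a context so callers do not manage their own ticker goroutines. A sketch of the general shape, not necessarily Prysm's exact signature:

```go
package runutil

import (
	"context"
	"time"
)

// RunEvery runs f at the given interval until ctx is cancelled.
func RunEvery(ctx context.Context, period time.Duration, f func()) {
	go func() {
		ticker := time.NewTicker(period)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				f()
			case <-ctx.Done():
				return
			}
		}
	}()
}
```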
Jim McDonald
5dca662d01 Comment typo (#4325) 2019-12-19 06:01:23 -08:00
Nishant Das
8c28d1080c Revert "Fix same deposits from same validator in same block" (#4321)
* Revert "Fix same deposits from same validator in same block (#4319)"

This reverts commit 908d220eb2.
2019-12-19 05:36:19 +00:00
Raul Jordan
6a54a430e1 Add Filter by Epoch in kv/blocks.go (#4303)
* allow for epoch based filtering
* modify repo to include filter by epoch
* resolve items
* revamped to use epoch filter
* Merge branch 'master' into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* gazelle rem unused
* Merge refs/heads/master into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* Merge refs/heads/master into roots-by-epoch
* Update beacon-chain/db/kv/blocks_test.go

Co-Authored-By: shayzluf <thezluf@gmail.com>
* Update beacon-chain/db/kv/blocks_test.go

Co-Authored-By: shayzluf <thezluf@gmail.com>
* fmt
* lint res
2019-12-19 00:15:31 +00:00
terence tsao
908d220eb2 Fix same deposits from same validator in same block (#4319)
* Update dict

* Test helper

* Regression test

* Comment

* Reset test cache
2019-12-18 16:53:30 -06:00
Preston Van Loon
ff1fd77425 Build docker images for non-root user (#4320)
* build docker images as non-root user
* search and replace mistake
* buildifier
* Change uid to 1001
2019-12-18 20:52:25 +00:00
Nishant Das
e27bc8312f Persist ETH1 Information (#4305)
* add data structures
* generate proto
* add in new fields
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into saveETH1Data
* add comments
* Merge branch 'master' into saveETH1Data
* remove file
* Merge branch 'saveETH1Data' of https://github.com/prysmaticlabs/geth-sharding into saveETH1Data
* Merge branch 'master' into saveETH1Data
* Merge branch 'master' into saveETH1Data
* Merge refs/heads/master into saveETH1Data
* Merge refs/heads/master into saveETH1Data
2019-12-18 05:57:54 +00:00
Jim McDonald
78968c1e29 Add individual p2p host counts (#4312)
* Add individual p2p host counts
* Merge branch 'master' into p2pmetrics
* Merge branch 'master' into p2pmetrics
* Merge branch 'master' into p2pmetrics
* Merge branch 'master' into p2pmetrics
2019-12-18 05:31:46 +00:00
terence tsao
fb431c11c1 Process slots exit early if same slot (#4314)
* skip process slot if it's same slots
* Merge branch 'master' into optimize-process-slots
* Merge refs/heads/master into optimize-process-slots
2019-12-18 04:37:28 +00:00
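
The optimization above is an early return when the state is already at the requested slot. A hedged sketch with a heavily simplified state type, only meant to show the shape of the early exit:

```go
package state

import "errors"

// BeaconState is a stripped-down stand-in for the real beacon state type.
type BeaconState struct {
	Slot uint64
}

// ProcessSlots advances state to the target slot, returning immediately
// when the state is already there (the early exit this change adds).
func ProcessSlots(state *BeaconState, slot uint64) (*BeaconState, error) {
	if state.Slot == slot {
		return state, nil // nothing to do
	}
	if state.Slot > slot {
		return nil, errors.New("requested slot is in the past")
	}
	for state.Slot < slot {
		// Per-slot processing (caches, roots, epoch transitions) would go here.
		state.Slot++
	}
	return state, nil
}
```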
Raul Jordan
30ed59e9c8 Make Sure ChainHeadStream Remains Open (#4282)
* do not return from stream
* fix test
* Merge branch 'master' into no-stream-return
* Merge refs/heads/master into no-stream-return
2019-12-18 04:07:11 +00:00
Jim McDonald
2e2d5199e8 Remove ChainStartFeed mocks (#4310)
* Remove ChainStartFeed from interop service
* Remove final ChainStartFeed mocks
* Gazelle
* Merge branch 'master' into coldstart
* Merge branch 'master' into coldstart
* Merge branch 'master' into coldstart
2019-12-18 03:36:07 +00:00
Raul Jordan
4fe31cf1b3 Add Benchmarks for Custom SSZ Hash Tree Root (#4313)
* bench ssz tree root
* more benches
* Merge branch 'master' into ssz-bench
2019-12-18 02:57:40 +00:00
Preston Van Loon
e82e582cdf Config to exclude kafka dep at build time (#4309)
* add flag to exclude kafka
* Add config flag to exclude kafka
* Merge branch 'master' into buildtime-exclude-kafka
2019-12-18 02:07:49 +00:00
Jim McDonald
0b2d9d8576 Tidy up interop commands (#4311) 2019-12-17 15:49:21 -08:00
Preston Van Loon
65e3f3e007 Add pubsub message ID function (#4304)
* add pubsub message ID
* thanks linter
* Update rules_go, gogo protobuf, comment
* Merge branch 'master' into add-msg-fn-id
2019-12-17 05:17:54 +00:00
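
A content-derived message ID lets gossipsub deduplicate identical messages regardless of which peer forwarded them. A sketch of a hash-based ID function; how it is registered with pubsub is omitted and the name msgID is hypothetical:

```go
package p2p

import (
	"crypto/sha256"
	"encoding/base64"
)

// msgID derives a stable message ID from the message payload, so identical
// messages relayed by different peers collapse to the same ID.
func msgID(data []byte) string {
	h := sha256.Sum256(data)
	return base64.URLEncoding.EncodeToString(h[:])
}
```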
Preston Van Loon
2c28e4e7a3 Improvements to Committee Assignments for multiple key requests (#4294)
* Add committees helper, benchmark, results show 62ms for 8k validators which was previously 4 minutes
* Add regression test with same data
* fix epoch conversion
* lint
* undo and lint
* Merge branch 'master' of github.com:prysmaticlabs/prysm into zoom-zoom-assignments
* remove validaotr index span
* fix comment, add test to test against spec definition method for consistency.
* Deprecate CommitteeAssignment, delete unused reference to CommitteeAssignment
* Merge branch 'master' of github.com:prysmaticlabs/prysm into zoom-zoom-assignments
* remove new line
* make test be more complicated with validators activated in an epoch transition
* add feature flag for fast-assignments
* Merge branch 'master' of github.com:prysmaticlabs/prysm into zoom-zoom-assignments
* gaz, gofmt, add deprecated code back
* Update beacon-chain/core/helpers/committee.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Merge refs/heads/master into zoom-zoom-assignments
* Merge refs/heads/master into zoom-zoom-assignments
* Merge refs/heads/master into zoom-zoom-assignments
2019-12-17 03:05:26 +00:00
Raul Jordan
642254daa6 Update README Instructions to Resolve Deposit Contract Address (#4302)
* dep addr resolver
* Merge refs/heads/master into update-read
2019-12-17 02:52:54 +00:00
Nishant Das
c41140e15a Optimize Insertion in Deposit Trie (#4299)
* current changes
* change algorithm for tree insert
* almost done with getting this to pass
* unit test passes
* tests now pass
* fix in repo
* Merge branch 'master' into optimizeDepositLogs
* fix build
* Merge branch 'optimizeDepositLogs' of github.com:prysmaticlabs/prysm into optimizeDepositLogs
* remove tautology
* fix tautology
* fix up sparsity
* Merge branch 'master' into optimizeDepositLogs
* further fixes
* Merge branch 'optimizeDepositLogs' of github.com:prysmaticlabs/prysm into optimizeDepositLogs
* Update shared/trieutil/sparse_merkle.go
* comments
* Merge branch 'optimizeDepositLogs' of github.com:prysmaticlabs/prysm into optimizeDepositLogs
* add bench for optimized
* gaz
* Merge refs/heads/master into optimizeDepositLogs
2019-12-17 02:19:12 +00:00
terence tsao
23a6c20dd4 Service as proper names (#4293) 2019-12-16 19:53:55 -06:00
Preston Van Loon
514f5f904f Add prometheus gRPC time histograms (#4300)
* Add grpc_prometheus.EnableHandlingTimeHistogram()
* Merge refs/heads/master into enable-prom-hist
2019-12-16 22:00:34 +00:00
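
go-grpc-prometheus exposes per-method handling-time histograms once they are enabled on the server metrics. A hedged sketch of the usual setup; newServer is an illustrative helper, not Prysm's actual constructor:

```go
package rpc

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

// newServer wires the prometheus interceptors and turns on the
// per-method handling time histogram.
func newServer() *grpc.Server {
	grpc_prometheus.EnableHandlingTimeHistogram()
	s := grpc.NewServer(
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
	)
	grpc_prometheus.Register(s)
	return s
}
```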
Preston Van Loon
5844436716 Don't serialize bls signature just to deserialize it again (#4298)
* Don't serialize bls signature just to deserialize it again
* gaz
* Merge branch 'master' into minor-thing
* Merge branch 'master' into minor-thing
2019-12-16 19:01:40 +00:00
terence tsao
5879b26b4b Hardening Committee Cache for Runtime (#4270) 2019-12-16 10:14:21 -08:00
terence tsao
566efaef89 Optimize aggregator process slots (#4297)
* Advance slots up to epoch start
* Merge branch 'master' into opt-process-slots-aggregator
* Merge branch 'master' into opt-process-slots-aggregator
2019-12-16 17:43:33 +00:00
Jim McDonald
d9062a7e30 Use RunEvery in place of custom tickers (#4290) 2019-12-16 11:00:15 -06:00
Preston Van Loon
3f344aee55 add a few fuzz tests (#4291) 2019-12-16 00:52:20 -06:00
Nishant Das
fd93751bf7 Fix Goerli Faucet (#4289)
* fix faucet

* minor fixes
2019-12-15 08:21:29 -06:00
Preston Van Loon
325a2503f7 AttestingIndices: Make beacon committee be an argument (#4284)
* make beacon committee be an argument
* remove state from ConvertToIndexed
* Merge branch 'master' into refactor-AttestingIndices-committee
* Merge branch 'master' into refactor-AttestingIndices-committee
* Merge branch 'master' into refactor-AttestingIndices-committee
* Merge refs/heads/master into refactor-AttestingIndices-committee
2019-12-15 05:02:50 +00:00
Preston Van Loon
2179ac683e Fuzz testing for custom state ssz (#4234)
* Add a random fuzz test to ssz to capture panics and compare the effectiveness of the cache. This comment shows a difference in state root calculation 52% of the time, and what is even more concerning is that spec tests pass with the flag on.
* added case for one
* bring down failure rate
* prevent caching operations if no cache enabled
* unit test and pretty printer
* identify further sources of problems
* no more panics
* not panicking anymore
* fix lint
* Merge branch 'master' into fuzz-ssz
* Merge branch 'master' into fuzz-ssz
* passing up to 68
* Merge branch 'fuzz-ssz' of github.com:prysmaticlabs/prysm into fuzz-ssz
* need to find the culprit for 100
* 100 passes, now only 16 out of 1000
* state roots being mutated
* one out of 10k
* fuzzing stuff
* fix up lint
* Merge branch 'master' into fuzz-ssz
* cleanup
* fixing more comments
* Merge branch 'master' into fuzz-ssz
2019-12-15 04:32:19 +00:00
terence tsao
0f4dabfad8 Fix cloning target state for check point state cache (#4288) 2019-12-14 16:06:30 -08:00
terence tsao
8724dcd41b Sort received atts by sig (#4287) 2019-12-14 10:51:27 -06:00
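Sorting received attestations by signature gives a deterministic order before aggregation and deduplication. A minimal sketch with a simplified attestation type (att and sortBySig are illustrative names):

```go
package attestations

import (
	"bytes"
	"sort"
)

// att is a simplified stand-in for an attestation carrying a BLS signature.
type att struct {
	Signature []byte
}

// sortBySig orders attestations deterministically by their signature bytes.
func sortBySig(atts []att) {
	sort.Slice(atts, func(i, j int) bool {
		return bytes.Compare(atts[i].Signature, atts[j].Signature) < 0
	})
}
```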
Jim McDonald
89e1200b73 Add ticker shared helper (#4285) 2019-12-13 15:14:56 -08:00
metanull-operator
0f677a09b6 Added 'Prysm' to version information. (#4281)
* Added 'Prysm' to version information.
* Merge branch 'master' into versionUpdate
2019-12-13 18:52:28 +00:00
Nishant Das
c5dcf49ded Add Flag For Minimum Handshakes (#4280)
* add flag
* jim and preston's review
* check max peers
* gaz
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into minStatusCount
* remove space
* add references
* add warning log
* change log
* gaz
2019-12-13 15:12:49 +00:00
terence tsao
a5881f924f Deprecate active count and committee cache flags (#4276)
* Deprecate active count and committee cache flags
* Merge branch 'master' into deprecate-flags
2019-12-13 14:00:29 +00:00
shayzluf
d93ec64b21 Slasher Grpc client (#4230)
* grpc connection
* fix order
* Merge branch 'fixInteropGenesis' of https://github.com/prysmaticlabs/prysm into grpc_client
* gaz
* grpc setup
* running version
* added comments
* Merge branch 'master' of github.com:prysmaticlabs/prysm into grpc_client
* fix test
* terence feedback
* terence feedback
* feedback changes
* feedback changes
* comment fix
* Merge branch 'master' of github.com:prysmaticlabs/prysm into grpc_client
* logging when there is no chain head
* rename function
* terence and nishant feedback
* fix imports
* nishant feedback
* fix wait for stop
* fix imports
* fix tests
2019-12-13 07:31:37 +00:00
Raul Jordan
a9a5973b98 Add Getter for Genesis Block (#4271)
* test passing

* kafka
2019-12-12 16:27:22 -06:00
Jim McDonald
570efe3d04 Give peers a chance (#4268)
* Add decay function for peer badresponses count
* Activate peer decay in p2p
2019-12-12 14:34:28 +00:00
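
The decay function periodically reduces each peer's bad-response count so a peer that misbehaved once is not penalized forever. A hedged sketch; the scorer type, fields, and method names are hypothetical:

```go
package peers

import "sync"

// scorer tracks bad responses per peer and decays them over time.
type scorer struct {
	lock         sync.Mutex
	badResponses map[string]int
}

func newScorer() *scorer {
	return &scorer{badResponses: make(map[string]int)}
}

// IncrementBadResponses records a misbehaving response from a peer.
func (s *scorer) IncrementBadResponses(pid string) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.badResponses[pid]++
}

// Decay is meant to run periodically (e.g. via a RunEvery-style helper) and
// gives peers a chance to recover by reducing every non-zero count by one.
func (s *scorer) Decay() {
	s.lock.Lock()
	defer s.lock.Unlock()
	for pid, count := range s.badResponses {
		if count > 0 {
			s.badResponses[pid] = count - 1
		}
	}
}
```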
Raul Jordan
2e9c3895f4 Bring Back Epoch Filtering for ListBlocks API (#4262)
* bring back the epochs!
* fix up
* Merge refs/heads/master into bring-back-epoch-filter
* add in patch
* Merge branch 'bring-back-epoch-filter' of github.com:prysmaticlabs/prysm into bring-back-epoch-filter
* import spacing
* lint
* build
* gaz
* Merge refs/heads/master into bring-back-epoch-filter
* gaz
* Merge branch 'bring-back-epoch-filter' of github.com:prysmaticlabs/prysm into bring-back-epoch-filter
* move back perf
* update ethapis
* fix build
* Merge refs/heads/master into bring-back-epoch-filter
2019-12-12 02:27:19 +00:00
terence tsao
9033f6801b Removed active count and shuffling cache (#4266)
* Removed
* All tests pass
* Gaz
* Removed new lines
* A few more lines...
* I think i got them all
* and I didn't : )
* Could this be last...
2019-12-12 01:15:44 +00:00
Preston Van Loon
c0b3767757 remove old cache for active indices. this is not used in production and will soon be replaced (#4264) 2019-12-11 15:48:48 -06:00
Preston Van Loon
e72ff1bb4f Add unit test to ActiveValidatorIndices (#4263)
* Add regression test to ActiveValidatorIndices

* fix test, more comments

* imports
2019-12-11 12:27:25 -08:00
Jim McDonald
0cb59bb018 Tidy up "Requesting blocks" log in initial sync (#4256)
* Tidy up log
* Merge branch 'master' into logfix
2019-12-11 17:32:38 +00:00
Preston Van Loon
6e549c90ba Initialize server context for beacon server (#4260)
* Initialize server context for beacon server
* Merge branch 'master' into fix-4254
2019-12-11 16:37:05 +00:00
Jim McDonald
813233373e Advanced peer tracking (#4233)
* Advanced peer status

* Rework errors; add tests

* Gazelle

* time->roughtime

* Update beacon-chain/p2p/handshake.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update beacon-chain/p2p/interfaces.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Downgrade log

* Tidy up handshaking logic and commentary

* Downgrade log message

* Protect connected peers from disconnection; increase high water level to avoid bad interactions at maxPeers
2019-12-11 18:31:36 +08:00
Preston Van Loon
5757ce8894 Fix flaky TestKV_Aggregated_CanSaveRetrieve (#4253)
* Update aggregated_test.go
2019-12-11 06:08:54 +00:00
Nishant Das
7c11367cd8 Update SSZ (#4252)
* update ssz to latest
* Merge branch 'master' into updateSSZ
* Merge refs/heads/master into updateSSZ
2019-12-11 05:16:37 +00:00
terence tsao
6d2c37caf1 Removed process epoch (#4251) 2019-12-10 20:40:14 -08:00
Ivan Martinez
812311f6f7 Add more detail to README and add benchmark for HashTreeRootState (#4247)
* Add more detail to readme and add benchmark for HashTreeRootState
* Add hashtreerootstate benchmark results to readme
* Merge branch 'master' into benchmarks-readme
2019-12-11 00:14:33 +00:00
terence tsao
22d81ef0ed Update process_epoch benchmark (#4245)
* Update to ProcessEpochPrecompute
* Comment
* Add b.N back
2019-12-10 23:32:11 +00:00
Ivan Martinez
414fcda9a2 Change jaeger default endpoint (#4242)
* Change jaeger default
2019-12-10 19:45:14 +00:00
Nishant Das
bb2fc4cd5e Fix Deposit Sender Utility (#4239)
* add in fix

* change gas limit
2019-12-10 09:35:24 -08:00
Nishant Das
5fd6a92052 Fix DiscoveryV5 (#4237)
* add fallback

* fix test
2019-12-10 13:35:16 +08:00
Nishant Das
7ccbe48f54 fix order (#4228) 2019-12-09 22:31:53 +08:00
Preston Van Loon
7a46cc0681 Enforce stronger head state operations (#4216)
* Enforce stronger head state operations

* fix genesis state generation

* one test left to fix

* all tests passing now

* gofmt

* Update beacon-chain/db/kv/state_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update beacon-chain/db/kv/state.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* fix tests
2019-12-09 15:35:18 +08:00
Preston Van Loon
92d21c72b8 Skip accessing peer status if it does not exist (#4226)
* skip accessing peer status if it does not exist
* Merge refs/heads/master into fix-panic-rr
2019-12-08 23:30:39 +00:00
Preston Van Loon
0cb681476e Add span for saveCheckpointState (#4227)
* Add span for saveCheckpointState
* Add span for saveCheckpointState
2019-12-08 22:53:09 +00:00
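
Prysm uses OpenCensus tracing, and adding a span around a function typically follows the pattern below. The traced body is elided here and only the span plumbing is shown:

```go
package blockchain

import (
	"context"

	"go.opencensus.io/trace"
)

// saveCheckpointState illustrates wrapping a function in a trace span;
// everything inside the span is a placeholder in this sketch.
func saveCheckpointState(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "blockchain.saveCheckpointState")
	defer span.End()

	_ = ctx // real work, now attributed to the span, would happen here
	return nil
}
```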
Preston Van Loon
fa7b8ab60d A few improvements to handshake (#4214)
* A few improvements to handshake and exit round robin
* revert beacon-chain/sync/initial-sync/round_robin.go
* Merge refs/heads/master into p2p-fixes
* make handshake non-blocking
* Merge branch 'p2p-fixes' of github.com:prysmaticlabs/prysm into p2p-fixes
* Merge refs/heads/master into p2p-fixes
* Merge refs/heads/master into p2p-fixes
* Merge refs/heads/master into p2p-fixes
* Update handshake.go
2019-12-08 05:12:56 +00:00
Preston Van Loon
bdb80271a3 Allow faucet on prylabs.network (#4220)
* fix faucet hostname issue
2019-12-08 03:01:44 +00:00
terence tsao
1b8eb16fc7 --initial-sync-cache-state don't need to save head root (#4219)
* Test

* Run time works

* Revert
2019-12-07 16:45:39 -06:00
metanull-operator
1222ebb6db Graffiti flag (#4213)
* Implementation of graffiti flag without tests.
* Updated to pass graffiti as string instead of []byte all the way to the ProposeBlock RPC call. This ensures that the ToBytes32() call is handled in ProposeBlock as opposed to relying on the caller to ensure that the value passed is only 32 bytes. This adds work by doing that conversion on each proposed block for a static value of graffiti, but it also helps protect against an RPC call to ProposeBlock that has more than 32 bytes for graffiti.
* Added test case for validator.
* Added GraffitiFlag to validate usage test.
* Updated data structures and logic to convert graffiti flag from string to byte array earlier in the process. Now converting when setting up ValidatorService.
* Updated test case to correctly set up validator using byte array.
* Merge branch 'master' into graffitiFlag
2019-12-07 19:13:56 +00:00
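The graffiti flag change above boils down to converting the flag's string value into the fixed 32-byte field a block expects, once, when the validator service is configured. A hedged sketch of that conversion (the helper name is illustrative):

```go
package main

import "fmt"

// toBytes32 copies at most 32 bytes of the input into a fixed-size array,
// silently truncating anything longer, so an over-long flag value can never
// overflow the block's graffiti field.
func toBytes32(s string) [32]byte {
	var out [32]byte
	copy(out[:], s)
	return out
}

func main() {
	graffiti := toBytes32("prysm validator")
	fmt.Printf("%x\n", graffiti)
}
```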
terence tsao
3e15e2fc1e Add operation feed (#4215)
* Events
* Notifiers
* Refactor
* Gaz
* Fixed rest
* Lint
* Lint
* Visibility
* Typo
* Typo
* Apply suggestions from code review

Co-Authored-By: Nishant Das <nishdas93@gmail.com>
2019-12-07 17:57:26 +00:00
Nishant Das
667466020e Change All Caches To Ristretto (#4208)
* new caches
* goimports, gaz
* fix all tests
* Merge branch 'swapP2PCaches' of https://github.com/prysmaticlabs/geth-sharding into swapP2PCaches
* remove from bls
* remove ccache
* fix handshake
* Merge branch 'master' into swapP2PCaches
* gofmt
* Merge branch 'master' into swapP2PCaches
2019-12-06 20:06:37 +00:00
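The cache swap above moves to dgraph-io's Ristretto. A small usage sketch of that library's API; the sizing values are illustrative, not the ones Prysm chose:

```go
package main

import (
	"fmt"
	"log"

	"github.com/dgraph-io/ristretto"
)

func main() {
	// A small Ristretto cache; the numbers below are illustrative only.
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 1e5,     // number of keys to track access frequency for
		MaxCost:     1 << 20, // total cost budget (~1 MiB if cost == bytes)
		BufferItems: 64,      // per-Get buffer size recommended by the docs
	})
	if err != nil {
		log.Fatal(err)
	}

	// Set takes an arbitrary cost; here the value's length in bytes.
	cache.Set("block-root", []byte{0xde, 0xad, 0xbe, 0xef}, 4)
	cache.Wait() // Sets are asynchronous; Wait flushes buffers in examples/tests.

	if v, ok := cache.Get("block-root"); ok {
		fmt.Printf("cached: %x\n", v.([]byte))
	}
}
```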
terence tsao
f63ab1e136 Remove formatting error for signature fail to verify (#4211)
* Remove formatting error for sig
* Update beacon-chain/core/blocks/block_operations.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Merge branch 'master' into sig-error-log
2019-12-06 18:21:57 +00:00
Preston Van Loon
6841d96f36 Remove formatting from error (#4210)
* remove formatting from error
* Fix err
2019-12-06 17:48:38 +00:00
Preston Van Loon
cae24068d4 prevent OR on bitlists of different length (#4209)
* prevent OR on bitlists of different length
* prevent OR on bitlists of different length
2019-12-06 14:33:40 +00:00
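The bitfield fix above guards against OR-ing two bitlists of different lengths. A standalone sketch of that guard on raw byte slices; Prysm's real code operates on go-bitfield Bitlist values rather than plain []byte:

```go
package main

import (
	"errors"
	"fmt"
)

// orBitfields returns the bitwise OR of two raw bitfields, refusing to
// combine fields of different lengths instead of silently producing a
// truncated or padded result.
func orBitfields(a, b []byte) ([]byte, error) {
	if len(a) != len(b) {
		return nil, errors.New("cannot OR bitfields of different lengths")
	}
	out := make([]byte, len(a))
	for i := range a {
		out[i] = a[i] | b[i]
	}
	return out, nil
}

func main() {
	merged, err := orBitfields([]byte{0b0001}, []byte{0b0100})
	fmt.Println(merged, err) // [5] <nil>

	_, err = orBitfields([]byte{0b0001}, []byte{0b0100, 0b0000})
	fmt.Println(err) // length mismatch error
}
```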
terence tsao
dc0b8fad4f Move recently seen roots (#4206)
* Move recently seen roots earlier
* Preston's feedback
2019-12-06 06:30:43 +00:00
Preston Van Loon
d3375d98a8 Kafka exporter (#3840)
* abstract db interface, kafka build, work in progress
* checkpoint
* Merge branch 'master' of github.com:prysmaticlabs/prysm into es-exporter
* feature flag
* move passthrough
* flag change
* gofmt
* Merge branch 'master' of github.com:prysmaticlabs/prysm into es-exporter
* missing db methods
* Merge branch 'master' of github.com:prysmaticlabs/prysm into es-exporter
* fix interface
* Merge branch 'master' of github.com:prysmaticlabs/prysm into es-exporter
* Merge branch 'master' of github.com:prysmaticlabs/prysm into es-exporter
* Merge branch 'master' of github.com:prysmaticlabs/prysm into es-exporter
* try using cmake built from source
* lint godocs
* lint godocs
* lint godocs
* Update BUILD.bazel
* Merge branch 'master' into es-exporter
* Merge branch 'master' into es-exporter
* Merge branch 'master' into es-exporter
* Merge branch 'master' of github.com:prysmaticlabs/prysm into es-exporter
* gaz
2019-12-06 02:05:58 +00:00
terence tsao
9d4c7cb4f7 Use cache state during init sync (#4199)
* Initial sync state cache
* Gaz
* Gaz
* Don't save head root
* Fix config validator
* Uncomment save head
* Merge branch 'master' into initial-sync-no-verify
* Minor refactor
* Merge branch 'initial-sync-no-verify' of https://github.com/prysmaticlabs/prysm into initial-sync-no-verify
* Merge branch 'master' into initial-sync-no-verify
* Tests
* Merge branch 'initial-sync-no-verify' of https://github.com/prysmaticlabs/prysm into initial-sync-no-verify
* Merge branch 'master' into initial-sync-no-verify
* Add lock
* Merge branch 'initial-sync-no-verify' of https://github.com/prysmaticlabs/prysm into initial-sync-no-verify
* Tests
* Removed save head
* One more test
* Merge branch 'master' into initial-sync-no-verify
* Raul's feedback
* Merge branch 'initial-sync-no-verify' of https://github.com/prysmaticlabs/prysm into initial-sync-no-verify
* Comment
* Gazelle
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into initial-sync-no-verify
* revert
* Update beacon-chain/blockchain/service.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into initial-sync-no-verify
* Merge branch 'initial-sync-no-verify' of https://github.com/prysmaticlabs/prysm into initial-sync-no-verify
* Fixed test
* Fixed feature flag
* Merge branch 'master' into initial-sync-no-verify
* Fixed cache genesis state test
* Merge branch 'initial-sync-no-verify' of https://github.com/prysmaticlabs/prysm into initial-sync-no-verify
2019-12-06 00:49:19 +00:00
Raul Jordan
ae2b2e74ca Create Functional Cache for Custom State SSZ (#4197)
* better abstraction
* using ristretto
* begin on custom, cached array roots merkleization
* do cache initialization
* passing with new cache
* works
* fix up test
* fixed up cache
* include proper comments
* remove old hash tree root
* rem validator bottleneck
* gaz
* Merge branch 'master' into caching-ssz
* optimized!!!!
* Merge branch 'caching-ssz' of github.com:prysmaticlabs/prysm into caching-ssz
* add mutex
* Merge branch 'master' into caching-ssz
* add read lock
* fmt
* add mathutil
* Merge branch 'master' into caching-ssz
* Merge refs/heads/master into caching-ssz
* Merge refs/heads/master into caching-ssz
* Merge refs/heads/master into caching-ssz
2019-12-05 20:23:59 +00:00
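The functional cache above memoizes expensive merkle roots so repeated hash-tree-root calls over unchanged data become cheap lookups. A sketch of that idea with a plain map and RWMutex; the commit notes the real cache is built on Ristretto and keys on SSZ-specific data:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sync"
)

// rootCache memoizes 32-byte roots keyed by a hash of their input. This is a
// sketch of the caching pattern only, not Prysm's actual implementation.
type rootCache struct {
	mu    sync.RWMutex
	roots map[[32]byte][32]byte
}

func newRootCache() *rootCache {
	return &rootCache{roots: make(map[[32]byte][32]byte)}
}

// rootOf returns the cached root for data, computing and storing it on a miss.
func (c *rootCache) rootOf(data []byte, compute func([]byte) [32]byte) [32]byte {
	key := sha256.Sum256(data)

	c.mu.RLock()
	if r, ok := c.roots[key]; ok {
		c.mu.RUnlock()
		return r
	}
	c.mu.RUnlock()

	r := compute(data)
	c.mu.Lock()
	c.roots[key] = r
	c.mu.Unlock()
	return r
}

func main() {
	c := newRootCache()
	r := c.rootOf([]byte("validator registry bytes"), func(b []byte) [32]byte {
		return sha256.Sum256(b) // stand-in for a real hash-tree-root
	})
	fmt.Printf("%x\n", r)
}
```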
Ivan Martinez
83179376d4 Cleanup testutil and change name scheme to reference deterministic (#4167)
* Clean testutil, change tool names to Deterministic
* Cleanup errors
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into clean-testutil
* Fix bug with generating deposits
* Fix a few tests
* Fix most tests
* Clean up some tests
* Remove err pt. 1
* Remove err pt. 2
* Change tests to use genesis state util
* Remove err from deposits
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into clean-testutil
* Remove circular dependency
* Remove uncompressed signature test
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into clean-testutil
* Merge branch 'master' into clean-testutil
* Goimports
* gazelle
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into clean-testutil
* Add back error handling
* New attestation pool (#4185)

* New pool
* Better namings
* Fmt
* Gazelle
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into define-pool
* Raul's feedback
* Raul's feedback
* Log peer connected log for incoming connections (#4173)

* Log peer connected log for incoming connections
* Merge branch 'master' into peerconnected
* Merge branch 'master' into peerconnected
* Update handshake.go
* Update handshake.go
* Merge branch 'master' into peerconnected
* Merge branch 'master' into peerconnected
* Attestation pool to use go-cache (#4187)
* Update EthereumAPIs  (#4186)

* include new patch targeting latest ethapis master
* ensure project builds
* Merge branch 'master' into update-all-api
* fix up committees
* Merge branch 'update-all-api' of github.com:prysmaticlabs/prysm into update-all-api
* include latest eth apis
* Merge branch 'master' into update-all-api
* update block tests
* Merge branch 'update-all-api' of github.com:prysmaticlabs/prysm into update-all-api
* Merge branch 'master' into update-all-api
* add todos
* Implement GetValidator RPC Endpoint (#4188)

* include new patch targeting latest ethapis master
* ensure project builds
* Merge branch 'master' into update-all-api
* fix up committees
* Merge branch 'update-all-api' of github.com:prysmaticlabs/prysm into update-all-api
* include latest eth apis
* Merge branch 'master' into update-all-api
* update block tests
* Merge branch 'update-all-api' of github.com:prysmaticlabs/prysm into update-all-api
* Merge branch 'master' into update-all-api
* add todos
* implement get validator rpc
* add test for get validator
* table driven test
* fix up test
* fix confs
* tests for more cases
* fix up tests and add out of range
* Slasher optimization (#4172)

* size

* batching and concurrency improvements

* gaz

* merge fixes

* fix comment

* fix test

* fix test

* fix build

* ethpb

* ethpb

* fix test

* fix comment

* add benchmark

* fix benchmark
* Handle error for all testutil uses
* Fix errors
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into clean-testutil
* Revert error handling

Revert "Fix errors"

This reverts commit db081f5486.

Revert "Handle error for all testutil uses"

This reverts commit bdabef2306.

Revert "Add back error handling"

This reverts commit da7e3d2020.
* Change genesis state func to use testing.T
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into clean-testutil
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into clean-testutil
* Fix conflict
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into clean-testutil
* Merge branch 'master' into clean-testutil
* Merge branch 'master' into clean-testutil
* Capitalize other logs
* Merge branch 'clean-testutil' of https://github.com/0xKiwi/Prysm into clean-testutil
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into clean-testutil
* Merge branch 'master' into clean-testutil
2019-12-05 19:51:33 +00:00
Nishant Das
c36a852329 Swap to Ristretto Cache (#4070)
* add new cache
* change to larger size
* Merge branch 'master' into swapCache
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into swapCache
* remove imports
* cache fixes
* Merge branch 'master' into swapCache
* add better costing
* Merge branch 'swapCache' of https://github.com/prysmaticlabs/geth-sharding into swapCache
* comment
* change back to var
* Merge branch 'master' into swapCache
* Merge branch 'master' into swapCache
* Merge branch 'master' into swapCache
* Merge branch 'master' into swapCache
* Merge refs/heads/master into swapCache
2019-12-05 19:13:11 +00:00
Jim McDonald
650a278fee Harden BLS against invalid input (#4203)
* Harden BLS against invalid input
* Merge branch 'master' into blsharden
* Merge branch 'master' into blsharden
* Merge branch 'master' into blsharden
2019-12-05 18:33:29 +00:00
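Hardening BLS against invalid input is mostly about rejecting malformed byte slices before they reach point deserialization. A sketch of such a guard, assuming the standard compressed BLS12-381 sizes (48-byte public keys, 96-byte signatures); the real code would then hand the validated bytes to the BLS library:

```go
package main

import "fmt"

// Compressed BLS12-381 point sizes.
const (
	pubkeyLen    = 48
	signatureLen = 96
)

// checkBLSInput rejects malformed input up front, so a nil or short slice
// produces an error rather than a panic deep inside deserialization.
func checkBLSInput(pub, sig []byte) error {
	if len(pub) != pubkeyLen {
		return fmt.Errorf("public key must be %d bytes, got %d", pubkeyLen, len(pub))
	}
	if len(sig) != signatureLen {
		return fmt.Errorf("signature must be %d bytes, got %d", signatureLen, len(sig))
	}
	return nil
}

func main() {
	fmt.Println(checkBLSInput(make([]byte, 48), make([]byte, 95)))
}
```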
Ivan Martinez
6816337589 Make logs more helpful for E2E (#4198)
* Make logs more helpful for E2E
* gofmt
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into helpful-logs
* Add extra info
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into helpful-logs
* gofmt and fix error output
* Use errors
* Gazelle
* Revert "gofmt and fix error output"

This reverts commit 9fc85f2dd2.
* Formatting and fix
* add f
* Add more details to logs
* Merge branch 'master' into helpful-logs
* Change text a bit
* Merge branch 'helpful-logs' of https://github.com/0xKiwi/Prysm into helpful-logs
* Merge branch 'master' into helpful-logs
2019-12-05 18:00:55 +00:00
Preston Van Loon
2950e4aeb4 Faucet: Add score in error (#4200)
* Update server.go
* Merge refs/heads/master into prestonvanloon-patch-2
2019-12-05 17:37:19 +00:00
Jim McDonald
746cc142d0 Remove erroneous err (#4202) 2019-12-05 05:53:39 -08:00
Ivan Martinez
261428118e Isolate BLS pubkey cache to only when cache is enabled (#4195)
* Only add to cache when cache is enabled
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into isolate-cache
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into isolate-cache
* Merge branch 'master' into isolate-cache
2019-12-05 02:56:36 +00:00
Preston Van Loon
544e5309ad Faucet: Use client IP in captcha requests (#4196)
* Use client IP in captcha requests
2019-12-05 02:18:15 +00:00
terence tsao
23dd951e59 Chain head state return nil instead of err (#4193)
* Return nil instead of err
* Preston's feedback
* Merge branch 'master' into return-nil
2019-12-04 23:48:30 +00:00
Preston Van Loon
498417a8fc Wait for 3 peers to start sync (#4194)
* Update service.go
2019-12-04 23:09:47 +00:00
Preston Van Loon
617325b726 Faucet improvements (#4192)
* Add faucet reCaptcha improvements in verification
* Add faucet reCaptcha improvements in verification
* add roughtime
2019-12-04 20:33:46 +00:00
Raul Jordan
9e5cc81340 Implement Prysm-Specific HashTreeRootState (#4077)
* new ssz hash tree root
* Merge branch 'master' into new-ssz-state
* better comments on func
* add errors instead of panic in state
* utilize errors wrap everywhere
* include bench
* added bench info
* equality test
* dup
* gaz
* use new hash tree root in state transition
* fix build
* separate test package
* three targets failign
* single target fails
* please test targets...pass for me
* revert
* Merge branch 'master' into new-ssz-state
* rev
* Merge branch 'new-ssz-state' of github.com:prysmaticlabs/prysm into new-ssz-state
* broken build
* Merge branch 'master' into new-ssz-state
* gaz
* Merge branch 'new-ssz-state' of github.com:prysmaticlabs/prysm into new-ssz-state
* ssz workspace
* master ssz
* Merge branch 'master' into new-ssz-state
* resolve conf
* resolve some conflicts and fix up broken file
* fix up build file issues and sync
* eth1 data votes included
* further abstractions, simplifications
* Merge branch 'master' into new-ssz-state
* gaz
* Merge branch 'new-ssz-state' of github.com:prysmaticlabs/prysm into new-ssz-state
* feature flag gating
* add field count test
* Merge branch 'master' into new-ssz-state
* resolving ivan feedback
* Merge branch 'new-ssz-state' of github.com:prysmaticlabs/prysm into new-ssz-state
* gaz
* Merge branch 'master' into new-ssz-state
* addressed
* Merge branch 'new-ssz-state' of github.com:prysmaticlabs/prysm into new-ssz-state
2019-12-04 19:20:33 +00:00
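The custom HashTreeRootState work above specializes SSZ merkleization for the beacon state. The core primitive is pairwise hashing of 32-byte chunks up to a single root; a minimal sketch of just that step (the real gains come from per-field caching, which this omits):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// merkleize hashes 32-byte chunks pairwise up to a single root, padding with
// zero chunks to the next power of two. This is only the basic SSZ
// merkleization step, not Prysm's cached per-field implementation.
func merkleize(chunks [][32]byte) [32]byte {
	n := 1
	for n < len(chunks) {
		n *= 2
	}
	layer := make([][32]byte, n)
	copy(layer, chunks)

	for len(layer) > 1 {
		next := make([][32]byte, len(layer)/2)
		for i := range next {
			next[i] = sha256.Sum256(append(layer[2*i][:], layer[2*i+1][:]...))
		}
		layer = next
	}
	return layer[0]
}

func main() {
	root := merkleize([][32]byte{sha256.Sum256([]byte("a")), sha256.Sum256([]byte("b"))})
	fmt.Printf("%x\n", root)
}
```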
terence tsao
f75a5a5df8 Implement Atts Pool CRUD Methods (#4191)
* New pool
* Better namings
* Fmt
* Gazelle
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into define-pool
* Raul's feedback
* Raul's feedback
* Update to use go-cache
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into define-pool-1
* Update workspace
* Update workspace
* Update pool to use interface
* Move kv init methods
* CRUD for aggregated
* CRUD for unaggregated
* Gaz
* Tests for aggregated
* Fixed test
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into curd
* Minor fixes
* Typo
* pool test
* Added deletions as well
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into curd
* Update beacon-chain/operations/attestations/kv/aggregated.go
* Update beacon-chain/operations/attestations/kv/aggregated.go
* Update beacon-chain/operations/attestations/kv/unaggregated_test.go
* Update beacon-chain/operations/attestations/kv/kv.go
2019-12-04 18:30:45 +00:00
shayzluf
ae8df9c32b Slasher optimization (#4172)
* size

* batching and concurrency improvements

* gaz

* merge fixes

* fix comment

* fix test

* fix test

* fix build

* ethpb

* ethpb

* fix test

* fix comment

* add benchmark

* fix benchmark
2019-12-04 12:09:38 +05:30
Raul Jordan
90cbe49496 Implement GetValidator RPC Endpoint (#4188)
* include new patch targeting latest ethapis master
* ensure project builds
* Merge branch 'master' into update-all-api
* fix up committees
* Merge branch 'update-all-api' of github.com:prysmaticlabs/prysm into update-all-api
* include latest eth apis
* Merge branch 'master' into update-all-api
* update block tests
* Merge branch 'update-all-api' of github.com:prysmaticlabs/prysm into update-all-api
* Merge branch 'master' into update-all-api
* add todos
* implement get validator rpc
* add test for get validator
* table driven test
* fix up test
* fix confs
* tests for more cases
* fix up tests and add out of range
2019-12-04 00:33:34 +00:00
Raul Jordan
c31f46d973 Update EthereumAPIs (#4186)
* include new patch targeting latest ethapis master
* ensure project builds
* Merge branch 'master' into update-all-api
* fix up committees
* Merge branch 'update-all-api' of github.com:prysmaticlabs/prysm into update-all-api
* include latest eth apis
* Merge branch 'master' into update-all-api
* update block tests
* Merge branch 'update-all-api' of github.com:prysmaticlabs/prysm into update-all-api
* Merge branch 'master' into update-all-api
* add todos
2019-12-03 23:44:58 +00:00
terence tsao
83781d0b74 Attestation pool to use go-cache (#4187) 2019-12-03 15:07:44 -08:00
Jim McDonald
6488b0527c Log peer connected log for incoming connections (#4173)
* Log peer connected log for incoming connections
* Merge branch 'master' into peerconnected
* Merge branch 'master' into peerconnected
* Update handshake.go
* Update handshake.go
* Merge branch 'master' into peerconnected
* Merge branch 'master' into peerconnected
2019-12-03 22:37:49 +00:00
terence tsao
eeb8779cfc New attestation pool (#4185)
* New pool
* Better namings
* Fmt
* Gazelle
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into define-pool
* Raul's feedback
* Raul's feedback
2019-12-03 22:04:11 +00:00
Raul Jordan
f40bbb92d1 Resolve Broken Active Changes for Validators (#4182)
* fix issues with exited validator indices
* tests pass for validator active set changes and exited keys
* Merge branch 'master' into cached-active-changes
* resolve archive test
* Merge branch 'cached-active-changes' of github.com:prysmaticlabs/prysm into cached-active-changes
* Merge branch 'master' into cached-active-changes
* Merge branch 'master' into cached-active-changes
2019-12-03 21:34:52 +00:00
Nishant Das
4f0bef929f Change BLS to Herumi Again (#4181)
* change to herumi's bls
* change alias
* change to better
* add benchmark
* build
* change to bazel fork
* fix prefix
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into herumiBLS
* make it work with library
* update to latest
* change again
* add import
* update to latest
* add sha commit
* new static lib with groups swapped
* using herumis new lib
* fix dep paths in c headers
* update again
* new changes
* fix commit
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into herumiBLS
* fix serialization
* comment
* fix test
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into herumiBLS
* fix to herumis latest version
* fix test
* fix benchmarks
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into herumiBLS
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into herumiBLS
* add new workspace
* change commit and remove init
* get test to pass
* remove parameter
* remove reverse byte order
* make gazelle happy
* set pure to off
* fix failing tests
* Merge branch 'master' into herumiBLS
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into herumiBLS
* Merge branch 'herumiBLS' of https://github.com/prysmaticlabs/geth-sharding into herumiBLS
* remove old ref
* use HashWithDomain functions
* update to latest version
* clean up
* gaz
* add back removed code
* switch off pure
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into herumiBLS
* use local repo
* resolve docker issues
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into herumiBLS
* fix build and tests
* gaz
* Merge branch 'master' into herumiBLS
* Merge refs/heads/master into herumiBLS
* Merge refs/heads/master into herumiBLS
2019-12-03 20:29:05 +00:00
Raul Jordan
81a83cf100 Implement Chain Head Stream & Naming Consistency (#4160)
* include stream chain head mock
* uncomment test
* stream chain head implemented
* remove imports
* chain head stream test
* include stream test with mockgen
* test now passes
* checkin items
* stream tests all passing
* rem learn
* fix up fork checker
* add stream ctx
* gaz, fix test
* fix broken test
* Merge branch 'master' into chain-head-stream
* include context in chain head stream happy path test
* Merge branch 'master' into chain-head-stream
* Merge branch 'master' into chain-head-stream
* Merge refs/heads/master into chain-head-stream
* Merge refs/heads/master into chain-head-stream
2019-12-03 19:48:11 +00:00
terence tsao
8bbc589edd Spans and check type (#4164)
* Spans and check type
* Typos
* Remove type checks
* Fixed a test bug
* Merge branch 'master' into subs-fixes
* Merge branch 'master' into subs-fixes
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into subs-fixes
* Revert back type assertions
* Merge branch 'master' into subs-fixes
* Merge branch 'subs-fixes' of https://github.com/prysmaticlabs/prysm into subs-fixes
* Merge branch 'master' into subs-fixes
2019-12-03 19:15:01 +00:00
Preston Van Loon
32245a9062 Deprecate --init-sync-no-verify, make it the default (#4179)
* deprecated --init-sync-no-verify, make it the default
* Merge branch 'master' into deprecate-init-sync-verify-flag
* add more flag info
* Merge branch 'deprecate-init-sync-verify-flag' of github.com:prysmaticlabs/prysm into deprecate-init-sync-verify-flag
* gofmt
* Merge refs/heads/master into deprecate-init-sync-verify-flag
* Merge refs/heads/master into deprecate-init-sync-verify-flag
2019-12-03 18:46:04 +00:00
Nishant Das
28c4f28d32 Add Strict Connection Manager (#4110)
* add forked connMgr
* gaz
* add license header
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into connMgr
* add conn manager test
* gaz
* fix connManager
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into connMgr
* gaz
* remove todo
* add new dep
* lint
* lint
* lint
* space
* visibility
* Merge branch 'master' into connMgr
* Merge branch 'master' into connMgr
* Merge refs/heads/master into connMgr
2019-12-03 18:18:57 +00:00
Peter Pratscher
a2d4701f6e Update API to return empty next page token on last page response (#4176)
* Update API to return empty next page token on last page response
* Update tests
* Merge branch 'master' into api-return-empty-page-token-on-last-page
2019-12-03 17:45:43 +00:00
Nishant Das
8e4022f8aa Remove Outdated Proto Files (#4178)
* remove gateway folder
* update att container
* Merge branch 'master' into removeProtoFiles
* Merge refs/heads/master into removeProtoFiles
2019-12-03 16:45:20 +00:00
terence tsao
42e766e909 Fix on participation RPC return (#4171)
* Edit returning epoch
* Merge branch 'master' into fix-participation-typos
2019-12-03 16:23:03 +00:00
Nishant Das
a686be8bd0 Revert "Revert "Update Pending Queue (#4066)" (#4101)" (#4168)
This reverts commit 7a9c297206.
2019-12-03 07:56:04 -08:00
Nishant Das
e3c3dea5d2 Regenerate Missing Proto Files (#4163)
* regen missing proto files
* Merge branch 'master' into regenProto
2019-12-02 23:19:37 +00:00
Ivan Martinez
7754cfb6c6 Reduce amount of time for benchmark tests (#4166)
* Change benchmarks to use different cache

* Fix bench tests and cache

* Add back sig check for test
2019-12-02 15:54:57 -05:00
Andrei Ivasko
f55a380ade Deposit testing (#4043)
* debugging...
* debugging... feedback required
* moved sendDeposits_test to powchain package
* need some guidance to proceed further
* further guidance needed
* match depositData to depositEvent
* debugging validating merkle root
* fixed compile error
* test passed for a single deposit
* Unable verify deposit merkle branch
* fix test
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into AndreisPR
* ready for review
* Merge branch 'master' into deposit-testing
* Merge branch 'master' into deposit-testing
* applied requested changes
* Merge branch 'master' into deposit-testing
2019-12-01 22:23:55 +00:00
Preston Van Loon
9a317ffc0f Update tracing dependencies (#4158)
* update tracing deps
2019-12-01 06:01:53 +00:00
Raul Jordan
3be4894b8a Update Ethereum APIs, Allow Genesis Data Retrieval for Blocks + Attestations (#4150)
* update apis

* include block filter genesis

* genesis atts

* add in workspace file

* include proper diff targeting master of ethereum apis

* genesis block fetching fixes

* remove fmt

* tests for genesis list blocks passing

* fixed up container tests

* tests now passing

* fix up tests
2019-11-30 22:30:48 -06:00
Jim McDonald
646411b881 Log connections (#4143)
* Log connections
* Merge branch 'master' into logconnection
* Merge branch 'master' into logconnection
* Merge branch 'master' into logconnection
2019-12-01 01:37:42 +00:00
Jim McDonald
0e99e4af4f Update lastUpdated on peerstatus.Set() (#4152)
* Update lastUpdated on Set()
* Merge branch 'master' into lastupdated
* gazelle
* Merge branch 'master' into lastupdated
* Merge branch 'master' into lastupdated
2019-11-30 19:47:08 +00:00
terence tsao
e87337a97a Update forkchoice spec link to v0.9.0 (#4147)
* Update forkchoice doc link to v0.9.0
* Merge refs/heads/master into update-link
2019-11-30 05:48:18 +00:00
Jim McDonald
53523b3eef Implement ListPeers API call (#4151)
* update ethereumapis from https://github.com/prysmaticlabs/ethereumapis/pull/55
* add stub for https://github.com/prysmaticlabs/prysm/issues/4141
* Add ListPeers API call
* Merge
* Add comment for exported method
* Fix visibility of new peers package.
* Merge branch 'master' into peersapi
2019-11-30 05:36:02 +00:00
terence tsao
5ec02b28a5 Remove pruned states check (#4153)
* Removed already pruned check
* Tested run time
2019-11-30 05:07:13 +00:00
Raul Jordan
1620290305 Check in archive.pb.go (#4148)
* gen archive.pb
* Merge branch 'master' into regen-protos
2019-11-29 18:28:21 +00:00
Preston Van Loon
fc171434c5 Update README.md to reflect spec version (#4146)
* Update README.md
2019-11-29 17:15:14 +00:00
Preston Van Loon
b08f3f760d Update ethereumapis (#4142)
* update ethereumapis from https://github.com/prysmaticlabs/ethereumapis/pull/55
* add stub for https://github.com/prysmaticlabs/prysm/issues/4141
2019-11-29 16:44:51 +00:00
terence tsao
7495961d6b Prune boundary state (#4139)
* Delete epoch boundary slot of last finalized epoch
* Case to cover start slot is skipped
* Test
* Feature flag
* feature gate the new functionality only
* Update DB for migration
* Test
* Fmt
* Fixed test
* Gazelle
2019-11-28 23:05:47 +00:00
Preston Van Loon
4dbf68b50c Fix log message from PR #4130 (#4136)
* fix log message from PR #4130
* Merge refs/heads/master into fix-log-2
2019-11-27 23:49:47 +00:00
Raul Jordan
e24b060eb6 README for Third Party Directory (#4134)
* begin readme
* add common bugs
* include more details for third party readme
* patch diff
* add readme
* complete readme
* Merge branch 'master' into third-party-readme
* rev
* Merge branch 'third-party-readme' of github.com:prysmaticlabs/prysm into third-party-readme
* revert
* Update third_party/README.md
* Update third_party/README.md
2019-11-27 23:27:13 +00:00
terence tsao
e90358cd8e Removed unused mocks (#4135)
* Removed unused mocks

* Lint

* Gaz
2019-11-27 14:52:24 -06:00
Nishant Das
80865ff3f2 Account for Skipped Slots When Requesting for Blocks (#4130)
* add check

* Update beacon-chain/sync/initial-sync/round_robin.go
2019-11-27 11:18:18 -06:00
Jim McDonald
60469ec7ee Avoid crash if peer goes missing (#4115)
* Migrate ChainStarted and StateInitialized to state notifier
* Provide state notifier to powchain service
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Remove commented line
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge remote-tracking branch 'upstream/master'
* Merge branch 'master' of github.com:mcdee/prysm
* Accept err from HeadState() as non-fatal
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge remote-tracking branch 'upstream/master'
* Merge branch 'master' of github.com:mcdee/prysm
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Do not crash if peer goes missing
* Additional catches
* Merge branch 'master' into rrfix
* Use single refresh time
* Merge branch 'master' into rrfix
2019-11-27 16:00:59 +00:00
Jim McDonald
67be8bd4f0 Mirror run definitions in build (#4129)
* Migrate ChainStarted and StateInitialized to state notifier
* Provide state notifier to powchain service
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Remove commented line
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge remote-tracking branch 'upstream/master'
* Merge branch 'master' of github.com:mcdee/prysm
* Accept err from HeadState() as non-fatal
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge remote-tracking branch 'upstream/master'
* Merge branch 'master' of github.com:mcdee/prysm
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Mirror run arguments in build
* Reset ssz to mainnet for testing
2019-11-27 15:34:57 +00:00
Nishant Das
3682bf1cda Check Best Peer Before Syncing (#4128)
* add check
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into checkBestPeer
* Merge refs/heads/master into checkBestPeer
2019-11-27 06:56:02 +00:00
Preston Van Loon
e203f66fe0 DB Improvements: Snappy compression, remove some unnecessary batch / goroutines (#4125)
* do not use batch for SaveAttestations
* use snappy compression
* Encode / decode everything with snappy
* Add snappy migration path
* batch is probably fine...
* fix test
* gofmt
* Merge branch 'master' of github.com:prysmaticlabs/prysm into remove-batch-attestations
* add sanity check
* remove that thing
* gaz
* Merge branch 'master' of github.com:prysmaticlabs/prysm into remove-batch-attestations
2019-11-27 06:32:56 +00:00
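The DB change above snappy-compresses values on write and decompresses them on read. A round-trip sketch using github.com/golang/snappy's block format; the SSZ/proto marshalling and the migration path for previously uncompressed values are left out:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

// encode compresses a serialized object before it is written to the
// key/value store; decode reverses it on read.
func encode(serialized []byte) []byte {
	return snappy.Encode(nil, serialized)
}

func decode(compressed []byte) ([]byte, error) {
	return snappy.Decode(nil, compressed)
}

func main() {
	original := []byte("a serialized beacon block")
	enc := encode(original)
	dec, err := decode(enc)
	fmt.Printf("compressed %d -> %d bytes, err=%v, round-trip ok=%v\n",
		len(original), len(enc), err, string(dec) == string(original))
}
```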
terence tsao
04df922ac9 Add votes to tree graph (#4127)
* Added block tree tool
* Gaz
* Updated workspace
* Playing around
* Adding votes
* Votes work
* Comments
* Gaz
* Add tools to subpackage
* Merge branch 'master' into block-tree-tool-1
2019-11-27 06:07:52 +00:00
Raul Jordan
0326be86b5 Apply Patch Rules to Use EthereumAPIs Generated Protos in Prysm (#4112)
* starting on patch
* finish determining all required patches
* properly redefine the patch rules
* new patch
* rem double semicolon
* fix patch file
* Merge branch 'master' of github.com:prysmaticlabs/prysm into deprecate-eth-protos
* building the deps
* test target passes using ethereumapis
* compile gateway
* attempting to build everything
* e2e use ethereumapis
* more fixes for slasher
* other item
* getting closer to compiling slasher
* build slasher package
* Merge branch 'master' into deprecate-eth-protos
* Merge branch 'master' into deprecate-eth-protos
* fix benches
* lint gazelle
* Merge branch 'deprecate-eth-protos' of github.com:prysmaticlabs/prysm into deprecate-eth-protos
* proper gateway
* lint
* Merge branch 'master' into deprecate-eth-protos
* fix build
* Merge branch 'deprecate-eth-protos' of github.com:prysmaticlabs/prysm into deprecate-eth-protos
* use swag
* resolve
* ignore change
* include new patch changes
* fix test
* builds
* fix e2e
* gaz
2019-11-27 05:08:18 +00:00
Nishant Das
a7ccd52a95 Save Deposit Contract Address (#4114)
* save contract address
* Update beacon-chain/node/node.go
* Merge branch 'master' into saveContract
* Merge refs/heads/master into saveContract
2019-11-26 21:01:56 +00:00
terence tsao
1ced4754db Add signatures to logs (#4095)
* Enhance logging with sig
* Fixed
* Merge branch 'master' into add-sig
* Merge branch 'master' into add-sig
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into add-sig
* signature via debug
* Merge branch 'add-sig' of https://github.com/prysmaticlabs/prysm into add-sig
* Merge branch 'master' into add-sig
* Merge branch 'master' into add-sig
* Merge branch 'master' into add-sig
2019-11-26 20:36:18 +00:00
terence tsao
b872f74fd3 Do not save duplicated indices (#4118)
* Added a duplication test

* Refactor

* Updated test

* Do not save dups for indices bucket
2019-11-26 13:26:35 -06:00
Ivan Martinez
c1c48a8af5 Create Benchmarks Package for State Transition (#3688)
* Begin benchmarks file for block processing
* Complete block processing benchmarks
* Begin epoch benchmarks
* Write most of epoch benchmarks
* Start config
* Make cases for max conditions
* Begin work on benchmarking doc
* Update benchmark numbers
* Complete epoch benchmarks
* Minor changes
* Make createFullBlock function
* Clean up block benchmarks
* Begin fixing merge issues
* Start adding 4M benchmarks
* Almost finish epoch benchmarks
* Test blocks under real life conditions
* More progress on benchmarks
* Fixes
* Fix benchmark errors
* Begin fixing benchmarks
* More progress on tests
* Complete epoch benchmarks
* More progress on block benches
* Finish epoch benchmarks, get progress on block benchmarks
* Undo unneeded changes
* Fix
* Fix block benchmarks
* Complete block benchmarks
* Finish block benchmarks
* Complete benchmarks
* Increase block benchmarks to 65536
* Fix everything
* Reset configs after benchmarks
* Fix logging and suggestions
* Fix comments
* Fix benchmarks after merge
* Fix merge issues
* Add sanity tests for benchmark
* Make sanity check simpler
* Begin fixing after merge
* Add log
* Remove extra line
* Remove unneeded change
* Finally get block benchmarks to pass
* Begin fixing epoch test
* Finetuning constants
* Revert "Finetuning constants"

This reverts commit a872790d67.
* Finetuning
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Add benches for helper functions
* Abstract block generation to testutil
* Create block generation util in testutil
* Gazelle
* Fix deps
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into block-util
* Fix imports
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into block-util
* Merge branch 'master' into block-util
* Change tests to use config and fix integer division
* Merge branch 'block-util' of https://github.com/0xKiwi/prysm into block-util
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into block-util
* Remove logs
* Fix build
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Merge branch 'master' into block-util
* Add test to ensure finalization occurs
* Add check for finalization
* Merge branch 'block-util' of https://github.com/0xKiwi/prysm into block-util
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into block-util
* Add comment for incrementing the state
* Fix test
* Fix test
* Merge branch 'master' into block-util
* Fix testutil use
* Fix tests
* Change var name
* Merge branch 'master' into block-util
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Merge branch 'block-util' of https://github.com/0xKiwi/prysm into new-benchmarks
* Begin cleaning benchmarks
* Get some numbers going
* Use state saved to disk
* Remove cruft
* Cleanup
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Fix merge arrows
* Set up block util and benchmarks for 128 attestations
* Use intended config for benchmark
* Add more benchmark functions
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Add benchmark epoch and modify block gen config to exclude signing
* Cleanup
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Begin unstaleling
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Update block gen util to v0.9 changes
* Prepare benchmarks to use marshalled files
* Cleanup block gen tool some more
* split up into file generation and benchmarking
* Remove logrus
* Merge branch 'master' into new-benchmarks
* Get benchmarks work, start work on process epoch benchmark
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Merge branch 'new-benchmarks' of https://github.com/0xKiwi/prysm into new-benchmarks
* All benchmarks working
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Fix after merge
* Cleanup
* Add bazel target
* Added TestBenchmarkExecuteStateTransition_WithCache
* Change tests to use SSZ and begin making binary
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Merge branch 'new-benchmarks' of https://github.com/0xKiwi/prysm into new-benchmarks
* bazel binary
* Fully change to binary
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Create go_binary to handle benchmark files
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Gofmt
* Remove genesis state from generated files
* Fix tests
* Gazelle
* Fix tests
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Fix block util
* Allow attestations to be in future for block util
* Fix inclusion delay issue
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Finally fix test
* Add README detailing usage and results
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Change test to run with bazel test
* Fix imports
* Merge branch 'master' into new-benchmarks
* Accidentally removed  config change
* Merge branch 'new-benchmarks' of https://github.com/0xKiwi/prysm into new-benchmarks
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-benchmarks
* Move to core/state/
* Update readme
* Gazelle
* Remove test for cached block
2019-11-26 18:09:57 +00:00
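The benchmarks package above follows the usual Go testing.B pattern: load pre-generated fixtures outside the timed loop, reset the timer, then run the transition b.N times. A compilable sketch with placeholder types standing in for the real BeaconState and block fixtures loaded from SSZ files:

```go
package benchmarks

import "testing"

// Placeholder types and helpers so the sketch compiles; these stand in for a
// pre-generated BeaconState and a full block read from disk.
type state struct{ slot uint64 }
type block struct{ slot uint64 }

func loadState(testing.TB) *state { return &state{slot: 0} }
func loadBlock(testing.TB) *block { return &block{slot: 1} }

func processBlock(st *state, blk *block) (*state, error) {
	st.slot = blk.slot // stand-in for the real state transition
	return st, nil
}

// BenchmarkProcessBlock shows the shape of the state-transition benchmarks:
// fixtures are loaded once, the timer is reset, and only the transition runs
// inside the measured loop.
func BenchmarkProcessBlock(b *testing.B) {
	st := loadState(b)
	blk := loadBlock(b)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := processBlock(st, blk); err != nil {
			b.Fatal(err)
		}
	}
}
```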
Nishant Das
b88e6dc918 Speed Up Block Processing In Sync (#4075)
* fix proto
* make them non-batched
* gate behind flag
* fix refs
* fix refs
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into speedUpProcessing
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into speedUpProcessing
* use global archiver flags
* lint
* Merge branch 'master' into speedUpProcessing
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into speedUpProcessing
* preston's review
* Merge branch 'speedUpProcessing' of https://github.com/prysmaticlabs/geth-sharding into speedUpProcessing
* Merge branch 'master' into speedUpProcessing
* Merge branch 'master' into speedUpProcessing
* Merge branch 'master' into speedUpProcessing
2019-11-26 07:15:54 +00:00
terence tsao
3868837471 Tool to dump graphviz data for block tree (#4108) 2019-11-25 21:06:25 -08:00
Preston Van Loon
60b1596c4d Lower conn mgr grace period to 1s (#4109)
* Update options.go
2019-11-26 01:32:10 +00:00
Raul Jordan
4f0dcd5e6e Prevent Requesting Current Epoch for Validator Participation (#4104)
* cannot request current epoch
* test for prev epoch instead
* Merge branch 'master' into no-curr-epoch-participation
* Merge branch 'master' into no-curr-epoch-participation
2019-11-25 23:31:21 +00:00
Preston Van Loon
ac405c714f Enforce --p2p-max-peers (#4106)
* Enforce p2p-max-peers
* high == low
2019-11-25 18:55:20 +00:00
Nishant Das
7d0e5a9dc4 Use Latest Vote Map (#4102)
* add latest vote map
* fix all tests
* remove db crud methods
* Merge branch 'master' into latestVoteMap
* preston's review
* Merge branch 'latestVoteMap' of https://github.com/prysmaticlabs/geth-sharding into latestVoteMap
2019-11-25 16:34:20 +00:00
Raul Jordan
feb1267fee Properly Return Finalized Epoch in GetValidatorParticipation Archival Endpoint (#4091)
* properly handle retrieving archived finalized epochs
* test passes for determining if epoch finalized
* Merge branch 'master' into archive-finality
* Merge refs/heads/master into archive-finality
* Merge refs/heads/master into archive-finality
* Merge refs/heads/master into archive-finality
* Merge refs/heads/master into archive-finality
* Merge refs/heads/master into archive-finality
* Merge branch 'master' into archive-finality
* prevent setup panic
* Merge branch 'archive-finality' of github.com:prysmaticlabs/prysm into archive-finality
* Merge refs/heads/master into archive-finality
2019-11-25 15:26:32 +00:00
Nishant Das
7a9c297206 Revert "Update Pending Queue (#4066)" (#4101)
This reverts commit a264a097cc.
2019-11-24 21:05:51 -08:00
terence tsao
21deed0fb7 Revert "Add Lock When Accessing Checkpoints" (#4094)
* Revert "Add Lock When Accessing Checkpoints (#4086)"

This reverts commit 2f392544a6.
* Merge branch 'master' into revert-4086-checkpointLock
2019-11-24 17:12:56 +00:00
terence tsao
627791c54e Complete finalization metrics (#4096)
* Complete finalization metrics

* Fixed test
2019-11-23 17:12:29 -08:00
Jim McDonald
3358bde42d Feedfixes (#4093)
* Migrate ChainStarted and StateInitialized to state notifier
* Provide state notifier to powchain service
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Remove commented line
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge remote-tracking branch 'upstream/master'
* Merge branch 'master' of github.com:mcdee/prysm
* Accept err from HeadState() as non-fatal
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge remote-tracking branch 'upstream/master'
* Merge branch 'master' of github.com:mcdee/prysm
* Explicit unsubscribes from state channels where required
2019-11-23 11:15:02 +00:00
Jim McDonald
9e45cffabc Move StateInitialized and ChainStarted to state feed (#4084)
* Migrate ChainStarted and StateInitialized to state notifier
* Provide state notifier to powchain service
* Merge remote-tracking branch 'upstream/master'
* Merge remote-tracking branch 'upstream/master'
* Remove commented line
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge remote-tracking branch 'upstream/master'
* Merge branch 'master' of github.com:mcdee/prysm
* Accept err from HeadState() as non-fatal
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge branch 'master' into master
2019-11-23 03:35:47 +00:00
terence tsao
2c8ff7b36f Update attester to wait till one-third (#4090)
* Update to 1/3
* Use tag
* Test
* Fixed test
* Merge branch 'master' into update-1/3
* Merge branch 'master' into update-1/3
2019-11-23 02:23:20 +00:00
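The attester timing change above waits until one third of the way into the slot before attesting. A sketch of that calculation, assuming mainnet's 12-second slots; Prysm's own slot-timing helpers are not shown:

```go
package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // mainnet config value

// oneThirdIntoSlot returns the wall-clock time one third of the way into the
// given slot, which is when an attester should act. genesis is the chain's
// genesis time.
func oneThirdIntoSlot(genesis time.Time, slot uint64) time.Time {
	slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	return slotStart.Add(secondsPerSlot * time.Second / 3)
}

func main() {
	genesis := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	t := oneThirdIntoSlot(genesis, 10)
	fmt.Println(t) // 4 seconds into slot 10
	// A validator would then block until that time:
	// time.Sleep(time.Until(t))
}
```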
terence tsao
a7ec0679b5 Implement wait till two thirds (#4089)
* Implement two thirds

* Test
2019-11-22 16:48:40 -06:00
Preston Van Loon
f717c5d852 Release --prune-finalized-states to all (#4082)
* Release --prune-finalized-states to all
* Merge branch 'master' into deprecate-ff-prune-finalized-states
* Merge branch 'master' of github.com:prysmaticlabs/prysm into deprecate-ff-prune-finalized-states
* Merge refs/heads/master into deprecate-ff-prune-finalized-states
* Merge refs/heads/master into deprecate-ff-prune-finalized-states
* Merge refs/heads/master into deprecate-ff-prune-finalized-states
* Merge refs/heads/master into deprecate-ff-prune-finalized-states
* Merge branch 'master' of github.com:prysmaticlabs/prysm into deprecate-ff-prune-finalized-states
2019-11-22 20:29:46 +00:00
Preston Van Loon
0cec0ee6c3 Release --optimize-process-epoch to all (#4080)
* Release optimize-process-epoch to all
* Merge branch 'master' into deprecate-ff-optimize-process-epoch
* Merge branch 'master' of github.com:prysmaticlabs/prysm into deprecate-ff-optimize-process-epoch
* Merge refs/heads/master into deprecate-ff-optimize-process-epoch
* Merge refs/heads/master into deprecate-ff-optimize-process-epoch
* Merge refs/heads/master into deprecate-ff-optimize-process-epoch
* Merge refs/heads/master into deprecate-ff-optimize-process-epoch
2019-11-22 09:40:53 +00:00
Nishant Das
2f392544a6 Add Lock When Accessing Checkpoints (#4086)
* fix data races
* Merge branch 'master' into checkpointLock
* Merge branch 'master' into checkpointLock
* Merge refs/heads/master into checkpointLock
2019-11-22 06:34:42 +00:00
Preston Van Loon
75ce8359eb Buildkite: Disable failing BES (#4087)
* Disable failing BES
2019-11-22 06:11:41 +00:00
terence tsao
f5cb04012e Aggregator selection from RPC to validator client (#4071)
* Config
* Updated proto
* Updated pool
* Updated RPC
* Updated validator client
* run time works
* Clean ups
* Fix tests
* Visibility
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into aggregator
* Raul's feedback
* Tests for RPC server
* Tests for validator client
* Span
* More tests
* Use go routine for SubmitAggregateAndProof
* Go routines
* Updated comments
* Use array of roles
* Fixed tests
* Build
* Update validator/client/runner.go

Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update validator/client/runner.go

Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* If
* Merge branch 'refactor-validator-roles' of https://github.com/prysmaticlabs/prysm into refactor-validator-roles
* Empty
* Feedback
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into aggregator
* Removed proto/eth/v1alpha1/shard_chain.pb.go?
* Cleaned up
* Revert
* Comments
* Lint
* Comment
* Merge branch 'master' into aggregator
2019-11-22 05:11:38 +00:00
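Aggregator selection follows the spec rule: hash the validator's slot signature and test the first eight bytes modulo committee_length / TARGET_AGGREGATORS_PER_COMMITTEE. A sketch of that check; a real slot signature would be a BLS signature over the slot rather than arbitrary bytes:

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

const targetAggregatorsPerCommittee = 16 // spec constant

// isAggregator reproduces the spec's selection rule: a validator aggregates
// when the low 8 bytes of hash(slot_signature), taken little-endian, are
// divisible by committee_length / TARGET_AGGREGATORS_PER_COMMITTEE.
func isAggregator(committeeLen uint64, slotSig []byte) bool {
	modulo := committeeLen / targetAggregatorsPerCommittee
	if modulo < 1 {
		modulo = 1
	}
	digest := sha256.Sum256(slotSig)
	return binary.LittleEndian.Uint64(digest[:8])%modulo == 0
}

func main() {
	fmt.Println(isAggregator(128, []byte("slot signature bytes")))
}
```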
Raul Jordan
f461d1e024 Resolve Panic in ListValidatorBalances RPC (#4051)
* adding default response
* regression test for archival
* include full regression test
* Merge branch 'master' into resolve-panic-rpc
* Merge branch 'master' into resolve-panic-rpc
* Merge branch 'master' into resolve-panic-rpc
* Merge branch 'master' into resolve-panic-rpc
* listbal
* Merge branch 'master' into resolve-panic-rpc
* Merge branch 'master' into resolve-panic-rpc
* Merge branch 'master' into resolve-panic-rpc
* Merge refs/heads/master into resolve-panic-rpc
2019-11-22 04:39:28 +00:00
Preston Van Loon
bdbd0aaeb8 Deprecate feature flag --scatter (#4079)
* deprecate --scatter. issue #4031
* forgot one for #4061
* use deprecatedUsage
* hidden
* Merge branch 'master' into deprecate-ff-scatter
* Merge branch 'master' into deprecate-ff-scatter
2019-11-22 04:08:49 +00:00
Preston Van Loon
715d06a215 gRPC Gateway: Emit JSON empty fields by default (#4085)
* Emit JSON empty fields by default
* Merge branch 'master' into emit-json-empty-fields
2019-11-22 03:36:47 +00:00
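Emitting empty JSON fields is a marshaler option on the gRPC gateway. A sketch assuming the grpc-gateway v1 runtime API (EmitDefaults on the JSONPb marshaler); registration of the generated gateway handlers is omitted:

```go
package main

import (
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Configure the gateway's JSON marshaler to emit zero-valued fields
	// instead of dropping them from responses.
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
			OrigName:     true,
			EmitDefaults: true, // include empty/default fields in JSON output
		}),
	)

	// Generated RegisterXxxHandler calls would be wired up here.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```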
terence tsao
976a3af637 Refactor validator roles into an array (#4081) 2019-11-21 14:35:20 -08:00
Raul Jordan
8f8d2d36c0 Filter ListValidators by Active in RPC (#4061)
* update workspace

* include active filter

* fix up latest changes to match naming

* better comments, fix evaluators

* latest master

* filter items

* filter only active validators
2019-11-21 14:29:24 -06:00
Nishant Das
a264a097cc Update Pending Queue (#4066)
* update queue

* fix test

* put this all in validate method

* remove ancestors too

* not needed

* terence's review

* period

* preston's review
2019-11-21 21:24:50 +08:00
shayzluf
4330839bc1 Add surround check to endpoint (#4065)
* first version of the watchtower api

* service files

* Begin work on grpc server

* More changes to server

* REnames and mock setup

* working test

* merge

* double propose detection test

* nishant review

* todo change

* gaz

* fix service

* gaz

* remove unused import

* gaz

* resolve circular dependency

* resolve circular dependency 2nd try

* remove package

* fix package

* fix test

* added tests

* gaz

* remove status check

* gaz

* remove context

* remove context

* change var name

* moved to rpc dir

* gaz

* remove server code

* gaz

* slasher server

* visibility change

* pb

* service update

* gaz

* slasher grpc server

* making it work

* setup db and start

* gaz

* service flags fixes

* grpc service running

* go imports

* remove new initializer

* gaz

* remove feature flags

* change back SetupSlasherDB

* fix SetupSlasherDB calls

* define err

* fix bad merge

* fix test

* fix imports

* fix imports

* fix imports

* add cancel

* comment stop

* fix cancel issue

* remove unneeded code

* bring back bad merge that removed TODO

* remove use of epoch as an input

* fixed slasher to be runnable again

* wait for channel close

* gaz

* small test

* flags fix

* fix flag order

* double vote detection

* remove source epoch from indexed attestation indices

* change server method to receive indexed attestation

* start implementation

* double vote detection

* proto

* pb

* fix comment

* add surround detection and retrieval to endpoint

* nishant review

* import fix

* fix miss order

* fix detection 0 case
added tests

* terence review
2019-11-21 12:41:23 +05:30
terence tsao
835418d1e3 Fix UpdateCommitteeCache slot (#4074) 2019-11-20 21:29:43 -08:00
Raul Jordan
ae07dc7962 Archive Data Even Through Skip Slots (#4054)
* red test first

* does not archive through skip slot

* test out at runtime

* underflow check

* fix tests

* rem info log
2019-11-19 23:53:28 -06:00
shayzluf
d071a0a90a Double vote detection (#4049)
* first version of the watchtower api

* service files

* Begin work on grpc server

* More changes to server

* REnames and mock setup

* working test

* merge

* double propose detection test

* nishant review

* todo change

* gaz

* fix service

* gaz

* remove unused import

* gaz

* resolve circular dependency

* resolve circular dependency 2nd try

* remove package

* fix package

* fix test

* added tests

* gaz

* remove status check

* gaz

* remove context

* remove context

* change var name

* moved to rpc dir

* gaz

* remove server code

* gaz

* slasher server

* visibility change

* pb

* service update

* gaz

* slasher grpc server

* making it work

* setup db and start

* gaz

* service flags fixes

* grpc service running

* go imports

* remove new initializer

* gaz

* remove feature flags

* change back SetupSlasherDB

* fix SetupSlasherDB calls

* define err

* fix bad merge

* fix test

* fix imports

* fix imports

* fix imports

* add cancel

* comment stop

* fix cancel issue

* remove unneeded code

* bring back bad merge that removed TODO

* remove use of epoch as an input

* fixed slasher to be runnable again

* wait for channel close

* gaz

* small test

* flags fix

* fix flag order

* double vote detection

* remove source epoch from indexed attestation indices

* change server method to receive indexed attestation

* start implementation

* double vote detection

* proto

* pb

* fix comment

* nishant review

* import fix

* Update slasher/db/indexed_attestations.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* terence feedback
2019-11-20 10:44:50 +05:30
Ivan Martinez
2d7802c637 Rename featureconfig.Flag to Flags (#4063) 2019-11-19 21:03:00 -06:00
terence tsao
fcb663acde Implement aggregation helpers (#4062)
* Aggregation helpers

* Tests

* Config

* Faulty test cases

* Err
2019-11-19 20:24:39 -06:00
Raul Jordan
858dbbf038 Update Ethereum APIs and Match Schemas (#4059)
* update workspace

* include active filter

* fix up latest changes to match naming

* better comments, fix evaluators

* latest master

* Update proto/eth/v1alpha1/beacon_chain.proto
2019-11-19 18:36:45 -06:00
Jim McDonald
49c2dd2cfc Move the state notifier to a different module (#4058)
* Move state notifier to statefeed

* Updates to state notifier

* Create state feed in beacon node

* Formatting
2019-11-19 16:15:48 -06:00
terence tsao
7a22e98c0f Update ChainHead (#4053)
* Can build

* All tests pass

* Update beacon-chain/blockchain/chain_info.go

* Fix context

* Update chainhead

* Tests

* Tests

* e2e

* Update ordering

* Typo

* Use root to get slot

* Division
2019-11-19 13:33:13 -06:00
terence tsao
26da7c4114 Nil state fallback in Blockchain.HeadState() (#4042)
* Can build

* All tests pass

* Update beacon-chain/blockchain/chain_info.go

* Fix context
2019-11-19 10:12:50 -06:00
Nishant Das
7acb45d186 add one more return (#4050) 2019-11-19 09:40:15 -06:00
Preston Van Loon
24a5000e47 return to exit select loop rather than break (#4040) 2019-11-19 21:22:45 +08:00
Jim McDonald
65d920e13a Add generic state feed (#4004)
* Initial implementation of state feed

* Add instructions on adding new events

* Tidy up log messages

* Tidy up mock

* Update beacon-chain/core/statefeed/events.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update beacon-chain/core/statefeed/events.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Remove unused BlockReceivedData

* Rename BlockHash to BlockRoot in BlockProcessedData

* Punctuation

* Use correct root for block processed event

* StateFeeder -> StateNotifier; fix up tests.

* Add Verified flag to BlockProcessed event

* Fix visibility in Bazel
2019-11-19 17:17:41 +08:00
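The generic state feed above gives every service one place to publish and subscribe to chain events (ChainStarted, StateInitialized, BlockProcessed). A minimal channel-based sketch of that pattern; Prysm's implementation is built on an event feed type rather than this hand-rolled one, and the event names here are only illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

// Event carries a type tag plus an arbitrary payload, so one feed can carry
// every kind of state notification.
type Event struct {
	Type int
	Data interface{}
}

// Feed fans events out to every subscribed channel.
type Feed struct {
	mu   sync.Mutex
	subs []chan Event
}

// Subscribe returns a channel that will receive future events.
func (f *Feed) Subscribe() <-chan Event {
	f.mu.Lock()
	defer f.mu.Unlock()
	ch := make(chan Event, 16)
	f.subs = append(f.subs, ch)
	return ch
}

// Send delivers an event to all subscribers, dropping it for slow ones so
// the notifier never blocks.
func (f *Feed) Send(e Event) {
	f.mu.Lock()
	defer f.mu.Unlock()
	for _, ch := range f.subs {
		select {
		case ch <- e:
		default:
		}
	}
}

func main() {
	const blockProcessed = 1
	var feed Feed
	sub := feed.Subscribe()
	feed.Send(Event{Type: blockProcessed, Data: "0xroot"})
	fmt.Println(<-sub)
}
```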
Preston Van Loon
d27d18b192 Update protobuf to v3.10.1 and rules_docker to v0.12.1 (#4048)
* update protobuf v3.10.1

* update rules_docker
2019-11-18 23:02:25 -08:00
Nishant Das
0e88085661 add step fix (#4047) 2019-11-19 12:51:40 +08:00
Preston Van Loon
3f6435ac80 Fix server side beacon blocks by range (#4046) 2019-11-18 19:56:37 -08:00
Preston Van Loon
64b69d9216 remove fully async from bes upload (#4044) 2019-11-19 10:57:34 +08:00
Preston Van Loon
13207a9de5 Improve validator status method (#4032)
* Cleanup validatorStatus

* gaz

* fix tests

* fix tests
2019-11-18 16:47:02 -08:00
Raul Jordan
ab756ec094 Return Empty Results Instead of Pagination Error in RPC + Prevent Future Epoch Requests (#4030)
* return empty if no attestations

* list balances proper response

* standardize epoch error

* future epoch error test

* no results test

* no results in list attestations

* test for list blocks no results

* cannot request future epoch for balances rpc

* test for no results in balances

* adding tests for get validator

* cannot request future in participation

* useless conditional

* resolve old epoch test

* completed failing tests

* fix request bug
2019-11-18 17:24:33 -06:00
terence tsao
499f05f34b Return error instead of logging (#4039) 2019-11-18 14:09:26 -08:00
Preston Van Loon
0077654fb5 Fix deleted branch from ethereumapis (#4034) 2019-11-18 13:14:49 -08:00
terence tsao
f8cac0fb41 RPC assignment Nil state check (#4033)
* State nil check and test

* One more check
2019-11-18 14:33:27 -06:00
shayzluf
607f086de9 Surround detection (#3967)
* min max span update logic

* add comment to exported method

* Update slasher/rpc/update_min_max_span.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/update_min_max_span.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/update_min_max_span.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/update_min_max_span_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/update_min_max_span.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/rpc/update_min_max_span.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* weak subjectivity error

* add context

* SlasherDb change to SlasherDB

* gaz

* raul feedback

* fix old problem

* gofmt goimports

* gaz

* import fix

* change order

* min max span detection

* added benchmark

* max diff without error

* Update slasher/rpc/detect_update_min_max_span_bench_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/db/indexed_attestations.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span_bench_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span_bench_test.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* raul feedback, benchmark fix

* raul feedback

* gaz

* fix merge

* bench fix

* another bench fix

* comments

* changed names of functions and proto

* name change fix

* name change fix

* fix test

* clarification comment

* change to interface

* Update proto/eth/v1alpha1/slasher.proto

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>

* Update slasher/rpc/detect_update_min_max_span.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span.go

Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>

* change order to reduce confusion

* Update proto/eth/v1alpha1/slasher.proto

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Apply suggestions from code review

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update slasher/rpc/detect_update_min_max_span.go

* Fix some comments

* terence feedback

* preston feedback

* fix test

* fix comments
2019-11-18 13:49:39 -06:00
Nishant Das
3b18aee181 Handle Missing Logs (#4012)
* make it non-recursive

* add new test case

* Update beacon-chain/powchain/log_processing.go

* Update beacon-chain/powchain/log_processing_test.go

* Update beacon-chain/powchain/log_processing_test.go

* Update beacon-chain/powchain/log_processing.go

* Update beacon-chain/powchain/log_processing.go

* standardize error
2019-11-18 12:34:34 -06:00
terence tsao
f43a7c67f2 Process attestation to use operation service's pool (#4014)
* Starting

* Routine working

* Single client working

* Fixed all the tests

* Lint

* Gazelle

* 12

* Tests
2019-11-18 11:19:03 -06:00
Nishant Das
199ddc6cdb Update To Latest Eth API (#4028)
* update to latest

* add container

* update to current eth repo

* fix test

* change to signing root

* gaz

* fix test

* fix test
2019-11-18 10:15:45 -06:00
Nishant Das
023dfebc73 Revert "Reverts the Revert (#4011)" (#4026)
This reverts commit c4ca8a47b3.
2019-11-17 22:48:22 -08:00
Nishant Das
53c4a26184 Optimize Processing Of Past Logs (#4015)
* add test and new code

* fix failing test

* better clean up

* change back to debug

* remove space
2019-11-17 10:16:20 -06:00
terence tsao
5acc362f7e End slot can't be greater than start slot (#4008) 2019-11-15 16:23:43 -08:00
Ivan Martinez
68edad13bc End To End Tests for Demo and Minimal config (#3932)
* Begin working on end to end tests using geth dev chain

* Start on beacon node set up

* More progress on bnode setup

* Complete flow until chainstart, begin work on evaluators

* More progress on evaluators

* Start changing bazel run to direct binary

* Move endtoend to inside beacon-chain

* use bazel provided geth, use bazel test

* tempdir

* use fork rules_go

* Change to use UUID dir and bazel binaries

* Truncate UUID a bit

* Get full run from chainstart to evaluating

* Rewrite to react to logs rather than arbitrarily wait

* Fix export

* Move evaluators to evaluators.go

* Add peer check test

* Add more comments

* Remove unneeded exports

* Check all nodes have the correct amount of peers

* Change name to onGenesisEpoch

* Remove extra wait times where not needed

* Cleanup

* Add log for beacon start

* Fix deposit amount

* Make room for eth1follow distnce

* Cleanup and fix minimal test

* Goimports

* Fix imports

* gazelle and minimal

* manual

* Fix for comments

* Make timing rely on reading logs, and cleanup

* Fix for comments

* Fix workspace

* Cleanup

* Fix visibility

* Cleanup and some comments

* Address comments

* Fix for v0.9

* Modify for v0.9

* Move to own package outside of beacon-chain

* Gazelle

* Polishing, logging

* Fix filenames

* Add more logs

* Add flag logging

* Cover for page not having libp2p info

* Improve multiAddr detection

* Add more logs

* Add missing flags

* Add log printing to defer

* Get multiAddr from logs

* Fix logging and detection

* Change evaluators to rely on EpochTimer

* Add evaluator for ValidatorParticipation

* Fix validator participation evaluator

* Cleanup, comments and fix participation calculation

* Cleanup

* Let the file searcher search for longer

* Change participation to check for full

* Log out file contents if text isnt found

* Split into different files

* Disable IPC and use RPC instead, change tmp dir to bazel dir

* Change visibility

* Gazelle

* Add e2e tag

* new line
2019-11-15 13:56:26 -05:00
shayzluf
bb2f329562 remove source epoch from indexed attestation indices (#4010) 2019-11-15 10:48:45 -06:00
Raul Jordan
5169209360 Properly Archive Active Set Changes (#4007)
* archiving information properly

* tests passing

* broken test fix
2019-11-15 10:45:02 -06:00
Nishant Das
c4ca8a47b3 Reverts the Revert (#4011)
* Revert "Revert "Change BLS Library to Herumi (#3752)" (#4006)"

This reverts commit 904898e405.

* turn it on

* make all docker images with cgo deps static

* change back

* fix build

* switch back

* address gateway

* fix library again
2019-11-15 10:27:23 -06:00
Nishant Das
904898e405 Revert "Change BLS Library to Herumi (#3752)" (#4006)
This reverts commit 24583864b4.
2019-11-14 13:00:50 -05:00
Raul Jordan
7f96fcc51b nil check in active set changes (#4005) 2019-11-15 00:21:30 +08:00
Nishant Das
24583864b4 Change BLS Library to Herumi (#3752)
* change to herumi's bls

* change alias

* change to better

* add benchmark

* build

* change to bazel fork

* fix prefix

* make it work with library

* update to latest

* change again

* add import

* update to latest

* add sha commit

* new static lib with groups swapped

* using herumis new lib

* fix dep paths in c headers

* update again

* new changes

* fix commit

* fix serialization

* comment

* fix test

* fix to herumis latest version

* fix test

* fix benchmarks

* add new workspace

* change commit and remove init

* get test to pass

* remove parameter

* remove reverse byte order

* make gazelle happy

* set pure to off

* fix failing tests

* remove old ref

* use HashWithDomain functions

* update to latest version

* clean up

* gaz

* add back removed code

* switch off pure
2019-11-14 09:51:42 -06:00
Celeste A.S
db9153e8e4 Local dev instructions added (#3980)
* Interop instructions added

Interop instructions have been merged to the main README in addition to a number of formatting adjustments

* Interop instruction adjustments

* Formatting adjustments

Changes to resolve PR comments
2019-11-13 15:43:38 -06:00
Raul Jordan
cd6e3e8a09 Productionize RPC Server Error Codes (#3994)
* carefully return grpc status codes in attester server

* import spacing

* work on status codes

* codes in validator

* most changes done

* gaz and imports

* done

* fix broken tests

* tests fixed
2019-11-13 15:03:12 -06:00
Preston Van Loon
fc7c530696 Use a data table for common power of 2 roots (#3995)
* use a data table for common power of 2 roots

* revert beacon-chain/rpc/proposer/server.go
2019-11-13 14:03:42 -06:00
Nishant Das
8f05f14b36 Validate Deposit Transactions (#3992)
* check deposit txs

* add comment

* gaz

* docker build

* Update tools/cluster-pk-manager/server/server.go

* Update tools/cluster-pk-manager/server/server.go
2019-11-13 10:31:57 -06:00
Jim McDonald
3b8701296b Avoid repeated hashing (#3981) 2019-11-14 00:03:27 +08:00
Raul Jordan
48f69c0762 better comment (#3990) 2019-11-13 23:37:23 +08:00
844 changed files with 75079 additions and 50154 deletions

.bazelrc

@@ -16,9 +16,169 @@ run --host_force_python=PY2
--experimental_sandbox_default_allow_network=false
# Use minimal protobufs at runtime
run --define ssz=minimal
run --define ssz=mainnet
test --define ssz=mainnet
build --define ssz=mainnet
# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
test --incompatible_strict_action_env
run --incompatible_strict_action_env
# Disable kafka by default, it takes a long time to build...
build --define kafka_enabled=false
test --define kafka_enabled=false
run --define kafka_enabled=false
# Release flags
build:release --workspace_status_command=./scripts/workspace_status.sh
build:release --stamp
build:release --compilation_mode=opt
# LLVM compiler for building C/C++ dependencies.
build:llvm --crosstool_top=@llvm_toolchain//:toolchain
build:llvm --define compiler=llvm
# multi-arch cross-compiling toolchain configs:
#-----------------------------------------------
build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain
build:cross --host_platform=@io_bazel_rules_go//go/toolchain:linux_amd64
build:cross --host_crosstool_top=@prysm_toolchains//:hostonly_toolchain
# linux_amd64 config for cross compiler toolchain, not strictly necessary since host/exec env is amd64
build:linux_amd64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64_cgo
# osx_amd64 config for cross compiler toolchain
build:osx_amd64 --config=cross
build:osx_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64_cgo
build:osx_amd64 --compiler=osxcross
# windows
build:windows_amd64 --config=cross
build:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64_cgo
build:windows_amd64 --compiler=mingw-w64
# linux_arm64 config for cross compiler toolchain
build:linux_arm64 --config=cross
build:linux_arm64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo
build:linux_arm64 --copt=-funsafe-math-optimizations
build:linux_arm64 --copt=-ftree-vectorize
build:linux_arm64 --copt=-fomit-frame-pointer
build:linux_arm64 --cpu=aarch64
build:linux_arm64 --compiler=clang
build:linux_arm64 --copt=-march=armv8-a
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s
# windows debug
build:windows_amd64_debug --config=windows_amd64
build:windows_amd64_debug --config=debug
# osx_amd64 debug config
build:osx_amd64_debug --config=debug
build:osx_amd64_debug --config=osx_amd64
# linux_arm64_debug
build:linux_arm64_debug --config=linux_arm64
build:linux_arm64_debug --config=debug
# linux_amd64_debug
build:linux_amd64_debug --config=linux_amd64
build:linux_amd64_debug --config=debug
# Docker Sandbox Configs
#-----------------------
# Note all docker sandbox configs must run from a linux x86_64 host
# build:docker-sandbox --experimental_docker_image=gcr.io/prysmaticlabs/rbe-worker:latest
build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
build:docker-sandbox --define=EXECUTOR=remote
build:docker-sandbox --experimental_docker_verbose
build:docker-sandbox --experimental_enable_docker_sandbox
build:docker-sandbox --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:docker-sandbox --host_javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --host_platform=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --extra_toolchains=@prysm_toolchains//:cc-toolchain-multiarch
# windows_amd64 docker sandbox build config
build:windows_amd64_docker --config=docker-sandbox --config=windows_amd64
build:windows_amd64_docker_debug --config=windows_amd64_docker --config=debug
# osx_amd64 docker sandbox build config
build:osx_amd64_docker --config=docker-sandbox --config=osx_amd64
build:osx_amd64_docker_debug --config=osx_amd64_docker --config=debug
# linux_arm64 docker sandbox build config
build:linux_arm64_docker --config=docker-sandbox --config=linux_arm64
build:linux_arm64_docker_debug --config=linux_arm64_docker --config=debug
# linux_amd64 docker sandbox build config
build:linux_amd64_docker --config=docker-sandbox --config=linux_amd64
build:linux_amd64_docker_debug --config=linux_amd64_docker --config=debug
# Remote Build Execution
#-----------------------
# Originally from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/bazel-2.0.0.bazelrc
#
# Depending on how many machines are in the remote execution instance, setting
# this higher can make builds faster by allowing more jobs to run in parallel.
# Setting it too high can result in jobs that timeout, however, while waiting
# for a remote machine to execute them.
build:remote --jobs=50
# Set several flags related to specifying the platform, toolchain and java
# properties.
# These flags should only be used as is for the rbe-ubuntu16-04 container
# and need to be adapted to work with other toolchain containers.
build:remote --host_javabase=@rbe_ubuntu_clang//java:jdk
build:remote --javabase=@rbe_ubuntu_clang//java:jdk
build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
# Platform flags:
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
build:remote --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
build:remote --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:remote --host_platform=@rbe_ubuntu_clang//config:platform
build:remote --platforms=@rbe_ubuntu_clang//config:platform
# Starting with Bazel 0.27.0 strategies do not need to be explicitly
# defined. See https://github.com/bazelbuild/bazel/issues/7480
build:remote --define=EXECUTOR=remote
# Enable remote execution so actions are performed on the remote systems.
# build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote --incompatible_strict_action_env=true
# Set a higher timeout value, just in case.
build:remote --remote_timeout=3600
# Enable authentication. This will pick up application default credentials by
# default. You can use --google_credentials=some_file.json to use a service
# account credential instead.
# build:remote --google_default_credentials=true
# Enable build without the bytes
# See: https://github.com/bazelbuild/bazel/issues/6862
build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
build:remote --remote_local_fallback
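
The named configurations defined above are selected on the Bazel command line with `--config`, and the `--define` values can be overridden the same way. A minimal sketch of how they compose, using the `//beacon-chain:beacon-chain` target referenced in the README below:

```text
# Stamped, optimized release build (the build:release flags above).
bazel build --config=release //beacon-chain:beacon-chain

# Cross-compile for linux_arm64 via the toolchain configs above.
bazel build --config=linux_arm64 //beacon-chain:beacon-chain

# Run the test suite with kafka support enabled (disabled by default above).
bazel test --define kafka_enabled=true //...
```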

.bazelversion (new file)

@@ -0,0 +1 @@
2.1.1

View File

@@ -11,11 +11,10 @@ build:remote-cache --strategy=Closure=standalone
build:remote-cache --strategy=Genrule=standalone
# Build results backend.
build:remote-cache --bes_results_url="https://source.cloud.google.com/results/invocations/"
build:remote-cache --bes_backend=buildeventservice.googleapis.com
build:remote-cache --bes_timeout=60s
build:remote-cache --project_id=prysmaticlabs
build:remote-cache --bes_upload_mode=fully_async
#build:remote-cache --bes_results_url="https://source.cloud.google.com/results/invocations/"
#build:remote-cache --bes_backend=buildeventservice.googleapis.com
#build:remote-cache --bes_timeout=60s
#build:remote-cache --project_id=prysmaticlabs
# Prysm specific remote-cache properties.
build:remote-cache --disk_cache=
@@ -46,3 +45,6 @@ build --stamp
test --local_test_jobs=2
# Disabled race detection due to unstable test results under constrained environment build kite
# build --features=race
# Enable kafka for CI tests only.
test --define kafka_enabled=true
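
As with the configs above, the `remote-cache` configuration in this file is picked up with `--config`; an illustrative CI-style invocation (the `//...` target pattern is taken from the README):

```text
bazel test --config=remote-cache //...
```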

.gitignore

@@ -4,6 +4,9 @@ bazel-*
.DS_Store
.swp
# Ignore VI/Vim swapfiles
.*.sw?
# IntelliJ
.idea
.ijwb
@@ -26,3 +29,6 @@ password.txt
# go dependency
/go.mod
/go.sum
# Dist files
dist

View File

@@ -31,27 +31,21 @@ alias(
alias(
name = "grpc_proto_compiler",
actual = "@io_bazel_rules_go//proto:gogofast_grpc",
visibility = [
"//proto:__subpackages__",
],
visibility = ["//visibility:public"],
)
# Protobuf gRPC compiler without gogoproto. Required for gRPC gateway.
alias(
name = "grpc_nogogo_proto_compiler",
actual = "@io_bazel_rules_go//proto:go_grpc",
visibility = [
"//proto:__subpackages__",
],
visibility = ["//visibility:public"],
)
# Protobuf gRPC gateway compiler
alias(
name = "grpc_gateway_proto_compiler",
actual = "@grpc_ecosystem_grpc_gateway//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
visibility = [
"//proto:__subpackages__",
],
visibility = ["//visibility:public"],
)
gometalinter(
@@ -143,3 +137,9 @@ common_files = {
),
tags = ["manual"],
) for pair in binary_targets]
toolchain(
name = "built_cmake_toolchain",
toolchain = "@rules_foreign_cc//tools/build_defs/native_tools:built_cmake",
toolchain_type = "@rules_foreign_cc//tools/build_defs:cmake_toolchain",
)

View File

@@ -45,10 +45,9 @@ Open up two terminal windows, run:
```
bazel run //beacon-chain -- \
--no-genesis-delay \
--bootstrap-node= \
--deposit-contract 0xD775140349E6A5D12524C6ccc3d6A1d4519D4029 \
--clear-db \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--force-clear-db \
--interop-num-validators 64 \
--interop-eth1data-votes
```
@@ -58,11 +57,10 @@ the system with 64 validators and the genesis time set to the current unix times
Wait a bit until your beacon chain starts, and in the other window:
```
bazel run //validator -- --interop-num-validators 64
bazel run //validator -- --keymanager=interop --keymanageropts='{"keys":64}'
```
This will launch and kickstart the system with your 64 validators performing their duties accordingly.
specify which keys
### Launching from `genesis.ssz`
@@ -70,10 +68,9 @@ Assuming you generated a `genesis.ssz` file with 64 validators, open up two term
```
bazel run //beacon-chain -- \
--no-genesis-delay \
--bootstrap-node= \
--deposit-contract 0xD775140349E6A5D12524C6ccc3d6A1d4519D4029 \
--clear-db \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--force-clear-db \
--interop-genesis-state /path/to/genesis.ssz \
--interop-eth1data-votes
```
@@ -81,13 +78,7 @@ Assuming you generated a `genesis.ssz` file with 64 validators, open up two term
Wait a bit until your beacon chain starts, and in the other window:
```
bazel run //validator -- --interop-num-validators 64
bazel run //validator -- --keymanager=interop --keymanageropts='{"keys":64}'
```
This will launch and kickstart the system with your 64 validators performing their duties accordingly.

README.md

@@ -1,206 +1,235 @@
# Prysm: Ethereum 'Serenity' 2.0 Go Implementation
# Prysm: An Ethereum 2.0 Client Written in Go
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![ETH2.0_Spec_Version 0.8.1](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v0.8.1-blue.svg)](https://github.com/ethereum/eth2.0-specs/commit/452ecf8e27c7852c7854597f2b1bb4a62b80c7ec)
[![ETH2.0_Spec_Version 0.9.3](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v0.9.3-blue.svg)](https://github.com/ethereum/eth2.0-specs/tree/v0.9.3)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/KSA7rPr)
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
This is the Core repository for Prysm, [Prysmatic Labs](https://prysmaticlabs.com)' [Go](https://golang.org/) implementation of the Ethereum protocol 2.0 (Serenity).
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 client specifications developed by [Prysmatic Labs](https://prysmaticlabs.com).
### Need assistance?
A more detailed set of installation and usage instructions as well as explanations of each component are available on our [official documentation portal](https://prysmaticlabs.gitbook.io/prysm/). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.
**Interested in what's next?** Be sure to read our [Roadmap Reference Implementation](https://github.com/prysmaticlabs/prysm/blob/master/docs/ROADMAP.md) document. This page outlines the basics of sharding as well as the various short-term milestones that we hope to achieve over the coming year.
A more detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.
### Come join the testnet!
Participation is now open to the public in our testnet release for Ethereum 2.0 phase 0. Visit [prylabs.net](https://prylabs.net) for more information on the project itself or to sign up as a validator on the network.
Participation is now open to the public for our Ethereum 2.0 phase 0 testnet release. Visit [prylabs.net](https://prylabs.net) for more information on the project or to sign up as a validator on the network.
# Table of Contents
- [Dependencies](#dependencies)
- [Installation](#installation)
- [Build Via Docker](#build-via-docker)
- [Build Via Bazel](#build-via-bazel)
- [Running an Ethereum 2.0 Beacon Node](#running-an-ethereum-20-beacon-node)
- [Staking ETH: Running a Validator Client](#staking-eth-running-a-validator-client)
- [Installation](#installing-prysm)
- [Build via Docker](#build-via-docker)
- [Build via Bazel](#build-via-bazel)
- [Connecting to the public testnet: running a beacon node](#connecting-to-the-testnet-running-a-beacon-node)
- [Running via Docker](#running-via-docker)
- [Running via Bazel](#running-via-bazel)
- [Staking ETH: running a validator client](#staking-eth-running-a-validator-client)
- [Activating your validator: depositing 3.2 Goerli ETH](#activating-your-validator-depositing-32-gerli-eth)
- [Starting the validator with Bazel](#starting-the-validator-with-bazel)
- [Setting up a local ETH2 development chain](#setting-up-a-local-eth2-development-chain)
- [Installation and dependencies](#installation-and-dependencies)
- [Running a local beacon node and validator client](#running-a-local-beacon-node-and-validator-client)
- [Testing Prysm](#testing-prysm)
- [Contributing](#contributing)
- [License](#license)
## Dependencies
Prysm can be installed either with Docker **(recommended method)** or using our build tool, Bazel. The below instructions include sections for performing both.
**For Docker installations:**
- The latest release of [Docker](https://docs.docker.com/install/)
Prysm can be installed either with Docker **\(recommended\)** or using our build tool, Bazel. The below instructions include sections for performing both.
**For Bazel installations:**
- The latest release of [Bazel](https://docs.bazel.build/versions/master/install.html)
- A modern UNIX operating system (MacOS included)
#### **For Docker installations:**
## Installation
* The latest release of [Docker](https://docs.docker.com/install/)
#### **For Bazel installations:**
* The latest release of [Bazel](https://docs.bazel.build/versions/master/install.html)
* The latest release of `cmake`
* The latest release of `git`
* A modern UNIX operating system \(macOS included\)
## Installing Prysm
### Build via Docker
1. Ensure you are running the most recent version of Docker by issuing the command:
```
```text
docker -v
```
2. To pull the Prysm images from the server, issue the following commands:
```
2. To pull the Prysm images, issue the following commands:
```text
docker pull gcr.io/prysmaticlabs/prysm/validator:latest
docker pull gcr.io/prysmaticlabs/prysm/beacon-chain:latest
```
This process will also install any related dependencies.
### Build via Bazel
1. Open a terminal window. Ensure you are running the most recent version of Bazel by issuing the command:
```
```text
bazel version
```
2. Clone this repository and enter the directory:
```
2. Clone Prysm's [main repository](https://github.com/prysmaticlabs/prysm) and enter the directory:
```text
git clone https://github.com/prysmaticlabs/prysm
cd prysm
```
3. Build both the beacon chain node implementation and the validator client:
```
3. Build both the beacon chain node and the validator client:
```text
bazel build //beacon-chain:beacon-chain
bazel build //validator:validator
```
Bazel will automatically pull and install any dependencies as well, including Go and necessary compilers.
4. Build the configuration for the Prysm testnet by issuing the commands:
## Connecting to the testnet: running a beacon node
```
bazel build --define ssz=minimal //beacon-chain:beacon-chain
bazel build --define ssz=minimal //validator:validator
```
Below are instructions for initialising a beacon node and connecting to the public testnet. To further understand the role that the beacon node plays in Prysm, see [this section of the documentation.](https://docs.prylabs.network/docs/how-prysm-works/architecture-overview/)
The binaries will be built in an architecture-dependent subdirectory of `bazel-bin`, and are supplied as part of Bazel's build process. To fetch the location, issue the command:
```
$ bazel build --define ssz=minimal //beacon-chain:beacon-chain
...
Target //beacon-chain:beacon-chain up-to-date:
bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain
...
```
In the example above, the beacon chain binary has been created in `bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain`.
## Running an Ethereum 2.0 Beacon Node
To understand the role that both the beacon node and validator play in Prysm, see [this section of our documentation](https://prysmaticlabs.gitbook.io/prysm/how-prysm-works/overview-technical).
**NOTE:** It is recommended to open up port 13000 on your local router to improve connectivity and receive more peers from the network. To do so, navigate to `192.168.0.1` in your browser and log in if required. Follow along with the interface to modify your router's firewall settings. When this task is completed, append the parameter `--p2p-host-ip=$(curl -s ident.me)` to your selected beacon startup command presented in this section to use the newly opened port.
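For illustration, the flag from the note above is simply appended to whichever startup command you choose from the sections that follow; a sketch using the Bazel invocation shown later in this README:
```text
bazel run //beacon-chain -- --clear-db --datadir=$HOME/prysm --p2p-host-ip=$(curl -s ident.me)
```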
### Running via Docker
**Docker on Linux/Mac:**
#### **Docker on Linux/macOS:**
To start your beacon node, issue the following command:
```
docker run -v $HOME/prysm-data:/data -p 4000:4000 \
--name beacon-node \
```text
docker run -it -v $HOME/prysm:/data -p 4000:4000 -p 13000:13000 --name beacon-node \
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
--no-genesis-delay \
--datadir=/data
```
(Optional) If you want to enable gRPC, then run this command instead of the one above:
The beacon node can be halted by either using `Ctrl+c` or with the command:
```
docker run -v $HOME/prysm-data:/data -p 4000:4000 -p 7000:7000 \
--name beacon-node \
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
--datadir=/data \
--no-genesis-delay \
--grpc-gateway-port=7000
```
You can stop the beacon node using `Ctrl+c` or with the following command:
```
```text
docker stop beacon-node
```
To restart the beacon node, issue the command:
To restart the beacon node, issue the following command:
```
```text
docker start -ai beacon-node
```
To delete a corrupted container, issue the command:
To delete a corrupted container, issue the following command:
```
```text
docker rm beacon-node
```
To recreate a deleted container and refresh the chain database, issue the start command with an additional `--force-clear-db` parameter:
To recreate a deleted container and refresh the chain database, issue the start command with an additional `--clear-db` parameter:
```
docker run -it -v $HOME/prysm-data:/data -p 4000:4000 --name beacon-node \
```text
docker run -it -v $HOME/prysm:/data -p 4000:4000 -p 13000:13000 --name beacon-node \
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
--datadir=/data \
--force-clear-db
--clear-db
```
**Docker on Windows:**
#### **Docker on Windows:**
1) You will need to share the local drive you wish to mount to to container (e.g. C:).
1. Enter Docker settings (right click the tray icon)
2. Click 'Shared Drives'
3. Select a drive to share
4. Click 'Apply'
1. You will need to 'share' the local drive you wish to mount to \(e.g. C:\).
1. Enter Docker settings \(right click the tray icon\)
2. Click 'Shared Drives'
3. Select a drive to share
4. Click 'Apply'
2. You will next need to create a directory named `/prysm/` within your selected shared Drive. This folder will be used as a local data directory for Beacon Node chain data as well as account and keystore information required by the validator. Docker will **not** create this directory if it does not exist already. For the purposes of these instructions, it is assumed that `C:` is your prior-selected shared Drive.
3. To run the beacon node, issue the following command:
2) You will next need to create a directory named ```/tmp/prysm-data/``` within your selected shared Drive. This folder will be used as a local data directory for Beacon Node chain data as well as account and keystore information required by the validator. Docker will **not** create this directory if it does not exist already. For the purposes of these instructions, it is assumed that ```C:``` is your prior-selected shared Drive.
4) To run the beacon node, issue the command:
```
docker run -it -v c:/tmp/prysm-data:/data -p 4000:4000 gcr.io/prysmaticlabs/prysm/beacon-chain:latest --datadir=/data
```text
docker run -it -v c:/prysm/:/data -p 4000:4000 -p 13000:13000 --name beacon-node gcr.io/prysmaticlabs/prysm/beacon-chain:latest --datadir=/data --clear-db
```
### Running via Bazel
1) To start your Beacon Node with Bazel, issue the command:
To start your Beacon Node with Bazel, issue the following command:
```text
bazel run //beacon-chain -- --clear-db --datadir=$HOME/prysm
```
bazel run //beacon-chain -- --datadir=/tmp/prysm-data
```
This will sync up the Beacon Node with the latest head block in the network. Note that the beacon node must be **completely synced** before attempting to initialise a validator client, otherwise the validator will not be able to complete the deposit and funds will be lost.
This will sync up the beacon node with the latest head block in the network.
## Staking ETH: Running a Validator Client
**NOTE:** The beacon node must be **completely synced** before attempting to initialise a validator client, otherwise the validator will not be able to complete the deposit and **funds will be lost**.
Once your beacon node is up, the chain will be waiting for you to deposit 3.2 Goerli ETH into the Validator Deposit Contract to activate your validator (discussed in the section below). First though, you will need to create a validator client to connect to this node in order to stake and participate. Each validator represents 3.2 Goerli ETH being staked in the system, and it is possible to spin up as many as you desire in order to have more stake in the network.
### Activating Your Validator: Depositing 3.2 Goerli ETH
## Staking ETH: Running a validator client
Using your validator deposit data from the previous step, follow the instructions found on https://prylabs.net/participate to make a deposit.
Once your beacon node is up, the chain will be waiting for you to deposit 3.2 Goerli ETH into a [validator deposit contract](https://docs.prylabs.network/docs/how-prysm-works/validator-deposit-contract) in order to activate your validator \(discussed in the section below\). First though, you will need to create this validator and connect to this node to participate in consensus.
It will take a while for the nodes in the network to process your deposit, but once your node is active, the validator will begin doing its responsibility. In your validator client, you will be able to frequently see your validator balance as it goes up over time. Note that, should your node ever go offline for a long period, you'll start gradually losing your deposit until you are removed from the system.
Each validator represents 3.2 Goerli ETH being staked in the system, and it is possible to spin up as many as you desire in order to have more stake in the network.
### Starting the validator with Bazel
### Activating your validator: depositing 3.2 Göerli ETH
To begin setting up a validator, follow the instructions found on [prylabs.net](https://prylabs.net) to use the Göerli ETH faucet and make a deposit. For step-by-step assistance with the deposit page, see the [Activating a Validator ](https://docs.prylabs.network/docs/activating-a-validator)section of this documentation.
It will take a while for the nodes in the network to process a deposit. Once the node is active, the validator will immediately begin performing its responsibilities.
In your validator client, you will be able to frequently see your validator balance as it goes up over time. Note that, should your node ever go offline for a long period, a validator will start gradually losing its deposit until it is removed from the network entirely.
1. Open another terminal window. Enter your Prysm directory and run the validator by issuing the following command:
```
cd prysm
bazel run //validator
```
**Congratulations, you are now running Ethereum 2.0 Phase 0!**
## Setting up a local ETH2 development chain
This section outlines the process of setting up Prysm for local testing with other Ethereum 2.0 client implementations. See the [INTEROP.md](https://github.com/prysmaticlabs/prysm/blob/master/INTEROP.md) file for advanced configuration options. For more background information on interoperability development, see [this blog post](https://blog.ethereum.org/2019/09/19/eth2-interop-in-review/).
### Installation and dependencies
To begin setting up a local ETH2 development chain, follow the **Bazel** instructions found in the [dependencies](https://github.com/prysmaticlabs/prysm#dependencies) and [installation](https://github.com/prysmaticlabs/prysm#installation) sections respectively.
### Running a local beacon node and validator client
The example below will generate a beacon genesis state and initiate Prysm with 64 validators with the genesis time set to your machine's UNIX time.
Open up two terminal windows. In the first, issue the command:
```text
bazel run //beacon-chain -- \
--custom-genesis-delay=0 \
--bootstrap-node= \
--deposit-contract $(curl https://prylabs.net/contract) \
--clear-db \
--interop-num-validators 64 \
--interop-eth1data-votes
```
Wait a moment for the beacon chain to start. In the other terminal, issue the command:
```text
bazel run //validator -- --keymanager=interop --keymanageropts='{"keys":64}'
```
This command will kickstart the system with your 64 validators performing their duties accordingly.
## Testing Prysm
To run the unit tests of our system, issue the command:
```
```text
bazel test //...
```
To run the linter, make sure you have [golangci-lint](https://github.com/golangci/golangci-lint) installed and then issue the command:
```
To run our linter, make sure you have [golangci-lint](https://github.com/golangci/golangci-lint) installed and then issue the command:
```text
golangci-lint run
```
## Contributing
We have put all of our contribution guidelines into [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/master/CONTRIBUTING.md)! Check it out to get started.
Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/) to learn more!
## License
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html)

WORKSPACE

@@ -1,6 +1,48 @@
workspace(name = "prysm")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
http_archive(
name = "bazel_toolchains",
sha256 = "b5a8039df7119d618402472f3adff8a1bd0ae9d5e253f53fcc4c47122e91a3d2",
strip_prefix = "bazel-toolchains-2.1.1",
urls = [
"https://github.com/bazelbuild/bazel-toolchains/releases/download/2.1.1/bazel-toolchains-2.1.1.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/2.1.1.tar.gz",
],
)
http_archive(
name = "com_grail_bazel_toolchain",
sha256 = "0bec89e35d8a141c87f28cfc506d6d344785c8eb2ff3a453140a1fe972ada79d",
strip_prefix = "bazel-toolchain-77a87103145f86f03f90475d19c2c8854398a444",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/77a87103145f86f03f90475d19c2c8854398a444.tar.gz"],
)
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
bazel_toolchain_dependencies()
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "9.0.0",
)
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
llvm_register_toolchains()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
configure_prysm_toolchains()
load("@prysm//tools/cross-toolchain:rbe_toolchains_config.bzl", "rbe_toolchains_config")
rbe_toolchains_config()
http_archive(
name = "bazel_skylib",
sha256 = "2ea8a5ed2b448baf4a6855d3ce049c4c452a6470b1efd1504fdb7c1c134d220a",
@@ -8,21 +50,12 @@ http_archive(
url = "https://github.com/bazelbuild/bazel-skylib/archive/0.8.0.tar.gz",
)
http_archive(
name = "io_bazel_rules_go",
sha256 = "513c12397db1bc9aa46dd62f02dd94b49a9b5d17444d49b5a04c5a89f3053c1c",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.19.5/rules_go-v0.19.5.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.19.5/rules_go-v0.19.5.tar.gz",
],
)
http_archive(
name = "bazel_gazelle",
sha256 = "7fc87f4170011201b1690326e8c16c5d802836e3a0d617d8f75c3af2b23180c4",
sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/0.18.2/bazel-gazelle-0.18.2.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/0.18.2/bazel-gazelle-0.18.2.tar.gz",
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
],
)
@@ -35,9 +68,18 @@ http_archive(
http_archive(
name = "io_bazel_rules_docker",
sha256 = "9ff889216e28c918811b77999257d4ac001c26c1f7c7fb17a79bc28abf74182e",
strip_prefix = "rules_docker-0.10.1",
url = "https://github.com/bazelbuild/rules_docker/archive/v0.10.1.tar.gz",
sha256 = "dc97fccceacd4c6be14e800b2a00693d5e8d07f69ee187babfd04a80a9f8e250",
strip_prefix = "rules_docker-0.14.1",
url = "https://github.com/bazelbuild/rules_docker/archive/v0.14.1.tar.gz",
)
http_archive(
name = "io_bazel_rules_go",
sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
],
)
http_archive(
@@ -50,21 +92,21 @@ git_repository(
name = "graknlabs_bazel_distribution",
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
remote = "https://github.com/graknlabs/bazel-distribution",
shallow_since = "1563544980 +0300",
shallow_since = "1569509514 +0300",
)
# Override default import in rules_go with special patch until
# https://github.com/gogo/protobuf/pull/582 is merged.
git_repository(
name = "com_github_gogo_protobuf",
commit = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c", # v1.2.1, as of 2019-03-03
commit = "5628607bb4c51c3157aacc3a50f0ab707582b805",
patch_args = ["-p1"],
patches = [
"@io_bazel_rules_go//third_party:com_github_gogo_protobuf-gazelle.patch",
"//third_party:com_github_gogo_protobuf-equal.patch",
],
remote = "https://github.com/gogo/protobuf",
shallow_since = "1550471403 +0200",
shallow_since = "1571033717 +0200",
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
@@ -75,6 +117,10 @@ load(
container_repositories()
load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
bls_dependencies()
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
go_rules_dependencies()
@@ -94,8 +140,19 @@ load(
_go_image_repos = "repositories",
)
# Golang images
# This is using gcr.io/distroless/base
_go_image_repos()
# CC images
# This is using gcr.io/distroless/base
load(
"@io_bazel_rules_docker//cc:image.bzl",
_cc_image_repos = "repositories",
)
_cc_image_repos()
http_archive(
name = "prysm_testnet_site",
build_file_content = """
@@ -104,16 +161,16 @@ proto_library(
srcs = ["src/proto/faucet.proto"],
visibility = ["//visibility:public"],
)""",
sha256 = "1184e44a7a9b8b172e68e82c02cc3b15a80122340e05a92bd1edeafe5e68debe",
strip_prefix = "prysm-testnet-site-ec6a4a4e421bf4445845969167d06e93ee8d7acc",
url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/ec6a4a4e421bf4445845969167d06e93ee8d7acc.tar.gz",
sha256 = "29742136ff9faf47343073c4569a7cf21b8ed138f726929e09e3c38ab83544f7",
strip_prefix = "prysm-testnet-site-5c711600f0a77fc553b18cf37b880eaffef4afdb",
url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/5c711600f0a77fc553b18cf37b880eaffef4afdb.tar.gz",
)
http_archive(
name = "io_kubernetes_build",
sha256 = "5ab110312cd7665a1940ba0523b67b9fbb6053beb9dd4e147643867bebd7e809",
strip_prefix = "repo-infra-db6ceb5f992254db76af7c25db2edc5469b5ea82",
url = "https://github.com/kubernetes/repo-infra/archive/db6ceb5f992254db76af7c25db2edc5469b5ea82.tar.gz",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
strip_prefix = "repo-infra-6537f2101fb432b679f3d103ee729dd8ac5d30a0",
url = "https://github.com/kubernetes/repo-infra/archive/6537f2101fb432b679f3d103ee729dd8ac5d30a0.tar.gz",
)
http_archive(
@@ -128,8 +185,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "5c5b65a961b5e7251435efc9548648b45142a07993ad3e100850c240cb76e9af",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.0/general.tar.gz",
sha256 = "72c6ee3c20d19736b1203f364a6eb0ddee2c173073e20bee2beccd288fdc42be",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/general.tar.gz",
)
http_archive(
@@ -144,8 +201,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "3b5f0168af4331d09da52bebc26609def9d11be3e6c784ce7c3df3596617808d",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.0/minimal.tar.gz",
sha256 = "a3cc860a3679f6f62ee57b65677a9b48a65fdebb151cdcbf50f23852632845ef",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/minimal.tar.gz",
)
http_archive(
@@ -160,8 +217,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "f3ff68508dfe9696f23506daf0ca895cda955e30398741e00cffa33a01b0565c",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.0/mainnet.tar.gz",
sha256 = "8fc1b6220973ca30fa4ddc4ed24d66b1719abadca8bedb5e06c3bd9bc0df28e9",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/mainnet.tar.gz",
)
http_archive(
@@ -183,7 +240,7 @@ go_repository(
git_repository(
name = "com_google_protobuf",
commit = "09745575a923640154bcf307fba8aedff47f240a",
commit = "4cf5bfee9546101d98754d23ff378ff718ba8438",
remote = "https://github.com/protocolbuffers/protobuf",
shallow_since = "1558721209 -0700",
)
@@ -192,6 +249,30 @@ load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
protobuf_deps()
# Group the sources of the library so that CMake rule have access to it
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
http_archive(
name = "rules_foreign_cc",
sha256 = "450563dc2938f38566a59596bb30a3e905fbbcc35b3fff5a1791b122bc140465",
strip_prefix = "rules_foreign_cc-456425521973736ef346d93d3d6ba07d807047df",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/456425521973736ef346d93d3d6ba07d807047df.zip",
)
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
rules_foreign_cc_dependencies([
"@prysm//:built_cmake_toolchain",
])
http_archive(
name = "librdkafka",
build_file_content = all_content,
sha256 = "f6be27772babfdacbbf2e4c5432ea46c57ef5b7d82e52a81b885e7b804781fd6",
strip_prefix = "librdkafka-1.2.1",
urls = ["https://github.com/edenhill/librdkafka/archive/v1.2.1.tar.gz"],
)
# External dependencies
go_repository(
@@ -209,7 +290,7 @@ go_repository(
go_repository(
name = "com_github_prysmaticlabs_go_ssz",
commit = "58b2f86b0f02f06e634db06dee0c838ad41849f8",
commit = "e24db4d9e9637cf88ee9e4a779e339a1686a84ee",
importpath = "github.com/prysmaticlabs/go-ssz",
)
@@ -245,7 +326,7 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_libp2p",
commit = "c1687281a5c19b61ee5e0dc07fad15697c3bde94", # v0.4.0
commit = "76944c4fc848530530f6be36fb22b70431ca506c", # v0.5.1
importpath = "github.com/libp2p/go-libp2p",
)
@@ -264,7 +345,7 @@ go_repository(
go_repository(
name = "com_github_multiformats_go_multiaddr",
commit = "f96df18bf0c217c77f6cc0f9e810a178cea12f38", # v0.1.1
commit = "8c6cee15b340d7210c30a82a19231ee333b69b1d", # v0.2.0
importpath = "github.com/multiformats/go-multiaddr",
)
@@ -276,7 +357,7 @@ go_repository(
go_repository(
name = "com_github_multiformats_go_multihash",
commit = "249ead2008065c476a2ee45e8e75e8b85d846a72", # v0.0.8
commit = "6b39927dce4869bc1726861b65ada415ee1f7fc7", # v0.0.13
importpath = "github.com/multiformats/go-multihash",
)
@@ -294,13 +375,13 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_libp2p_peerstore",
commit = "f4c9af195c69379f1cf284dba31985482a56f78e", # v0.1.3
commit = "dee88d7532302c001604811fa3fbb5a7f83225e7", # v0.1.4
importpath = "github.com/libp2p/go-libp2p-peerstore",
)
go_repository(
name = "com_github_libp2p_go_libp2p_circuit",
commit = "0305622f3f146485f0ff6df0ae6c010787331ca7", # v0.1.3
commit = "61af9db0dd78e01e53b9fb044be44dcc7255667e", # v0.1.4
importpath = "github.com/libp2p/go-libp2p-circuit",
)
@@ -379,7 +460,7 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_libp2p_secio",
build_file_proto_mode = "disable_global",
commit = "7c3f577d99debb69c3b68be35fe14d9445a6569c", # v0.2.0
commit = "6f83420d5715a8b1c4082aaf9c5c7785923e702e", # v0.2.1
importpath = "github.com/libp2p/go-libp2p-secio",
)
@@ -397,7 +478,7 @@ go_repository(
go_repository(
name = "com_github_jbenet_goprocess",
commit = "1dc239722b2ba3784472fb5301f62640fa5a8bc3", # v0.1.3
commit = "7f9d9ed286badffcf2122cfeb383ec37daf92508",
importpath = "github.com/jbenet/goprocess",
)
@@ -415,7 +496,7 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_libp2p_nat",
commit = "c50c291a61bceccb914366d93eb24f58594e9134", # v0.0.4
commit = "873ef75f6ab6273821d77197660c1fb3af4cc02e", # v0.0.5
importpath = "github.com/libp2p/go-libp2p-nat",
)
@@ -433,7 +514,7 @@ go_repository(
go_repository(
name = "com_github_mattn_go_isatty",
commit = "e1f7b56ace729e4a73a29a6b4fac6cd5fcda7ab3", # v0.0.9
commit = "7b513a986450394f7bbf1476909911b3aa3a55ce",
importpath = "github.com/mattn/go-isatty",
)
@@ -517,7 +598,7 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_flow_metrics",
commit = "1f5b3acc846b2c8ce4c4e713296af74f5c24df55", # v0.0.1
commit = "e5a6a4db89199d99b2a74b8da198277a826241d8", # v0.0.3
importpath = "github.com/libp2p/go-flow-metrics",
)
@@ -541,7 +622,7 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_ws_transport",
commit = "8cca0dbc7f3533b122bd2cbeaa4a9b07c2913b9d", # v0.1.2
commit = "370d1a3a7420e27423417c37630cad3754ad5702", # v0.2.0
importpath = "github.com/libp2p/go-ws-transport",
)
@@ -581,6 +662,12 @@ go_repository(
importpath = "github.com/syndtr/goleveldb",
)
go_repository(
name = "com_github_emicklei_dot",
commit = "5810de2f2ab7aac98cd7bcbd59147a7ca6071768",
importpath = "github.com/emicklei/dot",
)
go_repository(
name = "com_github_libp2p_go_libp2p_blankhost",
commit = "da3b45205dfce3ef3926054ffd5dee76f5903382", # v0.1.4
@@ -589,22 +676,23 @@ go_repository(
go_repository(
name = "io_opencensus_go",
commit = "7bbec1755a8162b5923fc214a494773a701d506a", # v0.22.0
importpath = "go.opencensus.io",
sum = "h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=",
version = "v0.22.2",
)
go_repository(
name = "io_opencensus_go_contrib_exporter_jaeger",
commit = "5b8293c22f362562285c2acbc52f4a1870a47a33",
importpath = "contrib.go.opencensus.io/exporter/jaeger",
remote = "http://github.com/census-ecosystem/opencensus-go-exporter-jaeger",
vcs = "git",
sum = "h1:nhTv/Ry3lGmqbJ/JGvCjWxBl5ozRfqo86Ngz59UAlfk=",
version = "v0.2.0",
)
go_repository(
name = "org_golang_google_api",
commit = "aac82e61c0c8fe133c297b4b59316b9f481e1f0a", # v0.6.0
importpath = "google.golang.org/api",
sum = "h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE=",
version = "v0.14.0",
)
go_repository(
@@ -639,19 +727,19 @@ go_repository(
go_repository(
name = "com_github_prometheus_client_model",
commit = "fd36f4220a901265f90734c3183c5f0c91daa0b8",
commit = "7bc5445566f0fe75b15de23e6b93886e982d7bf9",
importpath = "github.com/prometheus/client_model",
)
go_repository(
name = "com_github_prometheus_common",
commit = "287d3e634a1e550c9e463dd7e5a75a422c614505", # v0.7.0
commit = "d978bcb1309602d68bb4ba69cf3f8ed900e07308",
importpath = "github.com/prometheus/common",
)
go_repository(
name = "com_github_prometheus_procfs",
commit = "499c85531f756d1129edd26485a5f73871eeb308", # v0.0.5
commit = "6d489fc7f1d9cd890a250f3ea3431b1744b9623f",
importpath = "github.com/prometheus/procfs",
)
@@ -694,8 +782,9 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_libp2p_pubsub",
build_file_proto_mode = "disable_global",
commit = "9f04364996b415168f0e0d7e9fc82272fbed4005", # v0.1.1
importpath = "github.com/libp2p/go-libp2p-pubsub",
sum = "h1:+Iz8zeI1KO6HX8cexU9g98cCGjae52Vujeg087SkuME=",
version = "v0.2.6-0.20191219233527-97846b574895",
)
go_repository(
@@ -719,7 +808,7 @@ go_repository(
go_repository(
name = "com_github_ipfs_go_datastore",
commit = "d0ca9bc39f9d5b77bd602abe1a897473e105be7f", # v0.1.1
commit = "e7a498916ccca1b0b40fb08630659cd4d68a01e8", # v0.3.1
importpath = "github.com/ipfs/go-datastore",
)
@@ -731,14 +820,14 @@ go_repository(
go_repository(
name = "com_github_ipfs_go_cid",
commit = "9bb7ea69202c6c9553479eb355ab8a8a97d43a2e", # v0.0.3
commit = "3da5bbbe45260437a44f777e6b2e5effa2606901", # v0.0.4
importpath = "github.com/ipfs/go-cid",
)
go_repository(
name = "com_github_libp2p_go_libp2p_record",
build_file_proto_mode = "disable_global",
commit = "3f535b1abcdf698e11ac16f618c2e64c4e5a114a", # v0.1.1
commit = "8ccbca30634f70a8f03d133ac64cbf245d079e1e", # v0.1.2
importpath = "github.com/libp2p/go-libp2p-record",
)
@@ -750,7 +839,7 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_libp2p_kbucket",
commit = "8b77351e0f784a5f71749d23000897c8aee71a76", # v0.2.1
commit = "a0cac6f63c491504b18eeba24be2ac0bbbfa0e5c", # v0.2.3
importpath = "github.com/libp2p/go-libp2p-kbucket",
)
@@ -774,7 +863,7 @@ go_repository(
go_repository(
name = "com_github_hashicorp_golang_lru",
commit = "7f827b33c0f158ec5dfbba01bb0b14a4541fd81d", # v0.5.3
commit = "14eae340515388ca95aa8e7b86f0de668e981f54", # v0.5.4
importpath = "github.com/hashicorp/golang-lru",
)
@@ -786,13 +875,14 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_libp2p_discovery",
commit = "d248d63b0af8c023307da18ad7000a12020e06f0", # v0.1.0
importpath = "github.com/libp2p/go-libp2p-discovery",
sum = "h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY=",
version = "v0.2.0",
)
go_repository(
name = "com_github_libp2p_go_libp2p_autonat",
commit = "3464f9b4f7bfbd7bb008813eacb626c7ab7fb9a3", # v0.1.0
commit = "60bf479cf6bc73c939f4db97ad711756e949e522", # v0.1.1
importpath = "github.com/libp2p/go-libp2p-autonat",
)
@@ -818,7 +908,7 @@ go_repository(
go_repository(
name = "io_k8s_apimachinery",
build_file_proto_mode = "disable_global",
commit = "bfcf53abc9f82bad3e534fcb1c36599d3c989ebf",
commit = "79c2a76c473a20cdc4ce59cae4b72529b5d9d16b", # v0.17.2
importpath = "k8s.io/apimachinery",
)
@@ -830,8 +920,9 @@ go_repository(
go_repository(
name = "com_github_google_gofuzz",
commit = "f140a6486e521aad38f5917de355cbf147cc0496", # v1.0.0
importpath = "github.com/google/gofuzz",
sum = "h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=",
version = "v1.0.0",
)
go_repository(
@@ -899,7 +990,7 @@ go_repository(
go_repository(
name = "com_google_cloud_go",
commit = "264def2dd949cdb8a803bb9f50fa29a67b798a6a", # v0.46.3
commit = "6daa679260d92196ffca2362d652c924fdcb7a22", # v0.52.0
importpath = "cloud.google.com/go",
)
@@ -941,7 +1032,7 @@ go_repository(
go_repository(
name = "com_github_pkg_errors",
commit = "ba968bfe8b2f7e042a574c888954fccecfa385b4", # v0.8.1
commit = "614d223910a179a466c1767a985424175c39b465", # v0.9.1
importpath = "github.com/pkg/errors",
)
@@ -983,7 +1074,7 @@ go_repository(
go_repository(
name = "com_github_apache_thrift",
commit = "384647d290e2e4a55a14b1b7ef1b7e66293a2c33", # v0.12.0
commit = "cecee50308fc7e6f77f55b3fd906c1c6c471fa2f", # v0.13.0
importpath = "github.com/apache/thrift",
)
@@ -993,15 +1084,9 @@ go_repository(
importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
)
go_repository(
name = "com_github_karlseguin_ccache",
commit = "ec06cd93a07565b373789b0078ba88fe697fddd9", # v2.0.3
importpath = "github.com/karlseguin/ccache",
)
go_repository(
name = "com_github_libp2p_go_libp2p_connmgr",
commit = "b46e9bdbcd8436b4fe4b30a53ec913c07e5e09c9", # v0.1.1
commit = "273839464339f1885413b385feee35301c5cb76f", # v0.2.1
importpath = "github.com/libp2p/go-libp2p-connmgr",
)
@@ -1032,13 +1117,13 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_libp2p_core",
build_file_proto_mode = "disable_global",
commit = "26b960839df84e2783f8f6125fa822a9978c2b8f", # v0.2.3
commit = "f7f724862d85ec9f9ee7c58b0f79836abdee8cd9", # v0.3.0
importpath = "github.com/libp2p/go-libp2p-core",
)
go_repository(
name = "com_github_libp2p_go_libp2p_testing",
commit = "1fa303da162dc57872d8fc553497f7602aa11c10", # v0.1.0
commit = "82713a62880a5fe72d438bd58d737f0d3c4b7f36", # v0.1.1
importpath = "github.com/libp2p/go-libp2p-testing",
)
@@ -1066,6 +1151,12 @@ go_repository(
importpath = "github.com/multiformats/go-multiaddr-fmt",
)
go_repository(
name = "com_github_multiformats_go_varint",
commit = "0aa688902217dff2cba0f678c7e4a0f547b4983e",
importpath = "github.com/multiformats/go-varint",
)
go_repository(
name = "com_github_libp2p_go_yamux",
commit = "663972181d409e7263040f0b668462f87c85e1bd", # v1.2.3
@@ -1074,7 +1165,7 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_nat",
commit = "d13fdefb3bbb2fde2c6fc090a7ea992cec8b26df", # v0.0.3
commit = "4b355d438085545df006ad9349686f30d8d37a27", # v0.0.4
importpath = "github.com/libp2p/go-nat",
)
@@ -1139,7 +1230,7 @@ go_repository(
go_repository(
name = "com_github_googleapis_gnostic",
commit = "ab0dd09aa10e2952b28e12ecd35681b20463ebab", # v0.3.1
commit = "896953e6749863beec38e27029c804e88c3144b8", # v0.4.1
importpath = "github.com/googleapis/gnostic",
)
@@ -1163,7 +1254,7 @@ go_repository(
go_repository(
name = "com_github_google_go_cmp",
commit = "2d0692c2e9617365a95b295612ac0d4415ba4627", # v0.3.1
commit = "5a6f75716e1203a923a78c9efb94089d857df0f6", # v0.4.0
importpath = "github.com/google/go-cmp",
)
@@ -1194,21 +1285,26 @@ go_repository(
)
go_repository(
name = "com_github_googleapis_gnostic",
commit = "25d8b0b6698593f520d9d8dc5a88e6b16ca9ecc0",
importpath = "github.com/googleapis/gnostic",
name = "com_github_patrickmn_go_cache",
commit = "46f407853014144407b6c2ec7ccc76bf67958d93",
importpath = "github.com/patrickmn/go-cache",
)
go_repository(
name = "com_github_prysmaticlabs_ethereumapis",
commit = "c7f1fd03716c94dcc287a0d35905ed35b8a0afe1",
commit = "fca4d6f69bedb8615c2fc916d1a68f2692285caa",
importpath = "github.com/prysmaticlabs/ethereumapis",
patch_args = ["-p1"],
patches = [
"//third_party:com_github_prysmaticlabs_ethereumapis-tags.patch",
],
)
go_repository(
name = "com_github_cloudflare_roughtime",
commit = "d41fdcee702eb3e5c3296288a453b9340184d37e",
importpath = "github.com/cloudflare/roughtime",
sum = "h1:jeSxE3fepJdhASERvBHI6RFkMhISv6Ir2JUybYLIVXs=",
version = "v0.0.0-20200205191924-a69ef1dab727",
)
go_repository(
@@ -1238,13 +1334,6 @@ go_repository(
version = "v0.0.0-20161005185022-dfcf01d20ee9",
)
go_repository(
name = "com_github_kilic_bls12-381",
importpath = "github.com/kilic/bls12-381",
sum = "h1:hCD4IWWYsETkACK7U+isYppKfB/6d54sBkCDk3k+w2U=",
version = "v0.0.0-20191005202515-c798d6202457",
)
go_repository(
name = "com_github_minio_highwayhash",
importpath = "github.com/minio/highwayhash",
@@ -1259,6 +1348,15 @@ go_repository(
version = "v0.0.0-20191002040644-a1355ae1e2c3",
)
go_repository(
name = "in_gopkg_confluentinc_confluent_kafka_go_v1",
importpath = "gopkg.in/confluentinc/confluent-kafka-go.v1",
patch_args = ["-p1"],
patches = ["//third_party:in_gopkg_confluentinc_confluent_kafka_go_v1.patch"],
sum = "h1:roy97m/3wj9/o8OuU3sZ5wildk30ep38k2x8nhNbKrI=",
version = "v1.1.0",
)
go_repository(
name = "com_github_naoina_toml",
importpath = "github.com/naoina/toml",
@@ -1349,3 +1447,154 @@ go_repository(
sum = "h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=",
version = "v1.7.0",
)
go_repository(
name = "com_github_protolambda_zssz",
commit = "632f11e5e281660402bd0ac58f76090f3503def0",
importpath = "github.com/protolambda/zssz",
)
go_repository(
name = "com_github_googleapis_gnostic",
commit = "25d8b0b6698593f520d9d8dc5a88e6b16ca9ecc0",
importpath = "github.com/googleapis/gnostic",
)
go_repository(
name = "com_github_googleapis_gax_go_v2",
importpath = "github.com/googleapis/gax-go/v2",
sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
version = "v2.0.5",
)
go_repository(
name = "com_github_golang_groupcache",
importpath = "github.com/golang/groupcache",
sum = "h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=",
version = "v0.0.0-20191027212112-611e8accdfc9",
)
go_repository(
name = "com_github_uber_jaeger_client_go",
importpath = "github.com/uber/jaeger-client-go",
sum = "h1:HgqpYBng0n7tLJIlyT4kPCIv5XgCsF+kai1NnnrJzEU=",
version = "v2.20.1+incompatible",
)
go_repository(
name = "com_github_dgraph_io_ristretto",
commit = "99d1bbbf28e64530eb246be0568fc7709a35ebdd", # v0.0.1
importpath = "github.com/dgraph-io/ristretto",
)
go_repository(
name = "com_github_cespare_xxhash",
commit = "d7df74196a9e781ede915320c11c378c1b2f3a1f",
importpath = "github.com/cespare/xxhash",
)
go_repository(
name = "com_github_ipfs_go_detect_race",
importpath = "github.com/ipfs/go-detect-race",
sum = "h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=",
version = "v0.0.1",
)
go_repository(
name = "com_github_kevinms_leakybucket_go",
importpath = "github.com/kevinms/leakybucket-go",
sum = "h1:oq6BiN7v0MfWCRcJAxSV+hesVMAAV8COrQbTjYNnso4=",
version = "v0.0.0-20190611015032-8a3d0352aa79",
)
go_repository(
name = "com_github_wealdtech_go_eth2_wallet",
commit = "6970d62e60d86fdae3c3e510e800e8a60d755a7d",
importpath = "github.com/wealdtech/go-eth2-wallet",
)
go_repository(
name = "com_github_wealdtech_go_eth2_wallet_hd",
commit = "ce0a252a01c621687e9786a64899cfbfe802ba73",
importpath = "github.com/wealdtech/go-eth2-wallet-hd",
)
go_repository(
name = "com_github_wealdtech_go_eth2_wallet_nd",
commit = "12c8c41cdbd16797ff292e27f58e126bb89e9706",
importpath = "github.com/wealdtech/go-eth2-wallet-nd",
)
go_repository(
name = "com_github_wealdtech_go_eth2_wallet_store_filesystem",
commit = "1eea6a48d75380047d2ebe7c8c4bd8985bcfdeca",
importpath = "github.com/wealdtech/go-eth2-wallet-store-filesystem",
)
go_repository(
name = "com_github_wealdtech_go_eth2_wallet_store_s3",
commit = "1c821b5161f7bb0b3efa2030eff687eea5e70e53",
importpath = "github.com/wealdtech/go-eth2-wallet-store-s3",
)
go_repository(
name = "com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4",
commit = "0c11c07b9544eb662210fadded94f40f309d8c8f",
importpath = "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4",
)
go_repository(
name = "com_github_wealdtech_go_eth2_wallet_types",
commit = "af67d8101be61e7c4dd8126d2b3eba20cff5dab2",
importpath = "github.com/wealdtech/go-eth2-wallet-types",
)
go_repository(
name = "com_github_wealdtech_go_eth2_types",
commit = "f9c31ddf180537dd5712d5998a3d56c45864d71f",
importpath = "github.com/wealdtech/go-eth2-types",
)
go_repository(
name = "com_github_wealdtech_go_eth2_util",
commit = "326ebb1755651131bb8f4506ea9a23be6d9ad1dd",
importpath = "github.com/wealdtech/go-eth2-util",
)
go_repository(
name = "com_github_wealdtech_go_ecodec",
commit = "7473d835445a3490e61a5fcf48fe4e9755a37957",
importpath = "github.com/wealdtech/go-ecodec",
)
go_repository(
name = "com_github_wealdtech_go_bytesutil",
commit = "e564d0ade555b9f97494f0f669196ddcc6bc531d",
importpath = "github.com/wealdtech/go-bytesutil",
)
go_repository(
name = "com_github_wealdtech_go_indexer",
commit = "334862c32b1e3a5c6738a2618f5c0a8ebeb8cd51",
importpath = "github.com/wealdtech/go-indexer",
)
go_repository(
name = "com_github_shibukawa_configdir",
commit = "e180dbdc8da04c4fa04272e875ce64949f38bd3e",
importpath = "github.com/shibukawa/configdir",
)
go_repository(
name = "com_github_libp2p_go_libp2p_noise",
importpath = "github.com/libp2p/go-libp2p-noise",
sum = "h1:J1gHJRNFEk7NdiaPQQqAvxEy+7hhCsVv3uzduWybmqY=",
version = "v0.0.0-20200302201340-8c54356e12c9",
)
go_repository(
name = "com_github_ferranbt_fastssz",
importpath = "github.com/ferranbt/fastssz",
sum = "h1:oUQredbOIzWIMmeGR9dTLzSi4DqRVwxrPzSDiLJBp4Q=",
version = "v0.0.0-20200310214500-3283b9706406",
)

bazel.sh Executable file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# This script serves as a wrapper around bazel to limit the scope of environment variables that
# may change the action output. Using this script should result in a higher cache hit ratio for
# cached actions with a more hermetic build.
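# Example usage (all arguments are forwarded to bazel unchanged; target shown for illustration):
#   ./bazel.sh build //beacon-chain:beacon-chain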
env -i \
PATH=/usr/bin:/bin \
HOME=$HOME \
GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS \
bazel "$@"

View File

@@ -1,7 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle")
load("//tools:binary_targets.bzl", "binary_targets")
load("//tools:binary_targets.bzl", "binary_targets", "go_image_debug")
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
go_library(
@@ -36,6 +36,7 @@ go_image(
"main.go",
"usage.go",
],
base = "//tools:cc_image",
goarch = "amd64",
goos = "linux",
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
@@ -70,16 +71,39 @@ container_bundle(
tags = ["manual"],
)
go_image_debug(
name = "image_debug",
image = ":image",
)
container_bundle(
name = "image_bundle_debug",
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-debug": ":image_debug",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
},
tags = ["manual"],
)
docker_push(
name = "push_images",
bundle = ":image_bundle",
tags = ["manual"],
)
docker_push(
name = "push_images_debug",
bundle = ":image_bundle_debug",
tags = ["manual"],
)
go_binary(
name = "beacon-chain",
embed = [":go_default_library"],
visibility = ["//beacon-chain:__subpackages__"],
visibility = [
"//beacon-chain:__subpackages__",
"//endtoend:__pkg__",
],
)
go_test(

View File

@@ -5,6 +5,6 @@ This is the main project folder for the beacon chain implementation of Ethereum
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/KSA7rPr)
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/prysmaticlabs/prysm?badge&utm_medium=badge&utm_campaign=pr-badge)
Also, read the latest beacon chain [design spec](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at prysmatic labs.
Also, read the latest beacon chain [design spec](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at prysmatic labs.
Check out the [FAQs](https://notes.ethereum.org/9MMuzWeFTTSg-3Tz_YeiBA?view). Refer this page on [why](http://email.mg2.substack.com/c/eJwlj9GOhCAMRb9G3jRQQPGBh5mM8xsbhKrsDGIAM9m_X9xN2qZtbpt7rCm4xvSjj5gLOTOmL-809CMbKXFaOKakIl4DZYr2AGyQIGjHOnWH22OiYnoIxmDijaBhhS6fcy7GvjobA9m0mSXOcnZq5GBqLkilXBZhBsus5ZK89VbKkRt-a-BZI6DzZ7iur1lQ953KJ9bemnxgahuQU9XJu6pFPdu8meT8vragzEjpMCwMGLlgLo6h5z1JumQTu4IJd4v15xqMf_8ZLP_Y1bSLdbnrD-LL71i2Kj7DLxaWWF4)
we are combining sharding and casper together.

View File

@@ -7,14 +7,16 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
@@ -25,14 +27,18 @@ go_test(
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",

View File

@@ -5,13 +5,15 @@ import (
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
)
@@ -21,31 +23,33 @@ var log = logrus.WithField("prefix", "archiver")
// Service defining archiver functionality for persisting checkpointed
// beacon chain information to a database backend for historical purposes.
type Service struct {
ctx context.Context
cancel context.CancelFunc
beaconDB db.Database
headFetcher blockchain.HeadFetcher
newHeadNotifier blockchain.NewHeadNotifier
newHeadRootChan chan [32]byte
ctx context.Context
cancel context.CancelFunc
beaconDB db.NoHeadAccessDatabase
headFetcher blockchain.HeadFetcher
participationFetcher blockchain.ParticipationFetcher
stateNotifier statefeed.Notifier
lastArchivedEpoch uint64
}
// Config options for the archiver service.
type Config struct {
BeaconDB db.Database
HeadFetcher blockchain.HeadFetcher
NewHeadNotifier blockchain.NewHeadNotifier
BeaconDB db.NoHeadAccessDatabase
HeadFetcher blockchain.HeadFetcher
ParticipationFetcher blockchain.ParticipationFetcher
StateNotifier statefeed.Notifier
}
// NewArchiverService initializes the service from configuration options.
func NewArchiverService(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
return &Service{
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
headFetcher: cfg.HeadFetcher,
newHeadNotifier: cfg.NewHeadNotifier,
newHeadRootChan: make(chan [32]byte, 1),
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
headFetcher: cfg.HeadFetcher,
participationFetcher: cfg.ParticipationFetcher,
stateNotifier: cfg.StateNotifier,
}
}
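For orientation, a minimal wiring sketch for the new configuration follows. It assumes a chain service value that satisfies the HeadFetcher, ParticipationFetcher and StateNotifier interfaces, plus a db.NoHeadAccessDatabase handle; the variable names and the Start call are illustrative and not part of this diff.

svc := archiver.NewArchiverService(ctx, &archiver.Config{
	BeaconDB:             beaconDB,     // db.NoHeadAccessDatabase (assumed handle)
	HeadFetcher:          chainService, // supplies HeadState(ctx)
	ParticipationFetcher: chainService, // supplies Participation(epoch)
	StateNotifier:        chainService, // supplies the feed emitting BlockProcessed events
})
go svc.Start() // lifecycle method assumed from the usual Prysm service pattern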
@@ -67,41 +71,46 @@ func (s *Service) Status() error {
}
// We archive committee information pertaining to the head state's epoch.
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *pb.BeaconState) error {
currentEpoch := helpers.SlotToEpoch(headState.Slot)
proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
proposerSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return errors.Wrap(err, "could not generate seed")
}
attesterSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
attesterSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return errors.Wrap(err, "could not generate seed")
}
info := &ethpb.ArchivedCommitteeInfo{
info := &pb.ArchivedCommitteeInfo{
ProposerSeed: proposerSeed[:],
AttesterSeed: attesterSeed[:],
}
if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, currentEpoch, info); err != nil {
if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, epoch, info); err != nil {
return errors.Wrap(err, "could not archive committee info")
}
return nil
}
// We archive active validator set changes that happened during the epoch.
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *pb.BeaconState) error {
activations := validators.ActivatedValidatorIndices(headState)
slashings := validators.SlashedValidatorIndices(headState)
exited, err := validators.ExitedValidatorIndices(headState)
// We archive active validator set changes that happened during the previous epoch.
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
prevEpoch := epoch - 1
vals := headState.Validators()
activations := validators.ActivatedValidatorIndices(prevEpoch, vals)
slashings := validators.SlashedValidatorIndices(prevEpoch, vals)
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, prevEpoch)
if err != nil {
return errors.Wrap(err, "could not get active validator count")
}
exited, err := validators.ExitedValidatorIndices(prevEpoch, vals, activeValidatorCount)
if err != nil {
return errors.Wrap(err, "could not determine exited validator indices")
}
activeSetChanges := &ethpb.ArchivedActiveSetChanges{
activeSetChanges := &pb.ArchivedActiveSetChanges{
Activated: activations,
Exited: exited,
Slashed: slashings,
}
if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, helpers.CurrentEpoch(headState), activeSetChanges); err != nil {
if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, prevEpoch, activeSetChanges); err != nil {
return errors.Wrap(err, "could not archive active validator set changes")
}
return nil
@@ -109,60 +118,78 @@ func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *pb.Bea
// We compute participation metrics by first retrieving the head state and
// matching validator attestations during the epoch.
func (s *Service) archiveParticipation(ctx context.Context, headState *pb.BeaconState) error {
participation, err := epoch.ComputeValidatorParticipation(headState, helpers.SlotToEpoch(headState.Slot))
if err != nil {
return errors.Wrap(err, "could not compute participation")
func (s *Service) archiveParticipation(ctx context.Context, epoch uint64) error {
p := s.participationFetcher.Participation(epoch)
participation := &ethpb.ValidatorParticipation{}
if p != nil {
participation = &ethpb.ValidatorParticipation{
EligibleEther: p.PrevEpoch,
VotedEther: p.PrevEpochTargetAttesters,
GlobalParticipationRate: float32(p.PrevEpochTargetAttesters) / float32(p.PrevEpoch),
}
}
return s.beaconDB.SaveArchivedValidatorParticipation(ctx, helpers.SlotToEpoch(headState.Slot), participation)
return s.beaconDB.SaveArchivedValidatorParticipation(ctx, epoch, participation)
}
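For a sense of the numbers, here is an illustrative computation mirroring the formula stored above (the values are made up; the fields correspond to precompute.Balance.PrevEpoch and PrevEpochTargetAttesters, both in Gwei):

eligible := uint64(3200 * 1e9)             // 3,200 ETH of eligible balance
voted := uint64(2400 * 1e9)                // 2,400 ETH attested to the correct target
rate := float32(voted) / float32(eligible) // 0.75, stored as GlobalParticipationRate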
// We archive validator balances and active indices.
func (s *Service) archiveBalances(ctx context.Context, headState *pb.BeaconState) error {
balances := headState.Balances
currentEpoch := helpers.CurrentEpoch(headState)
if err := s.beaconDB.SaveArchivedBalances(ctx, currentEpoch, balances); err != nil {
func (s *Service) archiveBalances(ctx context.Context, balances []uint64, epoch uint64) error {
if err := s.beaconDB.SaveArchivedBalances(ctx, epoch, balances); err != nil {
return errors.Wrap(err, "could not archive balances")
}
return nil
}
func (s *Service) run(ctx context.Context) {
sub := s.newHeadNotifier.HeadUpdatedFeed().Subscribe(s.newHeadRootChan)
defer sub.Unsubscribe()
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case r := <-s.newHeadRootChan:
log.WithField("headRoot", fmt.Sprintf("%#x", r)).Debug("New chain head event")
headState := s.headFetcher.HeadState()
if !helpers.IsEpochEnd(headState.Slot) {
continue
case event := <-stateChannel:
if event.Type == statefeed.BlockProcessed {
data := event.Data.(*statefeed.BlockProcessedData)
log.WithField("headRoot", fmt.Sprintf("%#x", data.BlockRoot)).Debug("Received block processed event")
headState, err := s.headFetcher.HeadState(ctx)
if err != nil {
log.WithError(err).Error("Head state is not available")
continue
}
slot := headState.Slot()
currentEpoch := helpers.SlotToEpoch(slot)
if !helpers.IsEpochEnd(slot) && currentEpoch <= s.lastArchivedEpoch {
continue
}
epochToArchive := currentEpoch
if !helpers.IsEpochEnd(slot) {
epochToArchive--
}
if err := s.archiveCommitteeInfo(ctx, headState, epochToArchive); err != nil {
log.WithError(err).Error("Could not archive committee info")
continue
}
if err := s.archiveActiveSetChanges(ctx, headState, epochToArchive); err != nil {
log.WithError(err).Error("Could not archive active validator set changes")
continue
}
if err := s.archiveParticipation(ctx, epochToArchive); err != nil {
log.WithError(err).Error("Could not archive validator participation")
continue
}
if err := s.archiveBalances(ctx, headState.Balances(), epochToArchive); err != nil {
log.WithError(err).Error("Could not archive validator balances and active indices")
continue
}
log.WithField(
"epoch",
epochToArchive,
).Debug("Successfully archived beacon chain data during epoch")
s.lastArchivedEpoch = epochToArchive
}
if err := s.archiveCommitteeInfo(ctx, headState); err != nil {
log.WithError(err).Error("Could not archive committee info")
continue
}
if err := s.archiveActiveSetChanges(ctx, headState); err != nil {
log.WithError(err).Error("Could not archive active validator set changes")
continue
}
if err := s.archiveParticipation(ctx, headState); err != nil {
log.WithError(err).Error("Could not archive validator participation")
continue
}
if err := s.archiveBalances(ctx, headState); err != nil {
log.WithError(err).Error("Could not archive validator balances and active indices")
continue
}
log.WithField(
"epoch",
helpers.CurrentEpoch(headState),
).Debug("Successfully archived beacon chain data during epoch")
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-sub.Err():
log.WithError(err).Error("Subscription to new chain head notifier failed")
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state feed notifier failed")
return
}
}
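To make the skip-slot behaviour above concrete, here is a standalone sketch. It assumes mainnet's 32 slots per epoch, and the two helper bodies paraphrase helpers.IsEpochEnd and helpers.SlotToEpoch rather than import them:

package main

import "fmt"

const slotsPerEpoch = 32 // assumed mainnet value

func isEpochEnd(slot uint64) bool   { return (slot+1)%slotsPerEpoch == 0 }
func slotToEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

func main() {
	// The epoch-end slot 31 was skipped; the next block arrives at slot 33.
	slot := uint64(33)
	epochToArchive := slotToEpoch(slot) // current epoch is 1
	if !isEpochEnd(slot) {
		epochToArchive-- // the boundary was skipped; archive the completed previous epoch
	}
	fmt.Println(epochToArchive) // prints 0
}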

View File

@@ -8,13 +8,17 @@ import (
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/sirupsen/logrus"
@@ -24,20 +28,32 @@ import (
func init() {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
}
func TestArchiverService_ReceivesNewChainHeadEvent(t *testing.T) {
func TestArchiverService_ReceivesBlockProcessedEvent(t *testing.T) {
hook := logTest.NewGlobal()
svc, beaconDB := setupService(t)
defer dbutil.TeardownDB(t, beaconDB)
svc.headFetcher = &mock.ChainService{
State: &pb.BeaconState{Slot: 1},
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 1,
})
if err != nil {
t.Fatal(err)
}
headRoot := [32]byte{1, 2, 3}
triggerNewHeadEvent(t, svc, headRoot)
testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", headRoot))
testutil.AssertLogsContain(t, hook, "New chain head event")
svc.headFetcher = &mock.ChainService{
State: st,
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", event.Data.(*statefeed.BlockProcessedData).BlockRoot))
testutil.AssertLogsContain(t, hook, "Received block processed event")
}
func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
@@ -45,33 +61,112 @@ func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
svc, beaconDB := setupService(t)
defer dbutil.TeardownDB(t, beaconDB)
// The head state is NOT an epoch end.
svc.headFetcher = &mock.ChainService{
State: &pb.BeaconState{Slot: params.BeaconConfig().SlotsPerEpoch - 3},
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: params.BeaconConfig().SlotsPerEpoch - 2,
})
if err != nil {
t.Fatal(err)
}
triggerNewHeadEvent(t, svc, [32]byte{})
svc.headFetcher = &mock.ChainService{
State: st,
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
// The context should have been canceled.
if svc.ctx.Err() != context.Canceled {
t.Error("context was not canceled")
}
testutil.AssertLogsContain(t, hook, "New chain head event")
testutil.AssertLogsContain(t, hook, "Received block processed event")
// The service should ONLY log any archival logs if we receive a
// head slot that is an epoch end.
testutil.AssertLogsDoNotContain(t, hook, "Successfully archived")
}
func TestArchiverService_ArchivesEvenThroughSkipSlot(t *testing.T) {
hook := logTest.NewGlobal()
svc, beaconDB := setupService(t)
validatorCount := uint64(100)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
defer dbutil.TeardownDB(t, beaconDB)
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
exitRoutine := make(chan bool)
go func() {
svc.run(svc.ctx)
<-exitRoutine
}()
// Send out an event every slot, skipping the end slot of the epoch.
for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch+1; i++ {
if err := headState.SetSlot(i); err != nil {
t.Fatal(err)
}
svc.headFetcher = &mock.ChainService{
State: headState,
}
if helpers.IsEpochEnd(i) {
continue
}
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = svc.stateNotifier.StateFeed().Send(event)
}
}
if err := svc.Stop(); err != nil {
t.Fatal(err)
}
exitRoutine <- true
// The context should have been canceled.
if svc.ctx.Err() != context.Canceled {
t.Error("context was not canceled")
}
testutil.AssertLogsContain(t, hook, "Received block processed event")
// Even though there was a skip slot, we should still be able to archive
// upon the next block event afterwards.
testutil.AssertLogsContain(t, hook, "Successfully archived")
}
func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState := setupState(t, validatorCount)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, beaconDB := setupService(t)
defer dbutil.TeardownDB(t, beaconDB)
svc.headFetcher = &mock.ChainService{
State: headState,
}
triggerNewHeadEvent(t, svc, [32]byte{})
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
attestedBalance := uint64(1)
currentEpoch := helpers.CurrentEpoch(headState)
wanted := &ethpb.ValidatorParticipation{
VotedEther: attestedBalance,
@@ -85,7 +180,7 @@ func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
}
if !proto.Equal(wanted, retrieved) {
t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch, wanted, retrieved)
t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch-1, wanted, retrieved)
}
testutil.AssertLogsContain(t, hook, "Successfully archived")
}
@@ -93,23 +188,33 @@ func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState := setupState(t, validatorCount)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, beaconDB := setupService(t)
defer dbutil.TeardownDB(t, beaconDB)
svc.headFetcher = &mock.ChainService{
State: headState,
}
triggerNewHeadEvent(t, svc, [32]byte{})
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
retrieved, err := svc.beaconDB.ArchivedBalances(svc.ctx, helpers.CurrentEpoch(headState))
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(headState.Balances, retrieved) {
if !reflect.DeepEqual(headState.Balances(), retrieved) {
t.Errorf(
"Wanted balances for epoch %d %v, retrieved %v",
helpers.CurrentEpoch(headState),
headState.Balances,
headState.Balances(),
retrieved,
)
}
@@ -119,13 +224,23 @@ func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState := setupState(t, validatorCount)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, beaconDB := setupService(t)
defer dbutil.TeardownDB(t, beaconDB)
svc.headFetcher = &mock.ChainService{
State: headState,
}
triggerNewHeadEvent(t, svc, [32]byte{})
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
currentEpoch := helpers.CurrentEpoch(headState)
proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
@@ -136,7 +251,7 @@ func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
if err != nil {
t.Fatal(err)
}
wanted := &ethpb.ArchivedCommitteeInfo{
wanted := &pb.ArchivedCommitteeInfo{
ProposerSeed: proposerSeed[:],
AttesterSeed: attesterSeed[:],
}
@@ -159,22 +274,49 @@ func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState := setupState(t, validatorCount)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, beaconDB := setupService(t)
defer dbutil.TeardownDB(t, beaconDB)
svc.headFetcher = &mock.ChainService{
State: headState,
}
currentEpoch := helpers.CurrentEpoch(headState)
delayedActEpoch := helpers.DelayedActivationExitEpoch(currentEpoch)
headState.Validators[4].ActivationEpoch = delayedActEpoch
headState.Validators[5].ActivationEpoch = delayedActEpoch
triggerNewHeadEvent(t, svc, [32]byte{})
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
prevEpoch := helpers.PrevEpoch(headState)
delayedActEpoch := helpers.ActivationExitEpoch(prevEpoch)
val1, err := headState.ValidatorAtIndex(4)
if err != nil {
t.Fatal(err)
}
val1.ActivationEpoch = delayedActEpoch
val2, err := headState.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
val2.ActivationEpoch = delayedActEpoch
if err := headState.UpdateValidatorAtIndex(4, val1); err != nil {
t.Fatal(err)
}
if err := headState.UpdateValidatorAtIndex(5, val2); err != nil {

t.Fatal(err)
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
if err != nil {
t.Fatal(err)
}
if retrieved == nil {
t.Fatal("Retrieved indices are nil")
}
if !reflect.DeepEqual(retrieved.Activated, []uint64{4, 5}) {
t.Errorf("Wanted indices 4 5 activated, received %v", retrieved.Activated)
}
@@ -184,21 +326,48 @@ func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState := setupState(t, validatorCount)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, beaconDB := setupService(t)
defer dbutil.TeardownDB(t, beaconDB)
svc.headFetcher = &mock.ChainService{
State: headState,
}
currentEpoch := helpers.CurrentEpoch(headState)
headState.Validators[95].Slashed = true
headState.Validators[96].Slashed = true
triggerNewHeadEvent(t, svc, [32]byte{})
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
prevEpoch := helpers.PrevEpoch(headState)
val1, err := headState.ValidatorAtIndex(95)
if err != nil {
t.Fatal(err)
}
val1.Slashed = true
val2, err := headState.ValidatorAtIndex(96)
if err != nil {
t.Fatal(err)
}
val2.Slashed = true
if err := headState.UpdateValidatorAtIndex(95, val1); err != nil {
t.Fatal(err)
}
if err := headState.UpdateValidatorAtIndex(96, val2); err != nil {
t.Fatal(err)
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
if err != nil {
t.Fatal(err)
}
if retrieved == nil {
t.Fatal("Retrieved indices are nil")
}
if !reflect.DeepEqual(retrieved.Slashed, []uint64{95, 96}) {
t.Errorf("Wanted indices 95, 96 slashed, received %v", retrieved.Slashed)
}
@@ -208,28 +377,47 @@ func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
func TestArchiverService_SavesExitedValidatorChanges(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState := setupState(t, validatorCount)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, beaconDB := setupService(t)
defer dbutil.TeardownDB(t, beaconDB)
svc.headFetcher = &mock.ChainService{
State: headState,
}
currentEpoch := helpers.CurrentEpoch(headState)
headState.Validators[95].ExitEpoch = currentEpoch + 1
headState.Validators[95].WithdrawableEpoch = currentEpoch + 1 + params.BeaconConfig().MinValidatorWithdrawabilityDelay
triggerNewHeadEvent(t, svc, [32]byte{})
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
prevEpoch := helpers.PrevEpoch(headState)
val, err := headState.ValidatorAtIndex(95)
if err != nil {
t.Fatal(err)
}
val.ExitEpoch = prevEpoch
val.WithdrawableEpoch = prevEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
if err := headState.UpdateValidatorAtIndex(95, val); err != nil {
t.Fatal(err)
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
testutil.AssertLogsContain(t, hook, "Successfully archived")
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
if err != nil {
t.Fatal(err)
}
if retrieved == nil {
t.Fatal("Retrieved indices are nil")
}
if !reflect.DeepEqual(retrieved.Exited, []uint64{95}) {
t.Errorf("Wanted indices 95 exited, received %v", retrieved.Exited)
}
testutil.AssertLogsContain(t, hook, "Successfully archived")
}
func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
func setupState(validatorCount uint64) (*stateTrie.BeaconState, error) {
validators := make([]*ethpb.Validator, validatorCount)
balances := make([]uint64, validatorCount)
for i := 0; i < len(validators); i++ {
@@ -245,40 +433,47 @@ func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
// We initialize a head state that has attestations from participated
// validators in a simulated fashion.
return &pb.BeaconState{
return stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: (2 * params.BeaconConfig().SlotsPerEpoch) - 1,
Validators: validators,
Balances: balances,
BlockRoots: make([][]byte, 128),
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
Slashings: []uint64{0, 1e9, 1e9},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
CurrentEpochAttestations: atts,
FinalizedCheckpoint: &ethpb.Checkpoint{},
JustificationBits: bitfield.Bitvector4{0x00},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{},
}
})
}
func setupService(t *testing.T) (*Service, db.Database) {
beaconDB := dbutil.SetupDB(t)
ctx, cancel := context.WithCancel(context.Background())
validatorCount := uint64(100)
totalBalance := validatorCount * params.BeaconConfig().MaxEffectiveBalance
mockChainService := &mock.ChainService{}
return &Service{
beaconDB: beaconDB,
ctx: ctx,
cancel: cancel,
newHeadRootChan: make(chan [32]byte, 0),
newHeadNotifier: &mock.ChainService{},
beaconDB: beaconDB,
ctx: ctx,
cancel: cancel,
stateNotifier: mockChainService.StateNotifier(),
participationFetcher: &mock.ChainService{
Balance: &precompute.Balance{PrevEpoch: totalBalance, PrevEpochTargetAttesters: 1}},
}, beaconDB
}
func triggerNewHeadEvent(t *testing.T, svc *Service, headRoot [32]byte) {
func triggerStateEvent(t *testing.T, svc *Service, event *feed.Event) {
exitRoutine := make(chan bool)
go func() {
svc.run(svc.ctx)
<-exitRoutine
}()
svc.newHeadRootChan <- headRoot
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = svc.stateNotifier.StateFeed().Send(event)
}
if err := svc.Stop(); err != nil {
t.Fatal(err)
}

View File

@@ -4,9 +4,15 @@ go_library(
name = "go_default_library",
srcs = [
"chain_info.go",
"head.go",
"info.go",
"init_sync_process_block.go",
"log.go",
"metrics.go",
"process_attestation.go",
"process_attestation_helpers.go",
"process_block.go",
"process_block_helpers.go",
"receive_attestation.go",
"receive_block.go",
"service.go",
@@ -14,26 +20,38 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/blockchain/forkchoice:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/operations:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/flags:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
"//shared/slotutil:go_default_library",
"//shared/traceutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_emicklei_dot//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
@@ -53,8 +71,11 @@ go_test(
size = "medium",
srcs = [
"chain_info_test.go",
"head_test.go",
"init_sync_process_block_test.go",
"process_attestation_test.go",
"process_block_test.go",
"receive_attestation_test.go",
"receive_block_test.go",
"service_test.go",
],
embed = [":go_default_library"],
@@ -67,8 +88,9 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/params:go_default_library",
@@ -77,9 +99,11 @@ go_test(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
"@org_golang_x_net//context:go_default_library",
],
)
@@ -103,7 +127,6 @@ go_test(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/params:go_default_library",
@@ -112,6 +135,7 @@ go_test(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",

View File

@@ -1,11 +1,16 @@
package blockchain
import (
"bytes"
"context"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
)
@@ -13,28 +18,24 @@ import (
// directly retrieves chain info related data.
type ChainInfoFetcher interface {
HeadFetcher
CanonicalRootFetcher
FinalizationFetcher
}
// GenesisTimeFetcher retrieves the Eth2 genesis timestamp.
type GenesisTimeFetcher interface {
// TimeFetcher retrieves the Eth2 data that's related to time.
type TimeFetcher interface {
GenesisTime() time.Time
CurrentSlot() uint64
}
// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieves head related data.
type HeadFetcher interface {
HeadSlot() uint64
HeadRoot() []byte
HeadBlock() *ethpb.BeaconBlock
HeadState() *pb.BeaconState
}
// CanonicalRootFetcher defines a common interface for methods in blockchain service which
// directly retrieves canonical roots related data.
type CanonicalRootFetcher interface {
CanonicalRoot(slot uint64) []byte
HeadRoot(ctx context.Context) ([]byte, error)
HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error)
HeadState(ctx context.Context) (*state.BeaconState, error)
HeadValidatorsIndices(epoch uint64) ([]uint64, error)
HeadSeed(epoch uint64) ([32]byte, error)
}
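A sketch of a caller adapting to the new context-aware, error-returning accessors follows. The function name is illustrative; it assumes the surrounding package imports context, fmt, and the blockchain package from this diff.

func describeHead(ctx context.Context, f blockchain.HeadFetcher) (string, error) {
	root, err := f.HeadRoot(ctx)
	if err != nil {
		return "", err
	}
	headState, err := f.HeadState(ctx)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("slot=%d root=%#x", headState.Slot(), root), nil
}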
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
@@ -43,64 +44,133 @@ type ForkFetcher interface {
}
// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieves finalization related data.
// directly retrieves finalization and justification related data.
type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
}
// FinalizedCheckpt returns the latest finalized checkpoint tracked in fork choice service.
// ParticipationFetcher defines a common interface for methods in blockchain service which
// directly retrieves validator participation related data.
type ParticipationFetcher interface {
Participation(epoch uint64) *precompute.Balance
}
// FinalizedCheckpt returns the latest finalized checkpoint from head state.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
cp := s.forkChoiceStore.FinalizedCheckpt()
if cp != nil {
return cp
if s.finalizedCheckpt == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
// If head state exists but there hasn't been a finalized check point,
// the check point's root should refer to genesis block root.
if bytes.Equal(s.finalizedCheckpt.Root, params.BeaconConfig().ZeroHash[:]) {
return &ethpb.Checkpoint{Root: s.genesisRoot[:]}
}
return state.CopyCheckpoint(s.finalizedCheckpt)
}
// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
if s.justifiedCheckpt == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
// If head state exists but there hasn't been a justified check point,
// the check point root should refer to genesis block root.
if bytes.Equal(s.justifiedCheckpt.Root, params.BeaconConfig().ZeroHash[:]) {
return &ethpb.Checkpoint{Root: s.genesisRoot[:]}
}
return state.CopyCheckpoint(s.justifiedCheckpt)
}
// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
if s.prevJustifiedCheckpt == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
// If head state exists but there hasn't been a justified check point,
// the check point root should refer to genesis block root.
if bytes.Equal(s.prevJustifiedCheckpt.Root, params.BeaconConfig().ZeroHash[:]) {
return &ethpb.Checkpoint{Root: s.genesisRoot[:]}
}
return state.CopyCheckpoint(s.prevJustifiedCheckpt)
}
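A hedged usage note on the three getters above: because a zero-hash root is rewritten to the genesis root, callers can read the result without nil checks. For example (chainService is an assumed value satisfying FinalizationFetcher):

cp := chainService.FinalizedCheckpt() // never nil; root falls back to the genesis root pre-finalization
fmt.Printf("finalized epoch %d at %#x\n", cp.Epoch, cp.Root)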
// HeadSlot returns the slot of the head of the chain.
func (s *Service) HeadSlot() uint64 {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return 0
}
return s.headSlot
return s.headSlot()
}
// HeadRoot returns the root of the head of the chain.
func (s *Service) HeadRoot() []byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
root := s.canonicalRoots[s.headSlot]
if len(root) != 0 {
return root
func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
if s.headRoot() != params.BeaconConfig().ZeroHash {
r := s.headRoot()
return r[:], nil
}
return params.BeaconConfig().ZeroHash[:]
b, err := s.beaconDB.HeadBlock(ctx)
if err != nil {
return nil, err
}
if b == nil {
return params.BeaconConfig().ZeroHash[:], nil
}
r, err := ssz.HashTreeRoot(b.Block)
if err != nil {
return nil, err
}
return r[:], nil
}
// HeadBlock returns the head block of the chain.
func (s *Service) HeadBlock() *ethpb.BeaconBlock {
s.headLock.RLock()
defer s.headLock.RUnlock()
// If the head state is nil from service struct,
// it will attempt to get the head block from DB.
func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
if s.hasHeadState() {
return s.headBlock(), nil
}
return proto.Clone(s.headBlock).(*ethpb.BeaconBlock)
return s.beaconDB.HeadBlock(ctx)
}
// HeadState returns the head state of the chain.
func (s *Service) HeadState() *pb.BeaconState {
s.headLock.RLock()
defer s.headLock.RUnlock()
// If the head state is nil from service struct,
// it will attempt to get the head state from DB.
func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
if s.hasHeadState() {
return s.headState(), nil
}
return proto.Clone(s.headState).(*pb.BeaconState)
return s.beaconDB.HeadState(ctx)
}
// CanonicalRoot returns the canonical root of a given slot.
func (s *Service) CanonicalRoot(slot uint64) []byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
func (s *Service) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
if !s.hasHeadState() {
return []uint64{}, nil
}
return helpers.ActiveValidatorIndices(s.headState(), epoch)
}
return s.canonicalRoots[slot]
// HeadSeed returns the seed from the head view of a given epoch.
func (s *Service) HeadSeed(epoch uint64) ([32]byte, error) {
if !s.hasHeadState() {
return [32]byte{}, nil
}
return helpers.Seed(s.headState(), epoch, params.BeaconConfig().DomainBeaconAttester)
}
// GenesisTime returns the genesis time of beacon chain.
@@ -110,11 +180,19 @@ func (s *Service) GenesisTime() time.Time {
// CurrentFork retrieves the latest fork information of the beacon chain.
func (s *Service) CurrentFork() *pb.Fork {
if s.headState == nil {
if !s.hasHeadState() {
return &pb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
}
}
return proto.Clone(s.headState.Fork).(*pb.Fork)
return s.headState().Fork()
}
// Participation returns the participation stats of a given epoch.
func (s *Service) Participation(epoch uint64) *precompute.Balance {
s.epochParticipationLock.RLock()
defer s.epochParticipationLock.RUnlock()
return s.epochParticipation[epoch]
}

View File

@@ -4,21 +4,19 @@ import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)
func TestHeadSlot_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
s := &Service{
beaconDB: db,
canonicalRoots: make(map[uint64][]byte),
beaconDB: db,
}
go func() {
s.saveHead(
context.Background(),
&ethpb.BeaconBlock{Slot: 777},
[32]byte{},
)
}()
@@ -29,49 +27,47 @@ func TestHeadRoot_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
s := &Service{
beaconDB: db,
canonicalRoots: make(map[uint64][]byte),
beaconDB: db,
head: &head{root: [32]byte{'A'}},
}
go func() {
s.saveHead(
context.Background(),
&ethpb.BeaconBlock{Slot: 777},
[32]byte{},
)
}()
s.HeadRoot()
if _, err := s.HeadRoot(context.Background()); err != nil {
t.Fatal(err)
}
}
func TestHeadBlock_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
s := &Service{
beaconDB: db,
canonicalRoots: make(map[uint64][]byte),
beaconDB: db,
head: &head{block: &ethpb.SignedBeaconBlock{}},
}
go func() {
s.saveHead(
context.Background(),
&ethpb.BeaconBlock{Slot: 777},
[32]byte{},
)
}()
s.HeadBlock()
s.HeadBlock(context.Background())
}
func TestHeadState_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
s := &Service{
beaconDB: db,
canonicalRoots: make(map[uint64][]byte),
beaconDB: db,
}
go func() {
s.saveHead(
context.Background(),
&ethpb.BeaconBlock{Slot: 777},
[32]byte{},
)
}()
s.HeadState()
s.HeadState(context.Background())
}

View File

@@ -7,27 +7,37 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
)
// Ensure Service implements chain info interface.
var _ = ChainInfoFetcher(&Service{})
var _ = GenesisTimeFetcher(&Service{})
var _ = TimeFetcher(&Service{})
var _ = ForkFetcher(&Service{})
func TestFinalizedCheckpt_Nil(t *testing.T) {
c := setupBeaconChain(t, nil)
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
c := setupBeaconChain(t, db)
if !bytes.Equal(c.FinalizedCheckpt().Root, params.BeaconConfig().ZeroHash[:]) {
t.Error("Incorrect pre chain start value")
}
}
func TestHeadRoot_Nil(t *testing.T) {
c := setupBeaconChain(t, nil)
if !bytes.Equal(c.HeadRoot(), params.BeaconConfig().ZeroHash[:]) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
c := setupBeaconChain(t, db)
headRoot, err := c.HeadRoot(context.Background())
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(headRoot, params.BeaconConfig().ZeroHash[:]) {
t.Error("Incorrect pre chain start value")
}
}
@@ -35,48 +45,132 @@ func TestHeadRoot_Nil(t *testing.T) {
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
cp := &ethpb.Checkpoint{Epoch: 5, Root: []byte("foo")}
c := setupBeaconChain(t, db)
c.finalizedCheckpt = cp
if err := c.forkChoiceStore.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
t.Fatal(err)
if c.FinalizedCheckpt().Epoch != cp.Epoch {
t.Errorf("Finalized epoch at genesis should be %d, got: %d", cp.Epoch, c.FinalizedCheckpt().Epoch)
}
}
if c.FinalizedCheckpt().Epoch != 0 {
t.Errorf("Finalized epoch at genesis should be 0, got: %d", c.FinalizedCheckpt().Epoch)
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, db)
c.finalizedCheckpt = cp
c.genesisRoot = genesisRoot
if !bytes.Equal(c.FinalizedCheckpt().Root, c.genesisRoot[:]) {
t.Errorf("Got: %v, wanted: %v", c.FinalizedCheckpt().Root, c.genesisRoot[:])
}
}
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cp := &ethpb.Checkpoint{Epoch: 6, Root: []byte("foo")}
c := setupBeaconChain(t, db)
c.justifiedCheckpt = cp
if c.CurrentJustifiedCheckpt().Epoch != cp.Epoch {
t.Errorf("Current Justifiied epoch at genesis should be %d, got: %d", cp.Epoch, c.CurrentJustifiedCheckpt().Epoch)
}
}
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, db)
c.justifiedCheckpt = cp
c.genesisRoot = genesisRoot
if !bytes.Equal(c.CurrentJustifiedCheckpt().Root, c.genesisRoot[:]) {
t.Errorf("Got: %v, wanted: %v", c.CurrentJustifiedCheckpt().Root, c.genesisRoot[:])
}
}
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cp := &ethpb.Checkpoint{Epoch: 7, Root: []byte("foo")}
c := setupBeaconChain(t, db)
c.prevJustifiedCheckpt = cp
if c.PreviousJustifiedCheckpt().Epoch != cp.Epoch {
t.Errorf("Previous Justifiied epoch at genesis should be %d, got: %d", cp.Epoch, c.PreviousJustifiedCheckpt().Epoch)
}
}
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
genesisRoot := [32]byte{'C'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, db)
c.prevJustifiedCheckpt = cp
c.genesisRoot = genesisRoot
if !bytes.Equal(c.PreviousJustifiedCheckpt().Root, c.genesisRoot[:]) {
t.Errorf("Got: %v, wanted: %v", c.PreviousJustifiedCheckpt().Root, c.genesisRoot[:])
}
}
func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
c.headSlot = 100
s, _ := state.InitializeFromProto(&pb.BeaconState{})
c.head = &head{slot: 100, state: s}
if c.HeadSlot() != 100 {
t.Errorf("Wanted head slot: %d, got: %d", 100, c.HeadSlot())
}
}
func TestHeadRoot_CanRetrieve(t *testing.T) {
c := &Service{canonicalRoots: make(map[uint64][]byte)}
c.headSlot = 100
c.canonicalRoots[c.headSlot] = []byte{'A'}
if !bytes.Equal([]byte{'A'}, c.HeadRoot()) {
t.Errorf("Wanted head root: %v, got: %d", []byte{'A'}, c.HeadRoot())
c := &Service{}
c.head = &head{root: [32]byte{'A'}}
if [32]byte{'A'} != c.headRoot() {
t.Errorf("Wanted head root: %v, got: %d", []byte{'A'}, c.headRoot())
}
}
func TestHeadBlock_CanRetrieve(t *testing.T) {
b := &ethpb.BeaconBlock{Slot: 1}
c := &Service{headBlock: b}
if !reflect.DeepEqual(b, c.HeadBlock()) {
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
s, _ := state.InitializeFromProto(&pb.BeaconState{})
c := &Service{}
c.head = &head{block: b, state: s}
received, err := c.HeadBlock(context.Background())
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(b, received) {
t.Error("incorrect head block received")
}
}
func TestHeadState_CanRetrieve(t *testing.T) {
s := &pb.BeaconState{Slot: 2}
c := &Service{headState: s}
if !reflect.DeepEqual(s, c.HeadState()) {
s, err := state.InitializeFromProto(&pb.BeaconState{Slot: 2})
if err != nil {
t.Fatal(err)
}
c := &Service{}
c.head = &head{state: s}
headState, err := c.HeadState(context.Background())
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(s.InnerStateUnsafe(), headState.InnerStateUnsafe()) {
t.Error("incorrect head state received")
}
}
@@ -91,19 +185,13 @@ func TestGenesisTime_CanRetrieve(t *testing.T) {
func TestCurrentFork_CanRetrieve(t *testing.T) {
f := &pb.Fork{Epoch: 999}
s := &pb.BeaconState{Fork: f}
c := &Service{headState: s}
if !reflect.DeepEqual(c.CurrentFork(), f) {
t.Error("Recieved incorrect fork version")
}
}
func TestCanonicalRoot_CanRetrieve(t *testing.T) {
c := &Service{canonicalRoots: make(map[uint64][]byte)}
slot := uint64(123)
r := []byte{'B'}
c.canonicalRoots[slot] = r
if !bytes.Equal(r, c.CanonicalRoot(slot)) {
t.Errorf("Wanted head root: %v, got: %d", []byte{'A'}, c.CanonicalRoot(slot))
s, err := state.InitializeFromProto(&pb.BeaconState{Fork: f})
if err != nil {
t.Fatal(err)
}
c := &Service{}
c.head = &head{state: s}
if !proto.Equal(c.CurrentFork(), f) {
t.Error("Received incorrect fork version")
}
}

View File

@@ -1,178 +0,0 @@
package forkchoice
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
)
func BenchmarkForkChoiceTree1(b *testing.B) {
ctx := context.Background()
db := testDB.SetupDB(b)
defer testDB.TeardownDB(b, db)
store := NewForkChoiceService(ctx, db)
roots, err := blockTree1(db)
if err != nil {
b.Fatal(err)
}
// Benchmark fork choice with 1024 validators
validators := make([]*ethpb.Validator, 1024)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
}
s := &pb.BeaconState{Validators: validators}
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
b.Fatal(err)
}
store.justifiedCheckpt.Root = roots[0]
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
b.Fatal(err)
}
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: store.justifiedCheckpt,
State: s,
}); err != nil {
b.Fatal(err)
}
// Spread out the votes evenly for all 3 leaf nodes
for i := 0; i < len(validators); i++ {
switch {
case i < 256:
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
b.Fatal(err)
}
case i > 768:
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
b.Fatal(err)
}
default:
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
b.Fatal(err)
}
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := store.Head(ctx)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkForkChoiceTree2(b *testing.B) {
ctx := context.Background()
db := testDB.SetupDB(b)
defer testDB.TeardownDB(b, db)
store := NewForkChoiceService(ctx, db)
roots, err := blockTree2(db)
if err != nil {
b.Fatal(err)
}
// Benchmark fork choice with 1024 validators
validators := make([]*ethpb.Validator, 1024)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
}
s := &pb.BeaconState{Validators: validators}
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
b.Fatal(err)
}
store.justifiedCheckpt.Root = roots[0]
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
b.Fatal(err)
}
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: store.justifiedCheckpt,
State: s,
}); err != nil {
b.Fatal(err)
}
// Spread out the votes evenly for all the leaf nodes. 8 to 15
nodeIndex := 8
for i := 0; i < len(validators); i++ {
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[nodeIndex]}); err != nil {
b.Fatal(err)
}
if i%155 == 0 {
nodeIndex++
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := store.Head(ctx)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkForkChoiceTree3(b *testing.B) {
ctx := context.Background()
db := testDB.SetupDB(b)
defer testDB.TeardownDB(b, db)
store := NewForkChoiceService(ctx, db)
roots, err := blockTree3(db)
if err != nil {
b.Fatal(err)
}
// Benchmark fork choice with 1024 validators
validators := make([]*ethpb.Validator, 1024)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
}
s := &pb.BeaconState{Validators: validators}
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
b.Fatal(err)
}
store.justifiedCheckpt.Root = roots[0]
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
b.Fatal(err)
}
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: store.justifiedCheckpt,
State: s,
}); err != nil {
b.Fatal(err)
}
// All validators vote on the same head
for i := 0; i < len(validators); i++ {
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[len(roots)-1]}); err != nil {
b.Fatal(err)
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := store.Head(ctx)
if err != nil {
b.Fatal(err)
}
}
}

View File

@@ -1,9 +0,0 @@
/*
Package forkchoice implements the Latest Message Driven GHOST (Greedy Heaviest-Observed
Sub-Tree) algorithm as the Ethereum Serenity beacon chain fork choice rule. This algorithm is designed to
properly detect the canonical chain based on validator votes even in the presence of high network
latency, network partitions, and many conflicting blocks. To read more about fork choice, read the
official accompanying document:
https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/specs/core/0_fork-choice.md
*/
package forkchoice
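
As a rough, standalone sketch of the rule described in the doc comment above (not code from this package; the node type, weights, and tie-break helper here are illustrative assumptions), LMD GHOST can be thought of as greedily descending into the heaviest-weighted child and breaking ties toward the lexicographically higher root:

package main

import (
	"bytes"
	"fmt"
)

// node is a simplified block-tree node used only for this sketch.
type node struct {
	root     []byte
	children []*node
	weight   uint64 // total effective balance of latest votes supporting this subtree
}

// ghostHead greedily descends from the justified root, always picking the
// heaviest child and breaking ties by the lexicographically higher root.
func ghostHead(justified *node) *node {
	head := justified
	for len(head.children) > 0 {
		best := head.children[0]
		for _, c := range head.children[1:] {
			if c.weight > best.weight ||
				(c.weight == best.weight && bytes.Compare(c.root, best.root) > 0) {
				best = c
			}
		}
		head = best
	}
	return head
}

func main() {
	b2 := &node{root: []byte("b2"), weight: 5}
	b3 := &node{root: []byte("b3"), weight: 10}
	b1 := &node{root: []byte("b1"), weight: 15, children: []*node{b2, b3}}
	b0 := &node{root: []byte("b0"), weight: 15, children: []*node{b1}}
	fmt.Printf("head: %s\n", ghostHead(b0).root) // prints "head: b3"
}

This mirrors the first case in the lmd_ghost_test.yaml file below, where b3 wins on weight alone.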

View File

@@ -1,59 +0,0 @@
test_cases:
# GHOST chooses b3 with the heaviest weight
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b1'
- id: 'b3'
parent: 'b1'
weights:
b0: 0
b1: 0
b2: 5
b3: 10
head: 'b3'
# GHOST chooses b1 with the heaviest weight
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
- id: 'b3'
parent: 'b0'
weights:
b1: 5
b2: 4
b3: 3
head: 'b1'
# Equal weights children, GHOST chooses b3 because it is higher lexicographically than b2
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
- id: 'b3'
parent: 'b0'
weights:
b1: 5
b2: 6
b3: 6
head: 'b3'
# Equal weights children, GHOST chooses b2 because it is higher lexicographically than b1
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
weights:
b1: 0
b2: 0
head: 'b2'

View File

@@ -1,140 +0,0 @@
package forkchoice
import (
"bytes"
"context"
"io/ioutil"
"path/filepath"
"strconv"
"testing"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"gopkg.in/yaml.v2"
)
type Config struct {
TestCases []struct {
Blocks []struct {
ID string `yaml:"id"`
Parent string `yaml:"parent"`
} `yaml:"blocks"`
Weights map[string]int `yaml:"weights"`
Head string `yaml:"head"`
} `yaml:"test_cases"`
}
func TestGetHeadFromYaml(t *testing.T) {
ctx := context.Background()
filename, _ := filepath.Abs("./lmd_ghost_test.yaml")
yamlFile, err := ioutil.ReadFile(filename)
if err != nil {
t.Fatal(err)
}
var c *Config
err = yaml.Unmarshal(yamlFile, &c)
if err != nil {
t.Fatal(err)
}
for _, test := range c.TestCases {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
blksRoot := make(map[int][]byte)
// Construct block tree from yaml.
for _, blk := range test.Blocks {
// genesis block condition
if blk.ID == blk.Parent {
b := &ethpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
if err := db.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
root, err := ssz.SigningRoot(b)
if err != nil {
t.Fatal(err)
}
blksRoot[0] = root[:]
} else {
slot, err := strconv.Atoi(blk.ID[1:])
if err != nil {
t.Fatal(err)
}
parentSlot, err := strconv.Atoi(blk.Parent[1:])
if err != nil {
t.Fatal(err)
}
b := &ethpb.BeaconBlock{Slot: uint64(slot), ParentRoot: blksRoot[parentSlot]}
if err := db.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
root, err := ssz.SigningRoot(b)
if err != nil {
t.Fatal(err)
}
blksRoot[slot] = root[:]
}
}
// Assign validator votes to the blocks as weights.
count := 0
for blk, votes := range test.Weights {
slot, err := strconv.Atoi(blk[1:])
if err != nil {
t.Fatal(err)
}
max := count + votes
for i := count; i < max; i++ {
if err := db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: blksRoot[slot]}); err != nil {
t.Fatal(err)
}
count++
}
}
store := NewForkChoiceService(ctx, db)
validators := make([]*ethpb.Validator, count)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
}
s := &pb.BeaconState{Validators: validators}
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
t.Fatal(err)
}
store.justifiedCheckpt.Root = blksRoot[0]
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(blksRoot[0])); err != nil {
t.Fatal(err)
}
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: store.justifiedCheckpt,
State: s,
}); err != nil {
t.Fatal(err)
}
head, err := store.Head(ctx)
if err != nil {
t.Fatal(err)
}
headSlot, err := strconv.Atoi(test.Head[1:])
if err != nil {
t.Fatal(err)
}
wantedHead := blksRoot[headSlot]
if !bytes.Equal(head, wantedHead) {
t.Errorf("wanted root %#x, got root %#x", wantedHead, head)
}
helpers.ClearAllCaches()
testDB.TeardownDB(t, db)
}
}

View File

@@ -1,40 +0,0 @@
package forkchoice
import (
"fmt"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
)
var log = logrus.WithField("prefix", "forkchoice")
// logEpochData logs epoch-related data at the epoch boundary.
func logEpochData(beaconState *pb.BeaconState) {
log.WithFields(logrus.Fields{
"epoch": helpers.CurrentEpoch(beaconState),
"finalizedEpoch": beaconState.FinalizedCheckpoint.Epoch,
"justifiedEpoch": beaconState.CurrentJustifiedCheckpoint.Epoch,
"previousJustifiedEpoch": beaconState.PreviousJustifiedCheckpoint.Epoch,
}).Info("Starting next epoch")
activeVals, err := helpers.ActiveValidatorIndices(beaconState, helpers.CurrentEpoch(beaconState))
if err != nil {
log.WithError(err).Error("Could not get active validator indices")
return
}
log.WithFields(logrus.Fields{
"totalValidators": len(beaconState.Validators),
"activeValidators": len(activeVals),
"averageBalance": fmt.Sprintf("%.5f ETH", averageBalance(beaconState.Balances)),
}).Info("Validator registry information")
}
func averageBalance(balances []uint64) float64 {
total := uint64(0)
for i := 0; i < len(balances); i++ {
total += balances[i]
}
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
}
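
For a quick sanity check of the arithmetic above (hypothetical numbers, not taken from this file): with three balances of 32e9, 31e9 and 33e9 Gwei and GweiPerEth = 1e9, averageBalance returns (32e9 + 31e9 + 33e9) / 3 / 1e9 = 32.0, which logEpochData formats as "32.00000 ETH".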

View File

@@ -1,92 +0,0 @@
package forkchoice
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
var (
beaconFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_finalized_epoch",
Help: "Last finalized epoch of the processed state",
})
beaconFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_finalized_root",
Help: "Last finalized root of the processed state",
})
beaconCurrentJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_current_justified_epoch",
Help: "Current justified epoch of the processed state",
})
beaconCurrentJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_current_justified_root",
Help: "Current justified root of the processed state",
})
beaconPrevJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_previous_justified_epoch",
Help: "Previous justified epoch of the processed state",
})
beaconPrevJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_previous_justified_root",
Help: "Previous justified root of the processed state",
})
activeValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "state_active_validators",
Help: "Total number of active validators",
})
slashedValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "state_slashed_validators",
Help: "Total slashed validators",
})
withdrawnValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "state_withdrawn_validators",
Help: "Total withdrawn validators",
})
totalValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_current_validators",
Help: "Number of status=pending|active|exited|withdrawable validators in current epoch",
})
)
func reportEpochMetrics(state *pb.BeaconState) {
currentEpoch := state.Slot / params.BeaconConfig().SlotsPerEpoch
// Validator counts
var active float64
var slashed float64
var withdrawn float64
for _, v := range state.Validators {
if v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch {
active++
}
if v.Slashed {
slashed++
}
if currentEpoch >= v.ExitEpoch {
withdrawn++
}
}
activeValidatorsGauge.Set(active)
slashedValidatorsGauge.Set(slashed)
withdrawnValidatorsGauge.Set(withdrawn)
totalValidatorsGauge.Set(float64(len(state.Validators)))
// Last justified epoch
if state.CurrentJustifiedCheckpoint != nil {
beaconCurrentJustifiedEpoch.Set(float64(state.CurrentJustifiedCheckpoint.Epoch))
beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.CurrentJustifiedCheckpoint.Root)))
}
// Last previous justified epoch
if state.PreviousJustifiedCheckpoint != nil {
beaconPrevJustifiedEpoch.Set(float64(state.PreviousJustifiedCheckpoint.Epoch))
beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.PreviousJustifiedCheckpoint.Root)))
}
// Last finalized epoch
if state.FinalizedCheckpoint != nil {
beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpoint.Epoch))
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint.Root)))
}
}

View File

@@ -1,293 +0,0 @@
package forkchoice
import (
"context"
"fmt"
"time"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// OnAttestation is called whenever an attestation is received, it updates validators latest vote,
// as well as the fork choice store struct.
//
// Spec pseudocode definition:
// def on_attestation(store: Store, attestation: Attestation) -> None:
// target = attestation.data.target
//
// # Cannot calculate the current shuffling if have not seen the target
// assert target.root in store.blocks
//
// # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
// base_state = store.block_states[target.root].copy()
// assert store.time >= base_state.genesis_time + compute_start_slot_of_epoch(target.epoch) * SECONDS_PER_SLOT
//
// # Store target checkpoint state if not yet seen
// if target not in store.checkpoint_states:
// process_slots(base_state, compute_start_slot_of_epoch(target.epoch))
// store.checkpoint_states[target] = base_state
// target_state = store.checkpoint_states[target]
//
// # Attestations can only affect the fork choice of subsequent slots.
// # Delay consideration in the fork choice until their slot is in the past.
// attestation_slot = get_attestation_data_slot(target_state, attestation.data)
// assert store.time >= (attestation_slot + 1) * SECONDS_PER_SLOT
//
// # Get state at the `target` to validate attestation and calculate the committees
// indexed_attestation = get_indexed_attestation(target_state, attestation)
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
//
// # Update latest messages
// for i in indexed_attestation.custody_bit_0_indices + indexed_attestation.custody_bit_1_indices:
// if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
// store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
func (s *Store) OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "forkchoice.onAttestation")
defer span.End()
tgt := proto.Clone(a.Data.Target).(*ethpb.Checkpoint)
tgtSlot := helpers.StartSlot(tgt.Epoch)
// Verify beacon node has seen the target block before.
if !s.db.HasBlock(ctx, bytesutil.ToBytes32(tgt.Root)) {
return 0, fmt.Errorf("target root %#x does not exist in db", bytesutil.Trunc(tgt.Root))
}
// Verify attestation target has had a valid pre state produced by the target block.
baseState, err := s.verifyAttPreState(ctx, tgt)
if err != nil {
return 0, err
}
// Verify Attestations cannot be from future epochs.
if err := helpers.VerifySlotTime(baseState.GenesisTime, tgtSlot); err != nil {
return 0, errors.Wrap(err, "could not verify attestation target slot")
}
// Store target checkpoint state if not yet seen.
baseState, err = s.saveCheckpointState(ctx, baseState, tgt)
if err != nil {
return 0, err
}
// Delay attestation processing until the subsequent slot.
if err := s.waitForAttInclDelay(ctx, a, baseState); err != nil {
return 0, err
}
// Verify attestations can only affect the fork choice of subsequent slots.
if err := helpers.VerifySlotTime(baseState.GenesisTime, a.Data.Slot+1); err != nil {
return 0, err
}
s.attsQueueLock.Lock()
defer s.attsQueueLock.Unlock()
atts := make([]*ethpb.Attestation, 0, len(s.attsQueue))
for root, a := range s.attsQueue {
log := log.WithFields(logrus.Fields{
"AggregatedBitfield": fmt.Sprintf("%08b", a.AggregationBits),
"Root": fmt.Sprintf("%#x", root),
})
log.Debug("Updating latest votes")
// Use the target state to validate attestation and calculate the committees.
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
if err != nil {
log.WithError(err).Warn("Removing attestation from queue.")
delete(s.attsQueue, root)
continue
}
// Update every validator's latest vote.
if err := s.updateAttVotes(ctx, indexedAtt, tgt.Root, tgt.Epoch); err != nil {
return 0, err
}
// Mark attestation as seen so we don't update votes when it appears in a block.
if err := s.setSeenAtt(a); err != nil {
return 0, err
}
delete(s.attsQueue, root)
att, err := s.aggregatedAttestations(ctx, a)
if err != nil {
return 0, err
}
atts = append(atts, att...)
}
if err := s.db.SaveAttestations(ctx, atts); err != nil {
return 0, err
}
return tgtSlot, nil
}
// verifyAttPreState validates input attested check point has a valid pre-state.
func (s *Store) verifyAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*pb.BeaconState, error) {
baseState, err := s.db.State(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if baseState == nil {
return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
}
return baseState, nil
}
// saveCheckpointState saves and returns the processed state with the associated check point.
func (s *Store) saveCheckpointState(ctx context.Context, baseState *pb.BeaconState, c *ethpb.Checkpoint) (*pb.BeaconState, error) {
s.checkpointStateLock.Lock()
defer s.checkpointStateLock.Unlock()
cachedState, err := s.checkpointState.StateByCheckpoint(c)
if err != nil {
return nil, errors.Wrap(err, "could not get cached checkpoint state")
}
if cachedState != nil {
return cachedState, nil
}
// Advance slots only when it's higher than current state slot.
if helpers.StartSlot(c.Epoch) > baseState.Slot {
stateCopy := proto.Clone(baseState).(*pb.BeaconState)
baseState, err = state.ProcessSlots(ctx, stateCopy, helpers.StartSlot(c.Epoch))
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to %d", helpers.StartSlot(c.Epoch))
}
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: baseState,
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return baseState, nil
}
// waitForAttInclDelay waits until the next slot because attestation can only affect
// fork choice of subsequent slot. This is to delay attestation inclusion for fork choice
// until the attested slot is in the past.
func (s *Store) waitForAttInclDelay(ctx context.Context, a *ethpb.Attestation, targetState *pb.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.forkchoice.waitForAttInclDelay")
defer span.End()
nextSlot := a.Data.Slot + 1
duration := time.Duration(nextSlot*params.BeaconConfig().SecondsPerSlot) * time.Second
timeToInclude := time.Unix(int64(targetState.GenesisTime), 0).Add(duration)
if err := s.aggregateAttestation(ctx, a); err != nil {
return errors.Wrap(err, "could not aggregate attestation")
}
time.Sleep(time.Until(timeToInclude))
return nil
}
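
To make the inclusion-delay arithmetic above concrete, here is a small standalone sketch; the 12-second slot and the genesis timestamp are assumed example values, not constants taken from this package:

package main

import (
	"fmt"
	"time"
)

func main() {
	const secondsPerSlot = 12           // assumed example value
	genesis := time.Unix(1600000000, 0) // assumed example genesis time

	attSlot := uint64(10)
	nextSlot := attSlot + 1

	// An attestation may only influence fork choice once its slot is in the
	// past, i.e. no earlier than genesis + (attSlot+1) * secondsPerSlot.
	timeToInclude := genesis.Add(time.Duration(nextSlot*secondsPerSlot) * time.Second)
	fmt.Println("earliest fork choice inclusion:", timeToInclude.UTC())
}

With these numbers the node would wait until 132 seconds after genesis before counting the attestation toward fork choice.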
// aggregateAttestation aggregates the attestations in the pending queue.
func (s *Store) aggregateAttestation(ctx context.Context, att *ethpb.Attestation) error {
s.attsQueueLock.Lock()
defer s.attsQueueLock.Unlock()
root, err := ssz.HashTreeRoot(att.Data)
if err != nil {
return err
}
if a, ok := s.attsQueue[root]; ok {
a, err := helpers.AggregateAttestation(a, att)
if err != nil {
return err
}
s.attsQueue[root] = a
return nil
}
s.attsQueue[root] = proto.Clone(att).(*ethpb.Attestation)
return nil
}
// verifyAttestation validates input attestation is valid.
func (s *Store) verifyAttestation(ctx context.Context, baseState *pb.BeaconState, a *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
indexedAtt, err := blocks.ConvertToIndexed(ctx, baseState, a)
if err != nil {
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
}
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
return nil, errors.Wrap(err, "could not verify indexed attestation")
}
return indexedAtt, nil
}
// updateAttVotes updates validator's latest votes based on the incoming attestation.
func (s *Store) updateAttVotes(
ctx context.Context,
indexedAtt *ethpb.IndexedAttestation,
tgtRoot []byte,
tgtEpoch uint64) error {
indices := append(indexedAtt.CustodyBit_0Indices, indexedAtt.CustodyBit_1Indices...)
newVoteIndices := make([]uint64, 0, len(indices))
newVotes := make([]*pb.ValidatorLatestVote, 0, len(indices))
for _, i := range indices {
vote, err := s.db.ValidatorLatestVote(ctx, i)
if err != nil {
return errors.Wrapf(err, "could not get latest vote for validator %d", i)
}
if vote == nil || tgtEpoch > vote.Epoch {
newVotes = append(newVotes, &pb.ValidatorLatestVote{
Epoch: tgtEpoch,
Root: tgtRoot,
})
newVoteIndices = append(newVoteIndices, i)
}
}
return s.db.SaveValidatorLatestVotes(ctx, newVoteIndices, newVotes)
}
// setSeenAtt sets the attestation hash in seen attestation map to true.
func (s *Store) setSeenAtt(a *ethpb.Attestation) error {
s.seenAttsLock.Lock()
defer s.seenAttsLock.Unlock()
r, err := hashutil.HashProto(a)
if err != nil {
return err
}
s.seenAtts[r] = true
return nil
}
// aggregatedAttestations returns the aggregated attestations after checking the saved ones in the db.
func (s *Store) aggregatedAttestations(ctx context.Context, att *ethpb.Attestation) ([]*ethpb.Attestation, error) {
r, err := ssz.HashTreeRoot(att.Data)
if err != nil {
return nil, err
}
saved, err := s.db.AttestationsByDataRoot(ctx, r)
if err != nil {
return nil, err
}
if saved == nil {
return []*ethpb.Attestation{att}, nil
}
aggregated, err := helpers.AggregateAttestations(append(saved, att))
if err != nil {
return nil, err
}
return aggregated, nil
}

View File

@@ -1,260 +0,0 @@
package forkchoice
import (
"bytes"
"context"
"reflect"
"strings"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestStore_OnAttestation(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
_, err := blockTree1(db)
if err != nil {
t.Fatal(err)
}
BlkWithOutState := &ethpb.BeaconBlock{Slot: 0}
if err := db.SaveBlock(ctx, BlkWithOutState); err != nil {
t.Fatal(err)
}
BlkWithOutStateRoot, _ := ssz.SigningRoot(BlkWithOutState)
BlkWithStateBadAtt := &ethpb.BeaconBlock{Slot: 1}
if err := db.SaveBlock(ctx, BlkWithStateBadAtt); err != nil {
t.Fatal(err)
}
BlkWithStateBadAttRoot, _ := ssz.SigningRoot(BlkWithStateBadAtt)
if err := store.db.SaveState(ctx, &pb.BeaconState{}, BlkWithStateBadAttRoot); err != nil {
t.Fatal(err)
}
BlkWithValidState := &ethpb.BeaconBlock{Slot: 2}
if err := db.SaveBlock(ctx, BlkWithValidState); err != nil {
t.Fatal(err)
}
BlkWithValidStateRoot, _ := ssz.SigningRoot(BlkWithValidState)
if err := store.db.SaveState(ctx, &pb.BeaconState{
Fork: &pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
}, BlkWithValidStateRoot); err != nil {
t.Fatal(err)
}
tests := []struct {
name string
a *ethpb.Attestation
s *pb.BeaconState
wantErr bool
wantErrString string
}{
{
name: "attestation's target root not in db",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: []byte{'A'}}}},
s: &pb.BeaconState{},
wantErr: true,
wantErrString: "target root 0x41 does not exist in db",
},
{
name: "no pre state for attestations's target block",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
s: &pb.BeaconState{},
wantErr: true,
wantErrString: "pre state of target block 0 does not exist",
},
{
name: "process attestation from future epoch",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Epoch: params.BeaconConfig().FarFutureEpoch,
Root: BlkWithStateBadAttRoot[:]}}},
s: &pb.BeaconState{},
wantErr: true,
wantErrString: "could not process slot from the future",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
t.Fatal(err)
}
_, err := store.OnAttestation(ctx, tt.a)
if tt.wantErr {
if !strings.Contains(err.Error(), tt.wantErrString) {
t.Errorf("Store.OnAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
}
} else {
t.Error(err)
}
})
}
}
func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
params.UseDemoBeaconConfig()
store := NewForkChoiceService(ctx, db)
s := &pb.BeaconState{
Fork: &pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
StateRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
LatestBlockHeader: &ethpb.BeaconBlockHeader{},
JustificationBits: []byte{0},
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
FinalizedCheckpoint: &ethpb.Checkpoint{},
}
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
t.Fatal(err)
}
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
s1, err := store.saveCheckpointState(ctx, s, cp1)
if err != nil {
t.Fatal(err)
}
if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
}
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
s2, err := store.saveCheckpointState(ctx, s, cp2)
if err != nil {
t.Fatal(err)
}
if s2.Slot != 2*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot)
}
s1, err = store.saveCheckpointState(ctx, nil, cp1)
if err != nil {
t.Fatal(err)
}
if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
}
s1, err = store.checkpointState.StateByCheckpoint(cp1)
if err != nil {
t.Fatal(err)
}
if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
}
s2, err = store.checkpointState.StateByCheckpoint(cp2)
if err != nil {
t.Fatal(err)
}
if s2.Slot != 2*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot)
}
s.Slot = params.BeaconConfig().SlotsPerEpoch + 1
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
t.Fatal(err)
}
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'C'}}
s3, err := store.saveCheckpointState(ctx, s, cp3)
if err != nil {
t.Fatal(err)
}
if s3.Slot != s.Slot {
t.Errorf("Wanted state slot: %d, got: %d", s.Slot, s3.Slot)
}
}
func TestStore_AggregateAttestation(t *testing.T) {
_, _, privKeys := testutil.SetupInitialDeposits(t, 100)
f := &pb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: 0,
}
domain := helpers.Domain(f, 0, params.BeaconConfig().DomainBeaconAttester)
sig := privKeys[0].Sign([]byte{}, domain)
store := &Store{attsQueue: make(map[[32]byte]*ethpb.Attestation)}
b1 := bitfield.NewBitlist(8)
b1.SetBitAt(0, true)
a := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: b1, Signature: sig.Marshal()}
if err := store.aggregateAttestation(context.Background(), a); err != nil {
t.Fatal(err)
}
r, _ := ssz.HashTreeRoot(a.Data)
if !bytes.Equal(store.attsQueue[r].AggregationBits, b1) {
t.Error("Received incorrect aggregation bitfield")
}
b2 := bitfield.NewBitlist(8)
b2.SetBitAt(1, true)
a = &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: b2, Signature: sig.Marshal()}
if err := store.aggregateAttestation(context.Background(), a); err != nil {
t.Fatal(err)
}
if !bytes.Equal(store.attsQueue[r].AggregationBits, []byte{3, 1}) {
t.Error("Received incorrect aggregation bitfield")
}
b3 := bitfield.NewBitlist(8)
b3.SetBitAt(7, true)
a = &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: b3, Signature: sig.Marshal()}
if err := store.aggregateAttestation(context.Background(), a); err != nil {
t.Fatal(err)
}
if !bytes.Equal(store.attsQueue[r].AggregationBits, []byte{131, 1}) {
t.Error("Received incorrect aggregation bitfield")
}
}
func TestStore_ReturnAggregatedAttestation(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
a1 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0x02}}
err := store.db.SaveAttestation(ctx, a1)
if err != nil {
t.Fatal(err)
}
a2 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0x03}}
saved, err := store.aggregatedAttestations(ctx, a2)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
t.Error("did not retrieve saved attestation")
}
}

View File

@@ -1,442 +0,0 @@
package forkchoice
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// OnBlock is called when a gossip block is received. It runs regular state transition on the block and
// update fork choice store.
//
// Spec pseudocode definition:
// def on_block(store: Store, block: BeaconBlock) -> None:
// # Make a copy of the state to avoid mutability issues
// assert block.parent_root in store.block_states
// pre_state = store.block_states[block.parent_root].copy()
// # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
// assert store.time >= pre_state.genesis_time + block.slot * SECONDS_PER_SLOT
// # Add new block to the store
// store.blocks[signing_root(block)] = block
// # Check block is a descendant of the finalized block
// assert (
// get_ancestor(store, signing_root(block), store.blocks[store.finalized_checkpoint.root].slot) ==
// store.finalized_checkpoint.root
// )
// # Check that block is later than the finalized epoch slot
// assert block.slot > compute_start_slot_of_epoch(store.finalized_checkpoint.epoch)
// # Check the block is valid and compute the post-state
// state = state_transition(pre_state, block)
// # Add new state for this block to the store
// store.block_states[signing_root(block)] = state
//
// # Update justified checkpoint
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
//
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
func (s *Store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
defer span.End()
// Retrieve incoming block's pre state.
preState, err := s.getBlockPreState(ctx, b)
if err != nil {
return err
}
preStateValidatorCount := len(preState.Validators)
root, err := ssz.SigningRoot(b)
if err != nil {
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
}
log.WithFields(logrus.Fields{
"slot": b.Slot,
"root": fmt.Sprintf("0x%s...", hex.EncodeToString(root[:])[:8]),
}).Info("Executing state transition on block")
postState, err := state.ExecuteStateTransition(ctx, preState, b)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}
if err := s.updateBlockAttestationsVotes(ctx, b.Body.Attestations); err != nil {
return errors.Wrap(err, "could not update votes for attestations in block")
}
if err := s.db.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
}
if err := s.db.SaveState(ctx, postState, root); err != nil {
return errors.Wrap(err, "could not save state")
}
// Update justified check point.
if postState.CurrentJustifiedCheckpoint.Epoch > s.JustifiedCheckpt().Epoch {
s.justifiedCheckpt = postState.CurrentJustifiedCheckpoint
if err := s.db.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint); err != nil {
return errors.Wrap(err, "could not save justified checkpoint")
}
}
// Update finalized check point.
// Prune the block cache and helper caches on every new finalized epoch.
if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
s.clearSeenAtts()
helpers.ClearAllCaches()
if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
return errors.Wrap(err, "could not save finalized checkpoint")
}
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch) + 1
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot+params.BeaconConfig().SlotsPerEpoch)
}
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return errors.Wrap(err, "could not save finalized checkpoint")
}
// Save the unseen attestations from block to db.
if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
return errors.Wrap(err, "could not save attestations")
}
// Epoch boundary bookkeeping such as logging epoch summaries.
if helpers.IsEpochStart(postState.Slot) {
logEpochData(postState)
reportEpochMetrics(postState)
// Update committee shuffled indices at the end of every epoch
if featureconfig.Get().EnableNewCache {
if err := helpers.UpdateCommitteeCache(postState); err != nil {
return err
}
}
}
return nil
}
// OnBlockNoVerifyStateTransition is called when an initial sync block is received.
// It runs state transition on the block and without any BLS verification. The BLS verification
// includes proposer signature, randao and attestation's aggregated signature.
func (s *Store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
defer span.End()
// Retrieve incoming block's pre state.
preState, err := s.getBlockPreState(ctx, b)
if err != nil {
return err
}
preStateValidatorCount := len(preState.Validators)
log.WithField("slot", b.Slot).Debug("Executing state transition on block")
postState, err := state.ExecuteStateTransitionNoVerify(ctx, preState, b)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}
if err := s.db.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
}
root, err := ssz.SigningRoot(b)
if err != nil {
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
}
if err := s.db.SaveState(ctx, postState, root); err != nil {
return errors.Wrap(err, "could not save state")
}
// Update justified check point.
if postState.CurrentJustifiedCheckpoint.Epoch > s.JustifiedCheckpt().Epoch {
s.justifiedCheckpt = postState.CurrentJustifiedCheckpoint
if err := s.db.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint); err != nil {
return errors.Wrap(err, "could not save justified checkpoint")
}
}
// Update finalized check point.
// Prune the block cache and helper caches on every new finalized epoch.
if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
s.clearSeenAtts()
helpers.ClearAllCaches()
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch) + 1
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot+params.BeaconConfig().SlotsPerEpoch)
}
if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
return errors.Wrap(err, "could not save finalized checkpoint")
}
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return errors.Wrap(err, "could not save finalized checkpoint")
}
// Save the unseen attestations from block to db.
if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
return errors.Wrap(err, "could not save attestations")
}
// Epoch boundary bookkeeping such as logging epoch summaries.
if helpers.IsEpochStart(postState.Slot) {
reportEpochMetrics(postState)
// Update committee shuffled indices at the end of every epoch
if featureconfig.Get().EnableNewCache {
if err := helpers.UpdateCommitteeCache(postState); err != nil {
return err
}
}
}
return nil
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
func (s *Store) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "forkchoice.getBlockPreState")
defer span.End()
// Verify incoming block has a valid pre state.
preState, err := s.verifyBlkPreState(ctx, b)
if err != nil {
return nil, err
}
// Verify block slot time is not from the future.
if err := helpers.VerifySlotTime(preState.GenesisTime, b.Slot); err != nil {
return nil, err
}
// Verify block is a descendant of a finalized block.
if err := s.verifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot), b.Slot); err != nil {
return nil, err
}
// Verify block is later than the finalized epoch slot.
if err := s.verifyBlkFinalizedSlot(b); err != nil {
return nil, err
}
return preState, nil
}
// updateBlockAttestationsVotes checks the attestations in block and filter out the seen ones,
// the unseen ones get passed to updateBlockAttestationVote for updating fork choice votes.
func (s *Store) updateBlockAttestationsVotes(ctx context.Context, atts []*ethpb.Attestation) error {
s.seenAttsLock.Lock()
defer s.seenAttsLock.Unlock()
for _, att := range atts {
// If we have not seen the attestation yet
r, err := hashutil.HashProto(att)
if err != nil {
return err
}
if s.seenAtts[r] {
continue
}
if err := s.updateBlockAttestationVote(ctx, att); err != nil {
log.WithError(err).Warn("Attestation failed to update vote")
}
s.seenAtts[r] = true
}
return nil
}
// updateBlockAttestationVote checks the attestation to update validators' latest votes.
func (s *Store) updateBlockAttestationVote(ctx context.Context, att *ethpb.Attestation) error {
tgt := att.Data.Target
baseState, err := s.db.State(ctx, bytesutil.ToBytes32(tgt.Root))
if err != nil {
return errors.Wrap(err, "could not get state for attestation tgt root")
}
if baseState == nil {
return errors.New("no state found in db with attestation tgt root")
}
indexedAtt, err := blocks.ConvertToIndexed(ctx, baseState, att)
if err != nil {
return errors.Wrap(err, "could not convert attestation to indexed attestation")
}
for _, i := range append(indexedAtt.CustodyBit_0Indices, indexedAtt.CustodyBit_1Indices...) {
vote, err := s.db.ValidatorLatestVote(ctx, i)
if err != nil {
return errors.Wrapf(err, "could not get latest vote for validator %d", i)
}
if vote == nil || tgt.Epoch > vote.Epoch {
if err := s.db.SaveValidatorLatestVote(ctx, i, &pb.ValidatorLatestVote{
Epoch: tgt.Epoch,
Root: tgt.Root,
}); err != nil {
return errors.Wrapf(err, "could not save latest vote for validator %d", i)
}
}
}
return nil
}
// verifyBlkPreState validates input block has a valid pre-state.
func (s *Store) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
preState, err := s.db.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
if preState == nil {
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
}
return preState, nil
}
// verifyBlkDescendant validates input block root is a descendant of the
// current finalized block root.
func (s *Store) verifyBlkDescendant(ctx context.Context, root [32]byte, slot uint64) error {
ctx, span := trace.StartSpan(ctx, "forkchoice.verifyBlkDescendant")
defer span.End()
finalizedBlk, err := s.db.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
if err != nil || finalizedBlk == nil {
return errors.Wrap(err, "could not get finalized block")
}
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
if err != nil {
return errors.Wrap(err, "could not get finalized block root")
}
if !bytes.Equal(bFinalizedRoot, s.finalizedCheckpt.Root) {
err := fmt.Errorf("block from slot %d is not a descendent of the current finalized block slot %d, %#x != %#x",
slot, finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot), bytesutil.Trunc(s.finalizedCheckpt.Root))
traceutil.AnnotateError(span, err)
return err
}
return nil
}
// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Store) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
finalizedSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if finalizedSlot >= b.Slot {
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot, finalizedSlot)
}
return nil
}
// saveNewValidators saves newly added validator index from state to db. Does nothing if validator count has not
// changed.
func (s *Store) saveNewValidators(ctx context.Context, preStateValidatorCount int, postState *pb.BeaconState) error {
postStateValidatorCount := len(postState.Validators)
if preStateValidatorCount != postStateValidatorCount {
for i := preStateValidatorCount; i < postStateValidatorCount; i++ {
pubKey := postState.Validators[i].PublicKey
if err := s.db.SaveValidatorIndex(ctx, bytesutil.ToBytes48(pubKey), uint64(i)); err != nil {
return errors.Wrapf(err, "could not save activated validator: %d", i)
}
log.WithFields(logrus.Fields{
"index": i,
"pubKey": hex.EncodeToString(bytesutil.Trunc(pubKey)),
"totalValidatorCount": i + 1,
}).Info("New validator index saved in DB")
}
}
return nil
}
// saveNewBlockAttestations saves the new attestations in block to DB.
func (s *Store) saveNewBlockAttestations(ctx context.Context, atts []*ethpb.Attestation) error {
attestations := make([]*ethpb.Attestation, 0, len(atts))
for _, att := range atts {
aggregated, err := s.aggregatedAttestations(ctx, att)
if err != nil {
continue
}
attestations = append(attestations, aggregated...)
}
if err := s.db.SaveAttestations(ctx, atts); err != nil {
return err
}
return nil
}
// clearSeenAtts clears seen attestations map, it gets called upon new finalization.
func (s *Store) clearSeenAtts() {
s.seenAttsLock.Lock()
defer s.seenAttsLock.Unlock()
s.seenAtts = make(map[[32]byte]bool)
}
// rmStatesOlderThanLastFinalized deletes the states in db since last finalized check point.
func (s *Store) rmStatesOlderThanLastFinalized(ctx context.Context, startSlot uint64, endSlot uint64) error {
ctx, span := trace.StartSpan(ctx, "forkchoice.rmStatesBySlots")
defer span.End()
if !featureconfig.Get().PruneFinalizedStates {
return nil
}
// Make sure finalized slot is not a skipped slot.
for i := endSlot; i > 0; i-- {
filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
b, err := s.db.Blocks(ctx, filter)
if err != nil {
return err
}
if len(b) > 0 {
endSlot = i - 1
break
}
}
// Do not remove genesis state
if startSlot == 0 {
startSlot++
}
// Do not remove finalized state that's in the middle of slot ranges.
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
roots, err := s.db.BlockRoots(ctx, filter)
if err != nil {
return err
}
if err := s.db.DeleteStates(ctx, roots); err != nil {
return err
}
return nil
}
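
To make the pruning window concrete (hypothetical numbers, assuming 32 slots per epoch and the PruneFinalizedStates feature flag enabled): a call with startSlot = 0 and endSlot = 160 first walks endSlot back below the highest non-skipped slot at or before 160, so the state at the finalized boundary itself is kept, then bumps startSlot to 1 so the genesis state is never deleted, and finally deletes the saved states of all blocks whose slots fall in the remaining range.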

View File

@@ -1,343 +0,0 @@
package forkchoice
import (
"context"
"reflect"
"strings"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func init() {
fc := featureconfig.Get()
fc.PruneFinalizedStates = true
featureconfig.Init(fc)
}
func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
roots, err := blockTree1(db)
if err != nil {
t.Fatal(err)
}
randomParentRoot := []byte{'a'}
if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(randomParentRoot)); err != nil {
t.Fatal(err)
}
randomParentRoot2 := roots[1]
if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(randomParentRoot2)); err != nil {
t.Fatal(err)
}
validGenesisRoot := []byte{'g'}
if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(validGenesisRoot)); err != nil {
t.Fatal(err)
}
tests := []struct {
name string
blk *ethpb.BeaconBlock
s *pb.BeaconState
time uint64
wantErrString string
}{
{
name: "parent block root does not have a state",
blk: &ethpb.BeaconBlock{},
s: &pb.BeaconState{},
wantErrString: "pre state of slot 0 does not exist",
},
{
name: "block is from the feature",
blk: &ethpb.BeaconBlock{ParentRoot: randomParentRoot, Slot: params.BeaconConfig().FarFutureEpoch},
s: &pb.BeaconState{},
wantErrString: "could not process slot from the future",
},
{
name: "could not get finalized block",
blk: &ethpb.BeaconBlock{ParentRoot: randomParentRoot},
s: &pb.BeaconState{},
wantErrString: "block from slot 0 is not a descendent of the current finalized block",
},
{
name: "same slot as finalized block",
blk: &ethpb.BeaconBlock{Slot: 0, ParentRoot: randomParentRoot2},
s: &pb.BeaconState{},
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
t.Fatal(err)
}
store.finalizedCheckpt.Root = roots[0]
err := store.OnBlock(ctx, tt.blk)
if !strings.Contains(err.Error(), tt.wantErrString) {
t.Errorf("Store.OnBlock() error = %v, wantErr = %v", err, tt.wantErrString)
}
})
}
}
func TestStore_SaveNewValidators(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
preCount := 2 // validators 0 and validators 1
s := &pb.BeaconState{Validators: []*ethpb.Validator{
{PublicKey: []byte{0}}, {PublicKey: []byte{1}},
{PublicKey: []byte{2}}, {PublicKey: []byte{3}},
}}
if err := store.saveNewValidators(ctx, preCount, s); err != nil {
t.Fatal(err)
}
if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{2})) {
t.Error("Wanted validator saved in db")
}
if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{3})) {
t.Error("Wanted validator saved in db")
}
if db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{1})) {
t.Error("validator not suppose to be saved in db")
}
}
func TestStore_UpdateBlockAttestationVote(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
params.UseMinimalConfig()
deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
beaconState, err := state.GenesisBeaconState(deposits, uint64(0), &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
if err != nil {
t.Fatal(err)
}
store := NewForkChoiceService(ctx, db)
r := [32]byte{'A'}
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: r[:]},
},
AggregationBits: []byte{255},
CustodyBits: []byte{255},
}
if err := store.db.SaveState(ctx, beaconState, r); err != nil {
t.Fatal(err)
}
indices, err := blocks.ConvertToIndexed(ctx, beaconState, att)
if err != nil {
t.Fatal(err)
}
var attestedIndices []uint64
for _, k := range append(indices.CustodyBit_0Indices, indices.CustodyBit_1Indices...) {
attestedIndices = append(attestedIndices, k)
}
if err := store.updateBlockAttestationVote(ctx, att); err != nil {
t.Fatal(err)
}
for _, i := range attestedIndices {
v, err := store.db.ValidatorLatestVote(ctx, i)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(v.Root, r[:]) {
t.Error("Attested roots don't match")
}
}
}
func TestStore_UpdateBlockAttestationsVote(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
params.UseMinimalConfig()
deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
beaconState, err := state.GenesisBeaconState(deposits, uint64(0), &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
if err != nil {
t.Fatal(err)
}
store := NewForkChoiceService(ctx, db)
r := [32]byte{'A'}
atts := make([]*ethpb.Attestation, 5)
hashes := make([][32]byte, 5)
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: r[:]},
},
AggregationBits: []byte{255},
CustodyBits: []byte{255},
}
h, _ := hashutil.HashProto(atts[i])
hashes[i] = h
}
if err := store.db.SaveState(ctx, beaconState, r); err != nil {
t.Fatal(err)
}
if err := store.updateBlockAttestationsVotes(ctx, atts); err != nil {
t.Fatal(err)
}
for _, h := range hashes {
if !store.seenAtts[h] {
t.Error("Seen attestation did not get recorded")
}
}
}
func TestStore_SavesNewBlockAttestations(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
a1 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b101}, CustodyBits: bitfield.NewBitlist(2)}
a2 := &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b110}, CustodyBits: bitfield.NewBitlist(2)}
r1, _ := ssz.HashTreeRoot(a1.Data)
r2, _ := ssz.HashTreeRoot(a2.Data)
if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
t.Fatal(err)
}
saved, err := store.db.AttestationsByDataRoot(ctx, r1)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
t.Error("did not retrieve saved attestation")
}
saved, err = store.db.AttestationsByDataRoot(ctx, r2)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
t.Error("did not retrieve saved attestation")
}
a1 = &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b111}, CustodyBits: bitfield.NewBitlist(2)}
a2 = &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b111}, CustodyBits: bitfield.NewBitlist(2)}
if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
t.Fatal(err)
}
saved, err = store.db.AttestationsByDataRoot(ctx, r1)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
t.Error("did not retrieve saved attestation")
}
saved, err = store.db.AttestationsByDataRoot(ctx, r2)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
t.Error("did not retrieve saved attestation")
}
}
func TestRemoveStateSinceLastFinalized(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
store := NewForkChoiceService(ctx, db)
// Save 100 blocks in DB, each has a state.
numBlocks := 100
totalBlocks := make([]*ethpb.BeaconBlock, numBlocks)
blockRoots := make([][32]byte, 0)
for i := 0; i < len(totalBlocks); i++ {
totalBlocks[i] = &ethpb.BeaconBlock{
Slot: uint64(i),
}
r, err := ssz.SigningRoot(totalBlocks[i])
if err != nil {
t.Fatal(err)
}
if err := store.db.SaveState(ctx, &pb.BeaconState{Slot: uint64(i)}, r); err != nil {
t.Fatal(err)
}
if err := store.db.SaveBlock(ctx, totalBlocks[i]); err != nil {
t.Fatal(err)
}
blockRoots = append(blockRoots, r)
}
// New finalized epoch: 1
finalizedEpoch := uint64(1)
finalizedSlot := finalizedEpoch * params.BeaconConfig().SlotsPerEpoch
endSlot := helpers.StartSlot(finalizedEpoch+1) - 1 // Inclusive
if err := store.rmStatesOlderThanLastFinalized(ctx, 0, endSlot); err != nil {
t.Fatal(err)
}
for _, r := range blockRoots {
s, err := store.db.State(ctx, r)
if err != nil {
t.Fatal(err)
}
// Also verifies genesis state didn't get deleted
if s != nil && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
t.Errorf("State with slot %d should not be in DB", s.Slot)
}
}
// New finalized epoch: 5
newFinalizedEpoch := uint64(5)
newFinalizedSlot := newFinalizedEpoch * params.BeaconConfig().SlotsPerEpoch
endSlot = helpers.StartSlot(newFinalizedEpoch+1) - 1 // Inclusive
if err := store.rmStatesOlderThanLastFinalized(ctx, helpers.StartSlot(finalizedEpoch+1)-1, endSlot); err != nil {
t.Fatal(err)
}
for _, r := range blockRoots {
s, err := store.db.State(ctx, r)
if err != nil {
t.Fatal(err)
}
// Also verifies genesis state didn't get deleted
if s != nil && s.Slot != newFinalizedSlot && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
t.Errorf("State with slot %d should not be in DB", s.Slot)
}
}
}

View File

@@ -1,257 +0,0 @@
package forkchoice
import (
"bytes"
"context"
"sync"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
// ForkChoicer defines a common interface for methods useful for directly applying fork choice
// to beacon blocks to compute head.
type ForkChoicer interface {
Head(ctx context.Context) ([]byte, error)
OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error
OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error
OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error)
GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error
FinalizedCheckpt() *ethpb.Checkpoint
}
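
A minimal sketch of how a caller might drive this interface (hypothetical wiring that reuses the imports already present in this file; receiveBlock is not a function of this package): apply the incoming block, then recompute the canonical head.

// receiveBlock illustrates one plausible call sequence against ForkChoicer.
func receiveBlock(ctx context.Context, fc ForkChoicer, b *ethpb.BeaconBlock) ([]byte, error) {
	if err := fc.OnBlock(ctx, b); err != nil {
		return nil, errors.Wrap(err, "could not process block for fork choice")
	}
	head, err := fc.Head(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "could not compute head")
	}
	return head, nil
}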
// Store represents a service struct that handles the forkchoice
// logic of managing the full PoS beacon chain.
type Store struct {
ctx context.Context
cancel context.CancelFunc
db db.Database
justifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
prevFinalizedCheckpt *ethpb.Checkpoint
checkpointState *cache.CheckpointStateCache
checkpointStateLock sync.Mutex
attsQueue map[[32]byte]*ethpb.Attestation
attsQueueLock sync.Mutex
seenAtts map[[32]byte]bool
seenAttsLock sync.Mutex
}
// NewForkChoiceService instantiates a new service instance that will
// be registered into a running beacon node.
func NewForkChoiceService(ctx context.Context, db db.Database) *Store {
ctx, cancel := context.WithCancel(ctx)
return &Store{
ctx: ctx,
cancel: cancel,
db: db,
checkpointState: cache.NewCheckpointStateCache(),
attsQueue: make(map[[32]byte]*ethpb.Attestation),
seenAtts: make(map[[32]byte]bool),
}
}
// GenesisStore initializes the store struct before beacon chain
// starts to advance.
//
// Spec pseudocode definition:
// def get_genesis_store(genesis_state: BeaconState) -> Store:
// genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))
// root = signing_root(genesis_block)
// justified_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
// finalized_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
// return Store(
// time=genesis_state.genesis_time,
// justified_checkpoint=justified_checkpoint,
// finalized_checkpoint=finalized_checkpoint,
// blocks={root: genesis_block},
// block_states={root: genesis_state.copy()},
// checkpoint_states={justified_checkpoint: genesis_state.copy()},
// )
func (s *Store) GenesisStore(
ctx context.Context,
justifiedCheckpoint *ethpb.Checkpoint,
finalizedCheckpoint *ethpb.Checkpoint) error {
s.justifiedCheckpt = proto.Clone(justifiedCheckpoint).(*ethpb.Checkpoint)
s.finalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)
s.prevFinalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)
justifiedState, err := s.db.State(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if err != nil {
return errors.Wrap(err, "could not retrieve last justified state")
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: s.justifiedCheckpt,
State: justifiedState,
}); err != nil {
return errors.Wrap(err, "could not save genesis state in check point cache")
}
return nil
}
// ancestor returns the block root of an ancestry block from the input block root.
//
// Spec pseudocode definition:
// def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
// block = store.blocks[root]
// if block.slot > slot:
// return get_ancestor(store, block.parent_root, slot)
// elif block.slot == slot:
// return root
// else:
// return Bytes32() # root is older than queried slot: no results.
func (s *Store) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "forkchoice.ancestor")
defer span.End()
b, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor block")
}
// If we don't have the ancestor in the DB, simply return nil so the rest of the fork choice
// operation can proceed. This is not an error condition.
if b == nil || b.Slot < slot {
return nil, nil
}
if b.Slot == slot {
return root, nil
}
return s.ancestor(ctx, b.ParentRoot, slot)
}
// latestAttestingBalance returns the staked balance of a block from the input block root.
//
// Spec pseudocode definition:
// def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
// state = store.checkpoint_states[store.justified_checkpoint]
// active_indices = get_active_validator_indices(state, get_current_epoch(state))
// return Gwei(sum(
// state.validators[i].effective_balance for i in active_indices
// if (i in store.latest_messages
// and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
// ))
func (s *Store) latestAttestingBalance(ctx context.Context, root []byte) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "forkchoice.latestAttestingBalance")
defer span.End()
lastJustifiedState, err := s.checkpointState.StateByCheckpoint(s.JustifiedCheckpt())
if err != nil {
return 0, errors.Wrap(err, "could not retrieve cached state via last justified check point")
}
if lastJustifiedState == nil {
return 0, errors.Wrapf(err, "could not get justified state at epoch %d", s.JustifiedCheckpt().Epoch)
}
lastJustifiedEpoch := helpers.CurrentEpoch(lastJustifiedState)
activeIndices, err := helpers.ActiveValidatorIndices(lastJustifiedState, lastJustifiedEpoch)
if err != nil {
return 0, errors.Wrap(err, "could not get active indices for last justified checkpoint")
}
wantedBlk, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
if err != nil {
return 0, errors.Wrap(err, "could not get target block")
}
balances := uint64(0)
for _, i := range activeIndices {
vote, err := s.db.ValidatorLatestVote(ctx, i)
if err != nil {
return 0, errors.Wrapf(err, "could not get validator %d's latest vote", i)
}
if vote == nil {
continue
}
wantedRoot, err := s.ancestor(ctx, vote.Root, wantedBlk.Slot)
if err != nil {
return 0, errors.Wrapf(err, "could not get ancestor root for slot %d", wantedBlk.Slot)
}
if bytes.Equal(wantedRoot, root) {
balances += lastJustifiedState.Validators[i].EffectiveBalance
}
}
return balances, nil
}
// Head returns the head of the beacon chain.
//
// Spec pseudocode definition:
// def get_head(store: Store) -> Hash:
// # Execute the LMD-GHOST fork choice
// head = store.justified_checkpoint.root
// justified_slot = compute_start_slot_of_epoch(store.justified_checkpoint.epoch)
// while True:
// children = [
// root for root in store.blocks.keys()
// if store.blocks[root].parent_root == head and store.blocks[root].slot > justified_slot
// ]
// if len(children) == 0:
// return head
// # Sort by latest attesting balance with ties broken lexicographically
// head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root))
func (s *Store) Head(ctx context.Context) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "forkchoice.head")
defer span.End()
head := s.JustifiedCheckpt().Root
for {
startSlot := s.JustifiedCheckpt().Epoch * params.BeaconConfig().SlotsPerEpoch
filter := filters.NewFilter().SetParentRoot(head).SetStartSlot(startSlot)
children, err := s.db.BlockRoots(ctx, filter)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve children info")
}
if len(children) == 0 {
return head, nil
}
// If a block has one child, then we don't have to look up anything to
// know that this child will be the best child.
head = children[0][:]
if len(children) > 1 {
highest, err := s.latestAttestingBalance(ctx, head)
if err != nil {
return nil, errors.Wrap(err, "could not get latest balance")
}
for _, child := range children[1:] {
balance, err := s.latestAttestingBalance(ctx, child[:])
if err != nil {
return nil, errors.Wrap(err, "could not get latest balance")
}
// When there's a tie, it's broken lexicographically in favor of the higher root.
if balance > highest ||
balance == highest && bytes.Compare(child[:], head) > 0 {
highest = balance
head = child[:]
}
}
}
}
}
// JustifiedCheckpt returns the latest justified check point from fork choice store.
func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
return proto.Clone(s.justifiedCheckpt).(*ethpb.Checkpoint)
}
// FinalizedCheckpt returns the latest finalized check point from fork choice store.
func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
return proto.Clone(s.finalizedCheckpt).(*ethpb.Checkpoint)
}
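
A minimal usage sketch of the store above (illustrative only, not part of the diff; it mirrors the setup used by TestStore_GenesisStoreOk further down): save a genesis state/block pair, seed the store with a genesis checkpoint, then ask it for the LMD-GHOST head. Every helper referenced here also appears elsewhere in this diff.

package forkchoice
import (
"context"
"testing"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)
// TestStore_UsageSketch is illustrative only: it saves a genesis state/block pair,
// initializes the store from that checkpoint, and computes the head.
func TestStore_UsageSketch(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
genesisState := &pb.BeaconState{}
stateRoot, err := ssz.HashTreeRoot(genesisState)
if err != nil {
t.Fatal(err)
}
genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
genesisRoot, err := ssz.SigningRoot(genesisBlk)
if err != nil {
t.Fatal(err)
}
if err := db.SaveBlock(ctx, genesisBlk); err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, genesisState, genesisRoot); err != nil {
t.Fatal(err)
}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
if err := store.GenesisStore(ctx, cp, cp); err != nil {
t.Fatal(err)
}
// With no competing children saved, Head returns the justified root itself.
head, err := store.Head(ctx)
if err != nil {
t.Fatal(err)
}
_ = head
}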

View File

@@ -1,346 +0,0 @@
package forkchoice
import (
"bytes"
"context"
"reflect"
"testing"
"time"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
)
func TestStore_GenesisStoreOk(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
genesisTime := time.Unix(9999, 0)
genesisState := &pb.BeaconState{GenesisTime: uint64(genesisTime.Unix())}
genesisStateRoot, err := ssz.HashTreeRoot(genesisState)
if err != nil {
t.Fatal(err)
}
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
genesisBlkRoot, err := ssz.SigningRoot(genesisBlk)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
t.Fatal(err)
}
checkPoint := &ethpb.Checkpoint{Root: genesisBlkRoot[:]}
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(store.justifiedCheckpt, checkPoint) {
t.Error("Justified check point from genesis store did not match")
}
if !reflect.DeepEqual(store.finalizedCheckpt, checkPoint) {
t.Error("Finalized check point from genesis store did not match")
}
cachedState, err := store.checkpointState.StateByCheckpoint(checkPoint)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(cachedState, genesisState) {
t.Error("Incorrect genesis state cached")
}
}
func TestStore_AncestorOk(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
roots, err := blockTree1(db)
if err != nil {
t.Fatal(err)
}
type args struct {
root []byte
slot uint64
}
// /- B1
// B0 /- B5 - B7
// \- B3 - B4 - B6 - B8
tests := []struct {
args *args
want []byte
}{
{args: &args{roots[1], 0}, want: roots[0]},
{args: &args{roots[8], 0}, want: roots[0]},
{args: &args{roots[8], 4}, want: roots[4]},
{args: &args{roots[7], 4}, want: roots[4]},
{args: &args{roots[7], 0}, want: roots[0]},
}
for _, tt := range tests {
got, err := store.ancestor(ctx, tt.args.root, tt.args.slot)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("Store.ancestor(ctx, ) = %v, want %v", got, tt.want)
}
}
}
func TestStore_AncestorNotPartOfTheChain(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
roots, err := blockTree1(db)
if err != nil {
t.Fatal(err)
}
// /- B1
// B0 /- B5 - B7
// \- B3 - B4 - B6 - B8
root, err := store.ancestor(ctx, roots[8], 1)
if err != nil {
t.Fatal(err)
}
if root != nil {
t.Error("block at slot 1 is not part of the chain")
}
root, err = store.ancestor(ctx, roots[8], 2)
if err != nil {
t.Fatal(err)
}
if root != nil {
t.Error("block at slot 2 is not part of the chain")
}
}
func TestStore_LatestAttestingBalance(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
roots, err := blockTree1(db)
if err != nil {
t.Fatal(err)
}
validators := make([]*ethpb.Validator, 100)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
}
s := &pb.BeaconState{Validators: validators}
stateRoot, err := ssz.HashTreeRoot(s)
if err != nil {
t.Fatal(err)
}
b := blocks.NewGenesisBlock(stateRoot[:])
blkRoot, err := ssz.SigningRoot(b)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, s, blkRoot); err != nil {
t.Fatal(err)
}
checkPoint := &ethpb.Checkpoint{Root: blkRoot[:]}
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
t.Fatal(err)
}
// /- B1 (33 votes)
// B0 /- B5 - B7 (33 votes)
// \- B3 - B4 - B6 - B8 (34 votes)
for i := 0; i < len(validators); i++ {
switch {
case i < 33:
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
t.Fatal(err)
}
case i > 66:
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
t.Fatal(err)
}
default:
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
t.Fatal(err)
}
}
}
tests := []struct {
root []byte
want uint64
}{
{root: roots[0], want: 100 * 1e9},
{root: roots[1], want: 33 * 1e9},
{root: roots[3], want: 67 * 1e9},
{root: roots[4], want: 67 * 1e9},
{root: roots[7], want: 33 * 1e9},
{root: roots[8], want: 34 * 1e9},
}
for _, tt := range tests {
got, err := store.latestAttestingBalance(ctx, tt.root)
if err != nil {
t.Fatal(err)
}
if got != tt.want {
t.Errorf("Store.latestAttestingBalance(ctx, ) = %v, want %v", got, tt.want)
}
}
}
func TestStore_ChildrenBlocksFromParentRoot(t *testing.T) {
helpers.ClearAllCaches()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
roots, err := blockTree1(db)
if err != nil {
t.Fatal(err)
}
filter := filters.NewFilter().SetParentRoot(roots[0]).SetStartSlot(0)
children, err := store.db.BlockRoots(ctx, filter)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(children, [][32]byte{bytesutil.ToBytes32(roots[1]), bytesutil.ToBytes32(roots[3])}) {
t.Error("Did not receive correct children roots")
}
filter = filters.NewFilter().SetParentRoot(roots[0]).SetStartSlot(2)
children, err = store.db.BlockRoots(ctx, filter)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(children, [][32]byte{bytesutil.ToBytes32(roots[3])}) {
t.Error("Did not receive correct children roots")
}
}
func TestStore_GetHead(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
store := NewForkChoiceService(ctx, db)
roots, err := blockTree1(db)
if err != nil {
t.Fatal(err)
}
validators := make([]*ethpb.Validator, 100)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
}
s := &pb.BeaconState{Validators: validators}
stateRoot, err := ssz.HashTreeRoot(s)
if err != nil {
t.Fatal(err)
}
b := blocks.NewGenesisBlock(stateRoot[:])
blkRoot, err := ssz.SigningRoot(b)
if err != nil {
t.Fatal(err)
}
checkPoint := &ethpb.Checkpoint{Root: blkRoot[:]}
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
t.Fatal(err)
}
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
t.Fatal(err)
}
store.justifiedCheckpt.Root = roots[0]
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: store.justifiedCheckpt,
State: s,
}); err != nil {
t.Fatal(err)
}
// /- B1 (33 votes)
// B0 /- B5 - B7 (33 votes)
// \- B3 - B4 - B6 - B8 (34 votes)
for i := 0; i < len(validators); i++ {
switch {
case i < 33:
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
t.Fatal(err)
}
case i > 66:
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
t.Fatal(err)
}
default:
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
t.Fatal(err)
}
}
}
// Default head is B8
head, err := store.Head(ctx)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(head, roots[8]) {
t.Error("Incorrect head")
}
// 1 validator switches vote to B7 to gain 34%, enough to switch head
if err := store.db.SaveValidatorLatestVote(ctx, 50, &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
t.Fatal(err)
}
head, err = store.Head(ctx)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(head, roots[7]) {
t.Error("Incorrect head")
}
// 18 validators switch their votes to B1 to gain 51%, enough to switch head
for i := 0; i < 18; i++ {
idx := 50 + uint64(i)
if err := store.db.SaveValidatorLatestVote(ctx, idx, &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
t.Fatal(err)
}
}
head, err = store.Head(ctx)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(head, roots[1]) {
t.Log(head)
t.Error("Incorrect head")
}
}

View File

@@ -1,144 +0,0 @@
package forkchoice
import (
"context"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
)
// blockTree1 constructs the following tree:
// /- B1
// B0 /- B5 - B7
// \- B3 - B4 - B6 - B8
// (B1 and B3 are both children of B0)
func blockTree1(db db.Database) ([][]byte, error) {
b0 := &ethpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
r0, _ := ssz.SigningRoot(b0)
b1 := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
r1, _ := ssz.SigningRoot(b1)
b3 := &ethpb.BeaconBlock{Slot: 3, ParentRoot: r0[:]}
r3, _ := ssz.SigningRoot(b3)
b4 := &ethpb.BeaconBlock{Slot: 4, ParentRoot: r3[:]}
r4, _ := ssz.SigningRoot(b4)
b5 := &ethpb.BeaconBlock{Slot: 5, ParentRoot: r4[:]}
r5, _ := ssz.SigningRoot(b5)
b6 := &ethpb.BeaconBlock{Slot: 6, ParentRoot: r4[:]}
r6, _ := ssz.SigningRoot(b6)
b7 := &ethpb.BeaconBlock{Slot: 7, ParentRoot: r5[:]}
r7, _ := ssz.SigningRoot(b7)
b8 := &ethpb.BeaconBlock{Slot: 8, ParentRoot: r6[:]}
r8, _ := ssz.SigningRoot(b8)
for _, b := range []*ethpb.BeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
if err := db.SaveBlock(context.Background(), b); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
return nil, err
}
}
return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
}
// blockTree2 constructs the following tree:
// Scenario graph: shorturl.at/loyP6
//
//digraph G {
// rankdir=LR;
// node [shape="none"];
//
// subgraph blocks {
// rankdir=LR;
// node [shape="box"];
// a->b;
// a->c;
// b->d;
// b->e;
// c->f;
// c->g;
// d->h
// d->i
// d->j
// d->k
// h->l
// h->m
// g->n
// g->o
// e->p
// }
//}
func blockTree2(db db.Database) ([][]byte, error) {
b0 := &ethpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
r0, _ := ssz.SigningRoot(b0)
b1 := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
r1, _ := ssz.SigningRoot(b1)
b2 := &ethpb.BeaconBlock{Slot: 2, ParentRoot: r0[:]}
r2, _ := ssz.SigningRoot(b2)
b3 := &ethpb.BeaconBlock{Slot: 3, ParentRoot: r1[:]}
r3, _ := ssz.SigningRoot(b3)
b4 := &ethpb.BeaconBlock{Slot: 4, ParentRoot: r1[:]}
r4, _ := ssz.SigningRoot(b4)
b5 := &ethpb.BeaconBlock{Slot: 5, ParentRoot: r2[:]}
r5, _ := ssz.SigningRoot(b5)
b6 := &ethpb.BeaconBlock{Slot: 6, ParentRoot: r2[:]}
r6, _ := ssz.SigningRoot(b6)
b7 := &ethpb.BeaconBlock{Slot: 7, ParentRoot: r3[:]}
r7, _ := ssz.SigningRoot(b7)
b8 := &ethpb.BeaconBlock{Slot: 8, ParentRoot: r3[:]}
r8, _ := ssz.SigningRoot(b8)
b9 := &ethpb.BeaconBlock{Slot: 9, ParentRoot: r3[:]}
r9, _ := ssz.SigningRoot(b9)
b10 := &ethpb.BeaconBlock{Slot: 10, ParentRoot: r3[:]}
r10, _ := ssz.SigningRoot(b10)
b11 := &ethpb.BeaconBlock{Slot: 11, ParentRoot: r4[:]}
r11, _ := ssz.SigningRoot(b11)
b12 := &ethpb.BeaconBlock{Slot: 12, ParentRoot: r6[:]}
r12, _ := ssz.SigningRoot(b12)
b13 := &ethpb.BeaconBlock{Slot: 13, ParentRoot: r6[:]}
r13, _ := ssz.SigningRoot(b13)
b14 := &ethpb.BeaconBlock{Slot: 14, ParentRoot: r7[:]}
r14, _ := ssz.SigningRoot(b14)
b15 := &ethpb.BeaconBlock{Slot: 15, ParentRoot: r7[:]}
r15, _ := ssz.SigningRoot(b15)
for _, b := range []*ethpb.BeaconBlock{b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} {
if err := db.SaveBlock(context.Background(), b); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
return nil, err
}
}
return [][]byte{r0[:], r1[:], r2[:], r3[:], r4[:], r5[:], r6[:], r7[:], r8[:], r9[:], r10[:], r11[:], r12[:], r13[:], r14[:], r15[:]}, nil
}
// blockTree3 constructs a tree that is 512 blocks in a row.
// B0 - B1 - B2 - B3 - .... - B512
func blockTree3(db db.Database) ([][]byte, error) {
blkCount := 512
roots := make([][]byte, 0, blkCount)
blks := make([]*ethpb.BeaconBlock, 0, blkCount)
b0 := &ethpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
r0, _ := ssz.SigningRoot(b0)
roots = append(roots, r0[:])
blks = append(blks, b0)
for i := 1; i < blkCount; i++ {
b := &ethpb.BeaconBlock{Slot: uint64(i), ParentRoot: roots[len(roots)-1]}
r, _ := ssz.SigningRoot(b)
roots = append(roots, r[:])
blks = append(blks, b)
}
for _, b := range blks {
if err := db.SaveBlock(context.Background(), b); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
return nil, err
}
}
return roots, nil
}

View File

@@ -0,0 +1,192 @@
package blockchain
import (
"context"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
// This defines the current chain service's view of head.
type head struct {
slot uint64 // current head slot.
root [32]byte // current head root.
block *ethpb.SignedBeaconBlock // current head block.
state *state.BeaconState // current head state.
}
// This gets head from the fork choice service and saves head related items
// (ie root, block, state) to the local service cache.
func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
ctx, span := trace.StartSpan(ctx, "blockchain.updateHead")
defer span.End()
// To get the proper head update, a node first checks whether its best justified checkpoint
// can become the justified checkpoint. This is designed to prevent bounce attacks and
// to ensure the head is computed with the best justified info.
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = s.bestJustifiedCheckpt
}
// Get head from the fork choice service.
f := s.finalizedCheckpt
j := s.justifiedCheckpt
headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, bytesutil.ToBytes32(j.Root), balances, f.Epoch)
if err != nil {
return err
}
// Save head to the local service cache.
return s.saveHead(ctx, headRoot)
}
// This saves head info to the local service cache; it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockchain.saveHead")
defer span.End()
// Do nothing if head hasn't changed.
if headRoot == s.headRoot() {
return nil
}
// If the head state is not available, just return nil.
// There's nothing to cache
_, cached := s.initSyncState[headRoot]
if !cached && !s.beaconDB.HasState(ctx, headRoot) {
return nil
}
// Get the new head block from DB.
newHeadBlock, err := s.beaconDB.Block(ctx, headRoot)
if err != nil {
return err
}
if newHeadBlock == nil || newHeadBlock.Block == nil {
return errors.New("cannot save nil head block")
}
// Get the new head state from cached state or DB.
var newHeadState *state.BeaconState
var exists bool
newHeadState, exists = s.initSyncState[headRoot]
if !exists {
newHeadState, err = s.beaconDB.State(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil {
return errors.New("cannot save nil head state")
}
}
if newHeadState == nil {
return errors.New("cannot save nil head state")
}
// Cache the new head info.
s.setHead(headRoot, newHeadBlock, newHeadState)
// Save the new head root to DB.
if err := s.beaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
return errors.Wrap(err, "could not save head root in DB")
}
return nil
}
// This gets called to update the canonical root mapping. It does not save the head block
// root in DB. With the inception of the initial-sync-cache-state flag, finalized
// checkpoints are used as anchors to resume sync, so the head no longer needs to be saved on a per-slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte) error {
if b == nil || b.Block == nil {
return errors.New("cannot save nil head block")
}
headState, err := s.beaconDB.State(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if headState == nil {
s.initSyncStateLock.RLock()
cachedHeadState, ok := s.initSyncState[r]
if ok {
headState = cachedHeadState
}
s.initSyncStateLock.RUnlock()
}
if headState == nil {
return errors.New("nil head state")
}
s.setHead(r, stateTrie.CopySignedBeaconBlock(b), headState)
return nil
}
// This sets head view object which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *state.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()
// This does a full copy of the block and state.
s.head = &head{
slot: block.Block.Slot,
root: root,
block: stateTrie.CopySignedBeaconBlock(block),
state: state.Copy(),
}
}
// This returns the head slot.
func (s *Service) headSlot() uint64 {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.head.slot
}
// This returns the head root.
// It does a full copy on head root for immutability.
func (s *Service) headRoot() [32]byte {
if s.head == nil {
return params.BeaconConfig().ZeroHash
}
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.head.root
}
// This returns the head block.
// It does a full copy on head block for immutability.
func (s *Service) headBlock() *ethpb.SignedBeaconBlock {
s.headLock.RLock()
defer s.headLock.RUnlock()
return stateTrie.CopySignedBeaconBlock(s.head.block)
}
// This returns the head state.
// It does a full copy on head state for immutability.
func (s *Service) headState() *state.BeaconState {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.head.state.Copy()
}
// Returns true if head state exists.
func (s *Service) hasHeadState() bool {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.head != nil && s.head.state != nil
}
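
The accessors above copy on read; a short illustrative sketch (hypothetical helper, not part of the diff) of what that contract buys callers: mutating the returned value never changes what setHead cached.

package blockchain
// exampleScratchHeadBlock is illustrative only: headBlock() hands back a full copy,
// so scratch mutations on the result leave the cached head untouched.
func (s *Service) exampleScratchHeadBlock() uint64 {
blk := s.headBlock() // deep copy of the cached head block
blk.Block.Slot++ // mutate the copy only
// s.head.block is unchanged; the next headBlock() call still reflects the
// slot stored by setHead.
return blk.Block.Slot
}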

View File

@@ -0,0 +1,72 @@
package blockchain
import (
"bytes"
"context"
"reflect"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
func TestSaveHead_Same(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := setupBeaconChain(t, db)
r := [32]byte{'A'}
service.head = &head{slot: 0, root: r}
if err := service.saveHead(context.Background(), r); err != nil {
t.Fatal(err)
}
if service.headSlot() != 0 {
t.Error("Head did not stay the same")
}
if service.headRoot() != r {
t.Error("Head did not stay the same")
}
}
func TestSaveHead_Different(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := setupBeaconChain(t, db)
oldRoot := [32]byte{'A'}
service.head = &head{slot: 0, root: oldRoot}
newHeadBlock := &ethpb.BeaconBlock{Slot: 1}
newHeadSignedBlock := &ethpb.SignedBeaconBlock{Block: newHeadBlock}
if err := service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock); err != nil {
t.Fatal(err)
}
newRoot, _ := ssz.HashTreeRoot(newHeadBlock)
headState, _ := state.InitializeFromProto(&pb.BeaconState{Slot: 1})
if err := service.beaconDB.SaveState(context.Background(), headState, newRoot); err != nil {
t.Fatal(err)
}
if err := service.saveHead(context.Background(), newRoot); err != nil {
t.Fatal(err)
}
if service.HeadSlot() != 1 {
t.Error("Head did not change")
}
cachedRoot, err := service.HeadRoot(context.Background())
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(cachedRoot, newRoot[:]) {
t.Error("Head did not change")
}
if !reflect.DeepEqual(service.headBlock(), newHeadSignedBlock) {
t.Error("Head did not change")
}
if !reflect.DeepEqual(service.headState().CloneInnerState(), headState.CloneInnerState()) {
t.Error("Head did not change")
}
}

View File

@@ -1,57 +1,83 @@
package blockchain
import (
"bytes"
"encoding/hex"
"fmt"
"net/http"
"sort"
"strconv"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/sirupsen/logrus"
"github.com/emicklei/dot"
)
const latestSlotCount = 10
const template = `<html>
<head>
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/viz.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/full.render.js"></script>
<body>
<script type="application/javascript">
var graph = ` + "`%s`;" + `
var viz = new Viz();
viz.renderSVGElement(graph) // reading the graph.
.then(function(element) {
document.body.appendChild(element); // appends to document.
})
.catch(error => {
// Create a new Viz instance (@see Caveats page for more info)
viz = new Viz();
// Possibly display the error
console.error(error);
});
</script>
</head>
</body>
</html>`
// HeadsHandler is a handler to serve /heads page in metrics.
func (s *Service) HeadsHandler(w http.ResponseWriter, _ *http.Request) {
buf := new(bytes.Buffer)
if _, err := fmt.Fprintf(w, "\n %s\t%s\t", "Head slot", "Head root"); err != nil {
logrus.WithError(err).Error("Failed to render chain heads page")
return
// TreeHandler is a handler to serve /tree page in metrics.
func (s *Service) TreeHandler(w http.ResponseWriter, _ *http.Request) {
if s.headState() == nil {
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
log.WithError(err).Error("Failed to render tree page")
}
return
}
if _, err := fmt.Fprintf(w, "\n %s\t%s\t", "---------", "---------"); err != nil {
logrus.WithError(err).Error("Failed to render chain heads page")
return
nodes := s.forkChoiceStore.Nodes()
graph := dot.NewGraph(dot.Directed)
graph.Attr("rankdir", "RL")
graph.Attr("labeljust", "l")
dotNodes := make([]*dot.Node, len(nodes))
avgBalance := uint64(averageBalance(s.headState().Balances()))
for i := len(nodes) - 1; i >= 0; i-- {
// Construct label for each node.
slot := strconv.Itoa(int(nodes[i].Slot))
weight := strconv.Itoa(int(nodes[i].Weight / 1e9)) // Convert unit Gwei to unit ETH.
votes := strconv.Itoa(int(nodes[i].Weight / 1e9 / avgBalance))
bestDescendent := strconv.Itoa(int(nodes[i].BestDescendent))
index := strconv.Itoa(int(i))
label := "slot: " + slot + "\n index: " + index + "\n bestDescendent: " + bestDescendent + "\n votes: " + votes + "\n weight: " + weight
var dotN dot.Node
if nodes[i].Parent != ^uint64(0) {
dotN = graph.Node(index).Box().Attr("label", label)
}
if nodes[i].Slot == s.headSlot() &&
nodes[i].BestDescendent == ^uint64(0) {
dotN = dotN.Attr("color", "green")
}
dotNodes[i] = &dotN
}
slots := s.latestHeadSlots()
for _, slot := range slots {
r := hex.EncodeToString(bytesutil.Trunc(s.canonicalRoots[uint64(slot)]))
if _, err := fmt.Fprintf(w, "\n %d\t\t%s\t", slot, r); err != nil {
logrus.WithError(err).Error("Failed to render chain heads page")
return
for i := len(nodes) - 1; i >= 0; i-- {
if nodes[i].Parent != ^uint64(0) && nodes[i].Parent < uint64(len(dotNodes)) {
graph.Edge(*dotNodes[i], *dotNodes[nodes[i].Parent])
}
}
w.WriteHeader(http.StatusOK)
if _, err := w.Write(buf.Bytes()); err != nil {
log.WithError(err).Error("Failed to render chain heads page")
w.Header().Set("Content-Type", "text/html")
if _, err := fmt.Fprintf(w, template, graph.String()); err != nil {
log.WithError(err).Error("Failed to render tree page")
}
}
// This returns the latest head slots as a slice, up to latestSlotCount entries.
func (s *Service) latestHeadSlots() []int {
slots := make([]int, 0, len(s.canonicalRoots))
for k := range s.canonicalRoots {
slots = append(slots, int(k))
}
sort.Ints(slots)
if (len(slots)) > latestSlotCount {
return slots[len(slots)-latestSlotCount:]
}
return slots
}
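
TreeHandler above builds a DOT description with github.com/emicklei/dot and lets viz.js render it in the browser. A standalone sketch of that pattern (hypothetical program, not part of the diff), serving the raw DOT source so the graph-building calls stand on their own:

package main
import (
"net/http"
"github.com/emicklei/dot"
)
func main() {
http.HandleFunc("/tree", func(w http.ResponseWriter, _ *http.Request) {
g := dot.NewGraph(dot.Directed)
g.Attr("rankdir", "RL")
parent := g.Node("slot 0").Box()
child := g.Node("slot 1").Box()
g.Edge(child, parent) // edges point child -> parent, as in TreeHandler
w.Header().Set("Content-Type", "text/plain")
if _, err := w.Write([]byte(g.String())); err != nil {
return
}
})
_ = http.ListenAndServe(":8080", nil)
}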

View File

@@ -0,0 +1,191 @@
package blockchain
import (
"context"
"sort"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
const maxCacheSize = 70
const initialSyncCacheSize = 45
const minimumCacheSize = initialSyncCacheSize / 3
func (s *Service) persistCachedStates(ctx context.Context, numOfStates int) error {
oldStates := make([]*stateTrie.BeaconState, 0, numOfStates)
// Add slots to the map and add epoch boundary states to the slice.
for _, rt := range s.boundaryRoots[:numOfStates-minimumCacheSize] {
oldStates = append(oldStates, s.initSyncState[rt])
}
err := s.beaconDB.SaveStates(ctx, oldStates, s.boundaryRoots[:numOfStates-minimumCacheSize])
if err != nil {
return err
}
for _, rt := range s.boundaryRoots[:numOfStates-minimumCacheSize] {
delete(s.initSyncState, rt)
}
s.boundaryRoots = s.boundaryRoots[numOfStates-minimumCacheSize:]
return nil
}
// filterBoundaryCandidates selects epoch boundary candidates from the currently processed batch of states.
func (s *Service) filterBoundaryCandidates(ctx context.Context, root [32]byte, postState *stateTrie.BeaconState) {
// Only trigger on epoch start.
if !helpers.IsEpochStart(postState.Slot()) {
return
}
stateSlice := make([][32]byte, 0, len(s.initSyncState))
// Add epoch boundary roots to slice.
for rt := range s.initSyncState {
stateSlice = append(stateSlice, rt)
}
sort.Slice(stateSlice, func(i int, j int) bool {
return s.initSyncState[stateSlice[i]].Slot() < s.initSyncState[stateSlice[j]].Slot()
})
epochLength := params.BeaconConfig().SlotsPerEpoch
if len(s.boundaryRoots) > 0 {
// Retrieve previous boundary root.
previousBoundaryRoot := s.boundaryRoots[len(s.boundaryRoots)-1]
previousState, ok := s.initSyncState[previousBoundaryRoot]
if !ok {
// Remove the non-existent root and exit filtering.
s.boundaryRoots = s.boundaryRoots[:len(s.boundaryRoots)-1]
return
}
previousSlot := previousState.Slot()
// Round up slot number to account for skipped slots.
previousSlot = helpers.RoundUpToNearestEpoch(previousSlot)
if postState.Slot()-previousSlot >= epochLength {
targetSlot := postState.Slot()
tempRoots := s.loopThroughCandidates(stateSlice, previousBoundaryRoot, previousSlot, targetSlot)
s.boundaryRoots = append(s.boundaryRoots, tempRoots...)
}
}
s.boundaryRoots = append(s.boundaryRoots, root)
s.pruneOldStates()
s.pruneNonBoundaryStates()
}
// loopThroughCandidates iterates over the provided candidate roots to determine which are appropriate boundary roots.
func (s *Service) loopThroughCandidates(stateSlice [][32]byte, previousBoundaryRoot [32]byte,
previousSlot uint64, targetSlot uint64) [][32]byte {
tempRoots := [][32]byte{}
epochLength := params.BeaconConfig().SlotsPerEpoch
// Loop through current states to filter for valid boundary states.
for i := len(stateSlice) - 1; i >= 0 && stateSlice[i] != previousBoundaryRoot; i-- {
currentSlot := s.initSyncState[stateSlice[i]].Slot()
// Skip if the current slot is larger than the previous epoch
// boundary.
if currentSlot > targetSlot-epochLength {
continue
}
tempRoots = append(tempRoots, stateSlice[i])
// Switch target slot if the current slot is greater than
// 1 epoch boundary from the previously saved boundary slot.
if currentSlot > previousSlot+epochLength {
currentSlot = helpers.RoundUpToNearestEpoch(currentSlot)
targetSlot = currentSlot
continue
}
break
}
// Reverse to append the roots in ascending order corresponding
// to the respective slots.
tempRoots = bytesutil.ReverseBytes32Slice(tempRoots)
return tempRoots
}
// pruneOldStates removes cached states older than the current finalized checkpoint.
func (s *Service) pruneOldStates() {
prunedBoundaryRoots := [][32]byte{}
for _, rt := range s.boundaryRoots {
st, ok := s.initSyncState[rt]
// Skip non-existent roots.
if !ok {
continue
}
if st.Slot() < helpers.StartSlot(s.FinalizedCheckpt().Epoch) {
delete(s.initSyncState, rt)
continue
}
prunedBoundaryRoots = append(prunedBoundaryRoots, rt)
}
s.boundaryRoots = prunedBoundaryRoots
}
// pruneNonBoundaryStates removes cached states that are not epoch boundary states.
func (s *Service) pruneNonBoundaryStates() {
boundaryMap := make(map[[32]byte]bool)
for i := range s.boundaryRoots {
boundaryMap[s.boundaryRoots[i]] = true
}
for rt := range s.initSyncState {
if !boundaryMap[rt] {
delete(s.initSyncState, rt)
}
}
}
func (s *Service) pruneOldNonFinalizedStates() {
stateSlice := make([][32]byte, 0, len(s.initSyncState))
// Add epoch boundary roots to slice.
for rt := range s.initSyncState {
stateSlice = append(stateSlice, rt)
}
// Sort by slots.
sort.Slice(stateSlice, func(i int, j int) bool {
return s.initSyncState[stateSlice[i]].Slot() < s.initSyncState[stateSlice[j]].Slot()
})
boundaryMap := make(map[[32]byte]bool)
for i := range s.boundaryRoots {
boundaryMap[s.boundaryRoots[i]] = true
}
for _, rt := range stateSlice[:initialSyncCacheSize] {
if boundaryMap[rt] {
continue
}
delete(s.initSyncState, rt)
}
}
func (s *Service) generateState(ctx context.Context, startRoot [32]byte, endRoot [32]byte) (*stateTrie.BeaconState, error) {
preState, err := s.beaconDB.State(ctx, startRoot)
if err != nil {
return nil, err
}
if preState == nil {
return nil, errors.New("finalized state does not exist in db")
}
endBlock, err := s.beaconDB.Block(ctx, endRoot)
if err != nil {
return nil, err
}
if endBlock == nil {
return nil, errors.New("provided block root does not have block saved in the db")
}
log.Warnf("Generating missing state of slot %d and root %#x", endBlock.Block.Slot, endRoot)
blocks, err := s.stateGen.LoadBlocks(ctx, preState.Slot()+1, endBlock.Block.Slot, endRoot)
if err != nil {
return nil, errors.Wrap(err, "could not load the required blocks")
}
postState, err := s.stateGen.ReplayBlocks(ctx, preState, blocks, endBlock.Block.Slot)
if err != nil {
return nil, errors.Wrap(err, "could not replay the blocks to generate the resultant state")
}
return postState, nil
}

View File

@@ -0,0 +1,279 @@
package blockchain
import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"gopkg.in/d4l3k/messagediff.v1"
)
func TestFilterBoundaryCandidates_FilterCorrect(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
for i := uint64(0); i < 500; i++ {
st.SetSlot(i)
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
if helpers.IsEpochStart(i) {
service.boundaryRoots = append(service.boundaryRoots, root)
}
}
lastIndex := len(service.boundaryRoots) - 1
for i := uint64(500); i < 2000; i++ {
st.SetSlot(i)
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
}
// Set current state.
latestSlot := helpers.RoundUpToNearestEpoch(2000)
st.SetSlot(latestSlot)
lastRoot := [32]byte{}
copy(lastRoot[:], bytesutil.Bytes32(latestSlot))
service.initSyncState[lastRoot] = st.Copy()
service.finalizedCheckpt = &ethpb.Checkpoint{
Epoch: 0,
Root: []byte{},
}
service.filterBoundaryCandidates(context.Background(), lastRoot, st)
if len(service.boundaryRoots[lastIndex+1:]) == 0 {
t.Fatal("Wanted non zero added boundary roots")
}
for _, rt := range service.boundaryRoots[lastIndex+1:] {
st, ok := service.initSyncState[rt]
if !ok {
t.Error("Root doen't exist in cache map")
continue
}
if !(helpers.IsEpochStart(st.Slot()) || helpers.IsEpochStart(st.Slot()-1) || helpers.IsEpochStart(st.Slot()+1)) {
t.Errorf("boundary roots not validly stored. They have slot %d", st.Slot())
}
}
}
func TestFilterBoundaryCandidates_HandleSkippedSlots(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
for i := uint64(0); i < 500; i++ {
st.SetSlot(i)
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
if helpers.IsEpochStart(i) {
service.boundaryRoots = append(service.boundaryRoots, root)
}
}
lastIndex := len(service.boundaryRoots) - 1
for i := uint64(500); i < 2000; i++ {
st.SetSlot(i)
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
// save only for offset slots
if helpers.IsEpochStart(i + 10) {
service.initSyncState[root] = st.Copy()
}
}
// Set current state.
latestSlot := helpers.RoundUpToNearestEpoch(2000)
st.SetSlot(latestSlot)
lastRoot := [32]byte{}
copy(lastRoot[:], bytesutil.Bytes32(latestSlot))
service.initSyncState[lastRoot] = st.Copy()
service.finalizedCheckpt = &ethpb.Checkpoint{
Epoch: 0,
Root: []byte{},
}
service.filterBoundaryCandidates(context.Background(), lastRoot, st)
if len(service.boundaryRoots[lastIndex+1:]) == 0 {
t.Fatal("Wanted non zero added boundary roots")
}
for _, rt := range service.boundaryRoots[lastIndex+1:] {
st, ok := service.initSyncState[rt]
if !ok {
t.Error("Root doen't exist in cache map")
continue
}
if st.Slot() >= 500 {
// Ignore head boundary root.
if st.Slot() == 2016 {
continue
}
if !helpers.IsEpochStart(st.Slot() + 10) {
t.Errorf("boundary roots not validly stored. They have slot %d "+
"instead of the offset from epoch start", st.Slot())
}
}
}
}
func TestPruneOldStates_AlreadyFinalized(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
for i := uint64(100); i < 200; i++ {
st.SetSlot(i)
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
service.boundaryRoots = append(service.boundaryRoots, root)
}
finalizedEpoch := uint64(5)
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: finalizedEpoch}
service.pruneOldStates()
for _, rt := range service.boundaryRoots {
st, ok := service.initSyncState[rt]
if !ok {
t.Error("Root doen't exist in cache map")
continue
}
if st.Slot() < helpers.StartSlot(finalizedEpoch) {
t.Errorf("State with slot %d still exists and not pruned", st.Slot())
}
}
}
func TestPruneNonBoundary_CanPrune(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
for i := uint64(0); i < 2000; i++ {
st.SetSlot(i)
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
if helpers.IsEpochStart(i) {
service.boundaryRoots = append(service.boundaryRoots, root)
}
}
service.pruneNonBoundaryStates()
for _, rt := range service.boundaryRoots {
st, ok := service.initSyncState[rt]
if !ok {
t.Error("Root doesn't exist in cache map")
continue
}
if !helpers.IsEpochStart(st.Slot()) {
t.Errorf("Non boundary state with slot %d still exists and not pruned", st.Slot())
}
}
}
func TestGenerateState_CorrectlyGenerated(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(context.Background(), cfg)
if err != nil {
t.Fatal(err)
}
beaconState, privs := testutil.DeterministicGenesisState(t, 32)
genesisBlock := blocks.NewGenesisBlock([]byte{})
bodyRoot, err := ssz.HashTreeRoot(genesisBlock.Block)
if err != nil {
t.Fatal(err)
}
beaconState.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: genesisBlock.Block.Slot,
ParentRoot: genesisBlock.Block.ParentRoot,
StateRoot: params.BeaconConfig().ZeroHash[:],
BodyRoot: bodyRoot[:],
})
beaconState.SetSlashings(make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector))
cp := beaconState.CurrentJustifiedCheckpoint()
mockRoot := [32]byte{}
copy(mockRoot[:], "hello-world")
cp.Root = mockRoot[:]
beaconState.SetCurrentJustifiedCheckpoint(cp)
beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{})
err = db.SaveBlock(context.Background(), genesisBlock)
if err != nil {
t.Fatal(err)
}
genRoot, err := ssz.HashTreeRoot(genesisBlock)
if err != nil {
t.Fatal(err)
}
err = db.SaveState(context.Background(), beaconState, genRoot)
if err != nil {
t.Fatal(err)
}
lastBlock := &ethpb.SignedBeaconBlock{}
for i := uint64(1); i < 10; i++ {
block, err := testutil.GenerateFullBlock(beaconState, privs, testutil.DefaultBlockGenConfig(), i)
if err != nil {
t.Fatal(err)
}
beaconState, err = state.ExecuteStateTransition(context.Background(), beaconState, block)
if err != nil {
t.Fatal(err)
}
err = db.SaveBlock(context.Background(), block)
if err != nil {
t.Fatal(err)
}
lastBlock = block
}
root, err := ssz.HashTreeRoot(lastBlock.Block)
if err != nil {
t.Fatal(err)
}
newState, err := service.generateState(context.Background(), genRoot, root)
if err != nil {
t.Fatal(err)
}
if !ssz.DeepEqual(newState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
diff, _ := messagediff.PrettyDiff(newState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
t.Errorf("Generated state is different from what is expected: %s", diff)
}
}

View File

@@ -1,17 +1,50 @@
package blockchain
import (
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"fmt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
)
var log = logrus.WithField("prefix", "blockchain")
// logs state transition related data every slot.
func logStateTransitionData(b *ethpb.BeaconBlock, r []byte) {
func logStateTransitionData(b *ethpb.BeaconBlock) {
log.WithFields(logrus.Fields{
"slot": b.Slot,
"attestations": len(b.Body.Attestations),
"deposits": len(b.Body.Deposits),
"slot": b.Slot,
"attestations": len(b.Body.Attestations),
"deposits": len(b.Body.Deposits),
"attesterSlashings": len(b.Body.AttesterSlashings),
}).Info("Finished applying state transition")
}
func logEpochData(beaconState *stateTrie.BeaconState) {
log.WithFields(logrus.Fields{
"epoch": helpers.CurrentEpoch(beaconState),
"finalizedEpoch": beaconState.FinalizedCheckpointEpoch(),
"justifiedEpoch": beaconState.CurrentJustifiedCheckpoint().Epoch,
"previousJustifiedEpoch": beaconState.PreviousJustifiedCheckpoint().Epoch,
}).Info("Starting next epoch")
activeVals, err := helpers.ActiveValidatorIndices(beaconState, helpers.CurrentEpoch(beaconState))
if err != nil {
log.WithError(err).Error("Could not get active validator indices")
return
}
log.WithFields(logrus.Fields{
"totalValidators": len(beaconState.Validators()),
"activeValidators": len(activeVals),
"averageBalance": fmt.Sprintf("%.5f ETH", averageBalance(beaconState.Balances())),
}).Info("Validator registry information")
}
func averageBalance(balances []uint64) float64 {
total := uint64(0)
for i := 0; i < len(balances); i++ {
total += balances[i]
}
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
}

View File

@@ -3,7 +3,11 @@ package blockchain
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
var (
@@ -15,42 +19,162 @@ var (
Name: "beacon_head_slot",
Help: "Slot of the head block of the beacon chain",
})
beaconHeadRoot = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_head_root",
Help: "Root of the head block of the beacon chain, it returns the lowest 8 bytes interpreted as little endian",
})
competingAtts = promauto.NewCounter(prometheus.CounterOpts{
Name: "competing_attestations",
Help: "The # of attestations received and processed from a competing chain",
})
competingBlks = promauto.NewCounter(prometheus.CounterOpts{
Name: "competing_blocks",
Help: "The # of blocks received and processed from a competing chain",
})
processedBlkNoPubsub = promauto.NewCounter(prometheus.CounterOpts{
Name: "processed_no_pubsub_block_counter",
Help: "The # of processed block without pubsub, this usually means the blocks from sync",
headFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "head_finalized_epoch",
Help: "Last finalized epoch of the head state",
})
processedBlkNoPubsubForkchoice = promauto.NewCounter(prometheus.CounterOpts{
Name: "processed_no_pubsub_forkchoice_block_counter",
Help: "The # of processed block without pubsub and forkchoice, this means indicate blocks from initial sync",
headFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
Name: "head_finalized_root",
Help: "Last finalized root of the head state",
})
processedBlk = promauto.NewCounter(prometheus.CounterOpts{
Name: "processed_block_counter",
Help: "The # of total processed in block chain service, with fork choice and pubsub",
beaconFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_finalized_epoch",
Help: "Last finalized epoch of the processed state",
})
processedAttNoPubsub = promauto.NewCounter(prometheus.CounterOpts{
Name: "processed_no_pubsub_attestation_counter",
Help: "The # of processed attestation without pubsub, this usually means the attestations from sync",
beaconFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_finalized_root",
Help: "Last finalized root of the processed state",
})
processedAtt = promauto.NewCounter(prometheus.CounterOpts{
Name: "processed_attestation_counter",
Help: "The # of processed attestation with pubsub and fork choice, this ususally means attestations from rpc",
beaconCurrentJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_current_justified_epoch",
Help: "Current justified epoch of the processed state",
})
beaconCurrentJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_current_justified_root",
Help: "Current justified root of the processed state",
})
beaconPrevJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_previous_justified_epoch",
Help: "Previous justified epoch of the processed state",
})
beaconPrevJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacon_previous_justified_root",
Help: "Previous justified root of the processed state",
})
validatorsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "validator_count",
Help: "The total number of validators",
}, []string{"state"})
validatorsBalance = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "validators_total_balance",
Help: "The total balance of validators, in GWei",
}, []string{"state"})
validatorsEffectiveBalance = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "validators_total_effective_balance",
Help: "The total effective balance of validators, in GWei",
}, []string{"state"})
currentEth1DataDepositCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "current_eth1_data_deposit_count",
Help: "The current eth1 deposit count in the last processed state eth1data field.",
})
totalEligibleBalances = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_eligible_balances",
Help: "The total amount of ether, in gwei, that has been used in voting attestation target of previous epoch",
})
totalVotedTargetBalances = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_voted_target_balances",
Help: "The total amount of ether, in gwei, that is eligible for voting of previous epoch",
})
)
func (s *Service) reportSlotMetrics(currentSlot uint64) {
// reportSlotMetrics reports slot related metrics.
func reportSlotMetrics(currentSlot uint64, headSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
beaconSlot.Set(float64(currentSlot))
beaconHeadSlot.Set(float64(s.HeadSlot()))
beaconHeadRoot.Set(float64(bytesutil.ToLowInt64(s.HeadRoot())))
beaconHeadSlot.Set(float64(headSlot))
if finalizedCheckpoint != nil {
headFinalizedEpoch.Set(float64(finalizedCheckpoint.Epoch))
headFinalizedRoot.Set(float64(bytesutil.ToLowInt64(finalizedCheckpoint.Root)))
}
}
// reportEpochMetrics reports epoch related metrics.
func reportEpochMetrics(state *stateTrie.BeaconState) {
currentEpoch := state.Slot() / params.BeaconConfig().SlotsPerEpoch
// Validator instances
pendingInstances := 0
activeInstances := 0
slashingInstances := 0
slashedInstances := 0
exitingInstances := 0
exitedInstances := 0
// Validator balances
pendingBalance := uint64(0)
activeBalance := uint64(0)
activeEffectiveBalance := uint64(0)
exitingBalance := uint64(0)
exitingEffectiveBalance := uint64(0)
slashingBalance := uint64(0)
slashingEffectiveBalance := uint64(0)
for i, validator := range state.Validators() {
bal, err := state.BalanceAtIndex(uint64(i))
if err != nil {
continue
}
if validator.Slashed {
if currentEpoch < validator.ExitEpoch {
slashingInstances++
slashingBalance += bal
slashingEffectiveBalance += validator.EffectiveBalance
} else {
slashedInstances++
}
continue
}
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
if currentEpoch < validator.ExitEpoch {
exitingInstances++
exitingBalance += bal
exitingEffectiveBalance += validator.EffectiveBalance
} else {
exitedInstances++
}
continue
}
if currentEpoch < validator.ActivationEpoch {
pendingInstances++
pendingBalance += bal
continue
}
activeInstances++
activeBalance += bal
activeEffectiveBalance += validator.EffectiveBalance
}
validatorsCount.WithLabelValues("Pending").Set(float64(pendingInstances))
validatorsCount.WithLabelValues("Active").Set(float64(activeInstances))
validatorsCount.WithLabelValues("Exiting").Set(float64(exitingInstances))
validatorsCount.WithLabelValues("Exited").Set(float64(exitedInstances))
validatorsCount.WithLabelValues("Slashing").Set(float64(slashingInstances))
validatorsCount.WithLabelValues("Slashed").Set(float64(slashedInstances))
validatorsBalance.WithLabelValues("Pending").Set(float64(pendingBalance))
validatorsBalance.WithLabelValues("Active").Set(float64(activeBalance))
validatorsBalance.WithLabelValues("Exiting").Set(float64(exitingBalance))
validatorsBalance.WithLabelValues("Slashing").Set(float64(slashingBalance))
validatorsEffectiveBalance.WithLabelValues("Active").Set(float64(activeEffectiveBalance))
validatorsEffectiveBalance.WithLabelValues("Exiting").Set(float64(exitingEffectiveBalance))
validatorsEffectiveBalance.WithLabelValues("Slashing").Set(float64(slashingEffectiveBalance))
// Last justified slot
beaconCurrentJustifiedEpoch.Set(float64(state.CurrentJustifiedCheckpoint().Epoch))
beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.CurrentJustifiedCheckpoint().Root)))
// Last previous justified slot
beaconPrevJustifiedEpoch.Set(float64(state.PreviousJustifiedCheckpoint().Epoch))
beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.PreviousJustifiedCheckpoint().Root)))
// Last finalized slot
beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpointEpoch()))
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint().Root)))
currentEth1DataDepositCount.Set(float64(state.Eth1Data().DepositCount))
if precompute.Balances != nil {
totalEligibleBalances.Set(float64(precompute.Balances.PrevEpoch))
totalVotedTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttesters))
}
}
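
The validator bucketing in reportEpochMetrics follows a fixed order of checks; a compact sketch of that decision ladder as a standalone helper (hypothetical function, not part of the diff):

package blockchain
import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
)
// validatorStatus mirrors the ordering used by reportEpochMetrics: slashed first,
// then exiting/exited, then pending, otherwise active.
func validatorStatus(v *ethpb.Validator, currentEpoch uint64) string {
switch {
case v.Slashed && currentEpoch < v.ExitEpoch:
return "Slashing"
case v.Slashed:
return "Slashed"
case v.ExitEpoch != params.BeaconConfig().FarFutureEpoch && currentEpoch < v.ExitEpoch:
return "Exiting"
case v.ExitEpoch != params.BeaconConfig().FarFutureEpoch:
return "Exited"
case currentEpoch < v.ActivationEpoch:
return "Pending"
default:
return "Active"
}
}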

View File

@@ -0,0 +1,132 @@
package blockchain
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"go.opencensus.io/trace"
)
// ErrTargetRootNotInDB returns when the target block root of an attestation cannot be found in the
// beacon database.
var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
// onAttestation is called whenever an attestation is received. It verifies the attestation is valid and saves
// it to the DB.
//
// Spec pseudocode definition:
// def on_attestation(store: Service, attestation: Attestation) -> None:
// """
// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
//
// An ``attestation`` that is asserted as invalid may be valid at a later time,
// consider scheduling it for later processing in such case.
// """
// target = attestation.data.target
//
// # Attestations must be from the current or previous epoch
// current_epoch = compute_epoch_at_slot(get_current_slot(store))
// # Use GENESIS_EPOCH for previous when genesis to avoid underflow
// previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
// assert target.epoch in [current_epoch, previous_epoch]
// assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
//
// # Attestation target must be for a known block. If target block is unknown, delay consideration until the block is found
// assert target.root in store.blocks
// # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
// base_state = store.block_states[target.root].copy()
// assert store.time >= base_state.genesis_time + compute_start_slot_at_epoch(target.epoch) * SECONDS_PER_SLOT
//
// # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
// assert attestation.data.beacon_block_root in store.blocks
// # Attestations must not be for blocks in the future. If not, the attestation should not be considered
// assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
//
// # Store target checkpoint state if not yet seen
// if target not in store.checkpoint_states:
// process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
// store.checkpoint_states[target] = base_state
// target_state = store.checkpoint_states[target]
//
// # Attestations can only affect the fork choice of subsequent slots.
// # Delay consideration in the fork choice until their slot is in the past.
// assert store.time >= (attestation.data.slot + 1) * SECONDS_PER_SLOT
//
// # Get state at the `target` to validate attestation and calculate the committees
// indexed_attestation = get_indexed_attestation(target_state, attestation)
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
//
// # Update latest messages
// for i in indexed_attestation.attesting_indices:
// if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
// store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]uint64, error) {
ctx, span := trace.StartSpan(ctx, "blockchain.onAttestation")
defer span.End()
tgt := stateTrie.CopyCheckpoint(a.Data.Target)
tgtSlot := helpers.StartSlot(tgt.Epoch)
if helpers.SlotToEpoch(a.Data.Slot) != a.Data.Target.Epoch {
return nil, fmt.Errorf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(a.Data.Slot), a.Data.Target.Epoch)
}
// Verify beacon node has seen the target block before.
if !s.hasBlock(ctx, bytesutil.ToBytes32(tgt.Root)) {
return nil, ErrTargetRootNotInDB
}
// Retrieve the attestation target's pre state. Advance the pre state to the target epoch if necessary and
// save it to the cache.
baseState, err := s.getAttPreState(ctx, tgt)
if err != nil {
return nil, err
}
genesisTime := baseState.GenesisTime()
// Verify attestation target is from current epoch or previous epoch.
if err := s.verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
return nil, err
}
// Verify the attestation is not from a future epoch.
if err := helpers.VerifySlotTime(genesisTime, tgtSlot); err != nil {
return nil, errors.Wrap(err, "could not verify attestation target slot")
}
// Verify attestation beacon block is known and not from the future.
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
return nil, errors.Wrap(err, "could not verify attestation beacon block")
}
// Verify attestations can only affect the fork choice of subsequent slots.
if err := helpers.VerifySlotTime(genesisTime, a.Data.Slot+1); err != nil {
return nil, err
}
// Use the target state to validate the attestation and calculate the committees.
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
if err != nil {
return nil, err
}
// Only save attestation in DB for archival node.
if flags.Get().EnableArchive {
if err := s.beaconDB.SaveAttestation(ctx, a); err != nil {
return nil, err
}
}
// Update the fork choice store with the new attestation to update vote weight.
s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
return indexedAtt.AttestingIndices, nil
}
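As the spec comment above notes, an attestation rejected here may become valid later; ErrTargetRootNotInDB in particular only means the target block has not arrived yet. A minimal sketch, assuming a hypothetical same-package caller and a hypothetical pending queue (neither is part of this changeset), of how that error could be handled by deferring the attestation instead of dropping it:

// processIncomingAtt is a hypothetical caller of onAttestation. When the
// target block is not yet in the database, the attestation is queued for
// later re-processing, mirroring the spec's advice to delay consideration
// until the block is found.
func (s *Service) processIncomingAtt(ctx context.Context, att *ethpb.Attestation, pending chan *ethpb.Attestation) error {
	_, err := s.onAttestation(ctx, att)
	if err == ErrTargetRootNotInDB {
		select {
		case pending <- att: // retry once the target block has been imported
		default: // queue is full; drop the attestation
		}
		return nil
	}
	return err
}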


@@ -0,0 +1,146 @@
package blockchain
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
)
// getAttPreState retrieves the attestation pre state from either the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
s.checkpointStateLock.Lock()
defer s.checkpointStateLock.Unlock()
cachedState, err := s.checkpointState.StateByCheckpoint(c)
if err != nil {
return nil, errors.Wrap(err, "could not get cached checkpoint state")
}
if cachedState != nil {
return cachedState, nil
}
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head root")
}
if bytes.Equal(headRoot, c.Root) {
st, err := s.HeadState(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head state")
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: st.Copy(),
}); err != nil {
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return st, nil
}
}
baseState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if baseState == nil {
return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
}
if helpers.StartSlot(c.Epoch) > baseState.Slot() {
baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(c.Epoch))
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to %d", helpers.StartSlot(c.Epoch))
}
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: baseState.Copy(),
}); err != nil {
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return baseState, nil
}
// verifyAttTargetEpoch validates that the attestation target is from the current or previous epoch.
func (s *Service) verifyAttTargetEpoch(ctx context.Context, genesisTime uint64, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
currentEpoch := helpers.SlotToEpoch(currentSlot)
var prevEpoch uint64
// Prevents previous epoch underflow
if currentEpoch > 1 {
prevEpoch = currentEpoch - 1
}
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
return fmt.Errorf("target epoch %d does not match current epoch %d or prev epoch %d", c.Epoch, currentEpoch, prevEpoch)
}
return nil
}
// verifyBeaconBlock verifies the attestation's beacon block is known and not from the future.
func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
b, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(data.BeaconBlockRoot))
if err != nil {
return err
}
if b == nil || b.Block == nil {
return fmt.Errorf("beacon block %#x does not exist", bytesutil.Trunc(data.BeaconBlockRoot))
}
if b.Block.Slot > data.Slot {
return fmt.Errorf("could not process attestation for future block, %d > %d", b.Block.Slot, data.Slot)
}
return nil
}
// verifyAttestation validates the input attestation and returns it in indexed form.
func (s *Service) verifyAttestation(ctx context.Context, baseState *stateTrie.BeaconState, a *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
committee, err := helpers.BeaconCommitteeFromState(baseState, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return nil, err
}
indexedAtt, err := attestationutil.ConvertToIndexed(ctx, a, committee)
if err != nil {
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
}
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
if err == blocks.ErrSigFailedToVerify {
// When the signature fails to verify, check if there is a difference in committees due to
// different seeds.
aState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if err != nil {
return nil, err
}
epoch := helpers.SlotToEpoch(a.Data.Slot)
origSeed, err := helpers.Seed(baseState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return nil, errors.Wrap(err, "could not get original seed")
}
aSeed, err := helpers.Seed(aState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return nil, errors.Wrap(err, "could not get attester's seed")
}
if origSeed != aSeed {
return nil, fmt.Errorf("could not verify indexed attestation due to differences in seeds: %v != %v",
hex.EncodeToString(bytesutil.Trunc(origSeed[:])), hex.EncodeToString(bytesutil.Trunc(aSeed[:])))
}
}
return nil, errors.Wrap(err, "could not verify indexed attestation")
}
return indexedAtt, nil
}
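To make the acceptance window enforced by verifyAttTargetEpoch concrete, here is a small standalone sketch of the same arithmetic using illustrative mainnet-style constants (12-second slots, 32-slot epochs); attTargetEpochOK is a hypothetical helper that only mirrors the check above.

package main

import "fmt"

const (
	secondsPerSlot = 12 // illustrative mainnet-style value
	slotsPerEpoch  = 32 // illustrative mainnet-style value
)

// attTargetEpochOK mirrors verifyAttTargetEpoch: the target epoch must equal
// the wall-clock epoch or the one immediately before it.
func attTargetEpochOK(genesisTime, nowTime, targetEpoch uint64) bool {
	currentSlot := (nowTime - genesisTime) / secondsPerSlot
	currentEpoch := currentSlot / slotsPerEpoch
	var prevEpoch uint64
	if currentEpoch > 1 {
		prevEpoch = currentEpoch - 1
	}
	return targetEpoch == currentEpoch || targetEpoch == prevEpoch
}

func main() {
	now := uint64(2 * slotsPerEpoch * secondsPerSlot) // two epochs past genesis
	fmt.Println(attTargetEpochOK(0, now, 2))          // true: current epoch
	fmt.Println(attTargetEpochOK(0, now, 1))          // true: previous epoch
	fmt.Println(attTargetEpochOK(0, now, 0))          // false: rejected, as in TestAttEpoch_NotMatch below
}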


@@ -0,0 +1,397 @@
package blockchain
import (
"context"
"reflect"
"strings"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestStore_OnAttestation(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
_, err = blockTree1(db, []byte{'g'})
if err != nil {
t.Fatal(err)
}
BlkWithOutState := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 0}}
if err := db.SaveBlock(ctx, BlkWithOutState); err != nil {
t.Fatal(err)
}
BlkWithOutStateRoot, _ := ssz.HashTreeRoot(BlkWithOutState.Block)
BlkWithStateBadAtt := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
if err := db.SaveBlock(ctx, BlkWithStateBadAtt); err != nil {
t.Fatal(err)
}
BlkWithStateBadAttRoot, _ := ssz.HashTreeRoot(BlkWithStateBadAtt.Block)
s, err := beaconstate.InitializeFromProto(&pb.BeaconState{})
if err := s.SetSlot(100 * params.BeaconConfig().SlotsPerEpoch); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot); err != nil {
t.Fatal(err)
}
BlkWithValidState := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2}}
if err := db.SaveBlock(ctx, BlkWithValidState); err != nil {
t.Fatal(err)
}
BlkWithValidStateRoot, _ := ssz.HashTreeRoot(BlkWithValidState.Block)
s, _ = stateTrie.InitializeFromProto(&pb.BeaconState{
Fork: &pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err := service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot); err != nil {
t.Fatal(err)
}
tests := []struct {
name string
a *ethpb.Attestation
s *pb.BeaconState
wantErr bool
wantErrString string
}{
{
name: "attestation's data slot not aligned with target vote",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{}}},
s: &pb.BeaconState{},
wantErr: true,
wantErrString: "data slot is not in the same epoch as target 1 != 0",
},
{
name: "attestation's target root not in db",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: []byte{'A'}}}},
s: &pb.BeaconState{},
wantErr: true,
wantErrString: "target root does not exist in db",
},
{
name: "no pre state for attestations's target block",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
s: &pb.BeaconState{},
wantErr: true,
wantErrString: "pre state of target block 0 does not exist",
},
{
name: "process attestation doesn't match current epoch",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}},
s: &pb.BeaconState{Slot: 100 * params.BeaconConfig().SlotsPerEpoch},
wantErr: true,
wantErrString: "does not match current epoch",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := service.onAttestation(ctx, tt.a)
if tt.wantErr {
if !strings.Contains(err.Error(), tt.wantErrString) {
t.Errorf("Store.onAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
}
} else {
t.Error(err)
}
})
}
}
func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
params.UseDemoBeaconConfig()
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{
Fork: &pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
StateRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
LatestBlockHeader: &ethpb.BeaconBlockHeader{},
JustificationBits: []byte{0},
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
FinalizedCheckpoint: &ethpb.Checkpoint{},
})
r := [32]byte{'g'}
if err := service.beaconDB.SaveState(ctx, s, r); err != nil {
t.Fatal(err)
}
service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'}))
s1, err := service.getAttPreState(ctx, cp1)
if err != nil {
t.Fatal(err)
}
if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
}
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'}))
s2, err := service.getAttPreState(ctx, cp2)
if err != nil {
t.Fatal(err)
}
if s2.Slot() != 2*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot())
}
s1, err = service.getAttPreState(ctx, cp1)
if err != nil {
t.Fatal(err)
}
if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
}
s1, err = service.checkpointState.StateByCheckpoint(cp1)
if err != nil {
t.Fatal(err)
}
if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
}
s2, err = service.checkpointState.StateByCheckpoint(cp2)
if err != nil {
t.Fatal(err)
}
if s2.Slot() != 2*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot())
}
s.SetSlot(params.BeaconConfig().SlotsPerEpoch + 1)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'C'}}
service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'}))
s3, err := service.getAttPreState(ctx, cp3)
if err != nil {
t.Fatal(err)
}
if s3.Slot() != s.Slot() {
t.Errorf("Wanted state slot: %d, got: %d", s.Slot(), s3.Slot())
}
}
func TestStore_UpdateCheckpointState(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
epoch := uint64(1)
baseState, _ := testutil.DeterministicGenesisState(t, 1)
baseState.SetSlot(epoch * params.BeaconConfig().SlotsPerEpoch)
checkpoint := &ethpb.Checkpoint{Epoch: epoch}
service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root))
returned, err := service.getAttPreState(ctx, checkpoint)
if err != nil {
t.Fatal(err)
}
if baseState.Slot() != returned.Slot() {
t.Error("Incorrectly returned base state")
}
cached, err := service.checkpointState.StateByCheckpoint(checkpoint)
if err != nil {
t.Fatal(err)
}
if cached == nil {
t.Error("State should have been cached")
}
epoch = uint64(2)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch}
service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root))
returned, err = service.getAttPreState(ctx, newCheckpoint)
if err != nil {
t.Fatal(err)
}
baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(newCheckpoint.Epoch))
if err != nil {
t.Fatal(err)
}
if baseState.Slot() != returned.Slot() {
t.Error("Incorrectly returned base state")
}
cached, err = service.checkpointState.StateByCheckpoint(newCheckpoint)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(returned, cached) {
t.Error("Incorrectly cached base state")
}
}
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
if err := service.verifyAttTargetEpoch(
ctx,
0,
params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
&ethpb.Checkpoint{}); err != nil {
t.Error(err)
}
}
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
if err := service.verifyAttTargetEpoch(
ctx,
0,
params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
&ethpb.Checkpoint{Epoch: 1}); err != nil {
t.Error(err)
}
}
func TestAttEpoch_NotMatch(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
if err := service.verifyAttTargetEpoch(
ctx,
0,
2*params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
&ethpb.Checkpoint{}); !strings.Contains(err.Error(),
"target epoch 0 does not match current epoch 2 or prev epoch 1") {
t.Error("Did not receive wanted error")
}
}
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
d := &ethpb.AttestationData{}
if err := service.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "beacon block does not exist") {
t.Error("Did not receive the wanted error")
}
}
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2}}
service.beaconDB.SaveBlock(ctx, b)
r, _ := ssz.HashTreeRoot(b.Block)
d := &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
if err := service.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "could not process attestation for future block") {
t.Error("Did not receive the wanted error")
}
}
func TestVerifyBeaconBlock_OK(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2}}
service.beaconDB.SaveBlock(ctx, b)
r, _ := ssz.HashTreeRoot(b.Block)
d := &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
if err := service.verifyBeaconBlock(ctx, d); err != nil {
t.Error("Did not receive the wanted error")
}
}
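These table-driven tests can be run in isolation with the standard Go tooling, for example (the package path is an assumption based on the import paths above):

go test -run 'TestStore_OnAttestation|TestAttEpoch_' ./beacon-chain/blockchain/...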


@@ -0,0 +1,314 @@
package blockchain
import (
"context"
"encoding/hex"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// onBlock is called when a gossip block is received. It runs regular state transition on the block.
//
// Spec pseudocode definition:
// def on_block(store: Store, block: BeaconBlock) -> None:
// # Make a copy of the state to avoid mutability issues
// assert block.parent_root in store.block_states
// pre_state = store.block_states[block.parent_root].copy()
// # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
// assert store.time >= pre_state.genesis_time + block.slot * SECONDS_PER_SLOT
// # Add new block to the store
// store.blocks[signing_root(block)] = block
// # Check block is a descendant of the finalized block
// assert (
// get_ancestor(store, signing_root(block), store.blocks[store.finalized_checkpoint.root].slot) ==
// store.finalized_checkpoint.root
// )
// # Check that block is later than the finalized epoch slot
// assert block.slot > compute_start_slot_of_epoch(store.finalized_checkpoint.epoch)
// # Check the block is valid and compute the post-state
// state = state_transition(pre_state, block)
// # Add new state for this block to the store
// store.block_states[signing_root(block)] = state
//
// # Update justified checkpoint
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
// store.best_justified_checkpoint = state.current_justified_checkpoint
//
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockchain.onBlock")
defer span.End()
if signed == nil || signed.Block == nil {
return nil, errors.New("nil block")
}
b := signed.Block
// Retrieve incoming block's pre state.
preState, err := s.getBlockPreState(ctx, b)
if err != nil {
return nil, err
}
preStateValidatorCount := preState.NumValidators()
root, err := ssz.HashTreeRoot(b)
if err != nil {
return nil, errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
}
log.WithFields(logrus.Fields{
"slot": b.Slot,
"root": fmt.Sprintf("0x%s...", hex.EncodeToString(root[:])[:8]),
}).Info("Executing state transition on block")
postState, err := state.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return nil, errors.Wrap(err, "could not execute state transition")
}
if err := s.beaconDB.SaveBlock(ctx, signed); err != nil {
return nil, errors.Wrapf(err, "could not save block from slot %d", b.Slot)
}
if err := s.insertBlockToForkChoiceStore(ctx, b, root, postState); err != nil {
return nil, errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
// Update justified check point.
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
if err := s.updateJustified(ctx, postState); err != nil {
return nil, err
}
}
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
return nil, errors.Wrap(err, "could not save finalized checkpoint")
}
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return nil, errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
// Prune proto array fork choice nodes, all nodes before finalized check point will
// be pruned.
s.forkChoiceStore.Prune(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return nil, errors.Wrap(err, "could not save new justified")
}
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return nil, errors.Wrap(err, "could not save new validators")
}
// Epoch boundary bookkeeping such as logging epoch summaries.
if postState.Slot() >= s.nextEpochBoundarySlot {
logEpochData(postState)
reportEpochMetrics(postState)
// Update committees cache at epoch boundary slot.
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return nil, err
}
s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))
}
// Delete the processed block attestations from attestation pool.
if err := s.deletePoolAtts(b.Body.Attestations); err != nil {
return nil, err
}
// Delete the processed block attester slashings from slashings pool.
for i := 0; i < len(b.Body.AttesterSlashings); i++ {
s.slashingPool.MarkIncludedAttesterSlashing(b.Body.AttesterSlashings[i])
}
return postState, nil
}
// onBlockInitialSyncStateTransition is called when an initial sync block is received.
// It runs the state transition on the block without any BLS verification. The excluded BLS verification
// includes the attestations' aggregated signatures. It also does not save attestations.
func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "blockchain.onBlock")
defer span.End()
if signed == nil || signed.Block == nil {
return errors.New("nil block")
}
b := signed.Block
s.initSyncStateLock.Lock()
defer s.initSyncStateLock.Unlock()
// Retrieve incoming block's pre state.
preState, err := s.verifyBlkPreState(ctx, b)
if err != nil {
return err
}
preStateValidatorCount := preState.NumValidators()
postState, err := state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}
if err := s.beaconDB.SaveBlock(ctx, signed); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
}
root, err := ssz.HashTreeRoot(b)
if err != nil {
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
}
if err := s.insertBlockToForkChoiceStore(ctx, b, root, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
s.initSyncState[root] = postState.Copy()
s.filterBoundaryCandidates(ctx, root, postState)
if flags.Get().EnableArchive {
atts := signed.Block.Body.Attestations
if err := s.beaconDB.SaveAttestations(ctx, atts); err != nil {
return errors.Wrapf(err, "could not save block attestations from slot %d", b.Slot)
}
}
// Update justified check point.
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
if err := s.updateJustified(ctx, postState); err != nil {
return err
}
}
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
if err := s.saveInitState(ctx, postState); err != nil {
return errors.Wrap(err, "could not save init sync finalized state")
}
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
return errors.Wrap(err, "could not save finalized checkpoint")
}
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return errors.Wrap(err, "could not save new validators")
}
numOfStates := len(s.boundaryRoots)
if numOfStates > initialSyncCacheSize {
if err = s.persistCachedStates(ctx, numOfStates); err != nil {
return err
}
}
if len(s.initSyncState) > maxCacheSize {
s.pruneOldNonFinalizedStates()
}
// Epoch boundary bookkeeping such as logging epoch summaries.
if postState.Slot() >= s.nextEpochBoundarySlot {
reportEpochMetrics(postState)
s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))
// Update committees cache at epoch boundary slot.
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return err
}
if helpers.IsEpochStart(postState.Slot()) {
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return errors.Wrap(err, "could not save state")
}
}
}
return nil
}
// This feeds the block and the block's attestations into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock, root [32]byte, state *stateTrie.BeaconState) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, state); err != nil {
return err
}
// Feed in block to fork choice store.
if err := s.forkChoiceStore.ProcessBlock(ctx,
blk.Slot, root, bytesutil.ToBytes32(blk.ParentRoot),
state.CurrentJustifiedCheckpoint().Epoch,
state.FinalizedCheckpointEpoch()); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body.Attestations {
committee, err := helpers.BeaconCommitteeFromState(state, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
}
indices, err := attestationutil.AttestingIndices(a.AggregationBits, committee)
if err != nil {
return err
}
s.forkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
}
return nil
}
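The epoch boundary bookkeeping in onBlock and onBlockInitialSyncStateTransition fires once the post state reaches nextEpochBoundarySlot, which is then advanced to the start slot of the following epoch. Below is a tiny standalone sketch of that arithmetic with an illustrative 32-slot epoch; nextEpochBoundary is a hypothetical helper that mirrors helpers.StartSlot(helpers.NextEpoch(state)).

package main

import "fmt"

const slotsPerEpoch = 32 // illustrative value

// nextEpochBoundary returns the first slot of the epoch after the one that
// contains the given slot, matching StartSlot(NextEpoch(state)).
func nextEpochBoundary(slot uint64) uint64 {
	return (slot/slotsPerEpoch + 1) * slotsPerEpoch
}

func main() {
	// A post state at slot 64 sits on an epoch boundary, so the bookkeeping
	// runs and the next boundary is pushed out to slot 96.
	fmt.Println(nextEpochBoundary(64)) // 96
	fmt.Println(nextEpochBoundary(95)) // 96
	fmt.Println(nextEpochBoundary(96)) // 128
}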


@@ -0,0 +1,467 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"time"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() uint64 {
return uint64(time.Now().Unix()-s.genesisTime.Unix()) / params.BeaconConfig().SecondsPerSlot
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state from the DB. It verifies the pre state's validity and that the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "forkchoice.getBlockPreState")
defer span.End()
// Verify incoming block has a valid pre state.
preState, err := s.verifyBlkPreState(ctx, b)
if err != nil {
return nil, err
}
// Verify block slot time is not from the future.
if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot); err != nil {
return nil, err
}
// Verify block is a descendant of the finalized block.
if err := s.verifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot), b.Slot); err != nil {
return nil, err
}
// Verify block is later than the finalized epoch slot.
if err := s.verifyBlkFinalizedSlot(b); err != nil {
return nil, err
}
return preState, nil
}
// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
preState := s.initSyncState[bytesutil.ToBytes32(b.ParentRoot)]
var err error
if preState == nil {
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head root")
}
if bytes.Equal(headRoot, b.ParentRoot) {
return s.HeadState(ctx)
}
}
preState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
if preState == nil {
if bytes.Equal(s.finalizedCheckpt.Root, b.ParentRoot) {
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
}
preState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, err
}
}
return preState, nil // No copy needed from newly hydrated DB object.
}
return preState.Copy(), nil
}
// verifyBlkDescendant validates input block root is a descendant of the
// current finalized block root.
func (s *Service) verifyBlkDescendant(ctx context.Context, root [32]byte, slot uint64) error {
ctx, span := trace.StartSpan(ctx, "forkchoice.verifyBlkDescendant")
defer span.End()
finalizedBlkSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
if err != nil || finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
return errors.Wrap(err, "could not get finalized block")
}
finalizedBlk := finalizedBlkSigned.Block
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
if err != nil {
return errors.Wrap(err, "could not get finalized block root")
}
if bFinalizedRoot == nil {
return fmt.Errorf("no finalized block known for block from slot %d", slot)
}
if !bytes.Equal(bFinalizedRoot, s.finalizedCheckpt.Root) {
err := fmt.Errorf("block from slot %d is not a descendent of the current finalized block slot %d, %#x != %#x",
slot, finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot), bytesutil.Trunc(s.finalizedCheckpt.Root))
traceutil.AnnotateError(span, err)
return err
}
return nil
}
// verifyBlkFinalizedSlot validates that the input block's slot is strictly greater
// than the current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
finalizedSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if finalizedSlot >= b.Slot {
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot, finalizedSlot)
}
return nil
}
// saveNewValidators saves newly added validator indices from the state to db.
// Does nothing if validator count has not changed.
func (s *Service) saveNewValidators(ctx context.Context, preStateValidatorCount int, postState *stateTrie.BeaconState) error {
postStateValidatorCount := postState.NumValidators()
if preStateValidatorCount != postStateValidatorCount {
indices := make([]uint64, 0)
pubKeys := make([][48]byte, 0)
for i := preStateValidatorCount; i < postStateValidatorCount; i++ {
indices = append(indices, uint64(i))
pubKeys = append(pubKeys, postState.PubkeyAtIndex(uint64(i)))
}
if err := s.beaconDB.SaveValidatorIndices(ctx, pubKeys, indices); err != nil {
return errors.Wrapf(err, "could not save activated validators: %v", indices)
}
log.WithFields(logrus.Fields{
"indices": indices,
"totalValidatorCount": postStateValidatorCount - preStateValidatorCount,
}).Trace("Validator indices saved in DB")
}
return nil
}
// rmStatesOlderThanLastFinalized deletes DB states older than the last finalized checkpoint.
func (s *Service) rmStatesOlderThanLastFinalized(ctx context.Context, startSlot uint64, endSlot uint64) error {
ctx, span := trace.StartSpan(ctx, "forkchoice.rmStatesBySlots")
defer span.End()
// Make sure start slot is not a skipped slot
for i := startSlot; i > 0; i-- {
filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
b, err := s.beaconDB.Blocks(ctx, filter)
if err != nil {
return err
}
if len(b) > 0 {
startSlot = i
break
}
}
// Make sure finalized slot is not a skipped slot.
for i := endSlot; i > 0; i-- {
filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
b, err := s.beaconDB.Blocks(ctx, filter)
if err != nil {
return err
}
if len(b) > 0 {
endSlot = i - 1
break
}
}
// Do not remove genesis state
if startSlot == 0 {
startSlot++
}
// If the end slot is less than the start slot, clamp it to the start slot
if endSlot < startSlot {
endSlot = startSlot
}
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
roots, err := s.beaconDB.BlockRoots(ctx, filter)
if err != nil {
return err
}
roots, err = s.filterBlockRoots(ctx, roots)
if err != nil {
return err
}
if err := s.beaconDB.DeleteStates(ctx, roots); err != nil {
return err
}
return nil
}
// shouldUpdateCurrentJustified prevents a bouncing attack by only updating conflicting justified
// checkpoints in the fork choice during the early slots of the epoch.
// Otherwise, incorporation of the new justified checkpoint is delayed until the next epoch boundary.
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
if helpers.SlotsSinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
return true, nil
}
newJustifiedBlockSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(newJustifiedCheckpt.Root))
if err != nil {
return false, err
}
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.Block == nil {
return false, errors.New("nil new justified block")
}
newJustifiedBlock := newJustifiedBlockSigned.Block
if newJustifiedBlock.Slot <= helpers.StartSlot(s.justifiedCheckpt.Epoch) {
return false, nil
}
justifiedBlockSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if err != nil {
return false, err
}
if justifiedBlockSigned == nil || justifiedBlockSigned.Block == nil {
return false, errors.New("nil justified block")
}
justifiedBlock := justifiedBlockSigned.Block
b, err := s.ancestor(ctx, newJustifiedCheckpt.Root, justifiedBlock.Slot)
if err != nil {
return false, err
}
if !bytes.Equal(b, s.justifiedCheckpt.Root) {
return false, nil
}
return true, nil
}
func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconState) error {
cpt := state.CurrentJustifiedCheckpoint()
if cpt.Epoch > s.bestJustifiedCheckpt.Epoch {
s.bestJustifiedCheckpt = cpt
}
canUpdate, err := s.shouldUpdateCurrentJustified(ctx, cpt)
if err != nil {
return err
}
if canUpdate {
s.prevJustifiedCheckpt = s.justifiedCheckpt
s.justifiedCheckpt = cpt
}
justifiedRoot := bytesutil.ToBytes32(cpt.Root)
justifiedState := s.initSyncState[justifiedRoot]
// If the justified state is nil, resume the normal syncing process and save
// the justified checkpoint.
if justifiedState == nil {
if s.beaconDB.HasState(ctx, justifiedRoot) {
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
justifiedState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), justifiedRoot)
if err != nil {
log.Error(err)
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
}
if err := s.beaconDB.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
return errors.Wrap(err, "could not save justified state")
}
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
// This saves every finalized state to the DB during initial sync. It is needed as part of the optimization
// to use cached states during initial sync in case of a restart.
func (s *Service) saveInitState(ctx context.Context, state *stateTrie.BeaconState) error {
cpt := state.FinalizedCheckpoint()
finalizedRoot := bytesutil.ToBytes32(cpt.Root)
fs := s.initSyncState[finalizedRoot]
if fs == nil {
var err error
fs, err = s.beaconDB.State(ctx, finalizedRoot)
if err != nil {
return err
}
if fs == nil {
fs, err = s.generateState(ctx, bytesutil.ToBytes32(s.prevFinalizedCheckpt.Root), finalizedRoot)
if err != nil {
// This might happen if the client was in sync and is now re-syncing for whatever reason.
log.Warn("Initial sync cache did not have finalized state root cached")
return err
}
}
}
if err := s.beaconDB.SaveState(ctx, fs, finalizedRoot); err != nil {
return errors.Wrap(err, "could not save state")
}
return nil
}
// This filters out the head root and the finalized root known to the DB from the input block roots.
// It serves as the last line of defence before we prune states.
func (s *Service) filterBlockRoots(ctx context.Context, roots [][32]byte) ([][32]byte, error) {
f, err := s.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return nil, err
}
fRoot := f.Root
h, err := s.beaconDB.HeadBlock(ctx)
if err != nil {
return nil, err
}
hRoot, err := ssz.HashTreeRoot(h.Block)
if err != nil {
return nil, err
}
filtered := make([][32]byte, 0, len(roots))
for _, root := range roots {
if bytes.Equal(root[:], fRoot[:]) || bytes.Equal(root[:], hRoot[:]) {
continue
}
filtered = append(filtered, root)
}
return filtered, nil
}
// ancestor returns the root of the ancestor block at the given slot, walking back from the input block root.
//
// Spec pseudocode definition:
// def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
// block = store.blocks[root]
// if block.slot > slot:
// return get_ancestor(store, block.parent_root, slot)
// elif block.slot == slot:
// return root
// else:
// return Bytes32() # root is older than queried slot: no results.
func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "forkchoice.ancestor")
defer span.End()
// Stop recursive ancestry lookup if context is cancelled.
if ctx.Err() != nil {
return nil, ctx.Err()
}
signed, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(root))
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor block")
}
if signed == nil || signed.Block == nil {
return nil, errors.New("nil block")
}
b := signed.Block
// If we don't have the ancestor in the DB, simply return nil so the rest of fork choice
// operation can proceed. This is not an error condition.
if b == nil || b.Slot < slot {
return nil, nil
}
if b.Slot == slot {
return root, nil
}
return s.ancestor(ctx, b.ParentRoot, slot)
}
// This updates the justified checkpoint in the store if the new justified checkpoint is later than the stored one,
// or if the store's justified checkpoint is not in the chain with the finalized checkpoint.
//
// Spec definition:
// if (
// state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch
// or get_ancestor(store, store.justified_checkpoint.root, finalized_slot) != store.finalized_checkpoint.root
// ):
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *stateTrie.BeaconState) error {
finalizedBlkSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
if err != nil || finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
return errors.Wrap(err, "could not get finalized block")
}
finalizedBlk := finalizedBlkSigned.Block
anc, err := s.ancestor(ctx, s.justifiedCheckpt.Root, finalizedBlk.Slot)
if err != nil {
return err
}
// Either the new justified checkpoint is later than the stored one, or the stored one is not in the chain with the finalized checkpoint.
if cpt := state.CurrentJustifiedCheckpoint(); cpt != nil && cpt.Epoch > s.justifiedCheckpt.Epoch || !bytes.Equal(anc, s.finalizedCheckpt.Root) {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
}
return nil
}
// This retrieves missing blocks from the DB (i.e. blocks that couldn't be received over sync) and inserts them into the fork choice store.
// This is useful for the block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock, state *stateTrie.BeaconState) error {
pendingNodes := make([]*ethpb.BeaconBlock, 0)
parentRoot := bytesutil.ToBytes32(blk.ParentRoot)
slot := blk.Slot
// Fork choice only matters from last finalized slot.
higherThanFinalized := slot > helpers.StartSlot(s.finalizedCheckpt.Epoch)
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.forkChoiceStore.HasNode(parentRoot) && s.beaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.beaconDB.Block(ctx, parentRoot)
if err != nil {
return err
}
pendingNodes = append(pendingNodes, b.Block)
parentRoot = bytesutil.ToBytes32(b.Block.ParentRoot)
slot = b.Block.Slot
higherThanFinalized = slot > helpers.StartSlot(s.finalizedCheckpt.Epoch)
}
// Insert parent nodes to fork choice store in reverse order.
// Lower slots should be at the end of the list.
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r, err := ssz.HashTreeRoot(b)
if err != nil {
return err
}
if err := s.forkChoiceStore.ProcessBlock(ctx,
b.Slot, r, bytesutil.ToBytes32(b.ParentRoot),
state.CurrentJustifiedCheckpoint().Epoch,
state.FinalizedCheckpointEpoch()); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
}
return nil
}
// This deletes the input attestations from the attestation pool, so proposers don't include them in a future block.
func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
for _, att := range atts {
if helpers.IsAggregated(att) {
if err := s.attPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if err := s.attPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
}
}
return nil
}
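The ancestor lookup above follows the spec's recursive get_ancestor pseudocode directly. For illustration only, an equivalent iterative walk is sketched below, assuming it lives in the same package and uses the same beaconDB accessor; ancestorIterative is a hypothetical rewrite, not part of this changeset.

// ancestorIterative is a hypothetical loop-based equivalent of ancestor: it
// walks parent roots until it reaches a block at or below the requested slot.
func (s *Service) ancestorIterative(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
	for {
		// Stop the ancestry walk if the context is cancelled.
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		signed, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(root))
		if err != nil {
			return nil, errors.Wrap(err, "could not get ancestor block")
		}
		if signed == nil || signed.Block == nil {
			return nil, errors.New("nil block")
		}
		if signed.Block.Slot < slot {
			// Root is older than the queried slot; not an error for fork choice.
			return nil, nil
		}
		if signed.Block.Slot == slot {
			return root, nil
		}
		root = signed.Block.ParentRoot
	}
}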


@@ -0,0 +1,701 @@
package blockchain
import (
"context"
"reflect"
"strings"
"testing"
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
if err := db.SaveBlock(ctx, genesis); err != nil {
t.Error(err)
}
validGenesisRoot, err := ssz.HashTreeRoot(genesis.Block)
if err != nil {
t.Error(err)
}
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot); err != nil {
t.Fatal(err)
}
roots, err := blockTree1(db, validGenesisRoot[:])
if err != nil {
t.Fatal(err)
}
random := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1, ParentRoot: validGenesisRoot[:]}}
if err := db.SaveBlock(ctx, random); err != nil {
t.Error(err)
}
randomParentRoot, err := ssz.HashTreeRoot(random.Block)
if err != nil {
t.Error(err)
}
if err := service.beaconDB.SaveState(ctx, st.Copy(), randomParentRoot); err != nil {
t.Fatal(err)
}
randomParentRoot2 := roots[1]
if err := service.beaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)); err != nil {
t.Fatal(err)
}
tests := []struct {
name string
blk *ethpb.BeaconBlock
s *stateTrie.BeaconState
time uint64
wantErrString string
}{
{
name: "parent block root does not have a state",
blk: &ethpb.BeaconBlock{},
s: st.Copy(),
wantErrString: "provided block root does not have block saved in the db",
},
{
name: "block is from the feature",
blk: &ethpb.BeaconBlock{ParentRoot: randomParentRoot[:], Slot: params.BeaconConfig().FarFutureEpoch},
s: st.Copy(),
wantErrString: "could not process slot from the future",
},
{
name: "could not get finalized block",
blk: &ethpb.BeaconBlock{ParentRoot: randomParentRoot[:]},
s: st.Copy(),
wantErrString: "block from slot 0 is not a descendent of the current finalized block",
},
{
name: "same slot as finalized block",
blk: &ethpb.BeaconBlock{Slot: 0, ParentRoot: randomParentRoot2},
s: st.Copy(),
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.justifiedCheckpt = &ethpb.Checkpoint{Root: validGenesisRoot[:]}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: validGenesisRoot[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: validGenesisRoot[:]}
service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: validGenesisRoot[:]}
service.finalizedCheckpt.Root = roots[0]
_, err := service.onBlock(ctx, &ethpb.SignedBeaconBlock{Block: tt.blk})
if !strings.Contains(err.Error(), tt.wantErrString) {
t.Errorf("Store.OnBlock() error = %v, wantErr = %v", err, tt.wantErrString)
}
})
}
}
func TestStore_SaveNewValidators(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
preCount := 2 // validators 0 and validators 1
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Validators: []*ethpb.Validator{
{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},
{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}},
{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}},
}})
if err := service.saveNewValidators(ctx, preCount, s); err != nil {
t.Fatal(err)
}
if !db.HasValidatorIndex(ctx, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}) {
t.Error("Wanted validator saved in db")
}
if !db.HasValidatorIndex(ctx, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}) {
t.Error("Wanted validator saved in db")
}
if db.HasValidatorIndex(ctx, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}) {
t.Error("validator not suppose to be saved in db")
}
}
func TestRemoveStateSinceLastFinalized(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
// Save 100 blocks in DB, each has a state.
numBlocks := 100
totalBlocks := make([]*ethpb.SignedBeaconBlock, numBlocks)
blockRoots := make([][32]byte, 0)
for i := 0; i < len(totalBlocks); i++ {
totalBlocks[i] = &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Slot: uint64(i),
},
}
r, err := ssz.HashTreeRoot(totalBlocks[i].Block)
if err != nil {
t.Fatal(err)
}
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: uint64(i)})
if err := service.beaconDB.SaveState(ctx, s, r); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveBlock(ctx, totalBlocks[i]); err != nil {
t.Fatal(err)
}
blockRoots = append(blockRoots, r)
if err := service.beaconDB.SaveHeadBlockRoot(ctx, r); err != nil {
t.Fatal(err)
}
}
// New finalized epoch: 1
finalizedEpoch := uint64(1)
finalizedSlot := finalizedEpoch * params.BeaconConfig().SlotsPerEpoch
endSlot := helpers.StartSlot(finalizedEpoch+1) - 1 // Inclusive
if err := service.rmStatesOlderThanLastFinalized(ctx, 0, endSlot); err != nil {
t.Fatal(err)
}
for _, r := range blockRoots {
s, err := service.beaconDB.State(ctx, r)
if err != nil {
t.Fatal(err)
}
// Also verifies the genesis state didn't get deleted
if s != nil && s.Slot() != finalizedSlot && s.Slot() != 0 && s.Slot() < endSlot {
t.Errorf("State with slot %d should not be in DB", s.Slot())
}
}
// New finalized epoch: 5
newFinalizedEpoch := uint64(5)
newFinalizedSlot := newFinalizedEpoch * params.BeaconConfig().SlotsPerEpoch
endSlot = helpers.StartSlot(newFinalizedEpoch+1) - 1 // Inclusive
if err := service.rmStatesOlderThanLastFinalized(ctx, helpers.StartSlot(finalizedEpoch+1)-1, endSlot); err != nil {
t.Fatal(err)
}
for _, r := range blockRoots {
s, err := service.beaconDB.State(ctx, r)
if err != nil {
t.Fatal(err)
}
// Also verifies the genesis state didn't get deleted
if s != nil && s.Slot() != newFinalizedSlot && s.Slot() != finalizedSlot && s.Slot() != 0 && s.Slot() < endSlot {
t.Errorf("State with slot %d should not be in DB", s.Slot())
}
}
}
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
service.genesisTime = time.Now()
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{})
if err != nil {
t.Fatal(err)
}
if !update {
t.Error("Should be able to update justified, received false")
}
lastJustifiedBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{ParentRoot: []byte{'G'}}}
lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
newJustifiedBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1, ParentRoot: lastJustifiedRoot[:]}}
newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
if err := service.beaconDB.SaveBlock(ctx, newJustifiedBlk); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveBlock(ctx, lastJustifiedBlk); err != nil {
t.Fatal(err)
}
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: lastJustifiedRoot[:]}
update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
if err != nil {
t.Fatal(err)
}
if !update {
t.Error("Should be able to update justified, received false")
}
}
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
lastJustifiedBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{ParentRoot: []byte{'G'}}}
lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
newJustifiedBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{ParentRoot: lastJustifiedRoot[:]}}
newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
if err := service.beaconDB.SaveBlock(ctx, newJustifiedBlk); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveBlock(ctx, lastJustifiedBlk); err != nil {
t.Fatal(err)
}
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: lastJustifiedRoot[:]}
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
if err != nil {
t.Fatal(err)
}
if update {
t.Error("Should not be able to update justified, received true")
}
}
func TestCachedPreState_CanGetFromCache(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1})
r := [32]byte{'A'}
b := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
service.initSyncState[r] = s
received, err := service.verifyBlkPreState(ctx, b)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(s.InnerStateUnsafe(), received.InnerStateUnsafe()) {
t.Error("cached state not the same")
}
}
func TestCachedPreState_CanGetFromDB(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
r := [32]byte{'A'}
b := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
_, err = service.verifyBlkPreState(ctx, b)
wanted := "pre state of slot 1 does not exist"
if err == nil || err.Error() != wanted {
t.Error("Did not get wanted error")
}
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1})
if err := service.beaconDB.SaveState(ctx, s, r); err != nil {
t.Fatal(err)
}
received, err := service.verifyBlkPreState(ctx, b)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(s, received) {
t.Error("cached state not the same")
}
}
func TestSaveInitState_CanSaveDelete(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
for i := uint64(0); i < 64; i++ {
b := &ethpb.BeaconBlock{Slot: i}
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: i})
r, _ := ssz.HashTreeRoot(b)
service.initSyncState[r] = s
}
// Set finalized root as slot 32
finalizedRoot, _ := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 32})
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{
Epoch: 1, Root: finalizedRoot[:]}})
if err := service.saveInitState(ctx, s); err != nil {
t.Fatal(err)
}
// Verify finalized state is saved in DB
finalizedState, err := service.beaconDB.State(ctx, finalizedRoot)
if err != nil {
t.Fatal(err)
}
if finalizedState == nil {
t.Error("finalized state can't be nil")
}
}
func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
signedBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := db.SaveBlock(ctx, signedBlock); err != nil {
t.Fatal(err)
}
r, err := ssz.HashTreeRoot(signedBlock.Block)
if err != nil {
t.Fatal(err)
}
service.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err != nil {
t.Fatal(err)
}
service.initSyncState[r] = st.Copy()
if err := db.SaveState(ctx, st.Copy(), r); err != nil {
t.Fatal(err)
}
// Could update
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: r[:]}})
if err := service.updateJustified(context.Background(), s); err != nil {
t.Fatal(err)
}
if service.bestJustifiedCheckpt.Epoch != s.CurrentJustifiedCheckpoint().Epoch {
t.Error("Incorrect justified epoch in service")
}
// Could not update
service.bestJustifiedCheckpt.Epoch = 2
if err := service.updateJustified(context.Background(), s); err != nil {
t.Fatal(err)
}
if service.bestJustifiedCheckpt.Epoch != 2 {
t.Error("Incorrect justified epoch in service")
}
}
func TestFilterBlockRoots_CanFilter(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
fBlock := &ethpb.BeaconBlock{}
fRoot, _ := ssz.HashTreeRoot(fBlock)
hBlock := &ethpb.BeaconBlock{Slot: 1}
headRoot, _ := ssz.HashTreeRoot(hBlock)
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err := service.beaconDB.SaveBlock(ctx, &ethpb.SignedBeaconBlock{Block: fBlock}); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveState(ctx, st.Copy(), fRoot); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: fRoot[:]}); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveBlock(ctx, &ethpb.SignedBeaconBlock{Block: hBlock}); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveState(ctx, st.Copy(), headRoot); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
t.Fatal(err)
}
roots := [][32]byte{{'C'}, {'D'}, headRoot, {'E'}, fRoot, {'F'}}
wanted := [][32]byte{{'C'}, {'D'}, {'E'}, {'F'}}
received, err := service.filterBlockRoots(ctx, roots)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(wanted, received) {
t.Error("Did not filter correctly")
}
}
func TestPersistCache_CanSave(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
for i := uint64(0); i < initialSyncCacheSize; i++ {
st.SetSlot(i)
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
service.boundaryRoots = append(service.boundaryRoots, root)
}
if err = service.persistCachedStates(ctx, initialSyncCacheSize); err != nil {
t.Fatal(err)
}
for i := uint64(0); i < initialSyncCacheSize-minimumCacheSize; i++ {
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
state, err := db.State(context.Background(), root)
if err != nil {
t.Errorf("State with root of %#x , could not be retrieved: %v", root, err)
}
if state == nil {
t.Errorf("State with root of %#x , does not exist", root)
}
if state.Slot() != i {
t.Errorf("Incorrect slot retrieved. Wanted %d but got %d", i, state.Slot())
}
}
}
func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{}
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
if err := db.SaveBlock(ctx, genesis); err != nil {
t.Error(err)
}
validGenesisRoot, err := ssz.HashTreeRoot(genesis.Block)
if err != nil {
t.Error(err)
}
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err := service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot); err != nil {
t.Fatal(err)
}
roots, err := blockTree1(db, validGenesisRoot[:])
if err != nil {
t.Fatal(err)
}
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
block := &ethpb.BeaconBlock{Slot: 9, ParentRoot: roots[8]}
if err := service.fillInForkChoiceMissingBlocks(context.Background(), block, beaconState); err != nil {
t.Fatal(err)
}
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
if len(service.forkChoiceStore.Nodes()) != 5 {
t.Error("Miss match nodes")
}
if !service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])) {
t.Error("Didn't save node")
}
if !service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])) {
t.Error("Didn't save node")
}
if !service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])) {
t.Error("Didn't save node")
}
}
func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
if err := db.SaveBlock(ctx, genesis); err != nil {
t.Error(err)
}
validGenesisRoot, err := ssz.HashTreeRoot(genesis.Block)
if err != nil {
t.Error(err)
}
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err := service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot); err != nil {
t.Fatal(err)
}
// Define a tree branch, slot 63 <- 64 <- 65
b63 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 63}}
if err := service.beaconDB.SaveBlock(ctx, b63); err != nil {
t.Fatal(err)
}
r63, _ := ssz.HashTreeRoot(b63.Block)
b64 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 64, ParentRoot: r63[:]}}
if err := service.beaconDB.SaveBlock(ctx, b64); err != nil {
t.Fatal(err)
}
r64, _ := ssz.HashTreeRoot(b64.Block)
b65 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 65, ParentRoot: r64[:]}}
if err := service.beaconDB.SaveBlock(ctx, b65); err != nil {
t.Fatal(err)
}
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.fillInForkChoiceMissingBlocks(context.Background(), b65.Block, beaconState); err != nil {
t.Fatal(err)
}
// There should be 2 nodes in fork choice: block 64 and block 63.
if len(service.forkChoiceStore.Nodes()) != 2 {
t.Error("Miss match nodes")
}
// Block 63 should still be in fork choice because its slot is above the start slot of finalized epoch 1.
if !service.forkChoiceStore.HasNode(r63) {
t.Error("Didn't save node")
}
}
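// The following standalone sketch is illustrative only. It assumes (the body of
// fillInForkChoiceMissingBlocks is not shown in this excerpt) that the walk keeps
// ancestors whose slot is above the start slot of the finalized epoch. With 32-slot
// epochs, finalized epoch 1 starts at slot 32, so ancestors 64 and 63 of block 65 are
// both kept, and the walk then stops because block 63's parent is not in the database.
package main

import "fmt"

func main() {
	const slotsPerEpoch uint64 = 32
	finalizedEpoch := uint64(1)
	finalizedStartSlot := finalizedEpoch * slotsPerEpoch // 32
	for _, ancestorSlot := range []uint64{64, 63} {
		fmt.Printf("ancestor slot %d kept: %v\n", ancestorSlot, ancestorSlot > finalizedStartSlot)
	}
}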
// blockTree1 constructs the following tree:
//    /- B1
// B0            /- B5 - B7
//    \- B3 - B4 - B6 - B8
// (B1 and B3 both branch off B0; B5 and B6 both branch off B4)
func blockTree1(db db.Database, genesisRoot []byte) ([][]byte, error) {
b0 := &ethpb.BeaconBlock{Slot: 0, ParentRoot: genesisRoot}
r0, _ := ssz.HashTreeRoot(b0)
b1 := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
r1, _ := ssz.HashTreeRoot(b1)
b3 := &ethpb.BeaconBlock{Slot: 3, ParentRoot: r0[:]}
r3, _ := ssz.HashTreeRoot(b3)
b4 := &ethpb.BeaconBlock{Slot: 4, ParentRoot: r3[:]}
r4, _ := ssz.HashTreeRoot(b4)
b5 := &ethpb.BeaconBlock{Slot: 5, ParentRoot: r4[:]}
r5, _ := ssz.HashTreeRoot(b5)
b6 := &ethpb.BeaconBlock{Slot: 6, ParentRoot: r4[:]}
r6, _ := ssz.HashTreeRoot(b6)
b7 := &ethpb.BeaconBlock{Slot: 7, ParentRoot: r5[:]}
r7, _ := ssz.HashTreeRoot(b7)
b8 := &ethpb.BeaconBlock{Slot: 8, ParentRoot: r6[:]}
r8, _ := ssz.HashTreeRoot(b8)
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err != nil {
return nil, err
}
for _, b := range []*ethpb.BeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
if err := db.SaveBlock(context.Background(), &ethpb.SignedBeaconBlock{Block: b}); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(b.ParentRoot)); err != nil {
return nil, err
}
}
if err := db.SaveState(context.Background(), st.Copy(), r1); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), st.Copy(), r7); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), st.Copy(), r8); err != nil {
return nil, err
}
return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
}


@@ -1,57 +1,27 @@
package blockchain
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-ssz"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
type AttestationReceiver interface {
ReceiveAttestation(ctx context.Context, att *ethpb.Attestation) error
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
}
// ReceiveAttestation is a function that defines the operations that are performed on
// an attestation received from regular sync. The operations consist of:
// 1. Gossip attestation to other peers
// 2. Validate attestation, update validator's latest vote
// 3. Apply fork choice to the processed attestation
// 4. Save latest head info
func (s *Service) ReceiveAttestation(ctx context.Context, att *ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestation")
defer span.End()
// Broadcast the new attestation to the network.
if err := s.p2p.Broadcast(ctx, att); err != nil {
return errors.Wrap(err, "could not broadcast attestation")
}
attDataRoot, err := ssz.HashTreeRoot(att.Data)
if err != nil {
log.WithError(err).Error("Failed to hash attestation")
}
log.WithFields(logrus.Fields{
"attRoot": fmt.Sprintf("%#x", attDataRoot),
"blockRoot": fmt.Sprintf("%#x", att.Data.BeaconBlockRoot),
}).Debug("Broadcasting attestation")
if err := s.ReceiveAttestationNoPubsub(ctx, att); err != nil {
return err
}
processedAtt.Inc()
return nil
IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool
}
// ReceiveAttestationNoPubsub is a function that defines the operations that are performed on
@@ -63,50 +33,107 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestationNoPubsub")
defer span.End()
// Update forkchoice store for the new attestation
attSlot, err := s.forkChoiceStore.OnAttestation(ctx, att)
_, err := s.onAttestation(ctx, att)
if err != nil {
return errors.Wrap(err, "could not process attestation from fork choice service")
return errors.Wrap(err, "could not process attestation")
}
// Run fork choice for head block after updating fork choice store.
headRoot, err := s.forkChoiceStore.Head(ctx)
if err != nil {
return errors.Wrap(err, "could not get head from fork choice service")
}
// Only save head if it's different than the current head.
if !bytes.Equal(headRoot, s.HeadRoot()) {
headBlk, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
if !featureconfig.Get().DisableUpdateHeadPerAttestation {
baseState, err := s.getAttPreState(ctx, att.Data.Target)
if err != nil {
return errors.Wrap(err, "could not compute state from block head")
return err
}
if err := s.saveHead(ctx, headBlk, bytesutil.ToBytes32(headRoot)); err != nil {
return errors.Wrap(err, "could not save head")
// This updates the fork choice head. If a new head could not be updated due to
// long-range or intermediate forking, it simply logs a warning and returns nil,
// as that is more appropriate than returning an error.
if err := s.updateHead(ctx, baseState.Balances()); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
return nil
}
}
// Skip checking for competing attestation's target roots at epoch boundary.
if !helpers.IsEpochStart(attSlot) {
s.headLock.RLock()
defer s.headLock.RUnlock()
targetRoot, err := helpers.BlockRoot(s.headState, att.Data.Target.Epoch)
if err != nil {
return errors.Wrapf(err, "could not get target root for epoch %d", att.Data.Target.Epoch)
}
isCompetingAtts(targetRoot, att.Data.Target.Root[:])
}
processedAttNoPubsub.Inc()
return nil
}
// This checks if the attestation is from a competing chain, emits a warning, and updates metrics.
func isCompetingAtts(headTargetRoot []byte, attTargetRoot []byte) {
if !bytes.Equal(attTargetRoot, headTargetRoot) {
log.WithFields(logrus.Fields{
"attTargetRoot": hex.EncodeToString(attTargetRoot),
"headTargetRoot": hex.EncodeToString(headTargetRoot),
}).Warn("target heads different from new attestation")
competingAtts.Inc()
// IsValidAttestation returns true if the attestation can be verified against its pre-state.
func (s *Service) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
baseState, err := s.getAttPreState(ctx, att.Data.Target)
if err != nil {
log.WithError(err).Error("Failed to validate attestation")
return false
}
if err := blocks.VerifyAttestation(ctx, baseState, att); err != nil {
log.WithError(err).Error("Failed to validate attestation")
return false
}
return true
}
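// Illustrative sketch only: a hypothetical caller (e.g. a gossip validation handler)
// that consults IsValidAttestation before relaying. The function name and wiring are
// assumptions; it reuses the blockchain package's existing imports (context, errors,
// ethpb) and the Service fields shown above (p2p, ReceiveAttestationNoPubsub).
func validateAndForwardAttestation(ctx context.Context, s *Service, att *ethpb.Attestation) error {
	// Drop attestations whose target pre-state cannot verify them.
	if !s.IsValidAttestation(ctx, att) {
		return errors.New("attestation failed pre-state verification")
	}
	// Relay to peers, then feed it into fork choice without re-broadcasting.
	if err := s.p2p.Broadcast(ctx, att); err != nil {
		return errors.Wrap(err, "could not broadcast attestation")
	}
	return s.ReceiveAttestationNoPubsub(ctx, att)
}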
// This processes attestations from the attestation pool to account for validator votes and fork choice.
func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
subscribedToStateEvents <- struct{}{}
<-stateChannel
stateSub.Unsubscribe()
st := slotutil.GetSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
return
case <-st.C():
ctx := context.Background()
atts := s.attPool.ForkchoiceAttestations()
for _, a := range atts {
hasState := s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}
if err := s.attPool.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}
if !s.verifyCheckpointEpoch(a.Data.Target) {
continue
}
if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
log.WithFields(logrus.Fields{
"slot": a.Data.Slot,
"committeeIndex": a.Data.CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
"aggregationCount": a.AggregationBits.Count(),
}).WithError(err).Warn("Could not receive attestation in chain service")
}
}
}
}
}
// This verifies that the epoch of the input checkpoint is within the current or previous epoch
// with respect to the current time. Returns true if it is, false otherwise.
func (s *Service) verifyCheckpointEpoch(c *ethpb.Checkpoint) bool {
now := uint64(time.Now().Unix())
genesisTime := uint64(s.genesisTime.Unix())
currentSlot := (now - genesisTime) / params.BeaconConfig().SecondsPerSlot
currentEpoch := helpers.SlotToEpoch(currentSlot)
var prevEpoch uint64
if currentEpoch > 1 {
prevEpoch = currentEpoch - 1
}
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
return false
}
return true
}
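// Standalone sketch, not part of the source: with 12-second slots and 32-slot epochs
// assumed (mainnet-style values), a node 800 seconds past genesis is at slot 66,
// i.e. epoch 2, so verifyCheckpointEpoch would accept only checkpoints for epochs 1 and 2.
package main

import "fmt"

func main() {
	const secondsPerSlot, slotsPerEpoch uint64 = 12, 32
	elapsed := uint64(800)                      // seconds since genesis
	currentSlot := elapsed / secondsPerSlot     // 66
	currentEpoch := currentSlot / slotsPerEpoch // 2
	var prevEpoch uint64
	if currentEpoch > 1 {
		prevEpoch = currentEpoch - 1 // 1
	}
	for _, cpEpoch := range []uint64{0, 1, 2, 3} {
		accepted := cpEpoch == prevEpoch || cpEpoch == currentEpoch
		fmt.Printf("checkpoint epoch %d accepted: %v\n", cpEpoch, accepted)
	}
}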


@@ -2,112 +2,24 @@ package blockchain
import (
"testing"
"time"
"github.com/prysmaticlabs/go-ssz"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil"
logTest "github.com/sirupsen/logrus/hooks/test"
"golang.org/x/net/context"
)
func TestReceiveAttestation_ProcessCorrectly(t *testing.T) {
hook := logTest.NewGlobal()
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
chainService := setupBeaconChain(t, db)
r, _ := ssz.SigningRoot(&ethpb.BeaconBlock{})
chainService.forkChoiceStore = &store{headRoot: r[:]}
chainService.genesisTime = time.Now()
b := &ethpb.BeaconBlock{}
if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
root, err := ssz.SigningRoot(b)
if err != nil {
t.Fatal(err)
}
if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
t.Fatal(err)
if !chainService.verifyCheckpointEpoch(&ethpb.Checkpoint{}) {
t.Error("Wanted true, got false")
}
a := &ethpb.Attestation{Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: root[:]},
}}
if err := chainService.ReceiveAttestation(ctx, a); err != nil {
t.Fatal(err)
if chainService.verifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 1}) {
t.Error("Wanted false, got true")
}
testutil.AssertLogsContain(t, hook, "Saved new head info")
testutil.AssertLogsContain(t, hook, "Broadcasting attestation")
}
func TestReceiveAttestation_SameHead(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
chainService := setupBeaconChain(t, db)
r, _ := ssz.SigningRoot(&ethpb.BeaconBlock{})
chainService.forkChoiceStore = &store{headRoot: r[:]}
chainService.canonicalRoots[0] = r[:]
b := &ethpb.BeaconBlock{}
if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
root, err := ssz.SigningRoot(b)
if err != nil {
t.Fatal(err)
}
if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
t.Fatal(err)
}
a := &ethpb.Attestation{Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: root[:]},
}}
if err := chainService.ReceiveAttestation(ctx, a); err != nil {
t.Fatal(err)
}
testutil.AssertLogsDoNotContain(t, hook, "Saved new head info")
testutil.AssertLogsContain(t, hook, "Broadcasting attestation")
}
func TestReceiveAttestationNoPubsub_ProcessCorrectly(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
chainService := setupBeaconChain(t, db)
r, _ := ssz.SigningRoot(&ethpb.BeaconBlock{})
chainService.forkChoiceStore = &store{headRoot: r[:]}
b := &ethpb.BeaconBlock{}
if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
root, err := ssz.SigningRoot(b)
if err != nil {
t.Fatal(err)
}
if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
t.Fatal(err)
}
a := &ethpb.Attestation{Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: root[:]},
}}
if err := chainService.ReceiveAttestationNoPubsub(ctx, a); err != nil {
t.Fatal(err)
}
testutil.AssertLogsContain(t, hook, "Saved new head info")
testutil.AssertLogsDoNotContain(t, hook, "Broadcasting attestation")
}


@@ -5,11 +5,15 @@ import (
"context"
"encoding/hex"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -17,10 +21,10 @@ import (
// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error
ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error
ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error
ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error
ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error
ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error
ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error
ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error
}
// ReceiveBlock is a function that defines the operations that are performed on
@@ -29,11 +33,11 @@ type BlockReceiver interface {
// 2. Validate block, apply state transition and update checkpoints
// 3. Apply fork choice to the processed block
// 4. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error {
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlock")
defer span.End()
root, err := ssz.SigningRoot(block)
root, err := ssz.HashTreeRoot(block.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received block")
}
@@ -50,7 +54,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) er
return err
}
processedBlk.Inc()
return nil
}
@@ -59,57 +62,63 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) er
// 1. Validate block, apply state transition and update check points
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error {
func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoPubsub")
defer span.End()
blockCopy := proto.Clone(block).(*ethpb.BeaconBlock)
blockCopy := stateTrie.CopySignedBeaconBlock(block)
// Apply state transition on the new block.
if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
err := errors.Wrap(err, "could not process block from fork choice service")
postState, err := s.onBlock(ctx, blockCopy)
if err != nil {
err := errors.Wrap(err, "could not process block")
traceutil.AnnotateError(span, err)
return err
}
root, err := ssz.SigningRoot(blockCopy)
// Add attestations from the block to the pool for fork choice.
if err := s.attPool.SaveBlockAttestations(blockCopy.Block.Body.Attestations); err != nil {
log.Errorf("Could not save attestation for fork choice: %v", err)
return nil
}
for _, exit := range block.Block.Body.VoluntaryExits {
s.exitPool.MarkIncluded(exit)
}
s.epochParticipationLock.Lock()
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
root, err := ssz.HashTreeRoot(blockCopy.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received block")
}
// Run fork choice after applying state transition on the new block.
headRoot, err := s.forkChoiceStore.Head(ctx)
if err != nil {
return errors.Wrap(err, "could not get head from fork choice service")
}
headBlk, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
if err != nil {
return errors.Wrap(err, "could not compute state from block head")
}
// Only save head if it's different than the current head.
if !bytes.Equal(headRoot, s.HeadRoot()) {
if err := s.saveHead(ctx, headBlk, bytesutil.ToBytes32(headRoot)); err != nil {
if featureconfig.Get().DisableForkChoice && block.Block.Slot > s.headSlot() {
if err := s.saveHead(ctx, root); err != nil {
return errors.Wrap(err, "could not save head")
}
} else {
if err := s.updateHead(ctx, postState.Balances()); err != nil {
return errors.Wrap(err, "could not save head")
}
}
// Remove block's contained deposits, attestations, and other operations from persistent storage.
if err := s.cleanupBlockOperations(ctx, blockCopy); err != nil {
return errors.Wrap(err, "could not clean up block deposits, attestations, and other operations")
}
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: root,
Verified: true,
},
})
// Reports on block and fork choice metrics.
s.reportSlotMetrics(blockCopy.Slot)
// Log if block is a competing block.
isCompetingBlock(root[:], blockCopy.Slot, headRoot, headBlk.Slot)
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.finalizedCheckpt)
// Log state transition data.
logStateTransitionData(blockCopy, root[:])
logStateTransitionData(blockCopy.Block)
processedBlkNoPubsub.Inc()
// We write the latest saved head root to a feed for consumption by other services.
s.headUpdatedFeed.Send(bytesutil.ToBytes32(headRoot))
return nil
}
@@ -117,111 +126,112 @@ func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconB
// that are performed on blocks received from the initial sync service. The operations consist of:
// 1. Validate block, apply state transition and update check points
// 2. Save latest head info
func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error {
func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoForkchoice")
defer span.End()
blockCopy := proto.Clone(block).(*ethpb.BeaconBlock)
blockCopy := stateTrie.CopySignedBeaconBlock(block)
// Apply state transition on the incoming newly received block.
if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
err := errors.Wrap(err, "could not process block from fork choice service")
// Apply state transition on the new block.
_, err := s.onBlock(ctx, blockCopy)
if err != nil {
err := errors.Wrap(err, "could not process block")
traceutil.AnnotateError(span, err)
return err
}
root, err := ssz.SigningRoot(blockCopy)
root, err := ssz.HashTreeRoot(blockCopy.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received block")
}
if !bytes.Equal(root[:], s.HeadRoot()) {
if err := s.saveHead(ctx, blockCopy, root); err != nil {
cachedHeadRoot, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get head root from cache")
}
if !bytes.Equal(root[:], cachedHeadRoot) {
if err := s.saveHead(ctx, root); err != nil {
return errors.Wrap(err, "could not save head")
}
}
// Remove block's contained deposits, attestations, and other operations from persistent storage.
if err := s.cleanupBlockOperations(ctx, blockCopy); err != nil {
return errors.Wrap(err, "could not clean up block deposits, attestations, and other operations")
}
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: root,
Verified: true,
},
})
// Reports on block and fork choice metrics.
s.reportSlotMetrics(blockCopy.Slot)
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.finalizedCheckpt)
// Log state transition data.
logStateTransitionData(blockCopy, root[:])
logStateTransitionData(blockCopy.Block)
s.epochParticipationLock.Lock()
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
// We write the latest saved head root to a feed for consumption by other services.
s.headUpdatedFeed.Send(root)
processedBlkNoPubsubForkchoice.Inc()
return nil
}
// ReceiveBlockNoVerify runs state transition on an input block without verifying the block's BLS contents.
// Depending on the security model, this is the "minimal" work a node can do to sync the chain.
// It simulates light client behavior and assumes 100% trust in the syncing peer.
func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error {
func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoVerify")
defer span.End()
blockCopy := proto.Clone(block).(*ethpb.BeaconBlock)
blockCopy := stateTrie.CopySignedBeaconBlock(block)
// Apply state transition on the incoming newly received blockCopy without verifying its BLS contents.
if err := s.forkChoiceStore.OnBlockNoVerifyStateTransition(ctx, blockCopy); err != nil {
return errors.Wrap(err, "could not process blockCopy from fork choice service")
if err := s.onBlockInitialSyncStateTransition(ctx, blockCopy); err != nil {
err := errors.Wrap(err, "could not process block")
traceutil.AnnotateError(span, err)
return err
}
root, err := ssz.SigningRoot(blockCopy)
root, err := ssz.HashTreeRoot(blockCopy.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received blockCopy")
}
if !bytes.Equal(root[:], s.HeadRoot()) {
if err := s.saveHead(ctx, blockCopy, root); err != nil {
cachedHeadRoot, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get head root from cache")
}
if !bytes.Equal(root[:], cachedHeadRoot) {
if err := s.saveHeadNoDB(ctx, blockCopy, root); err != nil {
err := errors.Wrap(err, "could not save head")
traceutil.AnnotateError(span, err)
return err
}
}
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: root,
Verified: false,
},
})
// Reports on blockCopy and fork choice metrics.
s.reportSlotMetrics(blockCopy.Slot)
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.finalizedCheckpt)
// Log state transition data.
log.WithFields(logrus.Fields{
"slot": blockCopy.Slot,
"attestations": len(blockCopy.Body.Attestations),
"deposits": len(blockCopy.Body.Deposits),
"slot": blockCopy.Block.Slot,
"attestations": len(blockCopy.Block.Body.Attestations),
"deposits": len(blockCopy.Block.Body.Deposits),
}).Debug("Finished applying state transition")
// We write the latest saved head root to a feed for consumption by other services.
s.headUpdatedFeed.Send(root)
s.epochParticipationLock.Lock()
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
return nil
}
// cleanupBlockOperations processes and cleans up any block operations relevant to the beacon node,
// such as attestations, exits, and deposits. We update the latest attestation seen from each validator
// in the local node's runtime, remove pending deposits included in the block from the node's
// local cache, and process validator exits, among other operations.
func (s *Service) cleanupBlockOperations(ctx context.Context, block *ethpb.BeaconBlock) error {
// Forward processed block to operation pool to remove individual operation from DB.
if s.opsPoolService.IncomingProcessedBlockFeed().Send(block) == 0 {
log.Error("Sent processed block to no subscribers")
}
// Remove pending deposits from the deposit queue.
for _, dep := range block.Body.Deposits {
s.depositCache.RemovePendingDeposit(ctx, dep)
}
return nil
}
// This checks if the block is from a competing chain, emits a warning, and updates metrics.
func isCompetingBlock(root []byte, slot uint64, headRoot []byte, headSlot uint64) {
if !bytes.Equal(root[:], headRoot) {
log.WithFields(logrus.Fields{
"blkSlot": slot,
"blkRoot": hex.EncodeToString(root[:]),
"headSlot": headSlot,
"headRoot": hex.EncodeToString(headRoot),
}).Warn("Calculated head diffs from new block")
competingBlks.Inc()
}
}
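// Illustrative sketch only: a hypothetical dispatcher showing which BlockReceiver method
// a caller might choose. The syncMode type and dispatchBlock function are assumptions;
// only the four interface methods come from the source above.
type syncMode int

const (
	regularSync syncMode = iota // block already gossiped elsewhere; run full processing
	initialSync                 // catching up; per-block fork choice is unnecessary
	trustedSync                 // fully trusted peer; skip BLS verification
)

func dispatchBlock(ctx context.Context, chain BlockReceiver, blk *ethpb.SignedBeaconBlock, mode syncMode) error {
	switch mode {
	case initialSync:
		return chain.ReceiveBlockNoPubsubForkchoice(ctx, blk)
	case trustedSync:
		return chain.ReceiveBlockNoVerify(ctx, blk)
	default:
		return chain.ReceiveBlockNoPubsub(ctx, blk)
	}
}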


@@ -1,275 +0,0 @@
package blockchain
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/prysmaticlabs/go-ssz"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestReceiveBlock_ProcessCorrectly(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
chainService := setupBeaconChain(t, db)
deposits, _, privKeys := testutil.SetupInitialDeposits(t, 100)
beaconState, err := state.GenesisBeaconState(deposits, 0, &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
if err != nil {
t.Fatal(err)
}
beaconState.Eth1Data.BlockHash = nil
beaconState.Eth1DepositIndex = 100
stateRoot, err := ssz.HashTreeRoot(beaconState)
if err != nil {
t.Fatal(err)
}
genesis := b.NewGenesisBlock(stateRoot[:])
bodyRoot, err := ssz.HashTreeRoot(genesis.Body)
if err != nil {
t.Fatal(err)
}
genesisBlkRoot, err := ssz.SigningRoot(genesis)
if err != nil {
t.Fatal(err)
}
cp := &ethpb.Checkpoint{Root: genesisBlkRoot[:]}
if err := chainService.forkChoiceStore.GenesisStore(ctx, cp, cp); err != nil {
t.Fatal(err)
}
beaconState.LatestBlockHeader = &ethpb.BeaconBlockHeader{
Slot: genesis.Slot,
ParentRoot: genesis.ParentRoot,
BodyRoot: bodyRoot[:],
StateRoot: genesis.StateRoot,
}
if err := chainService.beaconDB.SaveBlock(ctx, genesis); err != nil {
t.Fatalf("Could not save block to db: %v", err)
}
parentRoot, err := ssz.SigningRoot(genesis)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, beaconState, parentRoot); err != nil {
t.Fatal(err)
}
slot := beaconState.Slot + 1
epoch := helpers.SlotToEpoch(slot)
beaconState.Slot++
randaoReveal, err := testutil.CreateRandaoReveal(beaconState, epoch, privKeys)
if err != nil {
t.Fatal(err)
}
beaconState.Slot--
block := &ethpb.BeaconBlock{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBody{
Eth1Data: &ethpb.Eth1Data{
DepositCount: uint64(len(deposits)),
DepositRoot: []byte("a"),
BlockHash: []byte("b"),
},
RandaoReveal: randaoReveal[:],
Attestations: nil,
},
}
stateRootCandidate, err := state.ExecuteStateTransitionNoVerify(context.Background(), beaconState, block)
if err != nil {
t.Fatal(err)
}
stateRoot, err = ssz.HashTreeRoot(stateRootCandidate)
if err != nil {
t.Fatal(err)
}
block.StateRoot = stateRoot[:]
block, err = testutil.SignBlock(beaconState, block, privKeys)
if err != nil {
t.Error(err)
}
if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
t.Fatal(err)
}
if err := chainService.ReceiveBlock(context.Background(), block); err != nil {
t.Errorf("Block failed processing: %v", err)
}
testutil.AssertLogsContain(t, hook, "Finished applying state transition")
}
func TestReceiveReceiveBlockNoPubsub_CanSaveHeadInfo(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
chainService := setupBeaconChain(t, db)
headBlk := &ethpb.BeaconBlock{Slot: 100}
if err := db.SaveBlock(ctx, headBlk); err != nil {
t.Fatal(err)
}
r, err := ssz.SigningRoot(headBlk)
if err != nil {
t.Fatal(err)
}
chainService.forkChoiceStore = &store{headRoot: r[:]}
if err := chainService.ReceiveBlockNoPubsub(ctx, &ethpb.BeaconBlock{
Slot: 1,
Body: &ethpb.BeaconBlockBody{}}); err != nil {
t.Fatal(err)
}
if !bytes.Equal(r[:], chainService.HeadRoot()) {
t.Error("Incorrect head root saved")
}
if !reflect.DeepEqual(headBlk, chainService.HeadBlock()) {
t.Error("Incorrect head block saved")
}
testutil.AssertLogsContain(t, hook, "Saved new head info")
}
func TestReceiveReceiveBlockNoPubsub_SameHead(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
chainService := setupBeaconChain(t, db)
headBlk := &ethpb.BeaconBlock{}
if err := db.SaveBlock(ctx, headBlk); err != nil {
t.Fatal(err)
}
newBlk := &ethpb.BeaconBlock{
Slot: 1,
Body: &ethpb.BeaconBlockBody{}}
newRoot, _ := ssz.SigningRoot(newBlk)
if err := db.SaveBlock(ctx, newBlk); err != nil {
t.Fatal(err)
}
chainService.forkChoiceStore = &store{headRoot: newRoot[:]}
chainService.canonicalRoots[0] = newRoot[:]
if err := chainService.ReceiveBlockNoPubsub(ctx, newBlk); err != nil {
t.Fatal(err)
}
testutil.AssertLogsDoNotContain(t, hook, "Saved new head info")
}
func TestReceiveBlockNoPubsubForkchoice_ProcessCorrectly(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
chainService := setupBeaconChain(t, db)
deposits, _, privKeys := testutil.SetupInitialDeposits(t, 100)
beaconState, err := state.GenesisBeaconState(deposits, 0, &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
if err != nil {
t.Fatal(err)
}
beaconState.Eth1DepositIndex = 100
stateRoot, err := ssz.HashTreeRoot(beaconState)
if err != nil {
t.Fatal(err)
}
genesis := b.NewGenesisBlock(stateRoot[:])
bodyRoot, err := ssz.HashTreeRoot(genesis.Body)
if err != nil {
t.Fatal(err)
}
if err := chainService.forkChoiceStore.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
t.Fatal(err)
}
beaconState.LatestBlockHeader = &ethpb.BeaconBlockHeader{
Slot: genesis.Slot,
ParentRoot: genesis.ParentRoot,
BodyRoot: bodyRoot[:],
StateRoot: genesis.StateRoot,
}
if err := chainService.beaconDB.SaveBlock(ctx, genesis); err != nil {
t.Fatalf("Could not save block to db: %v", err)
}
parentRoot, err := ssz.SigningRoot(genesis)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, beaconState, parentRoot); err != nil {
t.Fatal(err)
}
slot := beaconState.Slot + 1
epoch := helpers.SlotToEpoch(slot)
beaconState.Slot++
randaoReveal, err := testutil.CreateRandaoReveal(beaconState, epoch, privKeys)
if err != nil {
t.Fatal(err)
}
beaconState.Slot--
block := &ethpb.BeaconBlock{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBody{
Eth1Data: &ethpb.Eth1Data{
DepositCount: uint64(len(deposits)),
DepositRoot: []byte("a"),
BlockHash: []byte("b"),
},
RandaoReveal: randaoReveal[:],
Attestations: nil,
},
}
stateRootCandidate, err := state.ExecuteStateTransitionNoVerify(context.Background(), beaconState, block)
if err != nil {
t.Fatal(err)
}
stateRoot, err = ssz.HashTreeRoot(stateRootCandidate)
if err != nil {
t.Fatal(err)
}
block.StateRoot = stateRoot[:]
block, err = testutil.SignBlock(beaconState, block, privKeys)
if err != nil {
t.Error(err)
}
if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
t.Fatal(err)
}
if err := chainService.ReceiveBlockNoPubsubForkchoice(context.Background(), block); err != nil {
t.Errorf("Block failed processing: %v", err)
}
testutil.AssertLogsContain(t, hook, "Finished applying state transition")
testutil.AssertLogsDoNotContain(t, hook, "Finished fork choice")
}


@@ -11,90 +11,107 @@ import (
"time"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/operations"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/sirupsen/logrus"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
// ChainFeeds interface defines the methods of the Service which provide state related
// information feeds to consumers.
type ChainFeeds interface {
StateInitializedFeed() *event.Feed
}
// NewHeadNotifier defines an interface which can notify many consumers of a new,
// canonical chain head event occurring in the node.
type NewHeadNotifier interface {
HeadUpdatedFeed() *event.Feed
}
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
ctx context.Context
cancel context.CancelFunc
beaconDB db.Database
depositCache *depositcache.DepositCache
chainStartFetcher powchain.ChainStartFetcher
opsPoolService operations.OperationFeeds
forkChoiceStore forkchoice.ForkChoicer
chainStartChan chan time.Time
genesisTime time.Time
stateInitializedFeed *event.Feed
headUpdatedFeed *event.Feed
p2p p2p.Broadcaster
maxRoutines int64
headSlot uint64
headBlock *ethpb.BeaconBlock
headState *pb.BeaconState
canonicalRoots map[uint64][]byte
headLock sync.RWMutex
ctx context.Context
cancel context.CancelFunc
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
chainStartFetcher powchain.ChainStartFetcher
attPool attestations.Pool
slashingPool *slashings.Pool
exitPool *voluntaryexits.Pool
genesisTime time.Time
p2p p2p.Broadcaster
maxRoutines int64
head *head
headLock sync.RWMutex
stateNotifier statefeed.Notifier
genesisRoot [32]byte
epochParticipation map[uint64]*precompute.Balance
epochParticipationLock sync.RWMutex
forkChoiceStore f.ForkChoicer
justifiedCheckpt *ethpb.Checkpoint
prevJustifiedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
prevFinalizedCheckpt *ethpb.Checkpoint
nextEpochBoundarySlot uint64
voteLock sync.RWMutex
initSyncState map[[32]byte]*stateTrie.BeaconState
boundaryRoots [][32]byte
initSyncStateLock sync.RWMutex
checkpointState *cache.CheckpointStateCache
checkpointStateLock sync.Mutex
stateGen *stategen.State
}
// Config options for the service.
type Config struct {
BeaconBlockBuf int
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.Database
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
OpsPoolService operations.OperationFeeds
AttPool attestations.Pool
ExitPool *voluntaryexits.Pool
SlashingPool *slashings.Pool
P2p p2p.Broadcaster
MaxRoutines int64
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
}
// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
store := forkchoice.NewForkChoiceService(ctx, cfg.BeaconDB)
return &Service{
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
depositCache: cfg.DepositCache,
chainStartFetcher: cfg.ChainStartFetcher,
opsPoolService: cfg.OpsPoolService,
forkChoiceStore: store,
chainStartChan: make(chan time.Time),
stateInitializedFeed: new(event.Feed),
headUpdatedFeed: new(event.Feed),
p2p: cfg.P2p,
canonicalRoots: make(map[uint64][]byte),
maxRoutines: cfg.MaxRoutines,
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
depositCache: cfg.DepositCache,
chainStartFetcher: cfg.ChainStartFetcher,
attPool: cfg.AttPool,
exitPool: cfg.ExitPool,
slashingPool: cfg.SlashingPool,
p2p: cfg.P2p,
maxRoutines: cfg.MaxRoutines,
stateNotifier: cfg.StateNotifier,
epochParticipation: make(map[uint64]*precompute.Balance),
forkChoiceStore: cfg.ForkChoiceStore,
initSyncState: make(map[[32]byte]*stateTrie.BeaconState),
boundaryRoots: [][32]byte{},
checkpointState: cache.NewCheckpointStateCache(),
stateGen: stategen.New(cfg.BeaconDB),
}, nil
}
@@ -105,10 +122,28 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
}
// For running initial sync with the state cache, in the event of a restart, we use the
// last finalized checkpoint as the starting point for sync instead of the head
// state. This is because we no longer save the state every slot during sync.
cp, err := s.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if beaconState == nil {
beaconState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
}
}
// Make sure the attestation processor is subscribed and ready for the state initialized event.
attestationProcessorSubscribed := make(chan struct{}, 1)
// If the chain has already been initialized, simply start the block processing routine.
if beaconState != nil {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(beaconState.GenesisTime), 0)
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
if err := s.initializeChainInfo(ctx); err != nil {
log.Fatalf("Could not set up chain info: %v", err)
}
@@ -120,34 +155,74 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not get finalized checkpoint: %v", err)
}
if err := s.forkChoiceStore.GenesisStore(ctx, justifiedCheckpoint, finalizedCheckpoint); err != nil {
log.Fatalf("Could not start fork choice service: %v", err)
// Resume fork choice.
s.justifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)
if finalizedCheckpoint.Epoch > 1 {
if err := s.pruneGarbageState(ctx, helpers.StartSlot(finalizedCheckpoint.Epoch)-params.BeaconConfig().SlotsPerEpoch); err != nil {
log.WithError(err).Warn("Could not prune old states")
}
}
s.stateInitializedFeed.Send(s.genesisTime)
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
},
})
} else {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.chainStartFetcher == nil {
log.Fatal("Not configured web3Service for POW chain")
return // return is needed for TestStartUninitializedChainWithoutConfigPOWChain.
}
subChainStart := s.chainStartFetcher.ChainStartFeed().Subscribe(s.chainStartChan)
go func() {
genesisTime := <-s.chainStartChan
s.processChainStartTime(ctx, genesisTime, subChainStart)
return
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
<-attestationProcessorSubscribed
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.ChainStarted {
data := event.Data.(*statefeed.ChainStartedData)
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
s.processChainStartTime(ctx, data.StartTime)
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state notifier failed")
return
}
}
}()
}
go s.processAttestation(attestationProcessorSubscribed)
}
// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time, chainStartSub event.Subscription) {
initialDeposits := s.chainStartFetcher.ChainStartDeposits()
if err := s.initializeBeaconChain(ctx, genesisTime, initialDeposits, s.chainStartFetcher.ChainStartEth1Data()); err != nil {
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.chainStartFetcher.PreGenesisState()
if err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.chainStartFetcher.ChainStartEth1Data()); err != nil {
log.Fatalf("Could not initialize beacon chain: %v", err)
}
s.stateInitializedFeed.Send(genesisTime)
chainStartSub.Unsubscribe()
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
},
})
}
// initializes the state and genesis block of the beacon chain to persistent storage
@@ -156,15 +231,14 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
func (s *Service) initializeBeaconChain(
ctx context.Context,
genesisTime time.Time,
deposits []*ethpb.Deposit,
preGenesisState *stateTrie.BeaconState,
eth1data *ethpb.Eth1Data) error {
_, span := trace.StartSpan(context.Background(), "beacon-chain.Service.initializeBeaconChain")
defer span.End()
log.Info("Genesis time reached, starting the beacon chain")
s.genesisTime = genesisTime
unixTime := uint64(genesisTime.Unix())
genesisState, err := state.GenesisBeaconState(deposits, unixTime, eth1data)
genesisState, err := state.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
if err != nil {
return errors.Wrap(err, "could not initialize genesis state")
}
@@ -173,11 +247,17 @@ func (s *Service) initializeBeaconChain(
return errors.Wrap(err, "could not save genesis data")
}
log.Info("Initialized beacon chain genesis state")
// Clear out all pre-genesis data now that the state is initialized.
s.chainStartFetcher.ClearPreGenesisData()
// Update committee shuffled indices for genesis epoch.
if featureconfig.Get().EnableNewCache {
if err := helpers.UpdateCommitteeCache(genesisState); err != nil {
return err
}
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(genesisState, 0 /* genesis epoch */); err != nil {
return err
}
return nil
@@ -198,66 +278,32 @@ func (s *Service) Status() error {
return nil
}
// StateInitializedFeed returns a feed that is written to
// when the beacon state is first initialized.
func (s *Service) StateInitializedFeed() *event.Feed {
return s.stateInitializedFeed
// ClearCachedStates removes all stored cached states. This is done after the node
// is synced.
func (s *Service) ClearCachedStates() {
s.initSyncState = map[[32]byte]*stateTrie.BeaconState{}
}
// HeadUpdatedFeed is a feed containing the head block root and
// is written to when a new head block is saved to DB.
func (s *Service) HeadUpdatedFeed() *event.Feed {
return s.headUpdatedFeed
}
// This gets called when beacon chain is first initialized to save validator indices and public keys in db.
func (s *Service) saveGenesisValidators(ctx context.Context, state *stateTrie.BeaconState) error {
pubkeys := make([][48]byte, state.NumValidators())
indices := make([]uint64, state.NumValidators())
// This gets called to update canonical root mapping.
func (s *Service) saveHead(ctx context.Context, b *ethpb.BeaconBlock, r [32]byte) error {
s.headLock.Lock()
defer s.headLock.Unlock()
s.headSlot = b.Slot
s.canonicalRoots[b.Slot] = r[:]
if err := s.beaconDB.SaveHeadBlockRoot(ctx, r); err != nil {
return errors.Wrap(err, "could not save head root in DB")
for i := 0; i < state.NumValidators(); i++ {
pubkeys[i] = state.PubkeyAtIndex(uint64(i))
indices[i] = uint64(i)
}
s.headBlock = b
return s.beaconDB.SaveValidatorIndices(ctx, pubkeys, indices)
}
headState, err := s.beaconDB.State(ctx, r)
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.BeaconState) error {
stateRoot, err := genesisState.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
s.headState = headState
log.WithFields(logrus.Fields{
"slot": b.Slot,
"headRoot": fmt.Sprintf("%#x", r),
}).Debug("Saved new head info")
return nil
}
// This gets called when beacon chain is first initialized to save validator indices and pubkeys in db
func (s *Service) saveGenesisValidators(ctx context.Context, state *pb.BeaconState) error {
for i, v := range state.Validators {
if err := s.beaconDB.SaveValidatorIndex(ctx, bytesutil.ToBytes48(v.PublicKey), uint64(i)); err != nil {
return errors.Wrapf(err, "could not save validator index: %d", i)
}
}
return nil
}
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db
func (s *Service) saveGenesisData(ctx context.Context, genesisState *pb.BeaconState) error {
s.headLock.Lock()
defer s.headLock.Unlock()
stateRoot, err := ssz.HashTreeRoot(genesisState)
if err != nil {
return errors.Wrap(err, "could not tree hash genesis state")
return err
}
genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
genesisBlkRoot, err := ssz.SigningRoot(genesisBlk)
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
if err != nil {
return errors.Wrap(err, "could not get genesis block root")
}
@@ -265,35 +311,73 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *pb.BeaconSt
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
}
if err := s.beaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could save genesis block root")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.saveGenesisValidators(ctx, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis validators")
}
genesisCheckpoint := &ethpb.Checkpoint{Root: genesisBlkRoot[:]}
if err := s.forkChoiceStore.GenesisStore(ctx, genesisCheckpoint, genesisCheckpoint); err != nil {
return errors.Wrap(err, "Could not start fork choice service: %v")
// Add the genesis block to the fork choice store.
s.justifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
if err := s.forkChoiceStore.ProcessBlock(ctx,
genesisBlk.Block.Slot,
genesisBlkRoot,
params.BeaconConfig().ZeroHash,
genesisCheckpoint.Epoch,
genesisCheckpoint.Epoch); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}
s.headBlock = genesisBlk
s.headState = genesisState
s.canonicalRoots[genesisState.Slot] = genesisBlkRoot[:]
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
return nil
}
// This gets called to initialize chain info variables using the finalized checkpoint stored in DB
func (s *Service) initializeChainInfo(ctx context.Context) error {
s.headLock.Lock()
defer s.headLock.Unlock()
genesisBlock, err := s.beaconDB.GenesisBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not get genesis block from db")
}
if genesisBlock == nil {
return errors.New("no genesis block in db")
}
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlock.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root of genesis block")
}
s.genesisRoot = genesisBlkRoot
if flags.Get().UnsafeSync {
headBlock, err := s.beaconDB.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not retrieve head block")
}
headRoot, err := ssz.HashTreeRoot(headBlock.Block)
if err != nil {
return errors.Wrap(err, "could not hash head block")
}
headState, err := s.beaconDB.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not retrieve head state")
}
s.setHead(headRoot, headBlock, headState)
return nil
}
finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
@@ -304,17 +388,58 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
// would be the genesis state and block.
return errors.New("no finalized epoch in the database")
}
s.headState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
finalizedState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
s.headBlock, err = s.beaconDB.Block(ctx, bytesutil.ToBytes32(finalized.Root))
finalizedBlock, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(finalized.Root))
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
}
s.headSlot = s.headState.Slot
s.canonicalRoots[s.headSlot] = finalized.Root
if finalizedState == nil || finalizedBlock == nil {
return errors.New("finalized state and block can't be nil")
}
s.setHead(bytesutil.ToBytes32(finalized.Root), finalizedBlock, finalizedState)
return nil
}
// This is called when a client starts from a non-genesis slot. It deletes the states in DB
// from slot 1 (avoid genesis state) to `slot`.
func (s *Service) pruneGarbageState(ctx context.Context, slot uint64) error {
if featureconfig.Get().DontPruneStateStartUp {
return nil
}
filter := filters.NewFilter().SetStartSlot(1).SetEndSlot(slot)
roots, err := s.beaconDB.BlockRoots(ctx, filter)
if err != nil {
return err
}
if err := s.beaconDB.DeleteStates(ctx, roots); err != nil {
return err
}
return nil
}
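As an illustrative aside (not part of this diff), a minimal sketch of how a node resuming from a saved chain might wire the pruning step in before chain info is loaded; the helper name and the call order are assumptions:

// Hypothetical helper in the same package: drop states saved between slot 1
// and the resume slot (the genesis state is kept), then load chain info as usual.
func (s *Service) resumeFromSavedChain(ctx context.Context, resumeSlot uint64) error {
    if err := s.pruneGarbageState(ctx, resumeSlot); err != nil {
        return errors.Wrap(err, "could not prune stale states")
    }
    return s.initializeChainInfo(ctx)
}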
// This is called when a client starts from a non-genesis slot. It passes the last justified and finalized
// checkpoint information to the fork choice service to initialize the fork choice store.
func (s *Service) resumeForkChoice(justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) {
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
s.forkChoiceStore = store
}
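A hedged sketch of how resumeForkChoice could be fed from persisted checkpoints; the JustifiedCheckpoint getter and the helper name are assumptions, only the FinalizedCheckpoint lookup appears in this diff:

// Hypothetical wiring: rebuild the proto-array store from checkpoints stored in the DB.
func (s *Service) restoreForkChoiceFromDB(ctx context.Context) error {
    justified, err := s.beaconDB.JustifiedCheckpoint(ctx)
    if err != nil {
        return err
    }
    finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
    if err != nil {
        return err
    }
    s.resumeForkChoice(justified, finalized)
    return nil
}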
// This returns true if block has been processed before. Two ways to verify the block has been processed:
// 1.) Check fork choice store.
// 2.) Check DB.
// Checking 1.) is ten times faster than checking 2.)
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
if s.forkChoiceStore.HasNode(root) {
return true
}
return s.beaconDB.HasBlock(ctx, root)
}
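As a usage sketch (not from this PR), a caller in the same package could use hasBlock to short-circuit re-processing of blocks it already knows; ReceiveBlockNoPubsub is assumed to exist on the real Service as it does on the mock further below:

// Illustrative only: skip blocks that are already in fork choice or the DB.
func (s *Service) maybeProcessBlock(ctx context.Context, blk *ethpb.SignedBeaconBlock) error {
    root, err := ssz.HashTreeRoot(blk.Block)
    if err != nil {
        return err
    }
    if s.hasBlock(ctx, root) {
        return nil // cheap fork choice lookup first, DB only as a fallback
    }
    return s.ReceiveBlockNoPubsub(ctx, blk)
}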


@@ -6,7 +6,6 @@ import (
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/sirupsen/logrus"
)
@@ -19,19 +18,16 @@ func TestChainService_SaveHead_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
s := &Service{
beaconDB: db,
canonicalRoots: make(map[uint64][]byte),
beaconDB: db,
}
go func() {
s.saveHead(
context.Background(),
&ethpb.BeaconBlock{Slot: 777},
[32]byte{},
)
}()
s.saveHead(
context.Background(),
&ethpb.BeaconBlock{Slot: 888},
[32]byte{},
)
}


@@ -4,28 +4,31 @@ import (
"bytes"
"context"
"encoding/hex"
"errors"
"io/ioutil"
"math/big"
"reflect"
"testing"
"time"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
gethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
ssz "github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -33,10 +36,6 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)
// Ensure Service implements interfaces.
var _ = ChainFeeds(&Service{})
var _ = NewHeadNotifier(&Service{})
func init() {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
@@ -46,16 +45,20 @@ type store struct {
headRoot []byte
}
func (s *store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
return nil
func (s *store) OnBlock(ctx context.Context, b *ethpb.SignedBeaconBlock) (*beaconstate.BeaconState, error) {
return nil, nil
}
func (s *store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error {
return nil
func (s *store) OnBlockCacheFilteredTree(ctx context.Context, b *ethpb.SignedBeaconBlock) (*beaconstate.BeaconState, error) {
return nil, nil
}
func (s *store) OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error) {
return 0, nil
func (s *store) OnBlockInitialSyncStateTransition(ctx context.Context, b *ethpb.SignedBeaconBlock) (*beaconstate.BeaconState, error) {
return nil, nil
}
func (s *store) OnAttestation(ctx context.Context, a *ethpb.Attestation) ([]uint64, error) {
return nil, nil
}
func (s *store) GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error {
@@ -66,109 +69,24 @@ func (s *store) FinalizedCheckpt() *ethpb.Checkpoint {
return nil
}
func (s *store) JustifiedCheckpt() *ethpb.Checkpoint {
return nil
}
func (s *store) Head(ctx context.Context) ([]byte, error) {
return s.headRoot, nil
}
type mockOperationService struct{}
func (ms *mockOperationService) IncomingProcessedBlockFeed() *event.Feed {
return new(event.Feed)
}
type mockBeaconNode struct {
stateFeed *event.Feed
}
func (ms *mockOperationService) IncomingAttFeed() *event.Feed {
return nil
}
func (ms *mockOperationService) IncomingExitFeed() *event.Feed {
return nil
}
type mockClient struct{}
func (m *mockClient) SubscribeNewHead(ctx context.Context, ch chan<- *gethTypes.Header) (ethereum.Subscription, error) {
return new(event.Feed).Subscribe(ch), nil
}
func (m *mockClient) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
head := &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}
return gethTypes.NewBlockWithHeader(head), nil
}
func (m *mockClient) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
head := &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}
return gethTypes.NewBlockWithHeader(head), nil
}
func (m *mockClient) HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error) {
return &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}, nil
}
func (m *mockClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
return new(event.Feed).Subscribe(ch), nil
}
func (m *mockClient) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
return []byte{'t', 'e', 's', 't'}, nil
}
func (m *mockClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
return []byte{'t', 'e', 's', 't'}, nil
}
func (m *mockClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]gethTypes.Log, error) {
logs := make([]gethTypes.Log, 3)
for i := 0; i < len(logs); i++ {
logs[i].Address = common.Address{}
logs[i].Topics = make([]common.Hash, 5)
logs[i].Topics[0] = common.Hash{'a'}
logs[i].Topics[1] = common.Hash{'b'}
logs[i].Topics[2] = common.Hash{'c'}
}
return logs, nil
}
// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return mbn.stateFeed
}
func (m *mockClient) LatestBlockHash() common.Hash {
return common.BytesToHash([]byte{'A'})
}
type faultyClient struct{}
func (f *faultyClient) SubscribeNewHead(ctx context.Context, ch chan<- *gethTypes.Header) (ethereum.Subscription, error) {
return new(event.Feed).Subscribe(ch), nil
}
func (f *faultyClient) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
return nil, errors.New("failed")
}
func (f *faultyClient) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
return nil, errors.New("failed")
}
func (f *faultyClient) HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error) {
return nil, errors.New("failed")
}
func (f *faultyClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
return new(event.Feed).Subscribe(ch), nil
}
func (f *faultyClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]gethTypes.Log, error) {
return nil, errors.New("unable to retrieve logs")
}
func (f *faultyClient) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
return []byte{}, errors.New("unable to retrieve contract code")
}
func (f *faultyClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
return []byte{}, errors.New("unable to retrieve contract code")
}
func (f *faultyClient) LatestBlockHash() common.Hash {
return common.BytesToHash([]byte{'A'})
}
type mockBroadcaster struct {
@@ -182,24 +100,32 @@ func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
var _ = p2p.Broadcaster(&mockBroadcaster{})
func setupGenesisBlock(t *testing.T, cs *Service) ([32]byte, *ethpb.BeaconBlock) {
genesis := b.NewGenesisBlock([]byte{})
if err := cs.beaconDB.SaveBlock(context.Background(), genesis); err != nil {
t.Fatalf("could not save block to db: %v", err)
}
parentHash, err := ssz.SigningRoot(genesis)
if err != nil {
t.Fatalf("unable to get tree hash root of canonical head: %v", err)
}
return parentHash, genesis
}
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
endpoint := "ws://127.0.0.1"
ctx := context.Background()
var web3Service *powchain.Service
var err error
bState, _ := testutil.DeterministicGenesisState(t, 10)
err = beaconDB.SavePowchainData(ctx, &protodb.ETH1ChainData{
BeaconState: bState.InnerStateUnsafe(),
Trie: &protodb.SparseMerkleTrie{},
CurrentEth1Data: &protodb.LatestETH1Data{
BlockHash: make([]byte, 32),
},
ChainstartData: &protodb.ChainStartData{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
},
DepositContainers: []*protodb.DepositContainer{},
})
if err != nil {
t.Fatal(err)
}
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
BeaconDB: beaconDB,
ETH1Endpoint: endpoint,
DepositContract: common.Address{},
})
@@ -212,8 +138,10 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
BeaconDB: beaconDB,
DepositCache: depositcache.NewDepositCache(),
ChainStartFetcher: web3Service,
OpsPoolService: &mockOperationService{},
P2p: &mockBroadcaster{},
StateNotifier: &mockBeaconNode{},
AttPool: attestations.NewPool(),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
}
if err != nil {
t.Fatalf("could not register blockchain service: %v", err)
@@ -222,37 +150,53 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
if err != nil {
t.Fatalf("unable to setup chain service: %v", err)
}
chainService.genesisTime = time.Unix(1, 0) // non-zero time
return chainService
}
func TestChainStartStop_Uninitialized(t *testing.T) {
helpers.ClearAllCaches()
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
chainService := setupBeaconChain(t, db)
// Test the start function.
genesisChan := make(chan time.Time, 0)
sub := chainService.stateInitializedFeed.Subscribe(genesisChan)
defer sub.Unsubscribe()
// Listen for state events.
stateSubChannel := make(chan *feed.Event, 1)
stateSub := chainService.stateNotifier.StateFeed().Subscribe(stateSubChannel)
// Test the chain start state notifier.
genesisTime := time.Unix(1, 0)
chainService.Start()
chainService.chainStartChan <- time.Unix(0, 0)
genesisTime := <-genesisChan
if genesisTime != time.Unix(0, 0) {
t.Errorf(
"Expected genesis time to equal chainstart time (%v), received %v",
time.Unix(0, 0),
genesisTime,
)
}
event := &feed.Event{
Type: statefeed.ChainStarted,
Data: &statefeed.ChainStartedData{
StartTime: genesisTime,
},
}
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 1; sent == 1; {
sent = chainService.stateNotifier.StateFeed().Send(event)
if sent == 1 {
// Flush our local subscriber.
<-stateSubChannel
}
}
// Now wait for notification the state is ready.
for stateInitialized := false; stateInitialized == false; {
recv := <-stateSubChannel
if recv.Type == statefeed.Initialized {
stateInitialized = true
}
}
stateSub.Unsubscribe()
beaconState, err := db.HeadState(context.Background())
if err != nil {
t.Fatal(err)
}
if beaconState == nil || beaconState.Slot != 0 {
if beaconState == nil || beaconState.Slot() != 0 {
t.Error("Expected canonical state feed to send a state with genesis block")
}
if err := chainService.Stop(); err != nil {
@@ -263,7 +207,7 @@ func TestChainStartStop_Uninitialized(t *testing.T) {
t.Error("Context was not canceled")
}
testutil.AssertLogsContain(t, hook, "Waiting")
testutil.AssertLogsContain(t, hook, "Genesis time reached")
testutil.AssertLogsContain(t, hook, "Initialized beacon chain genesis state")
}
func TestChainStartStop_Initialized(t *testing.T) {
@@ -275,22 +219,26 @@ func TestChainStartStop_Initialized(t *testing.T) {
chainService := setupBeaconChain(t, db)
genesisBlk := b.NewGenesisBlock([]byte{})
blkRoot, err := ssz.SigningRoot(genesisBlk)
blkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
if err != nil {
t.Fatal(err)
}
if err := db.SaveBlock(ctx, genesisBlk); err != nil {
t.Fatal(err)
}
s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: 1})
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, s, blkRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveHeadBlockRoot(ctx, blkRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, &pb.BeaconState{Slot: 1}, blkRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}); err != nil {
t.Fatal(err)
}
@@ -315,33 +263,57 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
ctx := context.Background()
bc := setupBeaconChain(t, db)
var err error
// Set up 10 deposits pre chain start for validators to register
count := uint64(10)
deposits, _, _ := testutil.SetupInitialDeposits(t, count)
if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), deposits, &ethpb.Eth1Data{}); err != nil {
deposits, _, _ := testutil.DeterministicDepositsAndKeys(count)
trie, _, err := testutil.DepositTrieFromDeposits(deposits)
if err != nil {
t.Fatal(err)
}
hashTreeRoot := trie.HashTreeRoot()
genState, err := state.EmptyGenesisState()
if err != nil {
t.Fatal(err)
}
genState.SetEth1Data(&ethpb.Eth1Data{
DepositRoot: hashTreeRoot[:],
DepositCount: uint64(len(deposits)),
})
genState, err = b.ProcessDeposits(ctx, genState, &ethpb.BeaconBlockBody{Deposits: deposits})
if err != nil {
t.Fatal(err)
}
if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, &ethpb.Eth1Data{
DepositRoot: hashTreeRoot[:],
}); err != nil {
t.Fatal(err)
}
s, err := bc.beaconDB.State(ctx, bytesutil.ToBytes32(bc.canonicalRoots[0]))
s, err := bc.beaconDB.State(ctx, bc.headRoot())
if err != nil {
t.Fatal(err)
}
for _, v := range s.Validators {
if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48(v.PublicKey)) {
for _, v := range s.Validators() {
if !db.HasValidatorIndex(ctx, v.PublicKey) {
t.Errorf("Validator %s missing from db", hex.EncodeToString(v.PublicKey))
}
}
if bc.HeadState() == nil {
if _, err := bc.HeadState(ctx); err != nil {
t.Error(err)
}
headBlk, err := bc.HeadBlock(ctx)
if err != nil {
t.Fatal(err)
}
if headBlk == nil {
t.Error("Head state can't be nil after initialize beacon chain")
}
if bc.HeadBlock() == nil {
t.Error("Head state can't be nil after initialize beacon chain")
}
if bc.CanonicalRoot(0) == nil {
t.Error("Canonical root for slot 0 can't be nil after initialize beacon chain")
if bc.headRoot() == params.BeaconConfig().ZeroHash {
t.Error("Canonical root for slot 0 can't be zeros after initialize beacon chain")
}
}
@@ -351,7 +323,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
ctx := context.Background()
genesis := b.NewGenesisBlock([]byte{})
genesisRoot, err := ssz.SigningRoot(genesis)
genesisRoot, err := ssz.HashTreeRoot(genesis.Block)
if err != nil {
t.Fatal(err)
}
@@ -363,9 +335,12 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
}
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := &ethpb.BeaconBlock{Slot: finalizedSlot, ParentRoot: genesisRoot[:]}
headState := &pb.BeaconState{Slot: finalizedSlot}
headRoot, _ := ssz.SigningRoot(headBlock)
headBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: finalizedSlot, ParentRoot: genesisRoot[:]}}
headState, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: finalizedSlot})
if err != nil {
t.Fatal(err)
}
headRoot, _ := ssz.HashTreeRoot(headBlock.Block)
if err := db.SaveState(ctx, headState, headRoot); err != nil {
t.Fatal(err)
}
@@ -381,20 +356,181 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
if err := db.SaveBlock(ctx, headBlock); err != nil {
t.Fatal(err)
}
c := &Service{beaconDB: db, canonicalRoots: make(map[uint64][]byte)}
c := &Service{beaconDB: db}
if err := c.initializeChainInfo(ctx); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(c.HeadBlock(), headBlock) {
headBlk, err := c.HeadBlock(ctx)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(headBlk, headBlock) {
t.Error("head block incorrect")
}
if !reflect.DeepEqual(c.HeadState(), headState) {
t.Error("head block incorrect")
s, err := c.HeadState(ctx)
if err != nil {
t.Fatal(err)
}
if headBlock.Slot != c.HeadSlot() {
if !reflect.DeepEqual(s.InnerStateUnsafe(), headState.InnerStateUnsafe()) {
t.Error("head state incorrect")
}
if headBlock.Block.Slot != c.HeadSlot() {
t.Error("head slot incorrect")
}
if !bytes.Equal(headRoot[:], c.HeadRoot()) {
r, err := c.HeadRoot(context.Background())
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(headRoot[:], r) {
t.Error("head slot incorrect")
}
if c.genesisRoot != genesisRoot {
t.Error("genesis block root incorrect")
}
}
func TestChainService_SaveHeadNoDB(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
s := &Service{
beaconDB: db,
}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
r, _ := ssz.HashTreeRoot(b)
state := &pb.BeaconState{}
newState, err := beaconstate.InitializeFromProto(state)
s.beaconDB.SaveState(ctx, newState, r)
if err := s.saveHeadNoDB(ctx, b, r); err != nil {
t.Fatal(err)
}
newB, err := s.beaconDB.HeadBlock(ctx)
if err != nil {
t.Fatal(err)
}
if reflect.DeepEqual(newB, b) {
t.Error("head block should not be equal")
}
}
func TestChainService_PruneOldStates(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()
s := &Service{
beaconDB: db,
}
for i := 0; i < 100; i++ {
block := &ethpb.BeaconBlock{Slot: uint64(i)}
if err := s.beaconDB.SaveBlock(ctx, &ethpb.SignedBeaconBlock{Block: block}); err != nil {
t.Fatal(err)
}
r, err := ssz.HashTreeRoot(block)
if err != nil {
t.Fatal(err)
}
state := &pb.BeaconState{Slot: uint64(i)}
newState, err := beaconstate.InitializeFromProto(state)
if err != nil {
t.Fatal(err)
}
if err := s.beaconDB.SaveState(ctx, newState, r); err != nil {
t.Fatal(err)
}
}
// Delete half of the states.
if err := s.pruneGarbageState(ctx, 50); err != nil {
t.Fatal(err)
}
filter := filters.NewFilter().SetStartSlot(1).SetEndSlot(100)
roots, err := s.beaconDB.BlockRoots(ctx, filter)
if err != nil {
t.Fatal(err)
}
for i := 1; i < 50; i++ {
s, err := s.beaconDB.State(ctx, roots[i])
if err != nil {
t.Fatal(err)
}
if s != nil {
t.Errorf("wanted nil for slot %d", i)
}
}
}
func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
finalizedCheckpt: &ethpb.Checkpoint{},
beaconDB: db,
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, _ := ssz.HashTreeRoot(block.Block)
bs := &pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{}}
state, _ := beaconstate.InitializeFromProto(bs)
if err := s.insertBlockToForkChoiceStore(ctx, block.Block, r, state); err != nil {
t.Fatal(err)
}
if s.hasBlock(ctx, [32]byte{}) {
t.Error("Should not have block")
}
if !s.hasBlock(ctx, r) {
t.Error("Should have block")
}
}
func BenchmarkHasBlockDB(b *testing.B) {
db := testDB.SetupDB(b)
defer testDB.TeardownDB(b, db)
ctx := context.Background()
s := &Service{
beaconDB: db,
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := s.beaconDB.SaveBlock(ctx, block); err != nil {
b.Fatal(err)
}
r, _ := ssz.HashTreeRoot(block.Block)
b.ResetTimer()
for i := 0; i < b.N; i++ {
if !s.beaconDB.HasBlock(ctx, r) {
b.Fatal("Block is not in DB")
}
}
}
func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
ctx := context.Background()
db := testDB.SetupDB(b)
defer testDB.TeardownDB(b, db)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
finalizedCheckpt: &ethpb.Checkpoint{},
beaconDB: db,
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, _ := ssz.HashTreeRoot(block.Block)
bs := &pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{}}
state, _ := beaconstate.InitializeFromProto(bs)
if err := s.insertBlockToForkChoiceStore(ctx, block.Block, r, state); err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if !s.forkChoiceStore.HasNode(r) {
b.Fatal("Block is not in fork choice store")
}
}
}


@@ -7,11 +7,18 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/event:go_default_library",
"//shared/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],


@@ -6,53 +6,131 @@ import (
"time"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
)
// ChainService defines the mock interface for testing
type ChainService struct {
State *pb.BeaconState
Root []byte
Block *ethpb.BeaconBlock
FinalizedCheckPoint *ethpb.Checkpoint
StateFeed *event.Feed
BlocksReceived []*ethpb.BeaconBlock
Genesis time.Time
Fork *pb.Fork
DB db.Database
State *stateTrie.BeaconState
Root []byte
Block *ethpb.SignedBeaconBlock
FinalizedCheckPoint *ethpb.Checkpoint
CurrentJustifiedCheckPoint *ethpb.Checkpoint
PreviousJustifiedCheckPoint *ethpb.Checkpoint
BlocksReceived []*ethpb.SignedBeaconBlock
Balance *precompute.Balance
Genesis time.Time
Fork *pb.Fork
DB db.Database
stateNotifier statefeed.Notifier
blockNotifier blockfeed.Notifier
opNotifier opfeed.Notifier
ValidAttestation bool
}
// StateNotifier mocks the same method in the chain service.
func (ms *ChainService) StateNotifier() statefeed.Notifier {
if ms.stateNotifier == nil {
ms.stateNotifier = &MockStateNotifier{}
}
return ms.stateNotifier
}
// BlockNotifier mocks the same method in the chain service.
func (ms *ChainService) BlockNotifier() blockfeed.Notifier {
if ms.blockNotifier == nil {
ms.blockNotifier = &MockBlockNotifier{}
}
return ms.blockNotifier
}
// MockBlockNotifier mocks the block notifier.
type MockBlockNotifier struct {
feed *event.Feed
}
// BlockFeed returns a block feed.
func (msn *MockBlockNotifier) BlockFeed() *event.Feed {
if msn.feed == nil {
msn.feed = new(event.Feed)
}
return msn.feed
}
// MockStateNotifier mocks the state notifier.
type MockStateNotifier struct {
feed *event.Feed
}
// StateFeed returns a state feed.
func (msn *MockStateNotifier) StateFeed() *event.Feed {
if msn.feed == nil {
msn.feed = new(event.Feed)
}
return msn.feed
}
// OperationNotifier mocks the same method in the chain service.
func (ms *ChainService) OperationNotifier() opfeed.Notifier {
if ms.opNotifier == nil {
ms.opNotifier = &MockOperationNotifier{}
}
return ms.opNotifier
}
// MockOperationNotifier mocks the operation notifier.
type MockOperationNotifier struct {
feed *event.Feed
}
// OperationFeed returns an operation feed.
func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
if mon.feed == nil {
mon.feed = new(event.Feed)
}
return mon.feed
}
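To show how these mock notifiers are meant to be used, here is a hypothetical test (assuming the core feed and state feed packages are imported as feed and statefeed, as in the BUILD deps above) that publishes one event through the mocked state feed and reads it back:

// Illustrative only: subscribe to the mocked state feed, send an event, and
// verify the subscriber sees it, mirroring how services under test consume it.
func TestMockStateNotifier_SendAndReceive(t *testing.T) {
    notifier := &MockStateNotifier{}
    ch := make(chan *feed.Event, 1)
    sub := notifier.StateFeed().Subscribe(ch)
    defer sub.Unsubscribe()
    notifier.StateFeed().Send(&feed.Event{Type: statefeed.ChainStarted})
    received := <-ch
    if received.Type != statefeed.ChainStarted {
        t.Errorf("unexpected event type: %v", received.Type)
    }
}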
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error {
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
return nil
}
// ReceiveBlockNoVerify mocks ReceiveBlockNoVerify method in chain service.
func (ms *ChainService) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error {
func (ms *ChainService) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
return nil
}
// ReceiveBlockNoPubsub mocks ReceiveBlockNoPubsub method in chain service.
func (ms *ChainService) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error {
func (ms *ChainService) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
return nil
}
// ReceiveBlockNoPubsubForkchoice mocks ReceiveBlockNoPubsubForkchoice method in chain service.
func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error {
func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
if ms.State == nil {
ms.State = &pb.BeaconState{}
ms.State = &stateTrie.BeaconState{}
}
if !bytes.Equal(ms.Root, block.ParentRoot) {
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.ParentRoot)
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
}
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
return err
}
ms.State.Slot = block.Slot
ms.BlocksReceived = append(ms.BlocksReceived, block)
signingRoot, err := ssz.SigningRoot(block)
signingRoot, err := ssz.HashTreeRoot(block.Block)
if err != nil {
return err
}
@@ -60,7 +138,7 @@ func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, bloc
if err := ms.DB.SaveBlock(ctx, block); err != nil {
return err
}
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Slot)
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
}
ms.Root = signingRoot[:]
ms.Block = block
@@ -69,24 +147,26 @@ func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, bloc
// HeadSlot mocks HeadSlot method in chain service.
func (ms *ChainService) HeadSlot() uint64 {
return ms.State.Slot
if ms.State == nil {
return 0
}
return ms.State.Slot()
}
// HeadRoot mocks HeadRoot method in chain service.
func (ms *ChainService) HeadRoot() []byte {
return ms.Root
func (ms *ChainService) HeadRoot(ctx context.Context) ([]byte, error) {
return ms.Root, nil
}
// HeadBlock mocks HeadBlock method in chain service.
func (ms *ChainService) HeadBlock() *ethpb.BeaconBlock {
return ms.Block
func (ms *ChainService) HeadBlock(context.Context) (*ethpb.SignedBeaconBlock, error) {
return ms.Block, nil
}
// HeadState mocks HeadState method in chain service.
func (ms *ChainService) HeadState() *pb.BeaconState {
return ms.State
func (ms *ChainService) HeadState(context.Context) (*stateTrie.BeaconState, error) {
return ms.State, nil
}
// CurrentFork mocks HeadState method in chain service.
@@ -99,6 +179,16 @@ func (ms *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
return ms.FinalizedCheckPoint
}
// CurrentJustifiedCheckpt mocks CurrentJustifiedCheckpt method in chain service.
func (ms *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
return ms.CurrentJustifiedCheckPoint
}
// PreviousJustifiedCheckpt mocks PreviousJustifiedCheckpt method in chain service.
func (ms *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
return ms.PreviousJustifiedCheckPoint
}
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
func (ms *ChainService) ReceiveAttestation(context.Context, *ethpb.Attestation) error {
return nil
@@ -109,21 +199,38 @@ func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attes
return nil
}
// StateInitializedFeed mocks the same method in the chain service.
func (ms *ChainService) StateInitializedFeed() *event.Feed {
if ms.StateFeed != nil {
return ms.StateFeed
}
ms.StateFeed = new(event.Feed)
return ms.StateFeed
}
// HeadUpdatedFeed mocks the same method in the chain service.
func (ms *ChainService) HeadUpdatedFeed() *event.Feed {
return new(event.Feed)
}
// HeadValidatorsIndices mocks the same method in the chain service.
func (ms *ChainService) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
if ms.State == nil {
return []uint64{}, nil
}
return helpers.ActiveValidatorIndices(ms.State, epoch)
}
// HeadSeed mocks the same method in the chain service.
func (ms *ChainService) HeadSeed(epoch uint64) ([32]byte, error) {
return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
}
// GenesisTime mocks the same method in the chain service.
func (ms *ChainService) GenesisTime() time.Time {
return ms.Genesis
}
// CurrentSlot mocks the same method in the chain service.
func (ms *ChainService) CurrentSlot() uint64 {
return 0
}
// Participation mocks the same method in the chain service.
func (ms *ChainService) Participation(epoch uint64) *precompute.Balance {
return ms.Balance
}
// IsValidAttestation always returns true.
func (ms *ChainService) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
return ms.ValidAttestation
}
// ClearCachedStates does nothing.
func (ms *ChainService) ClearCachedStates() {}
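A minimal sketch of consuming this mock from a test in another package; the import alias mock and the stateTrie/pb aliases are assumptions matching the imports used above:

// Illustrative only: give the mock a head state and read it back through the
// same accessors production code uses.
func TestMockChainService_HeadState(t *testing.T) {
    st, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 5})
    if err != nil {
        t.Fatal(err)
    }
    chain := &mock.ChainService{State: st, Root: make([]byte, 32)}
    headState, err := chain.HeadState(context.Background())
    if err != nil {
        t.Fatal(err)
    }
    if headState.Slot() != 5 {
        t.Errorf("wanted head slot 5, got %d", headState.Slot())
    }
}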


@@ -3,27 +3,26 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"active_count.go",
"active_indices.go",
"attestation_data.go",
"checkpoint_state.go",
"committee.go",
"common.go",
"eth1_data.go",
"hot_state_cache.go",
"skip_slot_cache.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//proto/beacon/p2p/v1:go_default_library",
"//proto/beacon/rpc/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//beacon-chain/state:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
],
)
@@ -32,24 +31,25 @@ go_test(
name = "go_default_test",
size = "small",
srcs = [
"active_count_test.go",
"active_indices_test.go",
"attestation_data_test.go",
"benchmarks_test.go",
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"eth1_data_test.go",
"feature_flag_test.go",
"hot_state_cache_test.go",
"skip_slot_cache_test.go",
],
embed = [":go_default_library"],
race = "on",
deps = [
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/beacon/rpc/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
],
)


@@ -1,102 +0,0 @@
package cache
import (
"errors"
"strconv"
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"k8s.io/client-go/tools/cache"
)
var (
// ErrNotActiveCountInfo will be returned when a cache object is not a pointer to
// a ActiveCountByEpoch struct.
ErrNotActiveCountInfo = errors.New("object is not a active count obj")
// maxActiveCountListSize defines the max number of active count can cache.
maxActiveCountListSize = 1000
// Metrics.
activeCountCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "active_validator_count_cache_miss",
Help: "The number of active validator count requests that aren't present in the cache.",
})
activeCountCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "active_validator_count_cache_hit",
Help: "The number of active validator count requests that are present in the cache.",
})
)
// ActiveCountByEpoch defines the active validator count per epoch.
type ActiveCountByEpoch struct {
Epoch uint64
ActiveCount uint64
}
// ActiveCountCache is a struct with 1 queue for looking up active count by epoch.
type ActiveCountCache struct {
activeCountCache *cache.FIFO
lock sync.RWMutex
}
// activeCountKeyFn takes the epoch as the key for the active count of a given epoch.
func activeCountKeyFn(obj interface{}) (string, error) {
aInfo, ok := obj.(*ActiveCountByEpoch)
if !ok {
return "", ErrNotActiveCountInfo
}
return strconv.Itoa(int(aInfo.Epoch)), nil
}
// NewActiveCountCache creates a new active count cache for storing/accessing active validator count.
func NewActiveCountCache() *ActiveCountCache {
return &ActiveCountCache{
activeCountCache: cache.NewFIFO(activeCountKeyFn),
}
}
// ActiveCountInEpoch fetches ActiveCountByEpoch by epoch. Returns true with a
// reference to the ActiveCountInEpoch info, if exists. Otherwise returns false, nil.
func (c *ActiveCountCache) ActiveCountInEpoch(epoch uint64) (uint64, error) {
if !featureconfig.Get().EnableActiveCountCache {
return params.BeaconConfig().FarFutureEpoch, nil
}
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.activeCountCache.GetByKey(strconv.Itoa(int(epoch)))
if err != nil {
return params.BeaconConfig().FarFutureEpoch, err
}
if exists {
activeCountCacheHit.Inc()
} else {
activeCountCacheMiss.Inc()
return params.BeaconConfig().FarFutureEpoch, nil
}
aInfo, ok := obj.(*ActiveCountByEpoch)
if !ok {
return params.BeaconConfig().FarFutureEpoch, ErrNotActiveCountInfo
}
return aInfo.ActiveCount, nil
}
// AddActiveCount adds ActiveCountByEpoch object to the cache. This method also trims the least
// recently added ActiveCountByEpoch object if the cache size has reached the max cache size limit.
func (c *ActiveCountCache) AddActiveCount(activeCount *ActiveCountByEpoch) error {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.activeCountCache.AddIfNotPresent(activeCount); err != nil {
return err
}
trim(c.activeCountCache, maxActiveCountListSize)
return nil
}


@@ -1,83 +0,0 @@
package cache
import (
"reflect"
"strconv"
"testing"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestActiveCountKeyFn_OK(t *testing.T) {
aInfo := &ActiveCountByEpoch{
Epoch: 999,
ActiveCount: 10,
}
key, err := activeCountKeyFn(aInfo)
if err != nil {
t.Fatal(err)
}
if key != strconv.Itoa(int(aInfo.Epoch)) {
t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(aInfo.Epoch)))
}
}
func TestActiveCountKeyFn_InvalidObj(t *testing.T) {
_, err := activeCountKeyFn("bad")
if err != ErrNotActiveCountInfo {
t.Errorf("Expected error %v, got %v", ErrNotActiveCountInfo, err)
}
}
func TestActiveCountCache_ActiveCountByEpoch(t *testing.T) {
cache := NewActiveCountCache()
aInfo := &ActiveCountByEpoch{
Epoch: 99,
ActiveCount: 11,
}
activeCount, err := cache.ActiveCountInEpoch(aInfo.Epoch)
if err != nil {
t.Fatal(err)
}
if activeCount != params.BeaconConfig().FarFutureEpoch {
t.Error("Expected active count not to exist in empty cache")
}
if err := cache.AddActiveCount(aInfo); err != nil {
t.Fatal(err)
}
activeCount, err = cache.ActiveCountInEpoch(aInfo.Epoch)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(activeCount, aInfo.ActiveCount) {
t.Errorf(
"Expected fetched active count to be %v, got %v",
aInfo.ActiveCount,
activeCount,
)
}
}
func TestActiveCount_MaxSize(t *testing.T) {
cache := NewActiveCountCache()
for i := uint64(0); i < 1001; i++ {
aInfo := &ActiveCountByEpoch{
Epoch: i,
}
if err := cache.AddActiveCount(aInfo); err != nil {
t.Fatal(err)
}
}
if len(cache.activeCountCache.ListKeys()) != maxActiveCountListSize {
t.Errorf(
"Expected hash cache key size to be %d, got %d",
maxActiveCountListSize,
len(cache.activeCountCache.ListKeys()),
)
}
}


@@ -1,106 +0,0 @@
package cache
import (
"errors"
"strconv"
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"k8s.io/client-go/tools/cache"
)
var (
// ErrNotActiveIndicesInfo will be returned when a cache object is not a pointer to
// a ActiveIndicesByEpoch struct.
ErrNotActiveIndicesInfo = errors.New("object is not a active indices list")
// maxActiveIndicesListSize defines the max number of active indices can cache.
maxActiveIndicesListSize = 4
// Metrics.
activeIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "active_validator_indices_cache_miss",
Help: "The number of active validator indices requests that aren't present in the cache.",
})
activeIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "active_validator_indices_cache_hit",
Help: "The number of active validator indices requests that are present in the cache.",
})
)
// ActiveIndicesByEpoch defines the active validator indices per epoch.
type ActiveIndicesByEpoch struct {
Epoch uint64
ActiveIndices []uint64
}
// ActiveIndicesCache is a struct with 1 queue for looking up active indices by epoch.
type ActiveIndicesCache struct {
activeIndicesCache *cache.FIFO
lock sync.RWMutex
}
// activeIndicesKeyFn takes the epoch as the key for the active indices of a given epoch.
func activeIndicesKeyFn(obj interface{}) (string, error) {
aInfo, ok := obj.(*ActiveIndicesByEpoch)
if !ok {
return "", ErrNotActiveIndicesInfo
}
return strconv.Itoa(int(aInfo.Epoch)), nil
}
// NewActiveIndicesCache creates a new active indices cache for storing/accessing active validator indices.
func NewActiveIndicesCache() *ActiveIndicesCache {
return &ActiveIndicesCache{
activeIndicesCache: cache.NewFIFO(activeIndicesKeyFn),
}
}
// ActiveIndicesInEpoch fetches ActiveIndicesByEpoch by epoch. Returns true with a
// reference to the ActiveIndicesInEpoch info, if exists. Otherwise returns false, nil.
func (c *ActiveIndicesCache) ActiveIndicesInEpoch(epoch uint64) ([]uint64, error) {
if !featureconfig.Get().EnableActiveIndicesCache {
return nil, nil
}
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.activeIndicesCache.GetByKey(strconv.Itoa(int(epoch)))
if err != nil {
return nil, err
}
if exists {
activeIndicesCacheHit.Inc()
} else {
activeIndicesCacheMiss.Inc()
return nil, nil
}
aInfo, ok := obj.(*ActiveIndicesByEpoch)
if !ok {
return nil, ErrNotActiveIndicesInfo
}
return aInfo.ActiveIndices, nil
}
// AddActiveIndicesList adds ActiveIndicesByEpoch object to the cache. This method also trims the least
// recently added ActiveIndicesByEpoch object if the cache size has reached the max cache size limit.
func (c *ActiveIndicesCache) AddActiveIndicesList(activeIndices *ActiveIndicesByEpoch) error {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.activeIndicesCache.AddIfNotPresent(activeIndices); err != nil {
return err
}
trim(c.activeIndicesCache, maxActiveIndicesListSize)
return nil
}
// ActiveIndicesKeys returns the keys of the active indices cache.
func (c *ActiveIndicesCache) ActiveIndicesKeys() []string {
return c.activeIndicesCache.ListKeys()
}


@@ -1,82 +0,0 @@
package cache
import (
"reflect"
"strconv"
"testing"
)
func TestActiveIndicesKeyFn_OK(t *testing.T) {
aInfo := &ActiveIndicesByEpoch{
Epoch: 999,
ActiveIndices: []uint64{1, 2, 3, 4, 5},
}
key, err := activeIndicesKeyFn(aInfo)
if err != nil {
t.Fatal(err)
}
if key != strconv.Itoa(int(aInfo.Epoch)) {
t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(aInfo.Epoch)))
}
}
func TestActiveIndicesKeyFn_InvalidObj(t *testing.T) {
_, err := activeIndicesKeyFn("bad")
if err != ErrNotActiveIndicesInfo {
t.Errorf("Expected error %v, got %v", ErrNotActiveIndicesInfo, err)
}
}
func TestActiveIndicesCache_ActiveIndicesByEpoch(t *testing.T) {
cache := NewActiveIndicesCache()
aInfo := &ActiveIndicesByEpoch{
Epoch: 99,
ActiveIndices: []uint64{1, 2, 3, 4},
}
activeIndices, err := cache.ActiveIndicesInEpoch(aInfo.Epoch)
if err != nil {
t.Fatal(err)
}
if activeIndices != nil {
t.Error("Expected active indices not to exist in empty cache")
}
if err := cache.AddActiveIndicesList(aInfo); err != nil {
t.Fatal(err)
}
activeIndices, err = cache.ActiveIndicesInEpoch(aInfo.Epoch)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(activeIndices, aInfo.ActiveIndices) {
t.Errorf(
"Expected fetched active indices to be %v, got %v",
aInfo.ActiveIndices,
activeIndices,
)
}
}
func TestActiveIndices_MaxSize(t *testing.T) {
cache := NewActiveIndicesCache()
for i := uint64(0); i < 100; i++ {
aInfo := &ActiveIndicesByEpoch{
Epoch: i,
}
if err := cache.AddActiveIndicesList(aInfo); err != nil {
t.Fatal(err)
}
}
if len(cache.activeIndicesCache.ListKeys()) != maxActiveIndicesListSize {
t.Errorf(
"Expected hash cache key size to be %d, got %d",
maxActiveIndicesListSize,
len(cache.activeIndicesCache.ListKeys()),
)
}
}


@@ -10,9 +10,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"k8s.io/client-go/tools/cache"
)
@@ -59,13 +57,7 @@ func NewAttestationCache() *AttestationCache {
// Get waits for any in progress calculation to complete before returning a
// cached response, if any.
func (c *AttestationCache) Get(ctx context.Context, req *pb.AttestationRequest) (*ethpb.AttestationData, error) {
if !featureconfig.Get().EnableAttestationCache {
// Return a miss result if cache is not enabled.
attestationCacheMiss.Inc()
return nil, nil
}
func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
if req == nil {
return nil, errors.New("nil attestation data request")
}
@@ -113,11 +105,7 @@ func (c *AttestationCache) Get(ctx context.Context, req *pb.AttestationRequest)
// MarkInProgress a request so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *AttestationCache) MarkInProgress(req *pb.AttestationRequest) error {
if !featureconfig.Get().EnableAttestationCache {
return nil
}
func (c *AttestationCache) MarkInProgress(req *ethpb.AttestationDataRequest) error {
c.lock.Lock()
defer c.lock.Unlock()
s, e := reqToKey(req)
@@ -127,19 +115,13 @@ func (c *AttestationCache) MarkInProgress(req *pb.AttestationRequest) error {
if c.inProgress[s] {
return ErrAlreadyInProgress
}
if featureconfig.Get().EnableAttestationCache {
c.inProgress[s] = true
}
c.inProgress[s] = true
return nil
}
// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *AttestationCache) MarkNotInProgress(req *pb.AttestationRequest) error {
if !featureconfig.Get().EnableAttestationCache {
return nil
}
func (c *AttestationCache) MarkNotInProgress(req *ethpb.AttestationDataRequest) error {
c.lock.Lock()
defer c.lock.Unlock()
s, e := reqToKey(req)
@@ -151,11 +133,7 @@ func (c *AttestationCache) MarkNotInProgress(req *pb.AttestationRequest) error {
}
// Put the response in the cache.
func (c *AttestationCache) Put(ctx context.Context, req *pb.AttestationRequest, res *ethpb.AttestationData) error {
if !featureconfig.Get().EnableAttestationCache {
return nil
}
func (c *AttestationCache) Put(ctx context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
data := &attestationReqResWrapper{
req,
res,
@@ -180,11 +158,11 @@ func wrapperToKey(i interface{}) (string, error) {
return reqToKey(w.req)
}
func reqToKey(req *pb.AttestationRequest) (string, error) {
func reqToKey(req *ethpb.AttestationDataRequest) (string, error) {
return fmt.Sprintf("%d-%d", req.CommitteeIndex, req.Slot), nil
}
type attestationReqResWrapper struct {
req *pb.AttestationRequest
req *ethpb.AttestationDataRequest
res *ethpb.AttestationData
}


@@ -5,16 +5,15 @@ import (
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)
func TestAttestationCache_RoundTrip(t *testing.T) {
ctx := context.Background()
c := cache.NewAttestationCache()
req := &pb.AttestationRequest{
req := &ethpb.AttestationDataRequest{
CommitteeIndex: 0,
Slot: 1,
}


@@ -1,45 +0,0 @@
package cache
import (
"testing"
)
var indices300k = createIndices(300000)
var epoch = uint64(1)
func createIndices(count int) *ActiveIndicesByEpoch {
indices := make([]uint64, 0, count)
for i := 0; i < count; i++ {
indices = append(indices, uint64(i))
}
return &ActiveIndicesByEpoch{
Epoch: epoch,
ActiveIndices: indices,
}
}
func BenchmarkCachingAddRetrieve(b *testing.B) {
c := NewActiveIndicesCache()
b.Run("ADD300K", func(b *testing.B) {
b.N = 10
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := c.AddActiveIndicesList(indices300k); err != nil {
b.Fatal(err)
}
}
})
b.Run("RETR300K", func(b *testing.B) {
b.N = 10
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := c.ActiveIndicesInEpoch(epoch); err != nil {
b.Fatal(err)
}
}
})
}


@@ -4,11 +4,10 @@ import (
"errors"
"sync"
"github.com/gogo/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"k8s.io/client-go/tools/cache"
)
@@ -19,7 +18,9 @@ var (
ErrNotCheckpointState = errors.New("object is not a state by check point struct")
// maxCheckpointStateSize defines the max number of entries check point to state cache can contain.
maxCheckpointStateSize = 4
// Choosing 10 to account for multiple forks: this allows 5 forks per epoch boundary with a 2 epoch
// window to accept attestations based on the latest spec.
maxCheckpointStateSize = 10
// Metrics.
checkpointStateMiss = promauto.NewCounter(prometheus.CounterOpts{
@@ -35,7 +36,7 @@ var (
// CheckpointState defines the active validator indices per epoch.
type CheckpointState struct {
Checkpoint *ethpb.Checkpoint
State *pb.BeaconState
State *stateTrie.BeaconState
}
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
@@ -67,7 +68,7 @@ func NewCheckpointStateCache() *CheckpointStateCache {
// StateByCheckpoint fetches state by checkpoint. Returns true with a
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*pb.BeaconState, error) {
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
c.lock.RLock()
defer c.lock.RUnlock()
h, err := hashutil.HashProto(cp)
@@ -92,7 +93,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*pb.Beac
return nil, ErrNotCheckpointState
}
return proto.Clone(info.State).(*pb.BeaconState), nil
return info.State.Copy(), nil
}
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
@@ -100,7 +101,10 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*pb.Beac
func (c *CheckpointStateCache) AddCheckpointState(cp *CheckpointState) error {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.cache.AddIfNotPresent(cp); err != nil {
if err := c.cache.AddIfNotPresent(&CheckpointState{
Checkpoint: stateTrie.CopyCheckpoint(cp.Checkpoint),
State: cp.State.Copy(),
}); err != nil {
return err
}
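Because AddCheckpointState stores copies (and StateByCheckpoint, shown earlier, returns a copy), callers can keep mutating their own state object without corrupting the cache. A hypothetical test sketch using only the types shown above:

// Illustrative only: mutating the original state after caching must not
// affect what the cache hands back, thanks to the copy semantics above.
func TestCheckpointStateCache_CopySemantics(t *testing.T) {
    c := NewCheckpointStateCache()
    cp := &ethpb.Checkpoint{Epoch: 3, Root: []byte{'C'}}
    st, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 96})
    if err != nil {
        t.Fatal(err)
    }
    if err := c.AddCheckpointState(&CheckpointState{Checkpoint: cp, State: st}); err != nil {
        t.Fatal(err)
    }
    if err := st.SetSlot(97); err != nil { // mutate the caller's state after insert
        t.Fatal(err)
    }
    cached, err := c.StateByCheckpoint(cp)
    if err != nil {
        t.Fatal(err)
    }
    if cached.Slot() != 96 {
        t.Errorf("expected cached slot 96, got %d", cached.Slot())
    }
}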


@@ -4,16 +4,23 @@ import (
"reflect"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
)
func TestCheckpointStateCacheKeyFn_OK(t *testing.T) {
cp := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 64,
})
if err != nil {
t.Fatal(err)
}
info := &CheckpointState{
Checkpoint: cp,
State: &pb.BeaconState{Slot: 64},
State: st,
}
key, err := checkpointState(info)
if err != nil {
@@ -39,9 +46,15 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
cache := NewCheckpointStateCache()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 64,
})
if err != nil {
t.Fatal(err)
}
info1 := &CheckpointState{
Checkpoint: cp1,
State: &pb.BeaconState{Slot: 64},
State: st,
}
state, err := cache.StateByCheckpoint(cp1)
if err != nil {
@@ -58,14 +71,20 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(state, info1.State) {
if !reflect.DeepEqual(state.InnerStateUnsafe(), info1.State.InnerStateUnsafe()) {
t.Error("incorrectly cached state")
}
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
st2, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 128,
})
if err != nil {
t.Fatal(err)
}
info2 := &CheckpointState{
Checkpoint: cp2,
State: &pb.BeaconState{Slot: 128},
State: st2,
}
if err := cache.AddCheckpointState(info2); err != nil {
t.Fatal(err)
@@ -74,7 +93,7 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(state, info2.State) {
if !reflect.DeepEqual(state.CloneInnerState(), info2.State.CloneInnerState()) {
t.Error("incorrectly cached state")
}
@@ -82,18 +101,26 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(state, info1.State) {
if !reflect.DeepEqual(state.CloneInnerState(), info1.State.CloneInnerState()) {
t.Error("incorrectly cached state")
}
}
func TestCheckpointStateCache__MaxSize(t *testing.T) {
func TestCheckpointStateCache_MaxSize(t *testing.T) {
c := NewCheckpointStateCache()
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 0,
})
if err != nil {
t.Fatal(err)
}
for i := 0; i < maxCheckpointStateSize+100; i++ {
if err := st.SetSlot(uint64(i)); err != nil {
t.Fatal(err)
}
info := &CheckpointState{
Checkpoint: &ethpb.Checkpoint{Epoch: uint64(i)},
State: &pb.BeaconState{Slot: uint64(i)},
State: st,
}
if err := c.AddCheckpointState(info); err != nil {
t.Fatal(err)


@@ -2,12 +2,10 @@ package cache
import (
"errors"
"strconv"
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"k8s.io/client-go/tools/cache"
@@ -18,9 +16,10 @@ var (
// a Committee struct.
ErrNotCommittee = errors.New("object is not a committee struct")
// maxShuffledIndicesSize defines the max number of shuffled indices list can cache.
// 3 for previous, current epoch and next epoch.
maxShuffledIndicesSize = 3
// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
// Due to reorgs, it's good to keep the old cache around for a quick switch over. 10 is a generous
// cache size as it considers 3 concurrent branches over 3 epochs.
maxCommitteesCacheSize = 10
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
@@ -34,47 +33,45 @@ var (
})
)
// Committee defines the committee per epoch and index.
type Committee struct {
CommitteeCount uint64
Epoch uint64
Committee []uint64
}
// Committees defines the shuffled committees seed.
type Committees struct {
CommitteeCount uint64
Seed [32]byte
ShuffledIndices []uint64
SortedIndices []uint64
ProposerIndices []uint64
}
// CommitteeCache is a struct with 1 queue for looking up shuffled indices list by epoch and committee index.
// CommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
type CommitteeCache struct {
CommitteeCache *cache.FIFO
lock sync.RWMutex
}
// committeeKeyFn takes the epoch as the key to retrieve shuffled indices of a committee in a given epoch.
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
func committeeKeyFn(obj interface{}) (string, error) {
info, ok := obj.(*Committee)
info, ok := obj.(*Committees)
if !ok {
return "", ErrNotCommittee
}
return strconv.Itoa(int(info.Epoch)), nil
return key(info.Seed), nil
}
// NewCommitteeCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteeCache() *CommitteeCache {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
return &CommitteeCache{
CommitteeCache: cache.NewFIFO(committeeKeyFn),
}
}
// ShuffledIndices fetches the shuffled indices by slot and committee index. Every list of indices
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
// represents one committee. Returns the list if it exists for the slot and committee index; otherwise returns nil, nil.
func (c *CommitteeCache) ShuffledIndices(slot uint64, index uint64) ([]uint64, error) {
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
return nil, nil
}
func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
c.lock.RLock()
defer c.lock.RUnlock()
epoch := int(slot / params.BeaconConfig().SlotsPerEpoch)
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(epoch))
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return nil, err
}
@@ -86,7 +83,7 @@ func (c *CommitteeCache) ShuffledIndices(slot uint64, index uint64) ([]uint64, e
return nil, nil
}
item, ok := obj.(*Committee)
item, ok := obj.(*Committees)
if !ok {
return nil, ErrNotCommittee
}
@@ -98,100 +95,61 @@ func (c *CommitteeCache) ShuffledIndices(slot uint64, index uint64) ([]uint64, e
indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
start, end := startEndIndices(item, indexOffSet)
return item.Committee[start:end], nil
if int(end) > len(item.ShuffledIndices) {
return nil, errors.New("requested index out of bound")
}
return item.ShuffledIndices[start:end], nil
}
// AddCommitteeShuffledList adds a Committees shuffled list object to the cache.
// This method also trims the least recently used list if the cache size has reached the max cache size limit.
func (c *CommitteeCache) AddCommitteeShuffledList(committee *Committee) error {
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
return nil
}
func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.CommitteeCache.AddIfNotPresent(committee); err != nil {
if err := c.CommitteeCache.AddIfNotPresent(committees); err != nil {
return err
}
trim(c.CommitteeCache, maxShuffledIndicesSize)
trim(c.CommitteeCache, maxCommitteesCacheSize)
return nil
}
// Epochs returns the epochs stored in the committee cache. These are the keys to the cache.
func (c *CommitteeCache) Epochs() ([]uint64, error) {
if !featureconfig.Get().EnableShuffledIndexCache {
return nil, nil
}
c.lock.RLock()
defer c.lock.RUnlock()
// AddProposerIndicesList updates the committee shuffled list with proposer indices.
func (c *CommitteeCache) AddProposerIndicesList(seed [32]byte, indices []uint64) error {
c.lock.Lock()
defer c.lock.Unlock()
epochs := make([]uint64, len(c.CommitteeCache.ListKeys()))
for i, s := range c.CommitteeCache.ListKeys() {
epoch, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
epochs[i] = uint64(epoch)
}
return epochs, nil
}
// EpochInCache returns true if an input epoch is part of keys in cache.
func (c *CommitteeCache) EpochInCache(wantedEpoch uint64) (bool, error) {
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
return false, nil
}
c.lock.RLock()
defer c.lock.RUnlock()
for _, s := range c.CommitteeCache.ListKeys() {
epoch, err := strconv.Atoi(s)
if err != nil {
return false, err
}
if wantedEpoch == uint64(epoch) {
return true, nil
}
}
return false, nil
}
// CommitteeCountPerSlot returns the number of committees in a given slot as stored in cache.
func (c *CommitteeCache) CommitteeCountPerSlot(slot uint64) (uint64, bool, error) {
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
return 0, false, nil
}
c.lock.RLock()
defer c.lock.RUnlock()
epoch := int(slot / params.BeaconConfig().SlotsPerEpoch)
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return 0, false, err
return err
}
if exists {
CommitteeCacheHit.Inc()
if !exists {
committees := &Committees{ProposerIndices: indices}
if err := c.CommitteeCache.Add(committees); err != nil {
return err
}
} else {
CommitteeCacheMiss.Inc()
return 0, false, nil
committees, ok := obj.(*Committees)
if !ok {
return ErrNotCommittee
}
committees.ProposerIndices = indices
if err := c.CommitteeCache.Add(committees); err != nil {
return err
}
}
item, ok := obj.(*Committee)
if !ok {
return 0, false, ErrNotCommittee
}
return item.CommitteeCount / params.BeaconConfig().SlotsPerEpoch, true, nil
trim(c.CommitteeCache, maxCommitteesCacheSize)
return nil
}
// ActiveIndices returns the active indices of a given epoch stored in cache.
func (c *CommitteeCache) ActiveIndices(epoch uint64) ([]uint64, error) {
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
return nil, nil
}
// ActiveIndices returns the active indices of a given seed stored in cache.
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return nil, err
}
@@ -203,18 +161,49 @@ func (c *CommitteeCache) ActiveIndices(epoch uint64) ([]uint64, error) {
return nil, nil
}
item, ok := obj.(*Committee)
item, ok := obj.(*Committees)
if !ok {
return nil, ErrNotCommittee
}
return item.Committee, nil
return item.SortedIndices, nil
}
func startEndIndices(c *Committee, index uint64) (uint64, uint64) {
validatorCount := uint64(len(c.Committee))
// ProposerIndices returns the proposer indices of a given seed.
func (c *CommitteeCache) ProposerIndices(seed [32]byte) ([]uint64, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return nil, err
}
if exists {
CommitteeCacheHit.Inc()
} else {
CommitteeCacheMiss.Inc()
return nil, nil
}
item, ok := obj.(*Committees)
if !ok {
return nil, ErrNotCommittee
}
return item.ProposerIndices, nil
}
func startEndIndices(c *Committees, index uint64) (uint64, uint64) {
validatorCount := uint64(len(c.ShuffledIndices))
start := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index)
end := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index+1)
return start, end
}
// Using the seed as the source for the key handles reorgs within the same epoch.
// The seed is derived from the state's array of randao mixes and the epoch value
// hashed together. This avoids collisions across different validator sets. Spec definition:
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#get_seed
func key(seed [32]byte) string {
return string(seed[:])
}


@@ -0,0 +1,68 @@
package cache
import (
"reflect"
"testing"
fuzz "github.com/google/gofuzz"
)
func TestCommitteeKeyFuzz_OK(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
k, err := committeeKeyFn(c)
if err != nil {
t.Fatal(err)
}
if k != key(c.Seed) {
t.Errorf("Incorrect hash k: %s, expected %s", k, key(c.Seed))
}
}
}
func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
cache := NewCommitteesCache()
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
if err := cache.AddCommitteeShuffledList(c); err != nil {
t.Fatal(err)
}
if _, err := cache.Committee(0, c.Seed, 0); err != nil {
t.Fatal(err)
}
}
if len(cache.CommitteeCache.ListKeys()) != maxCommitteesCacheSize {
t.Error("Incorrect key size")
}
}
func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
cache := NewCommitteesCache()
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
if err := cache.AddCommitteeShuffledList(c); err != nil {
t.Fatal(err)
}
indices, err := cache.ActiveIndices(c.Seed)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(indices, c.SortedIndices) {
t.Error("Saved indices not the same")
}
}
if len(cache.CommitteeCache.ListKeys()) != maxCommitteesCacheSize {
t.Error("Incorrect key size")
}
}


@@ -2,25 +2,27 @@ package cache
import (
"reflect"
"sort"
"strconv"
"testing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestCommitteeKeyFn_OK(t *testing.T) {
item := &Committee{
Epoch: 999,
CommitteeCount: 1,
Committee: []uint64{1, 2, 3, 4, 5},
item := &Committees{
CommitteeCount: 1,
Seed: [32]byte{'A'},
ShuffledIndices: []uint64{1, 2, 3, 4, 5},
}
key, err := committeeKeyFn(item)
k, err := committeeKeyFn(item)
if err != nil {
t.Fatal(err)
}
if key != strconv.Itoa(int(item.Epoch)) {
t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(item.Epoch)))
if k != key(item.Seed) {
t.Errorf("Incorrect hash k: %s, expected %s", k, key(item.Seed))
}
}
@@ -32,17 +34,17 @@ func TestCommitteeKeyFn_InvalidObj(t *testing.T) {
}
func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
cache := NewCommitteeCache()
cache := NewCommitteesCache()
item := &Committee{
Epoch: 1,
Committee: []uint64{1, 2, 3, 4, 5, 6},
CommitteeCount: 3,
item := &Committees{
ShuffledIndices: []uint64{1, 2, 3, 4, 5, 6},
Seed: [32]byte{'A'},
CommitteeCount: 3,
}
slot := uint64(item.Epoch * params.BeaconConfig().SlotsPerEpoch)
slot := params.BeaconConfig().SlotsPerEpoch
committeeIndex := uint64(1)
indices, err := cache.ShuffledIndices(slot, committeeIndex)
indices, err := cache.Committee(slot, item.Seed, committeeIndex)
if err != nil {
t.Fatal(err)
}
@@ -54,102 +56,26 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
t.Fatal(err)
}
wantedIndex := uint64(0)
indices, err = cache.ShuffledIndices(slot, wantedIndex)
indices, err = cache.Committee(slot, item.Seed, wantedIndex)
if err != nil {
t.Fatal(err)
}
start, end := startEndIndices(item, wantedIndex)
if !reflect.DeepEqual(indices, item.Committee[start:end]) {
if !reflect.DeepEqual(indices, item.ShuffledIndices[start:end]) {
t.Errorf(
"Expected fetched active indices to be %v, got %v",
indices,
item.Committee[start:end],
item.ShuffledIndices[start:end],
)
}
}
func TestCommitteeCache_CanRotate(t *testing.T) {
cache := NewCommitteeCache()
item1 := &Committee{Epoch: 1}
if err := cache.AddCommitteeShuffledList(item1); err != nil {
t.Fatal(err)
}
item2 := &Committee{Epoch: 2}
if err := cache.AddCommitteeShuffledList(item2); err != nil {
t.Fatal(err)
}
epochs, err := cache.Epochs()
if err != nil {
t.Fatal(err)
}
wanted := item1.Epoch + item2.Epoch
if sum(epochs) != wanted {
t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
}
item3 := &Committee{Epoch: 4}
if err := cache.AddCommitteeShuffledList(item3); err != nil {
t.Fatal(err)
}
epochs, err = cache.Epochs()
if err != nil {
t.Fatal(err)
}
wanted = item1.Epoch + item2.Epoch + item3.Epoch
if sum(epochs) != wanted {
t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
}
item4 := &Committee{Epoch: 6}
if err := cache.AddCommitteeShuffledList(item4); err != nil {
t.Fatal(err)
}
epochs, err = cache.Epochs()
if err != nil {
t.Fatal(err)
}
wanted = item2.Epoch + item3.Epoch + item4.Epoch
if sum(epochs) != wanted {
t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
}
}
func TestCommitteeCache_EpochInCache(t *testing.T) {
cache := NewCommitteeCache()
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 1}); err != nil {
t.Fatal(err)
}
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 2}); err != nil {
t.Fatal(err)
}
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 99}); err != nil {
t.Fatal(err)
}
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 100}); err != nil {
t.Fatal(err)
}
inCache, err := cache.EpochInCache(1)
if err != nil {
t.Fatal(err)
}
if inCache {
t.Error("Epoch shouldn't be in cache")
}
inCache, err = cache.EpochInCache(100)
if err != nil {
t.Fatal(err)
}
if !inCache {
t.Error("Epoch should be in cache")
}
}
func TestCommitteeCache_ActiveIndices(t *testing.T) {
cache := NewCommitteeCache()
cache := NewCommitteesCache()
item := &Committee{Epoch: 1, Committee: []uint64{1, 2, 3, 4, 5, 6}}
indices, err := cache.ActiveIndices(1)
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
indices, err := cache.ActiveIndices(item.Seed)
if err != nil {
t.Fatal(err)
}
@@ -161,19 +87,88 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
t.Fatal(err)
}
indices, err = cache.ActiveIndices(1)
indices, err = cache.ActiveIndices(item.Seed)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(indices, item.Committee) {
if !reflect.DeepEqual(indices, item.SortedIndices) {
t.Error("Did not receive correct active indices from cache")
}
}
func sum(values []uint64) uint64 {
sum := uint64(0)
for _, v := range values {
sum = v + sum
func TestCommitteeCache_AddProposerIndicesList(t *testing.T) {
cache := NewCommitteesCache()
seed := [32]byte{'A'}
indices := []uint64{1, 2, 3, 4, 5}
indices, err := cache.ProposerIndices(seed)
if err != nil {
t.Fatal(err)
}
if indices != nil {
t.Error("Expected committee count not to exist in empty cache")
}
if err := cache.AddProposerIndicesList(seed, indices); err != nil {
t.Fatal(err)
}
received, err := cache.ProposerIndices(seed)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(indices, received) {
t.Error("Did not receive correct proposer indices from cache")
}
item := &Committees{Seed: [32]byte{'B'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
if err := cache.AddCommitteeShuffledList(item); err != nil {
t.Fatal(err)
}
indices, err = cache.ProposerIndices(item.Seed)
if err != nil {
t.Fatal(err)
}
if indices != nil {
t.Error("Expected committee count not to exist in empty cache")
}
if err := cache.AddProposerIndicesList(item.Seed, indices); err != nil {
t.Fatal(err)
}
received, err = cache.ProposerIndices(item.Seed)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(indices, received) {
t.Error("Did not receive correct proposer indices from cache")
}
}
func TestCommitteeCache_CanRotate(t *testing.T) {
cache := NewCommitteesCache()
// Should rotate out all the entries except seeds 190 through 199.
for i := 100; i < 200; i++ {
s := []byte(strconv.Itoa(i))
item := &Committees{Seed: bytesutil.ToBytes32(s)}
if err := cache.AddCommitteeShuffledList(item); err != nil {
t.Fatal(err)
}
}
k := cache.CommitteeCache.ListKeys()
if len(k) != maxCommitteesCacheSize {
t.Errorf("wanted: %d, got: %d", maxCommitteesCacheSize, len(k))
}
sort.Slice(k, func(i, j int) bool {
return k[i] < k[j]
})
s := bytesutil.ToBytes32([]byte(strconv.Itoa(190)))
if k[0] != key(s) {
t.Error("incorrect key received for slot 190")
}
s = bytesutil.ToBytes32([]byte(strconv.Itoa(199)))
if k[len(k)-1] != key(s) {
t.Error("incorrect key received for slot 199")
}
return sum
}


@@ -9,10 +9,12 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//proto/eth/v1alpha1:go_default_library",
"//proto/beacon/db:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/hashutil:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
@@ -26,9 +28,10 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//proto/eth/v1alpha1:go_default_library",
"//proto/beacon/db:go_default_library",
"//shared/bytesutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)


@@ -10,7 +10,9 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -33,28 +35,19 @@ type DepositFetcher interface {
// stores all the deposit related data that is required by the beacon-node.
type DepositCache struct {
// Beacon chain deposits in memory.
pendingDeposits []*DepositContainer
deposits []*DepositContainer
pendingDeposits []*dbpb.DepositContainer
deposits []*dbpb.DepositContainer
depositsLock sync.RWMutex
chainStartDeposits []*ethpb.Deposit
chainstartPubkeys map[string]bool
chainstartPubkeysLock sync.RWMutex
}
// DepositContainer object for holding the deposit and a reference to the block in
// which the deposit transaction was included in the proof of work chain.
type DepositContainer struct {
Deposit *ethpb.Deposit
Block *big.Int
Index int
depositRoot [32]byte
}
// NewDepositCache instantiates a new deposit cache
func NewDepositCache() *DepositCache {
return &DepositCache{
pendingDeposits: []*DepositContainer{},
deposits: []*DepositContainer{},
pendingDeposits: []*dbpb.DepositContainer{},
deposits: []*dbpb.DepositContainer{},
chainstartPubkeys: make(map[string]bool),
chainStartDeposits: make([]*ethpb.Deposit, 0),
}
@@ -62,10 +55,10 @@ func NewDepositCache() *DepositCache {
// InsertDeposit into the database. If the deposit is nil, this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum *big.Int, index int, depositRoot [32]byte) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.InsertDeposit")
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
defer span.End()
if d == nil || blockNum == nil {
if d == nil {
log.WithFields(log.Fields{
"block": blockNum,
"deposit": d,
@@ -78,14 +71,36 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
defer dc.depositsLock.Unlock()
// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
newDeposits := append([]*DepositContainer{{Deposit: d, Block: blockNum, depositRoot: depositRoot, Index: index}}, dc.deposits[heightIdx:]...)
newDeposits := append([]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}}, dc.deposits[heightIdx:]...)
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
historicalDepositsCount.Inc()
}
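
The sort.Search call above keeps dc.deposits ordered by Index at insertion time so reads never have to sort. For readers unfamiliar with the idiom, a minimal standalone sketch of the same pattern on a plain int64 slice (nothing Prysm-specific):

package main

import (
	"fmt"
	"sort"
)

// insertSorted inserts v at the first position whose element is >= v,
// mirroring the deposit cache's sorted-insertion strategy.
func insertSorted(xs []int64, v int64) []int64 {
	i := sort.Search(len(xs), func(k int) bool { return xs[k] >= v })
	tail := append([]int64{v}, xs[i:]...) // new slice: v plus a copy of the existing suffix
	return append(xs[:i], tail...)
}

func main() {
	fmt.Println(insertSorted([]int64{0, 1, 3, 4}, 2)) // prints [0 1 2 3 4]
}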
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbpb.DepositContainer) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
dc.deposits = ctrs
historicalDepositsCount.Add(float64(len(ctrs)))
}
// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.DepositContainer {
ctx, span := trace.StartSpan(ctx, "BeaconDB.AllDepositContainers")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
return dc.deposits
}
// MarkPubkeyForChainstart sets the pubkey deposit status to true.
func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey string) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.MarkPubkeyForChainstart")
ctx, span := trace.StartSpan(ctx, "DepositsCache.MarkPubkeyForChainstart")
defer span.End()
dc.chainstartPubkeysLock.Lock()
defer dc.chainstartPubkeysLock.Unlock()
@@ -94,7 +109,7 @@ func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey stri
// PubkeyInChainstart returns bool for whether the pubkey passed in has deposited.
func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.PubkeyInChainstart")
ctx, span := trace.StartSpan(ctx, "DepositsCache.PubkeyInChainstart")
defer span.End()
dc.chainstartPubkeysLock.Lock()
defer dc.chainstartPubkeysLock.Unlock()
@@ -108,14 +123,14 @@ func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) b
// AllDeposits returns a list of all historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "BeaconDB.AllDeposits")
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
var deposits []*ethpb.Deposit
for _, ctnr := range dc.deposits {
if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
deposits = append(deposits, ctnr.Deposit)
}
}
@@ -125,23 +140,23 @@ func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*
// DepositsNumberAndRootAtHeight returns the number of deposits made prior to the block height and the
// root that corresponds to the latest deposit at that block height.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
ctx, span := trace.StartSpan(ctx, "Beacondb.DepositsNumberAndRootAtHeight")
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Block.Cmp(blockHeight) > 0 })
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
// Send the deposit root of the empty trie if the eth1 follow distance is greater than the time of the earliest
// deposit.
if heightIdx == 0 {
return 0, [32]byte{}
}
return uint64(heightIdx), dc.deposits[heightIdx-1].depositRoot
return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
}
// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositByPubkey")
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@@ -151,7 +166,7 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et
for _, ctnr := range dc.deposits {
if bytes.Equal(ctnr.Deposit.Data.PublicKey, pubKey) {
deposit = ctnr.Deposit
blockNum = ctnr.Block
blockNum = big.NewInt(int64(ctnr.Eth1BlockHeight))
break
}
}


@@ -6,7 +6,8 @@ import (
"math/big"
"testing"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -19,21 +20,7 @@ func TestBeaconDB_InsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
hook := logTest.NewGlobal()
dc := DepositCache{}
dc.InsertDeposit(context.Background(), nil, big.NewInt(1), 0, [32]byte{})
if len(dc.deposits) != 0 {
t.Fatal("Number of deposits changed")
}
if hook.LastEntry().Message != nilDepositErr {
t.Errorf("Did not log correct message, wanted \"Ignoring nil deposit insertion\", got \"%s\"", hook.LastEntry().Message)
}
}
func TestBeaconDB_InsertDeposit_LogsOnNilBlockNumberInsertion(t *testing.T) {
hook := logTest.NewGlobal()
dc := DepositCache{}
dc.InsertDeposit(context.Background(), &ethpb.Deposit{}, nil, 0, [32]byte{})
dc.InsertDeposit(context.Background(), nil, 1, 0, [32]byte{})
if len(dc.deposits) != 0 {
t.Fatal("Number of deposits changed")
@@ -47,27 +34,27 @@ func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
dc := DepositCache{}
insertions := []struct {
blkNum *big.Int
blkNum uint64
deposit *ethpb.Deposit
index int
index int64
}{
{
blkNum: big.NewInt(0),
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 0,
},
{
blkNum: big.NewInt(0),
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 3,
},
{
blkNum: big.NewInt(0),
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 1,
},
{
blkNum: big.NewInt(0),
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 4,
},
@@ -77,7 +64,7 @@ func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
}
expectedIndices := []int{0, 1, 3, 4}
expectedIndices := []int64{0, 1, 3, 4}
for i, ei := range expectedIndices {
if dc.deposits[i].Index != ei {
t.Errorf("dc.deposits[%d].Index = %d, wanted %d", i, dc.deposits[i].Index, ei)
@@ -88,34 +75,34 @@ func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
func TestBeaconDB_AllDeposits_ReturnsAllDeposits(t *testing.T) {
dc := DepositCache{}
deposits := []*DepositContainer{
deposits := []*dbpb.DepositContainer{
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(11),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(11),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(12),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(12),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
}
dc.deposits = deposits
@@ -129,34 +116,34 @@ func TestBeaconDB_AllDeposits_ReturnsAllDeposits(t *testing.T) {
func TestBeaconDB_AllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
dc := DepositCache{}
deposits := []*DepositContainer{
deposits := []*dbpb.DepositContainer{
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(11),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(11),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(12),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(12),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
}
dc.deposits = deposits
@@ -171,35 +158,35 @@ func TestBeaconDB_AllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testi
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing.T) {
dc := DepositCache{}
dc.deposits = []*DepositContainer{
dc.deposits = []*dbpb.DepositContainer{
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(11),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(11),
Deposit: &ethpb.Deposit{},
depositRoot: bytesutil.ToBytes32([]byte("root")),
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
DepositRoot: []byte("root"),
},
{
Block: big.NewInt(12),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
{
Block: big.NewInt(12),
Deposit: &ethpb.Deposit{},
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
}
@@ -216,16 +203,16 @@ func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOldestDeposit(t *testing.T) {
dc := DepositCache{}
dc.deposits = []*DepositContainer{
dc.deposits = []*dbpb.DepositContainer{
{
Block: big.NewInt(10),
Deposit: &ethpb.Deposit{},
depositRoot: bytesutil.ToBytes32([]byte("root")),
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
DepositRoot: []byte("root"),
},
{
Block: big.NewInt(11),
Deposit: &ethpb.Deposit{},
depositRoot: bytesutil.ToBytes32([]byte("root")),
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
DepositRoot: []byte("root"),
},
}
@@ -242,9 +229,9 @@ func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLes
func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
dc := DepositCache{}
dc.deposits = []*DepositContainer{
dc.deposits = []*dbpb.DepositContainer{
{
Block: big.NewInt(9),
Eth1BlockHeight: 9,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk0"),
@@ -252,7 +239,7 @@ func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
},
},
{
Block: big.NewInt(10),
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk1"),
@@ -260,7 +247,7 @@ func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
},
},
{
Block: big.NewInt(11),
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk1"),
@@ -268,7 +255,7 @@ func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
},
},
{
Block: big.NewInt(12),
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk2"),


@@ -7,7 +7,8 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/hashutil"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -23,15 +24,15 @@ var (
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
PendingContainers(ctx context.Context, beforeBlk *big.Int) []*DepositContainer
PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer
}
// InsertPendingDeposit into the database. If the deposit is nil, this method does nothing.
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum *big.Int, index int, depositRoot [32]byte) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.InsertPendingDeposit")
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
defer span.End()
if d == nil || blockNum == nil {
if d == nil {
log.WithFields(log.Fields{
"block": blockNum,
"deposit": d,
@@ -40,7 +41,8 @@ func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Depos
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
dc.pendingDeposits = append(dc.pendingDeposits, &DepositContainer{Deposit: d, Block: blockNum, Index: index, depositRoot: depositRoot})
dc.pendingDeposits = append(dc.pendingDeposits,
&dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
pendingDepositsCount.Inc()
span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
}
@@ -54,9 +56,9 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, beforeBlk *big.Int)
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
var depositCntrs []*DepositContainer
var depositCntrs []*dbpb.DepositContainer
for _, ctnr := range dc.pendingDeposits {
if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
depositCntrs = append(depositCntrs, ctnr)
}
}
@@ -77,15 +79,15 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, beforeBlk *big.Int)
// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.Int) []*DepositContainer {
func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
var depositCntrs []*DepositContainer
var depositCntrs []*dbpb.DepositContainer
for _, ctnr := range dc.pendingDeposits {
if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
depositCntrs = append(depositCntrs, ctnr)
}
}
@@ -151,9 +153,9 @@ func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeInde
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
var cleanDeposits []*DepositContainer
var cleanDeposits []*dbpb.DepositContainer
for _, dp := range dc.pendingDeposits {
if dp.Index >= merkleTreeIndex {
if dp.Index >= int64(merkleTreeIndex) {
cleanDeposits = append(cleanDeposits, dp)
}
}


@@ -7,14 +7,15 @@ import (
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
)
var _ = PendingDepositsFetcher(&DepositCache{})
func TestInsertPendingDeposit_OK(t *testing.T) {
dc := DepositCache{}
dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, big.NewInt(111), 100, [32]byte{})
dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, 111, 100, [32]byte{})
if len(dc.pendingDeposits) != 1 {
t.Error("Deposit not inserted")
@@ -23,7 +24,7 @@ func TestInsertPendingDeposit_OK(t *testing.T) {
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
dc := DepositCache{}
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, nil /*blockNum*/, 0, [32]byte{})
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
if len(dc.pendingDeposits) > 0 {
t.Error("Unexpected deposit insertion")
@@ -34,7 +35,7 @@ func TestRemovePendingDeposit_OK(t *testing.T) {
db := DepositCache{}
depToRemove := &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}
otherDep := &ethpb.Deposit{Proof: [][]byte{[]byte("B")}}
db.pendingDeposits = []*DepositContainer{
db.pendingDeposits = []*dbpb.DepositContainer{
{Deposit: depToRemove, Index: 1},
{Deposit: otherDep, Index: 5},
}
@@ -47,7 +48,7 @@ func TestRemovePendingDeposit_OK(t *testing.T) {
func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
dc := DepositCache{}
dc.pendingDeposits = []*DepositContainer{{Deposit: &ethpb.Deposit{}}}
dc.pendingDeposits = []*dbpb.DepositContainer{{Deposit: &ethpb.Deposit{}}}
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
if len(dc.pendingDeposits) != 1 {
t.Errorf("Deposit unexpectedly removed")
@@ -57,7 +58,7 @@ func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
func TestPendingDeposit_RoundTrip(t *testing.T) {
dc := DepositCache{}
dep := &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}
dc.InsertPendingDeposit(context.Background(), dep, big.NewInt(111), 100, [32]byte{})
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
dc.RemovePendingDeposit(context.Background(), dep)
if len(dc.pendingDeposits) != 0 {
t.Error("Failed to insert & delete a pending deposit")
@@ -67,10 +68,10 @@ func TestPendingDeposit_RoundTrip(t *testing.T) {
func TestPendingDeposits_OK(t *testing.T) {
dc := DepositCache{}
dc.pendingDeposits = []*DepositContainer{
{Block: big.NewInt(2), Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}},
{Block: big.NewInt(4), Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("B")}}},
{Block: big.NewInt(6), Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("c")}}},
dc.pendingDeposits = []*dbpb.DepositContainer{
{Eth1BlockHeight: 2, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}},
{Eth1BlockHeight: 4, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("B")}}},
{Eth1BlockHeight: 6, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("c")}}},
}
deposits := dc.PendingDeposits(context.Background(), big.NewInt(4))
@@ -92,25 +93,24 @@ func TestPendingDeposits_OK(t *testing.T) {
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
dc := DepositCache{}
dc.pendingDeposits = []*DepositContainer{
{Block: big.NewInt(2), Index: 2},
{Block: big.NewInt(4), Index: 4},
{Block: big.NewInt(6), Index: 6},
{Block: big.NewInt(8), Index: 8},
{Block: big.NewInt(10), Index: 10},
{Block: big.NewInt(12), Index: 12},
dc.pendingDeposits = []*dbpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},
{Eth1BlockHeight: 4, Index: 4},
{Eth1BlockHeight: 6, Index: 6},
{Eth1BlockHeight: 8, Index: 8},
{Eth1BlockHeight: 10, Index: 10},
{Eth1BlockHeight: 12, Index: 12},
}
dc.PrunePendingDeposits(context.Background(), 0)
expected := []*DepositContainer{
{Block: big.NewInt(2), Index: 2},
{Block: big.NewInt(4), Index: 4},
{Block: big.NewInt(6), Index: 6},
{Block: big.NewInt(8), Index: 8},
{Block: big.NewInt(10), Index: 10},
{Block: big.NewInt(12), Index: 12},
expected := []*dbpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},
{Eth1BlockHeight: 4, Index: 4},
{Eth1BlockHeight: 6, Index: 6},
{Eth1BlockHeight: 8, Index: 8},
{Eth1BlockHeight: 10, Index: 10},
{Eth1BlockHeight: 12, Index: 12},
}
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
}
@@ -119,40 +119,40 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
func TestPrunePendingDeposits_OK(t *testing.T) {
dc := DepositCache{}
dc.pendingDeposits = []*DepositContainer{
{Block: big.NewInt(2), Index: 2},
{Block: big.NewInt(4), Index: 4},
{Block: big.NewInt(6), Index: 6},
{Block: big.NewInt(8), Index: 8},
{Block: big.NewInt(10), Index: 10},
{Block: big.NewInt(12), Index: 12},
dc.pendingDeposits = []*dbpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},
{Eth1BlockHeight: 4, Index: 4},
{Eth1BlockHeight: 6, Index: 6},
{Eth1BlockHeight: 8, Index: 8},
{Eth1BlockHeight: 10, Index: 10},
{Eth1BlockHeight: 12, Index: 12},
}
dc.PrunePendingDeposits(context.Background(), 6)
expected := []*DepositContainer{
{Block: big.NewInt(6), Index: 6},
{Block: big.NewInt(8), Index: 8},
{Block: big.NewInt(10), Index: 10},
{Block: big.NewInt(12), Index: 12},
expected := []*dbpb.DepositContainer{
{Eth1BlockHeight: 6, Index: 6},
{Eth1BlockHeight: 8, Index: 8},
{Eth1BlockHeight: 10, Index: 10},
{Eth1BlockHeight: 12, Index: 12},
}
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
}
dc.pendingDeposits = []*DepositContainer{
{Block: big.NewInt(2), Index: 2},
{Block: big.NewInt(4), Index: 4},
{Block: big.NewInt(6), Index: 6},
{Block: big.NewInt(8), Index: 8},
{Block: big.NewInt(10), Index: 10},
{Block: big.NewInt(12), Index: 12},
dc.pendingDeposits = []*dbpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},
{Eth1BlockHeight: 4, Index: 4},
{Eth1BlockHeight: 6, Index: 6},
{Eth1BlockHeight: 8, Index: 8},
{Eth1BlockHeight: 10, Index: 10},
{Eth1BlockHeight: 12, Index: 12},
}
dc.PrunePendingDeposits(context.Background(), 10)
expected = []*DepositContainer{
{Block: big.NewInt(10), Index: 10},
{Block: big.NewInt(12), Index: 12},
expected = []*dbpb.DepositContainer{
{Eth1BlockHeight: 10, Index: 10},
{Eth1BlockHeight: 12, Index: 12},
}
if !reflect.DeepEqual(dc.pendingDeposits, expected) {


@@ -91,7 +91,7 @@ func TestEth1Data_MaxSize(t *testing.T) {
for i := 0; i < maxEth1DataVoteSize+1; i++ {
var hash [32]byte
copy(hash[:], []byte(strconv.Itoa(i)))
copy(hash[:], strconv.Itoa(i))
eInfo := &Eth1DataVote{
Eth1DataHash: hash,
}


@@ -3,12 +3,7 @@ package cache
import "github.com/prysmaticlabs/prysm/shared/featureconfig"
func init() {
featureconfig.Init(&featureconfig.Flag{
EnableAttestationCache: true,
EnableEth1DataVoteCache: true,
EnableShuffledIndexCache: true,
EnableCommitteeCache: true,
EnableActiveCountCache: true,
EnableActiveIndicesCache: true,
featureconfig.Init(&featureconfig.Flags{
EnableEth1DataVoteCache: true,
})
}

beacon-chain/cache/hot_state_cache.go (new file)

@@ -0,0 +1,61 @@
package cache
import (
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)
var (
// hotStateCacheSize defines the max number of hot states this cache can hold.
hotStateCacheSize = 16
// Metrics
hotStateCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "hot_state_cache_hit",
Help: "The total number of cache hits on the hot state cache.",
})
hotStateCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "hot_state_cache_miss",
Help: "The total number of cache misses on the hot state cache.",
})
)
// HotStateCache is used to store the processed beacon state after the finalized checkpoint.
type HotStateCache struct {
cache *lru.Cache
}
// NewHotStateCache initializes the map and underlying cache.
func NewHotStateCache() *HotStateCache {
cache, err := lru.New(hotStateCacheSize)
if err != nil {
panic(err)
}
return &HotStateCache{
cache: cache,
}
}
// Get returns a cached response via input block root, if any.
// The response is copied by default.
func (c *HotStateCache) Get(root [32]byte) *stateTrie.BeaconState {
item, exists := c.cache.Get(root)
if exists && item != nil {
hotStateCacheHit.Inc()
return item.(*stateTrie.BeaconState).Copy()
}
hotStateCacheMiss.Inc()
return nil
}
// Put the response in the cache.
func (c *HotStateCache) Put(root [32]byte, state *stateTrie.BeaconState) {
c.cache.Add(root, state)
}
// Has returns true if the key exists in the cache.
func (c *HotStateCache) Has(root [32]byte) bool {
return c.cache.Contains(root)
}
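
Because Get copies the cached state before returning it, callers can mutate the returned object freely without corrupting the cache. A hypothetical test-style sketch of that property, written in the style of the test file below; it assumes the state type exposes a Slot() getter alongside the SetSlot() setter used elsewhere in these diffs:

// TestHotStateCache_GetReturnsCopy is a hypothetical sketch, not part of the diff.
func TestHotStateCache_GetReturnsCopy(t *testing.T) {
	c := cache.NewHotStateCache()
	root := [32]byte{'B'}
	st, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 7})
	if err != nil {
		t.Fatal(err)
	}
	c.Put(root, st)
	got := c.Get(root)
	if err := got.SetSlot(99); err != nil { // mutate only the returned copy
		t.Fatal(err)
	}
	if c.Get(root).Slot() != 7 {
		t.Error("mutating the returned copy should not affect the cached state")
	}
}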


@@ -0,0 +1,41 @@
package cache_test
import (
"reflect"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
func TestHotStateCache_RoundTrip(t *testing.T) {
c := cache.NewHotStateCache()
root := [32]byte{'A'}
state := c.Get(root)
if state != nil {
t.Errorf("Empty cache returned an object: %v", state)
}
if c.Has(root) {
t.Error("Empty cache has an object")
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 10,
})
if err != nil {
t.Fatal(err)
}
c.Put(root, state)
if !c.Has(root) {
t.Error("Empty cache does not have an object")
}
res := c.Get(root)
if res == nil {
t.Error("Cache did not return the stored object")
}
if !reflect.DeepEqual(state.CloneInnerState(), res.CloneInnerState()) {
t.Error("Expected equal protos to return from cache")
}
}

beacon-chain/cache/skip_slot_cache.go (new file)

@@ -0,0 +1,130 @@
package cache
import (
"context"
"math"
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
)
var (
// Metrics
skipSlotCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "skip_slot_cache_hit",
Help: "The total number of cache hits on the skip slot cache.",
})
skipSlotCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "skip_slot_cache_miss",
Help: "The total number of cache misses on the skip slot cache.",
})
)
// SkipSlotCache is used to store the cached results of processing skip slots in state.ProcessSlots.
type SkipSlotCache struct {
cache *lru.Cache
lock sync.RWMutex
inProgress map[uint64]bool
}
// NewSkipSlotCache initializes the map and underlying cache.
func NewSkipSlotCache() *SkipSlotCache {
cache, err := lru.New(8)
if err != nil {
panic(err)
}
return &SkipSlotCache{
cache: cache,
inProgress: make(map[uint64]bool),
}
}
// Get waits for any in progress calculation to complete before returning a
// cached response, if any.
func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.BeaconState, error) {
if !featureconfig.Get().EnableSkipSlotsCache {
// Return a miss result if cache is not enabled.
skipSlotCacheMiss.Inc()
return nil, nil
}
delay := minDelay
// Another identical request may be in progress already. Let's wait until
// any in progress request resolves or our timeout is exceeded.
for {
if ctx.Err() != nil {
return nil, ctx.Err()
}
c.lock.RLock()
if !c.inProgress[slot] {
c.lock.RUnlock()
break
}
c.lock.RUnlock()
// This increasing backoff is to decrease the CPU cycles while waiting
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = math.Min(delay, maxDelay)
}
item, exists := c.cache.Get(slot)
if exists && item != nil {
skipSlotCacheHit.Inc()
return item.(*stateTrie.BeaconState).Copy(), nil
}
skipSlotCacheMiss.Inc()
return nil, nil
}
// MarkInProgress marks a request as in progress so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
if !featureconfig.Get().EnableSkipSlotsCache {
return nil
}
c.lock.Lock()
defer c.lock.Unlock()
if c.inProgress[slot] {
return ErrAlreadyInProgress
}
c.inProgress[slot] = true
return nil
}
// MarkNotInProgress will release the lock on a given request. This should be
// called after Put.
func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
if !featureconfig.Get().EnableSkipSlotsCache {
return nil
}
c.lock.Lock()
defer c.lock.Unlock()
delete(c.inProgress, slot)
return nil
}
// Put the response in the cache.
func (c *SkipSlotCache) Put(ctx context.Context, slot uint64, state *stateTrie.BeaconState) error {
if !featureconfig.Get().EnableSkipSlotsCache {
return nil
}
// Copy state so cached value is not mutated.
c.cache.Add(slot, state.Copy())
return nil
}
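
To tie the pieces together, a hypothetical caller-side sketch (not part of the diff) of the intended sequence around an expensive skip-slot computation. It is assumed to sit in the cache package next to the code above and leans on the ErrAlreadyInProgress sentinel referenced by MarkInProgress:

// advanceWithSkipSlotCache is a hypothetical helper: only one goroutine computes the state
// for a given slot, while concurrent callers block inside Get until the result is published.
func advanceWithSkipSlotCache(ctx context.Context, c *SkipSlotCache, slot uint64, compute func() (*stateTrie.BeaconState, error)) (*stateTrie.BeaconState, error) {
	if cached, err := c.Get(ctx, slot); cached != nil || err != nil {
		return cached, err
	}
	if err := c.MarkInProgress(slot); err == ErrAlreadyInProgress {
		// Another goroutine is already computing this slot; wait for it via Get.
		return c.Get(ctx, slot)
	} else if err != nil {
		return nil, err
	}
	defer func() {
		_ = c.MarkNotInProgress(slot) // always release the marker so waiters stop spinning
	}()
	st, err := compute()
	if err != nil {
		return nil, err
	}
	if err := c.Put(ctx, slot, st); err != nil {
		return nil, err
	}
	return st, nil
}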


@@ -0,0 +1,57 @@
package cache_test
import (
"context"
"reflect"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
)
func TestSkipSlotCache_RoundTrip(t *testing.T) {
ctx := context.Background()
c := cache.NewSkipSlotCache()
fc := featureconfig.Get()
fc.EnableSkipSlotsCache = true
featureconfig.Init(fc)
state, err := c.Get(ctx, 5)
if err != nil {
t.Error(err)
}
if state != nil {
t.Errorf("Empty cache returned an object: %v", state)
}
if err := c.MarkInProgress(5); err != nil {
t.Error(err)
}
state, err = stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 10,
})
if err != nil {
t.Fatal(err)
}
if err = c.Put(ctx, 5, state); err != nil {
t.Error(err)
}
if err := c.MarkNotInProgress(5); err != nil {
t.Error(err)
}
res, err := c.Get(ctx, 5)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(state.CloneInnerState(), res.CloneInnerState()) {
t.Error("Expected equal protos to return from cache")
}
}


@@ -14,19 +14,22 @@ go_library(
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state/stateutils:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/mathutil:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
@@ -37,6 +40,7 @@ go_test(
name = "go_default_test",
size = "medium",
srcs = [
"block_operations_fuzz_test.go",
"block_operations_test.go",
"block_test.go",
"eth1_data_test.go",
@@ -44,17 +48,17 @@ go_test(
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/core/state/stateutils:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_phoreproject_bls//:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",


@@ -4,18 +4,20 @@
package blocks
import (
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
)
// NewGenesisBlock returns the canonical genesis block for the beacon chain protocol.
func NewGenesisBlock(stateRoot []byte) *ethpb.BeaconBlock {
func NewGenesisBlock(stateRoot []byte) *ethpb.SignedBeaconBlock {
zeroHash := params.BeaconConfig().ZeroHash[:]
genBlock := &ethpb.BeaconBlock{
ParentRoot: zeroHash,
StateRoot: stateRoot,
Body: &ethpb.BeaconBlockBody{},
Signature: params.BeaconConfig().EmptySignature[:],
}
return genBlock
return &ethpb.SignedBeaconBlock{
Block: genBlock,
Signature: params.BeaconConfig().EmptySignature[:],
}
}
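
Callers that previously consumed the returned *ethpb.BeaconBlock now need to reach through the Block field of the signed wrapper; a minimal hypothetical sketch:

// genesisBlockUsageSketch is a hypothetical example of the caller-side adjustment.
func genesisBlockUsageSketch() {
	signed := NewGenesisBlock(make([]byte, 32)) // *ethpb.SignedBeaconBlock
	inner := signed.Block                       // block fields now live on the inner BeaconBlock
	sig := signed.Signature                     // the signature moved to the wrapper
	_, _ = inner, sig
}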

File diff suppressed because it is too large.


@@ -0,0 +1,441 @@
package blocks
import (
"context"
"testing"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
fuzz "github.com/google/gofuzz"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
//"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ctx := context.Background()
state := &ethereum_beacon_p2p_v1.BeaconState{}
att := &eth.Attestation{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(att)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
_, _ = ProcessAttestationNoVerify(ctx, s, att)
}
}
func TestFuzzProcessBlockHeader_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
block := &eth.SignedBeaconBlock{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(block)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
_, _ = ProcessBlockHeader(s, block)
}
}
func TestFuzzverifySigningRoot_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
pubkey := [48]byte{}
sig := [96]byte{}
domain := [4]byte{}
p := []byte{}
s := []byte{}
d := uint64(0)
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(&pubkey)
fuzzer.Fuzz(&sig)
fuzzer.Fuzz(&domain)
fuzzer.Fuzz(state)
fuzzer.Fuzz(&p)
fuzzer.Fuzz(&s)
fuzzer.Fuzz(&d)
domain := bytesutil.FromBytes4(domain[:])
verifySigningRoot(state, pubkey[:], sig[:], domain)
verifySigningRoot(state, p, s, d)
}
}
func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ba := []byte{}
pubkey := [48]byte{}
sig := [96]byte{}
domain := [4]byte{}
p := []byte{}
s := []byte{}
d := uint64(0)
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(&ba)
fuzzer.Fuzz(&pubkey)
fuzzer.Fuzz(&sig)
fuzzer.Fuzz(&domain)
fuzzer.Fuzz(&p)
fuzzer.Fuzz(&s)
fuzzer.Fuzz(&d)
domain := bytesutil.FromBytes4(domain[:])
verifySignature(ba, pubkey[:], sig[:], domain)
verifySignature(ba, p, s, d)
}
}
func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
block := &eth.BeaconBlock{}
state := &stateTrie.BeaconState{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(block)
s, err := ProcessEth1DataInBlock(state, block)
if err != nil && s != nil {
t.Fatalf("state should be nil on err. found: %v on error: %v for state: %v and block: %v", s, err, state, block)
}
}
}
func TestFuzzareEth1DataEqual_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
eth1data := &eth.Eth1Data{}
eth1data2 := &eth.Eth1Data{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(eth1data)
fuzzer.Fuzz(eth1data2)
areEth1DataEqual(eth1data, eth1data2)
areEth1DataEqual(eth1data, eth1data)
}
}
func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
eth1data := &eth.Eth1Data{}
stateVotes := []*eth.Eth1Data{}
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(eth1data)
fuzzer.Fuzz(&stateVotes)
s, _ := beaconstate.InitializeFromProto(&ethereum_beacon_p2p_v1.BeaconState{
Eth1DataVotes: stateVotes,
})
Eth1DataHasEnoughSupport(s, eth1data)
}
}
func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
block := &eth.BeaconBlock{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(block)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
_, _ = ProcessBlockHeaderNoVerify(s, block)
}
}
func TestFuzzProcessRandao_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessRandao(s, blockBody)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
}
}
func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessRandaoNoVerify(s, blockBody)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
}
}
func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessProposerSlashings(ctx, s, blockBody)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
}
}
func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
proposerSlashing := &eth.ProposerSlashing{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(proposerSlashing)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
VerifyProposerSlashing(s, proposerSlashing)
}
}
func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessAttesterSlashings(ctx, s, blockBody)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
}
}
func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
attesterSlashing := &eth.AttesterSlashing{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attesterSlashing)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
VerifyAttesterSlashing(ctx, s, attesterSlashing)
}
}
func TestFuzzIsSlashableAttestationData_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
attestationData := &eth.AttestationData{}
attestationData2 := &eth.AttestationData{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(attestationData)
fuzzer.Fuzz(attestationData2)
IsSlashableAttestationData(attestationData, attestationData2)
}
}
func TestFuzzslashableAttesterIndices_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
attesterSlashing := &eth.AttesterSlashing{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(attesterSlashing)
slashableAttesterIndices(attesterSlashing)
}
}
func TestFuzzProcessAttestations_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessAttestations(ctx, s, blockBody)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
}
}
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessAttestationsNoVerify(ctx, s, blockBody)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
}
}
func TestFuzzProcessAttestation_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
attestation := &eth.Attestation{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attestation)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessAttestation(ctx, s, attestation)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, attestation)
}
}
}
func TestFuzzVerifyIndexedAttestation_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
idxAttestation := &eth.IndexedAttestation{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(idxAttestation)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
VerifyIndexedAttestation(ctx, s, idxAttestation)
}
}
func TestFuzzVerifyAttestation_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
attestation := &eth.Attestation{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attestation)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
VerifyAttestation(ctx, s, attestation)
}
}
func TestFuzzProcessDeposits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessDeposits(ctx, s, blockBody)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
}
}
func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
deposit := &eth.Deposit{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessPreGenesisDeposit(ctx, s, deposit)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
}
}
func TestFuzzProcessDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
deposit := &eth.Deposit{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessDeposit(s, deposit)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
}
}
func TestFuzzverifyDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
deposit := &eth.Deposit{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
verifyDeposit(s, deposit)
}
}
func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessVoluntaryExits(ctx, s, blockBody)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
}
}
func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
r, err := ProcessVoluntaryExitsNoVerify(s, blockBody)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
}
}
func TestFuzzVerifyExit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ve := &eth.SignedVoluntaryExit{}
val := &eth.Validator{}
fork := &pb.Fork{}
var slot uint64
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(ve)
fuzzer.Fuzz(val)
fuzzer.Fuzz(fork)
fuzzer.Fuzz(&slot)
VerifyExit(val, slot, fork, ve)
}
}
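
All of the fuzz tests above share one harness shape: seed gofuzz deterministically, randomize the protobuf inputs, wrap the raw state with InitializeFromProtoUnsafe, and check the invariant that a handler never returns a non-nil result together with an error. A minimal sketch of that pattern follows; the runOperationFuzz helper and the fuzzOperation signature are illustrative only, and the import aliases just mirror the ones used elsewhere in this diff.

package blocks

import (
	"testing"

	fuzz "github.com/google/gofuzz"
	eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

// fuzzOperation stands in for any of the context-free block operation
// handlers exercised above (ProcessRandao, ProcessVoluntaryExitsNoVerify, ...).
type fuzzOperation func(*beaconstate.BeaconState, *eth.BeaconBlockBody) (*beaconstate.BeaconState, error)

// runOperationFuzz is an illustrative helper, not code from the diff.
func runOperationFuzz(t *testing.T, op fuzzOperation, iterations int) {
	fuzzer := fuzz.NewWithSeed(0) // fixed seed keeps the corpus deterministic
	state := &pb.BeaconState{}
	body := &eth.BeaconBlockBody{}
	for i := 0; i < iterations; i++ {
		fuzzer.Fuzz(state)
		fuzzer.Fuzz(body)
		s, _ := beaconstate.InitializeFromProtoUnsafe(state)
		r, err := op(s, body)
		if err != nil && r != nil {
			t.Fatalf("return value should be nil on err, got %v (err: %v)", r, err)
		}
	}
}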

File diff suppressed because it is too large

View File

@@ -11,11 +11,11 @@ func TestGenesisBlock_InitializedCorrectly(t *testing.T) {
stateHash := []byte{0}
b1 := blocks.NewGenesisBlock(stateHash)
if b1.ParentRoot == nil {
if b1.Block.ParentRoot == nil {
t.Error("genesis block missing ParentHash field")
}
if !bytes.Equal(b1.StateRoot, stateHash) {
if !bytes.Equal(b1.Block.StateRoot, stateHash) {
t.Error("genesis block StateRootHash32 isn't initialized correctly")
}
}
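
The two changed assertions above follow from NewGenesisBlock now returning a signed block container, so the inner fields are reached through its Block member. A hedged sketch of how the updated test body reads, assuming the v1alpha1 SignedBeaconBlock layout:

stateHash := []byte{0}
signed := blocks.NewGenesisBlock(stateHash) // now a signed container, not a bare block
if signed.Block.ParentRoot == nil {
	t.Error("genesis block missing ParentHash field")
}
if !bytes.Equal(signed.Block.StateRoot, stateHash) {
	t.Error("genesis block StateRootHash32 isn't initialized correctly")
}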

View File

@@ -4,9 +4,10 @@ import (
"fmt"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
)
@@ -92,9 +93,9 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
c.SlotsPerEth1VotingPeriod = tt.votingPeriodLength
params.OverrideBeaconConfig(c)
s := &pb.BeaconState{
s, _ := beaconstate.InitializeFromProto(&pb.BeaconState{
Eth1DataVotes: tt.stateVotes,
}
})
result, err := blocks.Eth1DataHasEnoughSupport(s, tt.data)
if err != nil {
t.Fatal(err)

View File

@@ -35,11 +35,12 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/core/state/stateutils:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/params/spectest:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
@@ -68,11 +69,12 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/core/state/stateutils:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/params/spectest:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",

View File

@@ -4,9 +4,9 @@ import (
"path"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params/spectest"
"github.com/prysmaticlabs/prysm/shared/testutil"
)

View File

@@ -4,9 +4,9 @@ import (
"path"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params/spectest"
"github.com/prysmaticlabs/prysm/shared/testutil"
)

View File

@@ -8,11 +8,11 @@ import (
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params/spectest"
"github.com/prysmaticlabs/prysm/shared/testutil"
"gopkg.in/d4l3k/messagediff.v1"
@@ -26,7 +26,6 @@ func runBlockHeaderTest(t *testing.T, config string) {
testFolders, testsFolderPath := testutil.TestFolders(t, config, "operations/block_header/pyspec_tests")
for _, folder := range testFolders {
t.Run(folder.Name(), func(t *testing.T) {
helpers.ClearAllCaches()
blockFile, err := testutil.BazelFileBytes(testsFolderPath, folder.Name(), "block.ssz")
if err != nil {
t.Fatal(err)
@@ -40,10 +39,14 @@ func runBlockHeaderTest(t *testing.T, config string) {
if err != nil {
t.Fatal(err)
}
preBeaconState := &pb.BeaconState{}
if err := ssz.Unmarshal(preBeaconStateFile, preBeaconState); err != nil {
preBeaconStateBase := &pb.BeaconState{}
if err := ssz.Unmarshal(preBeaconStateFile, preBeaconStateBase); err != nil {
t.Fatalf("Failed to unmarshal: %v", err)
}
preBeaconState, err := beaconstate.InitializeFromProto(preBeaconStateBase)
if err != nil {
t.Fatal(err)
}
// If the post.ssz is not present, it means the test should fail on our end.
postSSZFilepath, err := bazel.Runfile(path.Join(testsFolderPath, folder.Name(), "post.ssz"))
@@ -54,7 +57,8 @@ func runBlockHeaderTest(t *testing.T, config string) {
t.Fatal(err)
}
beaconState, err := blocks.ProcessBlockHeader(preBeaconState, block)
// Spectest blocks are not signed, so we'll call NoVerify to skip sig verification.
beaconState, err := blocks.ProcessBlockHeaderNoVerify(preBeaconState, block)
if postSSZExists {
if err != nil {
t.Fatalf("Unexpected error: %v", err)
@@ -69,9 +73,8 @@ func runBlockHeaderTest(t *testing.T, config string) {
if err := ssz.Unmarshal(postBeaconStateFile, postBeaconState); err != nil {
t.Fatalf("Failed to unmarshal: %v", err)
}
if !proto.Equal(beaconState, postBeaconState) {
diff, _ := messagediff.PrettyDiff(beaconState, postBeaconState)
if !proto.Equal(beaconState.CloneInnerState(), postBeaconState) {
diff, _ := messagediff.PrettyDiff(beaconState.CloneInnerState(), postBeaconState)
t.Log(diff)
t.Fatal("Post state does not match expected")
}
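
Because the processing functions now take the wrapped state type, the spectest above first lifts the raw protobuf into it and unwraps again for the final comparison. A condensed sketch of that updated flow, with expectedPostState standing in for the proto decoded from post.ssz:

pre := &pb.BeaconState{}
if err := ssz.Unmarshal(preBeaconStateFile, pre); err != nil {
	t.Fatalf("Failed to unmarshal: %v", err)
}
wrapped, err := beaconstate.InitializeFromProto(pre)
if err != nil {
	t.Fatal(err)
}
// Spectest blocks are unsigned, so signature verification is skipped.
post, err := blocks.ProcessBlockHeaderNoVerify(wrapped, block)
if err != nil {
	t.Fatal(err)
}
if !proto.Equal(post.CloneInnerState(), expectedPostState) {
	t.Fatal("Post state does not match expected")
}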

View File

@@ -10,11 +10,12 @@ import (
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params/spectest"
"github.com/prysmaticlabs/prysm/shared/testutil"
"gopkg.in/d4l3k/messagediff.v1"
@@ -28,15 +29,19 @@ func runBlockProcessingTest(t *testing.T, config string) {
testFolders, testsFolderPath := testutil.TestFolders(t, config, "sanity/blocks/pyspec_tests")
for _, folder := range testFolders {
t.Run(folder.Name(), func(t *testing.T) {
helpers.ClearAllCaches()
helpers.ClearCache()
preBeaconStateFile, err := testutil.BazelFileBytes(testsFolderPath, folder.Name(), "pre.ssz")
if err != nil {
t.Fatal(err)
}
beaconState := &pb.BeaconState{}
if err := ssz.Unmarshal(preBeaconStateFile, beaconState); err != nil {
beaconStateBase := &pb.BeaconState{}
if err := ssz.Unmarshal(preBeaconStateFile, beaconStateBase); err != nil {
t.Fatalf("Failed to unmarshal: %v", err)
}
beaconState, err := beaconstate.InitializeFromProto(beaconStateBase)
if err != nil {
t.Fatal(err)
}
file, err := testutil.BazelFileBytes(testsFolderPath, folder.Name(), "meta.yaml")
if err != nil {
@@ -55,7 +60,7 @@ func runBlockProcessingTest(t *testing.T, config string) {
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{}
block := &ethpb.SignedBeaconBlock{}
if err := ssz.Unmarshal(blockFile, block); err != nil {
t.Fatalf("Failed to unmarshal: %v", err)
}
@@ -89,8 +94,8 @@ func runBlockProcessingTest(t *testing.T, config string) {
t.Fatalf("Failed to unmarshal: %v", err)
}
if !proto.Equal(beaconState, postBeaconState) {
diff, _ := messagediff.PrettyDiff(beaconState, postBeaconState)
if !proto.Equal(beaconState.CloneInnerState(), postBeaconState) {
diff, _ := messagediff.PrettyDiff(beaconState.CloneInnerState(), postBeaconState)
t.Log(diff)
t.Fatal("Post state does not match expected")
}

View File

@@ -4,9 +4,9 @@ import (
"path"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params/spectest"
"github.com/prysmaticlabs/prysm/shared/testutil"
)

View File

@@ -4,9 +4,9 @@ import (
"path"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params/spectest"
"github.com/prysmaticlabs/prysm/shared/testutil"
)

View File

@@ -4,9 +4,9 @@ import (
"path"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params/spectest"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
@@ -24,12 +24,12 @@ func runVoluntaryExitTest(t *testing.T, config string) {
if err != nil {
t.Fatal(err)
}
voluntaryExit := &ethpb.VoluntaryExit{}
voluntaryExit := &ethpb.SignedVoluntaryExit{}
if err := ssz.Unmarshal(exitFile, voluntaryExit); err != nil {
t.Fatalf("Failed to unmarshal: %v", err)
}
body := &ethpb.BeaconBlockBody{VoluntaryExits: []*ethpb.VoluntaryExit{voluntaryExit}}
body := &ethpb.BeaconBlockBody{VoluntaryExits: []*ethpb.SignedVoluntaryExit{voluntaryExit}}
testutil.RunBlockOperationTest(t, folderPath, body, blocks.ProcessVoluntaryExits)
})
}
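
The exit spectest now unmarshals directly into the signed container and feeds that into the block body. A small sketch of the updated wiring, assuming the v1alpha1 field names Exit and Signature on SignedVoluntaryExit:

voluntaryExit := &ethpb.SignedVoluntaryExit{}
if err := ssz.Unmarshal(exitFile, voluntaryExit); err != nil {
	t.Fatalf("Failed to unmarshal: %v", err)
}
// voluntaryExit.Exit carries the epoch and validator index; voluntaryExit.Signature carries the BLS signature.
body := &ethpb.BeaconBlockBody{VoluntaryExits: []*ethpb.SignedVoluntaryExit{voluntaryExit}}
testutil.RunBlockOperationTest(t, folderPath, body, blocks.ProcessVoluntaryExits)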

View File

@@ -2,20 +2,19 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"epoch_processing.go",
"participation.go",
],
srcs = ["epoch_processing.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/mathutil:go_default_library",
"//shared/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
],
)
@@ -24,17 +23,18 @@ go_test(
name = "go_default_test",
size = "small",
srcs = [
"epoch_processing_fuzz_test.go",
"epoch_processing_test.go",
"participation_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -5,93 +5,35 @@
package epoch
import (
"bytes"
"fmt"
"sort"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/mathutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
// MatchedAttestations is an object that contains the correctly
// voted attestations based on source, target and head criteria.
type MatchedAttestations struct {
source []*pb.PendingAttestation
Target []*pb.PendingAttestation
head []*pb.PendingAttestation
// sortableIndices implements the Sort interface to sort newly activated validator indices
// by activation epoch and by index number.
type sortableIndices struct {
indices []uint64
validators []*ethpb.Validator
}
// MatchAttestations matches the attestations gathered in a span of an epoch
// and categorize them whether they correctly voted for source, target and head.
// We combined the individual helpers from spec for efficiency and to achieve O(N) run time.
//
// Spec pseudocode definition:
// def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
// assert epoch in (get_current_epoch(state), get_previous_epoch(state))
// return state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations
//
// def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
// return [
// a for a in get_matching_source_attestations(state, epoch)
// if a.data.target_root == get_block_root(state, epoch)
// ]
//
// def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
// return [
// a for a in get_matching_source_attestations(state, epoch)
// if a.data.beacon_block_root == get_block_root_at_slot(state, get_attestation_data_slot(state, a.data))
// ]
func MatchAttestations(state *pb.BeaconState, epoch uint64) (*MatchedAttestations, error) {
currentEpoch := helpers.CurrentEpoch(state)
previousEpoch := helpers.PrevEpoch(state)
// Input epoch for matching the source attestations has to be within range
// of current epoch & previous epoch.
if epoch != currentEpoch && epoch != previousEpoch {
return nil, fmt.Errorf("input epoch: %d != current epoch: %d or previous epoch: %d",
epoch, currentEpoch, previousEpoch)
func (s sortableIndices) Len() int { return len(s.indices) }
func (s sortableIndices) Swap(i, j int) { s.indices[i], s.indices[j] = s.indices[j], s.indices[i] }
func (s sortableIndices) Less(i, j int) bool {
if s.validators[s.indices[i]].ActivationEligibilityEpoch == s.validators[s.indices[j]].ActivationEligibilityEpoch {
return s.indices[i] < s.indices[j]
}
// Decide if the source attestations are coming from current or previous epoch.
var srcAtts []*pb.PendingAttestation
if epoch == currentEpoch {
srcAtts = state.CurrentEpochAttestations
} else {
srcAtts = state.PreviousEpochAttestations
}
targetRoot, err := helpers.BlockRoot(state, epoch)
if err != nil {
return nil, errors.Wrapf(err, "could not get block root for epoch %d", epoch)
}
tgtAtts := make([]*pb.PendingAttestation, 0, len(srcAtts))
headAtts := make([]*pb.PendingAttestation, 0, len(srcAtts))
for _, srcAtt := range srcAtts {
// If the target root matches attestation's target root,
// then we know this attestation has correctly voted for target.
if bytes.Equal(srcAtt.Data.Target.Root, targetRoot) {
tgtAtts = append(tgtAtts, srcAtt)
}
headRoot, err := helpers.BlockRootAtSlot(state, srcAtt.Data.Slot)
if err != nil {
return nil, errors.Wrapf(err, "could not get block root for slot %d", srcAtt.Data.Slot)
}
if bytes.Equal(srcAtt.Data.BeaconBlockRoot, headRoot) {
headAtts = append(headAtts, srcAtt)
}
}
return &MatchedAttestations{
source: srcAtts,
Target: tgtAtts,
head: headAtts,
}, nil
return s.validators[s.indices[i]].ActivationEligibilityEpoch < s.validators[s.indices[j]].ActivationEligibilityEpoch
}
// AttestingBalance returns the total balance from all the attesting indices.
@@ -103,7 +45,7 @@ func MatchAttestations(state *pb.BeaconState, epoch uint64) (*MatchedAttestation
// Spec pseudocode definition:
// def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei:
// return get_total_balance(state, get_unslashed_attesting_indices(state, attestations))
func AttestingBalance(state *pb.BeaconState, atts []*pb.PendingAttestation) (uint64, error) {
func AttestingBalance(state *stateTrie.BeaconState, atts []*pb.PendingAttestation) (uint64, error) {
indices, err := unslashedAttestingIndices(state, atts)
if err != nil {
return 0, errors.Wrap(err, "could not get attesting indices")
@@ -111,183 +53,42 @@ func AttestingBalance(state *pb.BeaconState, atts []*pb.PendingAttestation) (uin
return helpers.TotalBalance(state, indices), nil
}
// ProcessJustificationAndFinalization processes justification and finalization during
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
//
// Spec pseudocode definition:
// def process_justification_and_finalization(state: BeaconState) -> None:
// if get_current_epoch(state) <= GENESIS_EPOCH + 1:
// return
//
// previous_epoch = get_previous_epoch(state)
// current_epoch = get_current_epoch(state)
// old_previous_justified_checkpoint = state.previous_justified_checkpoint
// old_current_justified_checkpoint = state.current_justified_checkpoint
//
// # Process justifications
// state.previous_justified_checkpoint = state.current_justified_checkpoint
// state.justification_bits[1:] = state.justification_bits[:-1]
// state.justification_bits[0] = 0b0
// matching_target_attestations = get_matching_target_attestations(state, previous_epoch) # Previous epoch
// if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
// state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
// root=get_block_root(state, previous_epoch))
// state.justification_bits[1] = 0b1
// matching_target_attestations = get_matching_target_attestations(state, current_epoch) # Current epoch
// if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
// state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
// root=get_block_root(state, current_epoch))
// state.justification_bits[0] = 0b1
//
// # Process finalizations
// bits = state.justification_bits
// # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
// if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
// state.finalized_checkpoint = old_previous_justified_checkpoint
// # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
// if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
// state.finalized_checkpoint = old_previous_justified_checkpoint
// # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
// if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
// state.finalized_checkpoint = old_current_justified_checkpoint
// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
// state.finalized_checkpoint = old_current_justified_checkpoint
func ProcessJustificationAndFinalization(state *pb.BeaconState, prevAttestedBal uint64, currAttestedBal uint64) (*pb.BeaconState, error) {
if state.Slot <= helpers.StartSlot(2) {
return state, nil
}
prevEpoch := helpers.PrevEpoch(state)
currentEpoch := helpers.CurrentEpoch(state)
oldPrevJustifiedCheckpoint := state.PreviousJustifiedCheckpoint
oldCurrJustifiedCheckpoint := state.CurrentJustifiedCheckpoint
totalBal, err := helpers.TotalActiveBalance(state)
if err != nil {
return nil, errors.Wrap(err, "could not get total balance")
}
// Process justifications
state.PreviousJustifiedCheckpoint = state.CurrentJustifiedCheckpoint
state.JustificationBits.Shift(1)
// Note: the spec refers to the bit index position starting at 1 instead of starting at zero.
// We will use that paradigm here for consistency with the godoc spec definition.
// If 2/3 or more of total balance attested in the previous epoch.
if 3*prevAttestedBal >= 2*totalBal {
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
if err != nil {
return nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
}
state.CurrentJustifiedCheckpoint = &ethpb.Checkpoint{Epoch: prevEpoch, Root: blockRoot}
state.JustificationBits.SetBitAt(1, true)
}
// If 2/3 or more of the total balance attested in the current epoch.
if 3*currAttestedBal >= 2*totalBal {
blockRoot, err := helpers.BlockRoot(state, currentEpoch)
if err != nil {
return nil, errors.Wrapf(err, "could not get block root for current epoch %d", prevEpoch)
}
state.CurrentJustifiedCheckpoint = &ethpb.Checkpoint{Epoch: currentEpoch, Root: blockRoot}
state.JustificationBits.SetBitAt(0, true)
}
// Process finalization according to ETH2.0 specifications.
justification := state.JustificationBits.Bytes()[0]
// 2nd/3rd/4th (0b1110) most recent epochs are justified, the 2nd using the 4th as source.
if justification&0x0E == 0x0E && (oldPrevJustifiedCheckpoint.Epoch+3) == currentEpoch {
state.FinalizedCheckpoint = oldPrevJustifiedCheckpoint
}
// 2nd/3rd (0b0110) most recent epochs are justified, the 2nd using the 3rd as source.
if justification&0x06 == 0x06 && (oldPrevJustifiedCheckpoint.Epoch+2) == currentEpoch {
state.FinalizedCheckpoint = oldPrevJustifiedCheckpoint
}
// 1st/2nd/3rd (0b0111) most recent epochs are justified, the 1st using the 3rd as source.
if justification&0x07 == 0x07 && (oldCurrJustifiedCheckpoint.Epoch+2) == currentEpoch {
state.FinalizedCheckpoint = oldCurrJustifiedCheckpoint
}
// The 1st/2nd (0b0011) most recent epochs are justified, the 1st using the 2nd as source
if justification&0x03 == 0x03 && (oldCurrJustifiedCheckpoint.Epoch+1) == currentEpoch {
state.FinalizedCheckpoint = oldCurrJustifiedCheckpoint
}
return state, nil
}
// ProcessRewardsAndPenalties processes the rewards and penalties of individual validator.
//
// Spec pseudocode definition:
// def process_rewards_and_penalties(state: BeaconState) -> None:
// if get_current_epoch(state) == GENESIS_EPOCH:
// return
//
// rewards1, penalties1 = get_attestation_deltas(state)
// rewards2, penalties2 = get_crosslink_deltas(state)
// for i in range(len(state.validator_registry)):
// increase_balance(state, i, rewards1[i] + rewards2[i])
// decrease_balance(state, i, penalties1[i] + penalties2[i])
func ProcessRewardsAndPenalties(state *pb.BeaconState) (*pb.BeaconState, error) {
// Can't process rewards and penalties in genesis epoch.
if helpers.CurrentEpoch(state) == 0 {
return state, nil
}
attsRewards, attsPenalties, err := attestationDelta(state)
if err != nil {
return nil, errors.Wrap(err, "could not get attestation delta")
}
for i := 0; i < len(state.Validators); i++ {
state = helpers.IncreaseBalance(state, uint64(i), attsRewards[i])
state = helpers.DecreaseBalance(state, uint64(i), attsPenalties[i])
}
return state, nil
}
// ProcessRegistryUpdates rotates validators in and out of active pool.
// the amount to rotate is determined churn limit.
//
// Spec pseudocode definition:
// def process_registry_updates(state: BeaconState) -> None:
// # Process activation eligibility and ejections
// for index, validator in enumerate(state.validator_registry):
// if (
// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
// validator.effective_balance >= MAX_EFFECTIVE_BALANCE
// ):
// validator.activation_eligibility_epoch = get_current_epoch(state)
// for index, validator in enumerate(state.validators):
// if is_eligible_for_activation_queue(validator):
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
//
// if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE:
// initiate_validator_exit(state, index)
// initiate_validator_exit(state, ValidatorIndex(index))
//
// # Queue validators eligible for activation and not dequeued for activation prior to finalized epoch
// # Queue validators eligible for activation and not yet dequeued for activation
// activation_queue = sorted([
// index for index, validator in enumerate(state.validator_registry) if
// validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and
// validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch)
// ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch)
// # Dequeued validators for activation up to churn limit (without resetting activation epoch)
// for index in activation_queue[:get_churn_limit(state)]:
// validator = state.validator_registry[index]
// if validator.activation_epoch == FAR_FUTURE_EPOCH:
// validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
// index for index, validator in enumerate(state.validators)
// if is_eligible_for_activation(state, validator)
// # Order by the sequence of activation_eligibility_epoch setting and then index
// ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index))
// # Dequeued validators for activation up to churn limit
// for index in activation_queue[:get_validator_churn_limit(state)]:
// validator = state.validators[index]
// validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
func ProcessRegistryUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState, error) {
currentEpoch := helpers.CurrentEpoch(state)
vals := state.Validators()
var err error
for idx, validator := range state.Validators {
for idx, validator := range vals {
// Process the validators for activation eligibility.
eligibleToActivate := validator.ActivationEligibilityEpoch == params.BeaconConfig().FarFutureEpoch
properBalance := validator.EffectiveBalance >= params.BeaconConfig().MaxEffectiveBalance
if eligibleToActivate && properBalance {
validator.ActivationEligibilityEpoch = currentEpoch
if helpers.IsEligibleForActivationQueue(validator) {
validator.ActivationEligibilityEpoch = helpers.CurrentEpoch(state) + 1
if err := state.UpdateValidatorAtIndex(uint64(idx), validator); err != nil {
return nil, err
}
}
// Process the validators for ejection.
isActive := helpers.IsActiveValidator(validator, currentEpoch)
belowEjectionBalance := validator.EffectiveBalance <= params.BeaconConfig().EjectionBalance
@@ -299,22 +100,24 @@ func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
}
}
// Queue the validators whose eligible to activate and sort them by activation eligibility epoch number
// Queue validators eligible for activation and not yet dequeued for activation.
var activationQ []uint64
for idx, validator := range state.Validators {
eligibleActivated := validator.ActivationEligibilityEpoch != params.BeaconConfig().FarFutureEpoch
canBeActive := validator.ActivationEpoch >= helpers.DelayedActivationExitEpoch(state.FinalizedCheckpoint.Epoch)
if eligibleActivated && canBeActive {
for idx, validator := range vals {
if helpers.IsEligibleForActivation(state, validator) {
activationQ = append(activationQ, uint64(idx))
}
}
sort.Slice(activationQ, func(i, j int) bool {
return state.Validators[i].ActivationEligibilityEpoch < state.Validators[j].ActivationEligibilityEpoch
})
sort.Sort(sortableIndices{indices: activationQ, validators: vals})
// Only activate just enough validators according to the activation churn limit.
limit := len(activationQ)
churnLimit, err := helpers.ValidatorChurnLimit(state)
activeValidatorCount, err := helpers.ActiveValidatorCount(state, currentEpoch)
if err != nil {
return nil, errors.Wrap(err, "could not get active validator count")
}
churnLimit, err := helpers.ValidatorChurnLimit(activeValidatorCount)
if err != nil {
return nil, errors.Wrap(err, "could not get churn limit")
}
@@ -323,10 +126,15 @@ func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
if int(churnLimit) < limit {
limit = int(churnLimit)
}
for _, index := range activationQ[:limit] {
validator := state.Validators[index]
if validator.ActivationEpoch == params.BeaconConfig().FarFutureEpoch {
validator.ActivationEpoch = helpers.DelayedActivationExitEpoch(currentEpoch)
validator, err := state.ValidatorAtIndex(index)
if err != nil {
return nil, err
}
validator.ActivationEpoch = helpers.ActivationExitEpoch(currentEpoch)
if err := state.UpdateValidatorAtIndex(index, validator); err != nil {
return nil, err
}
}
return state, nil
@@ -343,7 +151,7 @@ func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
// penalty_numerator = validator.effective_balance // increment * min(sum(state.slashings) * 3, total_balance)
// penalty = penalty_numerator // total_balance * increment
// decrease_balance(state, ValidatorIndex(index), penalty)
func ProcessSlashings(state *pb.BeaconState) (*pb.BeaconState, error) {
func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, error) {
currentEpoch := helpers.CurrentEpoch(state)
totalBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
@@ -354,22 +162,27 @@ func ProcessSlashings(state *pb.BeaconState) (*pb.BeaconState, error) {
exitLength := params.BeaconConfig().EpochsPerSlashingsVector
// Compute the sum of state slashings
slashings := state.Slashings()
totalSlashing := uint64(0)
for _, slashing := range state.Slashings {
for _, slashing := range slashings {
totalSlashing += slashing
}
// Compute slashing for each validator.
for index, validator := range state.Validators {
correctEpoch := (currentEpoch + exitLength/2) == validator.WithdrawableEpoch
if validator.Slashed && correctEpoch {
// a callback is used here to apply the following actions to all validators
// below equally.
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) error {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
minSlashing := mathutil.Min(totalSlashing*3, totalBalance)
increment := params.BeaconConfig().EffectiveBalanceIncrement
penaltyNumerator := validator.EffectiveBalance / increment * minSlashing
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty := penaltyNumerator / totalBalance * increment
state = helpers.DecreaseBalance(state, uint64(index), penalty)
if err := helpers.DecreaseBalance(state, uint64(idx), penalty); err != nil {
return err
}
}
}
return nil
})
return state, err
}
@@ -409,54 +222,96 @@ func ProcessSlashings(state *pb.BeaconState) (*pb.BeaconState, error) {
// # Rotate current/previous epoch attestations
// state.previous_epoch_attestations = state.current_epoch_attestations
// state.current_epoch_attestations = []
func ProcessFinalUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState, error) {
currentEpoch := helpers.CurrentEpoch(state)
nextEpoch := currentEpoch + 1
// Reset ETH1 data votes.
if (state.Slot+1)%params.BeaconConfig().SlotsPerEth1VotingPeriod == 0 {
state.Eth1DataVotes = []*ethpb.Eth1Data{}
if (state.Slot()+1)%params.BeaconConfig().SlotsPerEth1VotingPeriod == 0 {
if err := state.SetEth1DataVotes([]*ethpb.Eth1Data{}); err != nil {
return nil, err
}
}
bals := state.Balances()
// Update effective balances with hysteresis.
for i, v := range state.Validators {
balance := state.Balances[i]
validatorFunc := func(idx int, val *ethpb.Validator) error {
if val == nil {
return fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
return fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
}
balance := bals[idx]
halfInc := params.BeaconConfig().EffectiveBalanceIncrement / 2
if balance < v.EffectiveBalance || v.EffectiveBalance+3*halfInc < balance {
v.EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance
if v.EffectiveBalance > balance-balance%params.BeaconConfig().EffectiveBalanceIncrement {
v.EffectiveBalance = balance - balance%params.BeaconConfig().EffectiveBalanceIncrement
if balance < val.EffectiveBalance || val.EffectiveBalance+3*halfInc < balance {
val.EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance
if val.EffectiveBalance > balance-balance%params.BeaconConfig().EffectiveBalanceIncrement {
val.EffectiveBalance = balance - balance%params.BeaconConfig().EffectiveBalanceIncrement
}
}
return nil
}
if err := state.ApplyToEveryValidator(validatorFunc); err != nil {
return nil, err
}
// Set total slashed balances.
slashedExitLength := params.BeaconConfig().EpochsPerSlashingsVector
state.Slashings[nextEpoch%slashedExitLength] = 0
slashedEpoch := int(nextEpoch % slashedExitLength)
slashings := state.Slashings()
if len(slashings) != int(slashedExitLength) {
return nil, fmt.Errorf(
"state slashing length %d different than EpochsPerHistoricalVector %d",
len(slashings),
slashedExitLength,
)
}
if err := state.UpdateSlashingsAtIndex(uint64(slashedEpoch) /* index */, 0 /* value */); err != nil {
return nil, err
}
// Set RANDAO mix.
randaoMixLength := params.BeaconConfig().EpochsPerHistoricalVector
mix := helpers.RandaoMix(state, currentEpoch)
state.RandaoMixes[nextEpoch%randaoMixLength] = mix
if state.RandaoMixesLength() != int(randaoMixLength) {
return nil, fmt.Errorf(
"state randao length %d different than EpochsPerHistoricalVector %d",
state.RandaoMixesLength(),
randaoMixLength,
)
}
mix, err := helpers.RandaoMix(state, currentEpoch)
if err != nil {
return nil, err
}
if err := state.UpdateRandaoMixesAtIndex(mix, nextEpoch%randaoMixLength); err != nil {
return nil, err
}
// Set historical root accumulator.
epochsPerHistoricalRoot := params.BeaconConfig().SlotsPerHistoricalRoot / params.BeaconConfig().SlotsPerEpoch
if nextEpoch%epochsPerHistoricalRoot == 0 {
historicalBatch := &pb.HistoricalBatch{
BlockRoots: state.BlockRoots,
StateRoots: state.StateRoots,
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
}
batchRoot, err := ssz.HashTreeRoot(historicalBatch)
if err != nil {
return nil, errors.Wrap(err, "could not hash historical batch")
}
state.HistoricalRoots = append(state.HistoricalRoots, batchRoot[:])
if err := state.AppendHistoricalRoots(batchRoot); err != nil {
return nil, err
}
}
// Rotate current and previous epoch attestations.
state.PreviousEpochAttestations = state.CurrentEpochAttestations
state.CurrentEpochAttestations = []*pb.PendingAttestation{}
if err := state.SetPreviousEpochAttestations(state.CurrentEpochAttestations()); err != nil {
return nil, err
}
if err := state.SetCurrentEpochAttestations([]*pb.PendingAttestation{}); err != nil {
return nil, err
}
return state, nil
}
@@ -470,11 +325,16 @@ func ProcessFinalUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
// for a in attestations:
// output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits))
// return set(filter(lambda index: not state.validators[index].slashed, output))
func unslashedAttestingIndices(state *pb.BeaconState, atts []*pb.PendingAttestation) ([]uint64, error) {
func unslashedAttestingIndices(state *stateTrie.BeaconState, atts []*pb.PendingAttestation) ([]uint64, error) {
var setIndices []uint64
seen := make(map[uint64]bool)
for _, att := range atts {
attestingIndices, err := helpers.AttestingIndices(state, att.Data, att.AggregationBits)
committee, err := helpers.BeaconCommitteeFromState(state, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return nil, err
}
attestingIndices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
if err != nil {
return nil, errors.Wrap(err, "could not get attester indices")
}
@@ -492,7 +352,7 @@ func unslashedAttestingIndices(state *pb.BeaconState, atts []*pb.PendingAttestat
sort.Slice(setIndices, func(i, j int) bool { return setIndices[i] < setIndices[j] })
// Remove the slashed validator indices.
for i := 0; i < len(setIndices); i++ {
if state.Validators[setIndices[i]].Slashed {
if v, _ := state.ValidatorAtIndex(setIndices[i]); v != nil && v.Slashed {
setIndices = append(setIndices[:i], setIndices[i+1:]...)
}
}
@@ -511,193 +371,17 @@ func unslashedAttestingIndices(state *pb.BeaconState, atts []*pb.PendingAttestat
// total_balance = get_total_active_balance(state)
// effective_balance = state.validator_registry[index].effective_balance
// return effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH
func BaseReward(state *pb.BeaconState, index uint64) (uint64, error) {
func BaseReward(state *stateTrie.BeaconState, index uint64) (uint64, error) {
totalBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
return 0, errors.Wrap(err, "could not calculate active balance")
}
effectiveBalance := state.Validators[index].EffectiveBalance
val, err := state.ValidatorAtIndex(index)
if err != nil {
return 0, err
}
effectiveBalance := val.EffectiveBalance
baseReward := effectiveBalance * params.BeaconConfig().BaseRewardFactor /
mathutil.IntegerSquareRoot(totalBalance) / params.BeaconConfig().BaseRewardsPerEpoch
return baseReward, nil
}
// attestationDelta calculates the rewards and penalties of individual
// validator for voting the correct FFG source, FFG target, and head. It
// also calculates proposer delay inclusion and inactivity rewards
// and penalties. Individual rewards and penalties are returned in list.
//
// Note: we calculated adjusted quotient outside of base reward because it's too inefficient
// to repeat the same calculation for every validator versus just doing it once.
//
// Spec pseudocode definition:
// def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
// previous_epoch = get_previous_epoch(state)
// total_balance = get_total_active_balance(state)
// rewards = [Gwei(0) for _ in range(len(state.validators))]
// penalties = [Gwei(0) for _ in range(len(state.validators))]
// eligible_validator_indices = [
// ValidatorIndex(index) for index, v in enumerate(state.validators)
// if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
// ]
//
// # Micro-incentives for matching FFG source, FFG target, and head
// matching_source_attestations = get_matching_source_attestations(state, previous_epoch)
// matching_target_attestations = get_matching_target_attestations(state, previous_epoch)
// matching_head_attestations = get_matching_head_attestations(state, previous_epoch)
// for attestations in (matching_source_attestations, matching_target_attestations, matching_head_attestations):
// unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations)
// attesting_balance = get_total_balance(state, unslashed_attesting_indices)
// for index in eligible_validator_indices:
// if index in unslashed_attesting_indices:
// rewards[index] += get_base_reward(state, index) * attesting_balance // total_balance
// else:
// penalties[index] += get_base_reward(state, index)
//
// # Proposer and inclusion delay micro-rewards
// for index in get_unslashed_attesting_indices(state, matching_source_attestations):
// index = ValidatorIndex(index)
// attestation = min([
// a for a in matching_source_attestations
// if index in get_attesting_indices(state, a.data, a.aggregation_bits)
// ], key=lambda a: a.inclusion_delay)
// proposer_reward = Gwei(get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT)
// rewards[attestation.proposer_index] += proposer_reward
// max_attester_reward = get_base_reward(state, index) - proposer_reward
// rewards[index] += Gwei(max_attester_reward // attestation.inclusion_delay)
//
// # Inactivity penalty
// finality_delay = previous_epoch - state.finalized_checkpoint.epoch
// if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY:
// matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations)
// for index in eligible_validator_indices:
// index = ValidatorIndex(index)
// penalties[index] += Gwei(BASE_REWARDS_PER_EPOCH * get_base_reward(state, index))
// if index not in matching_target_attesting_indices:
// penalties[index] += Gwei(
// state.validators[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT
// )
//
// return rewards, penalties
func attestationDelta(state *pb.BeaconState) ([]uint64, []uint64, error) {
prevEpoch := helpers.PrevEpoch(state)
totalBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get total active balance")
}
rewards := make([]uint64, len(state.Validators))
penalties := make([]uint64, len(state.Validators))
// Filter out the list of eligible validator indices. The eligible validator
// has to be active or slashed but before withdrawn.
var eligible []uint64
for i, v := range state.Validators {
isActive := helpers.IsActiveValidator(v, prevEpoch)
isSlashed := v.Slashed && (prevEpoch+1 < v.WithdrawableEpoch)
if isActive || isSlashed {
eligible = append(eligible, uint64(i))
}
}
// Apply rewards and penalties for voting correct source target and head.
// Construct a attestations list contains source, target and head attestations.
atts, err := MatchAttestations(state, prevEpoch)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get source, target and head attestations")
}
var attsPackage [][]*pb.PendingAttestation
attsPackage = append(attsPackage, atts.source)
attsPackage = append(attsPackage, atts.Target)
attsPackage = append(attsPackage, atts.head)
// Cache the validators who voted correctly for source in a map
// to calculate earliest attestation rewards later.
attestersVotedSource := make(map[uint64]*pb.PendingAttestation)
// Compute rewards / penalties for each attestation in the list and update
// the rewards and penalties lists.
for i, matchAtt := range attsPackage {
indices, err := unslashedAttestingIndices(state, matchAtt)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get attestation indices")
}
attested := make(map[uint64]bool)
// Construct a map to look up validators that voted for source, target or head.
for _, index := range indices {
if i == 0 {
attestersVotedSource[index] = &pb.PendingAttestation{InclusionDelay: params.BeaconConfig().FarFutureEpoch}
}
attested[index] = true
}
attestedBalance := helpers.TotalBalance(state, indices)
// Update rewards and penalties to each eligible validator index.
for _, index := range eligible {
base, err := BaseReward(state, index)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get base reward")
}
if _, ok := attested[index]; ok {
rewards[index] += base * attestedBalance / totalBalance
} else {
penalties[index] += base
}
}
}
// For every index, filter the matching source attestation that correspond to the index,
// sort by inclusion delay and get the one that was included on chain first.
for _, att := range atts.source {
indices, err := helpers.AttestingIndices(state, att.Data, att.AggregationBits)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get attester indices")
}
for _, i := range indices {
if _, ok := attestersVotedSource[i]; ok {
if attestersVotedSource[i].InclusionDelay > att.InclusionDelay {
attestersVotedSource[i] = att
}
}
}
}
for i, a := range attestersVotedSource {
baseReward, err := BaseReward(state, i)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get proposer reward")
}
proposerReward := baseReward / params.BeaconConfig().ProposerRewardQuotient
rewards[a.ProposerIndex] += proposerReward
attesterReward := baseReward - proposerReward
rewards[i] += attesterReward / a.InclusionDelay
}
// Apply penalties for quadratic leaks.
// When epoch since finality exceeds inactivity penalty constant, the penalty gets increased
// based on the finality delay.
finalityDelay := prevEpoch - state.FinalizedCheckpoint.Epoch
if finalityDelay > params.BeaconConfig().MinEpochsToInactivityPenalty {
targetIndices, err := unslashedAttestingIndices(state, atts.Target)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get attestation indices")
}
attestedTarget := make(map[uint64]bool)
for _, index := range targetIndices {
attestedTarget[index] = true
}
for _, index := range eligible {
base, err := BaseReward(state, index)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get base reward")
}
penalties[index] += params.BeaconConfig().BaseRewardsPerEpoch * base
if _, ok := attestedTarget[index]; !ok {
penalties[index] += state.Validators[index].EffectiveBalance * finalityDelay /
params.BeaconConfig().InactivityPenaltyQuotient
}
}
}
return rewards, penalties, nil
}
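
The per-validator penalty in ProcessSlashings above is pure integer arithmetic over effective-balance increments, so small slashing totals can round down to zero. A worked example with illustrative Gwei values (none of these numbers come from the diff):

package main

import "fmt"

func main() {
	// Illustrative numbers only (Gwei): one 32 ETH validator, 1 ETH increment,
	// 100,000 ETH slashed in the window, 3.2M ETH of total active balance.
	var (
		effectiveBalance uint64 = 32_000_000_000
		increment        uint64 = 1_000_000_000
		totalSlashing    uint64 = 100_000_000_000_000
		totalBalance     uint64 = 3_200_000_000_000_000
	)

	minSlashing := totalSlashing * 3
	if minSlashing > totalBalance { // mathutil.Min(totalSlashing*3, totalBalance) in the diff
		minSlashing = totalBalance
	}
	penaltyNumerator := effectiveBalance / increment * minSlashing
	penalty := penaltyNumerator / totalBalance * increment

	fmt.Println(penalty) // 3000000000, i.e. a 3 ETH penalty for this validator
}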

View File

@@ -0,0 +1,23 @@
package epoch
import (
"testing"
fuzz "github.com/google/gofuzz"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
func TestFuzzFinalUpdates_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
base := &ethereum_beacon_p2p_v1.BeaconState{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(base)
s, err := beaconstate.InitializeFromProtoUnsafe(base)
if err != nil {
t.Fatal(err)
}
_, _ = ProcessFinalUpdates(s)
}
}

File diff suppressed because it is too large

View File

@@ -1,42 +0,0 @@
package epoch
import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)
// ComputeValidatorParticipation by matching validator attestations from the previous epoch,
// computing the attesting balance, and how much attested compared to the total balance.
func ComputeValidatorParticipation(state *pb.BeaconState, epoch uint64) (*ethpb.ValidatorParticipation, error) {
currentEpoch := helpers.CurrentEpoch(state)
previousEpoch := helpers.PrevEpoch(state)
if epoch != currentEpoch && epoch != previousEpoch {
return nil, fmt.Errorf(
"requested epoch is not previous epoch %d or current epoch %d, requested %d",
previousEpoch,
currentEpoch,
epoch,
)
}
atts, err := MatchAttestations(state, epoch)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve head attestations")
}
attestedBalances, err := AttestingBalance(state, atts.Target)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve attested balances")
}
totalBalances, err := helpers.TotalActiveBalance(state)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve total balances")
}
return &ethpb.ValidatorParticipation{
GlobalParticipationRate: float32(attestedBalances) / float32(totalBalances),
VotedEther: attestedBalances,
EligibleEther: totalBalances,
}, nil
}

View File

@@ -1,166 +0,0 @@
package epoch_test
import (
"reflect"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestComputeValidatorParticipation_PreviousEpoch(t *testing.T) {
params.OverrideBeaconConfig(params.MinimalSpecConfig())
e := uint64(1)
attestedBalance := uint64(20) * params.BeaconConfig().MaxEffectiveBalance
validatorCount := uint64(100)
validators := make([]*ethpb.Validator, validatorCount)
balances := make([]uint64, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
blockRoots := make([][]byte, 256)
for i := 0; i < len(blockRoots); i++ {
slot := bytesutil.Bytes32(uint64(i))
blockRoots[i] = slot
}
target := &ethpb.Checkpoint{
Epoch: e,
Root: blockRoots[0],
}
atts := []*pb.PendingAttestation{
{
Data: &ethpb.AttestationData{Target: target, Slot: 0},
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
{
Data: &ethpb.AttestationData{Target: target, Slot: 1},
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
{
Data: &ethpb.AttestationData{Target: target, Slot: 2},
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
{
Data: &ethpb.AttestationData{Target: target, Slot: 3},
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
{
Data: &ethpb.AttestationData{Target: target, Slot: 4},
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
}
s := &pb.BeaconState{
Slot: e*params.BeaconConfig().SlotsPerEpoch + 1,
Validators: validators,
Balances: balances,
BlockRoots: blockRoots,
Slashings: []uint64{0, 1e9, 1e9},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
PreviousEpochAttestations: atts,
FinalizedCheckpoint: &ethpb.Checkpoint{},
JustificationBits: bitfield.Bitvector4{0x00},
PreviousJustifiedCheckpoint: target,
}
res, err := epoch.ComputeValidatorParticipation(s, e-1)
if err != nil {
t.Fatal(err)
}
wanted := &ethpb.ValidatorParticipation{
VotedEther: attestedBalance,
EligibleEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
}
if !reflect.DeepEqual(res, wanted) {
t.Errorf("Incorrect validator participation, wanted %v received %v", wanted, res)
}
}
func TestComputeValidatorParticipation_CurrentEpoch(t *testing.T) {
params.OverrideBeaconConfig(params.MinimalSpecConfig())
e := uint64(1)
attestedBalance := uint64(16) * params.BeaconConfig().MaxEffectiveBalance
validatorCount := uint64(100)
validators := make([]*ethpb.Validator, validatorCount)
balances := make([]uint64, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
slot := e*params.BeaconConfig().SlotsPerEpoch + 4
blockRoots := make([][]byte, 256)
for i := 0; i < len(blockRoots); i++ {
slot := bytesutil.Bytes32(uint64(i))
blockRoots[i] = slot
}
target := &ethpb.Checkpoint{
Epoch: e,
Root: blockRoots[params.BeaconConfig().SlotsPerEpoch],
}
atts := []*pb.PendingAttestation{
{
Data: &ethpb.AttestationData{Target: target, Slot: slot - 4},
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
{
Data: &ethpb.AttestationData{Target: target, Slot: slot - 3},
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
{
Data: &ethpb.AttestationData{Target: target, Slot: slot - 2},
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
{
Data: &ethpb.AttestationData{Target: target, Slot: slot - 1},
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
}
s := &pb.BeaconState{
Slot: slot,
Validators: validators,
Balances: balances,
BlockRoots: blockRoots,
Slashings: []uint64{0, 1e9, 1e9},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
CurrentEpochAttestations: atts,
FinalizedCheckpoint: &ethpb.Checkpoint{},
JustificationBits: bitfield.Bitvector4{0x00},
CurrentJustifiedCheckpoint: target,
}
res, err := epoch.ComputeValidatorParticipation(s, e)
if err != nil {
t.Fatal(err)
}
wanted := &ethpb.ValidatorParticipation{
VotedEther: attestedBalance,
EligibleEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
}
if !reflect.DeepEqual(res, wanted) {
t.Errorf("Incorrect validator participation, wanted %v received %v", wanted, res)
}
}
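These deleted tests construct a raw *pb.BeaconState directly, whereas the precompute functions later in this diff accept the *stateTrie.BeaconState wrapper. If an equivalent test were rewritten against the wrapper-based API, it would wrap the proto state first; a minimal sketch, assuming the wrapper package exposes an InitializeFromProto constructor (an assumption, not shown in this diff):

import (
    "testing"

    stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
    pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

// wrapState is a hypothetical test helper: it wraps a raw proto state (as built in the
// tests above) into the *stateTrie.BeaconState that the reworked precompute code expects.
func wrapState(t *testing.T, raw *pb.BeaconState) *stateTrie.BeaconState {
    t.Helper()
    s, err := stateTrie.InitializeFromProto(raw) // assumed constructor on the wrapper package
    if err != nil {
        t.Fatal(err)
    }
    return s
}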


@@ -14,12 +14,14 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/mathutil:go_default_library",
"//shared/params:go_default_library",
"//shared/traceutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -37,12 +39,13 @@ go_test(
deps = [
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)
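Without +/- markers it is not obvious which deps are new here, but the hunk headers (two lines added to go_library, one to go_test) line up with the imports introduced in the precompute source shown next. In Go terms the likely mapping is (import paths copied from the block below):

import (
    stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" // "//beacon-chain/state:go_default_library"
    "github.com/prysmaticlabs/prysm/shared/attestationutil"       // "//shared/attestationutil:go_default_library"
)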


@@ -6,24 +6,32 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"go.opencensus.io/trace"
)
// Balances stores balance totals such as the previous/current total validator balances and attesting balances.
// It's used for metrics reporting.
var Balances *Balance
// ProcessAttestations processes the attestations in state and updates each validator's precomputed fields;
// it also tracks and updates the epoch attesting balances.
func ProcessAttestations(
ctx context.Context,
state *pb.BeaconState,
state *stateTrie.BeaconState,
vp []*Validator,
bp *Balance) ([]*Validator, *Balance, error) {
bp *Balance,
) ([]*Validator, *Balance, error) {
ctx, span := trace.StartSpan(ctx, "precomputeEpoch.ProcessAttestations")
defer span.End()
v := &Validator{}
var err error
for _, a := range append(state.PreviousEpochAttestations, state.CurrentEpochAttestations...) {
for _, a := range append(state.PreviousEpochAttestations(), state.CurrentEpochAttestations()...) {
v.IsCurrentEpochAttester, v.IsCurrentEpochTargetAttester, err = AttestedCurrentEpoch(state, a)
if err != nil {
traceutil.AnnotateError(span, err)
@@ -35,8 +43,11 @@ func ProcessAttestations(
return nil, nil, errors.Wrap(err, "could not check validator attested previous epoch")
}
// Get attested indices and update the pre computed fields for each attested validators.
indices, err := helpers.AttestingIndices(state, a.Data, a.AggregationBits)
committee, err := helpers.BeaconCommitteeFromState(state, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return nil, nil, err
}
indices, err := attestationutil.AttestingIndices(a.AggregationBits, committee)
if err != nil {
return nil, nil, err
}
@@ -44,12 +55,13 @@ func ProcessAttestations(
}
bp = UpdateBalance(vp, bp)
Balances = bp
return vp, bp, nil
}
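The change above swaps the single helpers.AttestingIndices call for a committee lookup followed by attestationutil.AttestingIndices. Isolated from the loop, the new path reads roughly as follows (the wrapper function and its []uint64 return type are assumptions; the two helper calls are exactly the ones in the hunk):

// attestersOf is a hypothetical wrapper around the two-step lookup used above.
func attestersOf(state *stateTrie.BeaconState, a *pb.PendingAttestation) ([]uint64, error) {
    // Resolve the committee assigned to this attestation's slot and committee index...
    committee, err := helpers.BeaconCommitteeFromState(state, a.Data.Slot, a.Data.CommitteeIndex)
    if err != nil {
        return nil, err
    }
    // ...then map the aggregation bits onto that committee to get the attesting validator indices.
    return attestationutil.AttestingIndices(a.AggregationBits, committee)
}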
// AttestedCurrentEpoch returns true if attestation `a` attested once in the current epoch and/or to the epoch boundary block.
func AttestedCurrentEpoch(s *pb.BeaconState, a *pb.PendingAttestation) (bool, bool, error) {
func AttestedCurrentEpoch(s *stateTrie.BeaconState, a *pb.PendingAttestation) (bool, bool, error) {
currentEpoch := helpers.CurrentEpoch(s)
var votedCurrentEpoch, votedTarget bool
// Did validator vote current epoch.
@@ -67,7 +79,7 @@ func AttestedCurrentEpoch(s *pb.BeaconState, a *pb.PendingAttestation) (bool, bo
}
// AttestedPrevEpoch returns true if attestation `a` attested once in the previous epoch, to the epoch boundary block, and/or to the same head.
func AttestedPrevEpoch(s *pb.BeaconState, a *pb.PendingAttestation) (bool, bool, bool, error) {
func AttestedPrevEpoch(s *stateTrie.BeaconState, a *pb.PendingAttestation) (bool, bool, bool, error) {
prevEpoch := helpers.PrevEpoch(s)
var votedPrevEpoch, votedTarget, votedHead bool
// Did validator vote previous epoch.
@@ -93,7 +105,7 @@ func AttestedPrevEpoch(s *pb.BeaconState, a *pb.PendingAttestation) (bool, bool,
}
// SameTarget returns true if attestation `a` attested to the same target block in state.
func SameTarget(state *pb.BeaconState, a *pb.PendingAttestation, e uint64) (bool, error) {
func SameTarget(state *stateTrie.BeaconState, a *pb.PendingAttestation, e uint64) (bool, error) {
r, err := helpers.BlockRoot(state, e)
if err != nil {
return false, err
@@ -105,7 +117,7 @@ func SameTarget(state *pb.BeaconState, a *pb.PendingAttestation, e uint64) (bool
}
// SameHead returns true if attestation `a` attested to the same block by attestation slot in state.
func SameHead(state *pb.BeaconState, a *pb.PendingAttestation) (bool, error) {
func SameHead(state *stateTrie.BeaconState, a *pb.PendingAttestation) (bool, error) {
r, err := helpers.BlockRootAtSlot(state, a.Data.Slot)
if err != nil {
return false, err
