Compare commits

...

74 Commits

Author SHA1 Message Date
Jim McDonald
4f8d9c59dd Replace default value for datadir (#5147) 2020-03-21 23:30:51 +08:00
Ivan Martinez
021d777b5e Add Anti-Flake test for E2E (#5149)
* Add antiflake test

* Respond to comments

* Comment

* Change issue num
2020-03-21 14:42:51 +08:00
terence tsao
dc3fb018fe Fix new state mgmt sync stuck in a loop (#5142) 2020-03-19 18:46:35 -07:00
Preston Van Loon
2ab4b86f9b Allow setting flags via yaml config file. (#4878) 2020-03-19 14:46:44 -07:00
Ivan Martinez
b30a089548 Add fetching validators by indices and public keys (#5141)
* update ethereumapis with patch
* Add indices and pubkeys to ListValidators request
* Add sorting
* Merge branch 'master' into validators-by-keys-indices
* Rename to index
* Merge branch 'validators-by-keys-indices' of https://github.com/prysmaticlabs/prysm into validators-by-keys-indices
* Add comment
2020-03-19 20:30:40 +00:00
Ivan Martinez
271938202e Improve validator logs (#5140)
* Imporve validator logging

* Update validator/client/validator_log.go
2020-03-19 13:34:50 -04:00
shayzluf
6fe814c5aa double proposal detector (#5120)
* proposal detector

* comment fixes

* comment fixes

* raul feedback

* fix todo

* gaz

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-19 17:29:35 +05:30
Preston Van Loon
a9f4d1d02d Attestation: Add a check for overflow (#5136)
* Add a check for overflow
* gofmt beacon-chain/cache/committee_test.go
2020-03-19 04:41:05 +00:00
Preston Van Loon
7c110e54f0 Add ssz marshal and unmarshal for most data structures (#5121)
* Add ssz marshal and unmarshal for most data structures
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Update ferran SSZ
* Update ferran's SSZ
* Merge refs/heads/master into ssz-stuff
* fix tests
* Merge branch 'ssz-stuff' of github.com:prysmaticlabs/prysm into ssz-stuff
* gaz
2020-03-19 02:39:23 +00:00
Raul Jordan
3043d4722f Attestation Dynamic Committee Subnets (#5123)
* initiate cache
* imports fix
* add in feature config flag
* utilize a dynamic set of subnets
* Merge branch 'master' into att-subnets
* add in feature config flag
* Merge branch 'att-subnets' of github.com:prysmaticlabs/prysm into att-subnets
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into att-subnets
* shift
* more changes
* gaz
* Update beacon-chain/rpc/validator/assignments.go
* Update beacon-chain/rpc/validator/assignments.go
* add flag
* Merge branch 'att-subnets' of https://github.com/prysmaticlabs/geth-sharding into att-subnets
* Merge branch 'master' into att-subnets
* Merge refs/heads/master into att-subnets
* no double flag
* Merge branch 'att-subnets' of github.com:prysmaticlabs/prysm into att-subnets
* amend committee ids to better name
* gaz
2020-03-18 23:13:37 +00:00
Ivan Martinez
c96c8b4aa3 Minor slasher fixes (#5129)
* Minor fixes

* Change log
2020-03-18 14:49:20 -05:00
Nishant Das
9f46000dba change to batch size (#5128)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-18 17:57:20 +08:00
Nishant Das
5450b3155e Integrate Field Tries into Current State (#5082)
* add new methods
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* new field trie
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* perform better copying
* fix bug
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* add support for variable length arrays
* get it running
* save all new progress
* more fixes
* more fixes
* more cleanup
* some more clean up
* new memory pool
* remove lock
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* use wrapper
* remove redundant methods
* cleanup
* cleanup
* remove unused method
* change field
* Update beacon-chain/state/types.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/types.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Apply suggestions from code review

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
2020-03-18 04:52:08 +00:00
Nishant Das
1bb12c3568 Add Field Trie to State (#5118)
* add new helpers

* make zerohash public

* remove unused method

* add more tests

* cleanup

* add in new tests

* fix all tests

* Apply suggestions from code review

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-18 10:09:31 +08:00
Ivan Martinez
1be8b3aa5e Slasher lag fix (#5124)
* Slasher fixes

* fix
2020-03-17 16:53:08 -05:00
Nishant Das
431762164e Add New State Utils (#5117)
* add new helpers

* make zerohash public

* remove unused method

* add more tests

* cleanup

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-17 14:25:17 -05:00
Victor Farazdagi
3ec2a0f9e0 Refactoring of initial sync (#5096)
* implements blocks queue

* refactors updateCounter method

* fixes deadlock on stop w/o start

* refactors updateSchedulerState

* more tests on schduler

* parseFetchResponse tests

* wraps up tests for blocks queue

* eod commit

* fixes data race in round robin

* revamps fetcher

* fixes race conditions + livelocks + deadlocks

* less verbose output

* fixes data race, by isolating critical sections

* minor refactoring: resolves blocking calls

* implements init-sync queue

* udpate fetch/send buffers in blocks fetcher

* blockState enum-like type alias

* refactors common code into releaseTicket()

* better gc

* linter

* minor fix to round robin

* moves original round robin into its own package

* adds enableInitSyncQueue flag

* fixes issue with init-sync service selection

* Update beacon-chain/sync/initial-sync/round_robin.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* initsyncv1 -> initsyncold

* adds span

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-17 12:27:18 -05:00
Victor Farazdagi
e96b45b29c asserts non-nil state (#5115) 2020-03-17 07:58:16 -07:00
terence tsao
e529f5b1d6 Part 1 of integrating new state mgmt to run time (#5108) 2020-03-16 12:07:07 -07:00
Victor Farazdagi
f18bada8c9 Init sync blocks queue (#5064)
* fixes data race, by isolating critical sections

* minor refactoring: resolves blocking calls

* implements init-sync queue

* udpate fetch/send buffers in blocks fetcher

* blockState enum-like type alias

* refactors common code into releaseTicket()

* better gc

* linter

* Update beacon-chain/sync/initial-sync/blocks_queue.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

* Update beacon-chain/sync/initial-sync/blocks_queue_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-16 18:21:36 +03:00
terence tsao
5657535c52 Fixed saveNewValidators error log (#5109) 2020-03-15 16:21:56 -07:00
Preston Van Loon
9da9fbdfba Fix reward and penality zero epoch bug (#5107)
* Fix reward and penality bug https://github.com/prysmaticlabs/prysm/issues/5105
* Merge branch 'master' into fuzz-fix-attestationDelta
2020-03-15 19:14:52 +00:00
Ivan Martinez
de2ec8e575 Update README for Slasher (#5106)
* Add readme
2020-03-15 18:46:21 +00:00
terence tsao
3660732f44 Resume new state mgmt (#5102) 2020-03-15 09:47:49 -07:00
Jim McDonald
8e6c16d416 Tweak validator logging (#5103)
* Tidy up logging
2020-03-15 15:46:22 +00:00
Ivan Martinez
8143cc36bc Add Slasher to E2E (#5061)
* Start adding "inject slashing into pool"

* Attempt at slashing

* Remove unneded

* Fix

* Begin adding slasher client to e2e

* Start slasher in e2e

* Get slashing detection working

* Get slashing evaluators working

* Progress on e2e

* Cleanup e2e

* Fix slasher e2e!

* lint

* Comment

* Fixes

* Improve accuracy of balance check

* REmove extra

* Remove extra

* Make more accurate
2020-03-15 01:09:23 -04:00
terence tsao
eeffa4fb30 New state getter (#5101)
* getter.go
* getter_test.go
* fixed a cold bug
* fmt gaz
* All tests pass
* Merge branch 'master' into new-state-getter
* Merge refs/heads/master into new-state-getter
2020-03-14 18:39:23 +00:00
Victor Farazdagi
1137403e4b Init sync pre queue (#5098)
* fixes data race, by isolating critical sections

* minor refactoring: resolves blocking calls

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-14 13:21:07 -05:00
terence tsao
f17818b1c0 New state setter (#5100)
* setter.go
* tests
* fmt
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-state-setter
* Merge refs/heads/master into new-state-setter
2020-03-14 16:31:21 +00:00
Nishant Das
691f0bba70 Minor Improvements (#5099)
* add fixes
2020-03-14 16:12:22 +00:00
terence tsao
b024191887 Get cold intermediate state with slot (#5097)
* loadColdIntermediateStateWithSlot

* Starting test

* Tests
2020-03-14 10:34:37 -05:00
Raul Jordan
1f87cb11fc Use Current Time Slot for Fetching Committees in RPC (#5094)
* use genesis time fetcher
* Merge branch 'master' into use-time-fetcher
* fix breaking
* Merge branch 'use-time-fetcher' of github.com:prysmaticlabs/prysm into use-time-fetcher
* list beacon committees tests fixed
* Merge branch 'master' into use-time-fetcher
* Merge branch 'master' into use-time-fetcher
* Merge refs/heads/master into use-time-fetcher
* Update beacon-chain/rpc/beacon/committees_test.go
2020-03-14 03:32:51 +00:00
Preston Van Loon
a0b142a26c Update to go 1.14 (#4947)
* Update to go 1.14
* Update with fix from https://github.com/bazelbuild/rules_go/pull/2388
* Merge branch 'master' into go-1.14
* Merge refs/heads/master into go-1.14
* Merge branch 'master' of github.com:prysmaticlabs/prysm into go-1.14
* Update gRPC
* Merge branch 'go-1.14' of github.com:prysmaticlabs/prysm into go-1.14
* Update golang.org/x/crypto
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Committing gc_goopts for issue repro
* Fix race and msan builds
* Merge branch 'master' of github.com:prysmaticlabs/prysm into go-1.14
* Merge refs/heads/master into go-1.14
* switch to LRU
* Merge branch 'go-1.14' of github.com:prysmaticlabs/prysm into go-1.14
* Fixed, but dont feel good about this
* Switch append ordering
2020-03-14 00:12:52 +00:00
shayzluf
035eaffd9d handle slashing from p2p (#5047)
* handle slashing from p2p

* gaz

* remove unneeded check

* add tests

* gaz  goimports

* text update

* Apply suggestions from code review

* add proto.equal

* fix test

* add context to call

* fix state bug found by terence

* fix tests add error type handling

* nil checks

* nil head state check

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-13 16:47:27 -05:00
Ivan Martinez
c41244ad34 Make spanner tests more thorough, fixes (#5093)
* Fix tests for spanner

* Start change to indexed atts

* Improve tests for spanner

* Fix tests

* Remove extra
2020-03-13 14:04:22 -04:00
terence tsao
c20d9ccbb3 Better attestation pool with map instead of expiration cache (#5087)
* update aggregated

* update block att

* update forkchoice att

* update unaggregated att

* gazelle

* Use copy

* Locks

* Genesis time

* Fixed tests

* Comments

* Fixed tests
2020-03-13 12:35:28 -05:00
Ivan Martinez
3380d14475 Include ejected indices in ActiveSetChanges endpoint (#5066)
* Fix ActiveSetChanges

* Include ejected indices in ActiveSetChanges RPC

* Fix test fails

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-13 12:23:19 -04:00
shayzluf
4f031d1988 fix slasher rpc disconnect on error (#5092) 2020-03-13 10:59:14 -05:00
Jim McDonald
02afb53ea4 Remove suprious error messages in wallet keymanager (#5090)
* Handle multiple passphrases

* Add tests
2020-03-13 05:26:10 -07:00
terence tsao
0974c02a00 Load cold state by root (#5086) 2020-03-12 15:27:55 -07:00
Raul Jordan
c6acf0a28c Use Target Epoch to Determine Indexed Attestations for Slasher (#5085)
* no more head fetchre

* no need for head fetcher

* nil checks
2020-03-12 17:02:12 -05:00
terence tsao
ed7ad4525e Method to retrieve block slot via block root (#5084)
* blockRootSlot

* Tests

* Gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-12 16:04:24 -05:00
terence tsao
7fcc07fb45 Save hot state (#5083)
* loadEpochBoundaryRoot
* Tests
* Span
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into save-hot-state
* Starting test
* Tests
* Merge refs/heads/master into save-hot-state
* Merge branch 'master' into save-hot-state
* Use copy
* Merge branch 'save-hot-state' of https://github.com/prysmaticlabs/prysm into save-hot-state
* Merge refs/heads/master into save-hot-state
2020-03-12 20:48:07 +00:00
shayzluf
f937713fe9 Broadcast slashing (#5073)
* add flag
* broadcast slashings
* Merge branch 'master' of github.com:prysmaticlabs/prysm into broadcast_slashing

# Conflicts:
#	beacon-chain/rpc/beacon/slashings_test.go
* fix tests
* goimports
* goimports
* Merge branch 'master' into broadcast_slashing
* Merge branch 'master' into broadcast_slashing
* Merge branch 'master' into broadcast_slashing
2020-03-12 20:29:23 +00:00
terence tsao
359e0abe1d Load epoch boundary root (#5079)
* loadEpochBoundaryRoot

* Tests

* Span

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-12 15:00:37 -05:00
tzapu
0704ba685a Return statuses on duties (#5069)
* try to return somethign for everything
* default to unknown
* debug
* moar debug
* move else to outer check
* working
* reorder imports
* cleanup
* fix TestGetDuties_NextEpoch_CantFindValidatorIdx
* Merge branch 'master' into return-statuses-on-duties
* Update validator/client/validator.go
* Merge branch 'master' into return-statuses-on-duties
* Merge branch 'master' into return-statuses-on-duties
2020-03-12 19:07:37 +00:00
shayzluf
0f95b797af Save slashings to slasher DB (#5081)
* fix tests add error type handling

* Update slasher/detection/detect_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>

* goimports

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-03-12 22:08:58 +05:30
terence tsao
43722e45f4 Save cold state (#5077) 2020-03-12 05:58:06 -07:00
terence tsao
ff4ed413a3 State migration from hot to cold (archived) (#5076)
* Starting

* Test

* Tests

* comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-11 21:27:16 -05:00
Raul Jordan
f1a42eb589 Verify Slashing Signatures Before Putting Into Blocks (#5071)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* verify slashing
* added in test for pending att slashing
* tests starting to apss
* sig failed verify regression test
* tests passing for ops pool
* Update beacon-chain/operations/slashings/service.go
* Merge refs/heads/master into verify-slash-sig
* verify on insert
* tests starting to pass
* all code paths fixed
* imports
* fix build
* fix rpc errors
* Merge refs/heads/master into verify-slash-sig
2020-03-12 01:16:55 +00:00
terence tsao
a90ffaba49 Archived point retrieval and recovery (#5075) 2020-03-11 17:38:30 -07:00
Raul Jordan
663d919b6f Include Bazel Genrule for Fast SSZ (#5070)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* included new ssz bzl rule
* Merge branch 'master' into add-in-starlark-rule
* Update tools/ssz.bzl

Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
2020-03-11 19:50:22 +00:00
Victor Farazdagi
7b30845c01 fixes races in blocks fetcher (#5068) 2020-03-11 14:21:41 +03:00
Victor Farazdagi
46eb228379 fixes data race in state.Slot (#5067)
* fixes data race in state/getters
2020-03-11 09:11:07 +00:00
Raul Jordan
8d3fc1ad3e Add in Slasher Metrics (#5060)
* added in slasher metrics
* Merge branch 'master' into slasher-metrics
* add in prom bolt metrics for slasher
* Merge branch 'slasher-metrics' of github.com:prysmaticlabs/prysm into slasher-metrics
* imports
* include all metrics
* no dup bolt collector
* Update slasher/detection/attestations/spanner.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* naming best practices for prom, thx Terence
* Merge branch 'slasher-metrics' of github.com:prysmaticlabs/prysm into slasher-metrics
2020-03-10 19:41:55 +00:00
Nishant Das
93195b762b Improve HTR of State (#5058)
* add cache
* Update beacon-chain/state/stateutil/blocks.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/stateutil/blocks.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/stateutil/hash_function.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Merge branch 'master' into improveHTR
* add back string casting
* fix imports
2020-03-10 16:26:54 +00:00
Jim McDonald
f0abf0d7d5 Reduce frequency of 'eth1 client not syncing' messages (#5057) 2020-03-10 09:51:41 -05:00
Nishant Das
9d27449212 Discovery Fixes (#5050)
* connect to dv5 bootnodes

* fix test

* change polling period

* ignore

* Update beacon-chain/p2p/service.go

* Update beacon-chain/p2p/service_test.go

* fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-09 19:53:37 -07:00
Preston Van Loon
edb6590764 Build herumi's BLS from source (#5055)
* Build herumi from source. Working so far on linux_amd64 for compile, but tests fail to initialize the curve appropriately

* Add copts to go_default_library

* llvm toolchain, still WIP

* Fixes, make llvm a config flag

* fix gazelle resolution

* comment

* comment

* update herumi to the v0.9.4 version

* Apply @nisdas patch from https://github.com/herumi/bls-eth-go-binary/pull/5
2020-03-09 21:22:41 -05:00
Raul Jordan
e77cf724b8 Better Nil Check in Slasher (#5053)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* some nil checks in slasher
2020-03-09 21:21:39 +00:00
Ivan Martinez
b633dfe880 Change detection and updating in Slasher to per attestation (#5043)
* Change span updates to update multiple validators at once

* Change detection to perform on multiple validators at once

* Fix minspan issue

* Fix indices

* Fix test

* Remove logs

* Remove more logs

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/detect.go

* nil check

* fix ununsed import

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-09 13:14:19 -05:00
Ivan Martinez
8334aac111 Batch saving of attestations from stream for slasher (#5041)
* Batch saving of attestations from stream for slasher

* Progress on test

* Fixes

* Fix test

* Rename

* Modify logs and timing

* Change

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-09 12:49:40 -05:00
Preston Van Loon
4c1e2ba196 Add prysm.sh script (#5042)
* Add prysm.sh script

* Add dist to gitignore

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-09 12:19:53 -05:00
terence tsao
25c13663d2 Add hot state by slot retrival (#5052)
* Update replay conditions

* loadHotStateBySlot

* Tests and gaz

* Tests
2020-03-09 11:22:45 -05:00
Jim McDonald
0c3af32274 Use BeaconBlockHeader in place of BeaconBlock (#5049) 2020-03-09 21:08:30 +08:00
shayzluf
01cb01a8f2 On eviction test fix (#5046) 2020-03-09 01:35:39 -04:00
Raul Jordan
0c9e99e04a Aggregate Attestations Before Streaming to Slasher (#5029)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* aggregate before streaming
* Merge branch 'agg-idx-atts' of github.com:prysmaticlabs/prysm into agg-idx-atts
* collect atts and increase buffer size
* fix test for func
* Merge refs/heads/master into agg-idx-atts
* Update beacon-chain/rpc/beacon/attestations.go
* Merge refs/heads/master into agg-idx-atts
* naming
* Merge branch 'agg-idx-atts' of github.com:prysmaticlabs/prysm into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* comment terence feedback
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Fix tests
2020-03-08 21:39:54 +00:00
Ivan Martinez
d4cd51f23e Change slasher cache to LRU cache (#5037)
* Change cache to LRU cache

* fixes

* REduce db usage

* Fix function name

* Merge issues

* Save on eviction

* Fixes

* Fix

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-08 17:11:59 -04:00
terence tsao
962fe8552d Compute state up to slot (#5035) 2020-03-08 21:41:24 +01:00
Raul Jordan
eddaea869b Prepare Slasher for Production (#5020)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* add a bit more better logging
* Empty db fix
* Improve logs
* Fix small issues in spanner, improvements
* Change costs back to 1 for now
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Change the cache back to 0
* Cleanup
* Merge branch 'master' into cleanup-slasher
* lint
* added in better spans
* log
* rem spanner in super intensive operation
* Merge branch 'master' into cleanup-slasher
* add todo
* Merge branch 'cleanup-slasher' of github.com:prysmaticlabs/prysm into cleanup-slasher
* Merge branch 'master' into cleanup-slasher
* Apply suggestions from code review
* no logrus
* Merge branch 'master' into cleanup-slasher
* Merge branch 'cleanup-slasher' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Remove spammy logs
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* gaz
* Rename func
* Add back needed code
* Add todo
* Add span to cache func
2020-03-08 17:56:43 +00:00
Nishant Das
300d072456 Add Config Change for Validator (#5038)
* add config for validator
* gaz
* Merge refs/heads/master into configureValidator
* Merge refs/heads/master into configureValidator
2020-03-08 06:45:36 +00:00
Nishant Das
ac1c92e241 Add Prometheus Service for Slasher (#5039)
* add prometheus service
* Update slasher/node/node.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Merge refs/heads/master into addPromServiceSlasher
2020-03-08 06:35:37 +00:00
terence tsao
2452c7403b Load hot state by root (#5034)
* Add loadHotStateByRoot

* Touchup loadHotStateByRoot

* Tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-08 14:24:57 +08:00
Preston Van Loon
b97e22107c Update rbe_autoconf (#5036)
* Update rbe_autoconf
* Update timestamps
2020-03-07 21:18:16 +00:00
285 changed files with 12114 additions and 3309 deletions

View File

@@ -35,6 +35,10 @@ build:release --workspace_status_command=./scripts/workspace_status.sh
build:release --stamp
build:release --compilation_mode=opt
# LLVM compiler for building C/C++ dependencies.
build:llvm --crosstool_top=@llvm_toolchain//:toolchain
build:llvm --define compiler=llvm
# multi-arch cross-compiling toolchain configs:
-----------------------------------------------
build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain

3
.gitignore vendored
View File

@@ -29,3 +29,6 @@ password.txt
# go dependancy
/go.mod
/go.sum
# Dist files
dist

137
WORKSPACE
View File

@@ -13,6 +13,28 @@ http_archive(
],
)
http_archive(
name = "com_grail_bazel_toolchain",
sha256 = "0bec89e35d8a141c87f28cfc506d6d344785c8eb2ff3a453140a1fe972ada79d",
strip_prefix = "bazel-toolchain-77a87103145f86f03f90475d19c2c8854398a444",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/77a87103145f86f03f90475d19c2c8854398a444.tar.gz"],
)
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
bazel_toolchain_dependencies()
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "9.0.0",
)
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
llvm_register_toolchains()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
configure_prysm_toolchains()
@@ -30,10 +52,10 @@ http_archive(
http_archive(
name = "bazel_gazelle",
sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f",
sha256 = "d8c45ee70ec39a57e7a05e5027c32b1576cc7f16d9dd37135b0eddde45cf1b10",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
],
)
@@ -53,10 +75,10 @@ http_archive(
http_archive(
name = "io_bazel_rules_go",
sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b",
sha256 = "e6a6c016b0663e06fa5fccf1cd8152eab8aa8180c583ec20c872f4f9953a7ac5",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.22.1/rules_go-v0.22.1.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.22.1/rules_go-v0.22.1.tar.gz",
],
)
@@ -70,7 +92,7 @@ git_repository(
name = "graknlabs_bazel_distribution",
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
remote = "https://github.com/graknlabs/bazel-distribution",
shallow_since = "1563544980 +0300",
shallow_since = "1569509514 +0300",
)
# Override default import in rules_go with special patch until
@@ -84,7 +106,7 @@ git_repository(
"//third_party:com_github_gogo_protobuf-equal.patch",
],
remote = "https://github.com/gogo/protobuf",
shallow_since = "1567336231 +0200",
shallow_since = "1571033717 +0200",
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
@@ -95,6 +117,10 @@ load(
container_repositories()
load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
bls_dependencies()
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
go_rules_dependencies()
@@ -202,13 +228,6 @@ http_archive(
url = "https://github.com/bazelbuild/buildtools/archive/bf564b4925ab5876a3f64d8b90fab7f769013d42.zip",
)
http_archive(
name = "com_github_herumi_bls_eth_go_binary",
sha256 = "b5628a95bd1e6ff84f73d87c134bb1e7e9c1a5a2a10b831867d9dad7d8defc3e",
strip_prefix = "bls-go-binary-8ee33d1a2e8ba8dcf0c3d0b459d75d42d163339d",
url = "https://github.com/nisdas/bls-go-binary/archive/8ee33d1a2e8ba8dcf0c3d0b459d75d42d163339d.zip",
)
load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")
buildifier_dependencies()
@@ -235,9 +254,9 @@ all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//v
http_archive(
name = "rules_foreign_cc",
sha256 = "450563dc2938f38566a59596bb30a3e905fbbcc35b3fff5a1791b122bc140465",
strip_prefix = "rules_foreign_cc-456425521973736ef346d93d3d6ba07d807047df",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/456425521973736ef346d93d3d6ba07d807047df.zip",
sha256 = "450563dc2938f38566a59596bb30a3e905fbbcc35b3fff5a1791b122bc140465",
)
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
@@ -273,12 +292,10 @@ go_repository(
name = "com_github_prysmaticlabs_go_ssz",
commit = "e24db4d9e9637cf88ee9e4a779e339a1686a84ee",
importpath = "github.com/prysmaticlabs/go-ssz",
)
go_repository(
name = "com_github_urfave_cli",
commit = "e6cf83ec39f6e1158ced1927d4ed14578fda8edb", # v1.21.0
importpath = "github.com/urfave/cli",
patch_args = ["-p1"],
patches = [
"//third_party:com_github_prysmaticlabs_go_ssz.patch",
],
)
go_repository(
@@ -609,8 +626,9 @@ go_repository(
go_repository(
name = "org_golang_x_crypto",
commit = "4def268fd1a49955bfb3dda92fe3db4f924f2285",
importpath = "golang.org/x/crypto",
sum = "h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=",
version = "v0.0.0-20200221231518-2aa609cf4a9d",
)
go_repository(
@@ -736,10 +754,12 @@ go_repository(
importpath = "github.com/matttproud/golang_protobuf_extensions",
)
go_repository(
name = "com_github_boltdb_bolt",
commit = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8", # v1.3.1
importpath = "github.com/boltdb/bolt",
http_archive(
name = "com_github_boltdb_bolt", # v1.3.1
build_file = "//third_party:boltdb/bolt.BUILD",
sha256 = "95dc5842dab55f7519b7002bbec648321277b5d6f0ad59aab509ee59313b6386",
strip_prefix = "bolt-2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8",
urls = ["https://github.com/boltdb/bolt/archive/2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8.tar.gz"],
)
go_repository(
@@ -1181,8 +1201,9 @@ go_ssz_dependencies()
go_repository(
name = "org_golang_google_grpc",
build_file_proto_mode = "disable",
commit = "1d89a3c832915b2314551c1d2a506874d62e53f7", # v1.22.0
importpath = "google.golang.org/grpc",
sum = "h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=",
version = "v1.27.1",
)
go_repository(
@@ -1273,7 +1294,7 @@ go_repository(
go_repository(
name = "com_github_prysmaticlabs_ethereumapis",
commit = "fca4d6f69bedb8615c2fc916d1a68f2692285caa",
commit = "25f267e475788bf8e5e01cb9d73cfd0c87020822",
importpath = "github.com/prysmaticlabs/ethereumapis",
patch_args = ["-p1"],
patches = [
@@ -1283,8 +1304,9 @@ go_repository(
go_repository(
name = "com_github_cloudflare_roughtime",
commit = "d41fdcee702eb3e5c3296288a453b9340184d37e",
importpath = "github.com/cloudflare/roughtime",
sum = "h1:jeSxE3fepJdhASERvBHI6RFkMhISv6Ir2JUybYLIVXs=",
version = "v0.0.0-20200205191924-a69ef1dab727",
)
go_repository(
@@ -1351,13 +1373,6 @@ go_repository(
version = "v0.10.5",
)
go_repository(
name = "in_gopkg_urfave_cli_v1",
importpath = "gopkg.in/urfave/cli.v1",
sum = "h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=",
version = "v1.20.0",
)
go_repository(
name = "com_github_naoina_go_stringutil",
importpath = "github.com/naoina/go-stringutil",
@@ -1571,3 +1586,51 @@ go_repository(
sum = "h1:J1gHJRNFEk7NdiaPQQqAvxEy+7hhCsVv3uzduWybmqY=",
version = "v0.0.0-20200302201340-8c54356e12c9",
)
go_repository(
name = "com_github_ferranbt_fastssz",
commit = "06015a5d84f9e4eefe2c21377ca678fa8f1a1b09",
importpath = "github.com/ferranbt/fastssz",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_cpuguy83_go_md2man_v2",
importpath = "github.com/cpuguy83/go-md2man/v2",
sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
version = "v2.0.0",
)
go_repository(
name = "com_github_russross_blackfriday_v2",
importpath = "github.com/russross/blackfriday/v2",
sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
version = "v2.0.1",
)
go_repository(
name = "com_github_shurcool_sanitized_anchor_name",
importpath = "github.com/shurcooL/sanitized_anchor_name",
sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
version = "v1.0.0",
)
go_repository(
name = "in_gopkg_urfave_cli_v2",
importpath = "gopkg.in/urfave/cli.v2",
sum = "h1:OvXt/p4cdwNl+mwcWMq/AxaKFkhdxcjx+tx+qf4EOvY=",
version = "v2.0.0-20190806201727-b62605953717",
)
go_repository(
name = "in_gopkg_urfave_cli_v1",
importpath = "gopkg.in/urfave/cli.v1",
sum = "h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=",
version = "v1.20.0",
)

View File

@@ -23,9 +23,10 @@ go_library(
"@com_github_ipfs_go_log//:go_default_library",
"@com_github_joonix_log//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@com_github_whyrusleeping_go_logging//:go_default_library",
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
"@in_gopkg_urfave_cli_v2//altsrc:go_default_library",
"@org_uber_go_automaxprocs//:go_default_library",
],
)
@@ -55,9 +56,10 @@ go_image(
"@com_github_ipfs_go_log//:go_default_library",
"@com_github_joonix_log//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@com_github_whyrusleeping_go_logging//:go_default_library",
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
"@in_gopkg_urfave_cli_v2//altsrc:go_default_library",
"@org_uber_go_automaxprocs//:go_default_library",
],
)
@@ -111,7 +113,10 @@ go_test(
size = "small",
srcs = ["usage_test.go"],
embed = [":go_default_library"],
deps = ["@com_github_urfave_cli//:go_default_library"],
deps = [
"//shared/featureconfig:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
],
)
[go_binary(

View File

@@ -115,6 +115,13 @@ go_test(
"service_norace_test.go",
],
embed = [":go_default_library"],
gc_goopts = [
# Go 1.14 enables checkptr by default when building with -race or -msan. There is a pointer
# issue in boltdb, so must disable checkptr at compile time. This flag can be removed once
# the project is migrated to etcd's version of boltdb and the issue has been fixed.
# See: https://github.com/etcd-io/bbolt/issues/187.
"-d=checkptr=0",
],
race = "on",
tags = ["race_on"],
deps = [

View File

@@ -186,7 +186,7 @@ func (s *Service) CurrentFork() *pb.Fork {
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
}
}
return s.headState().Fork()
return s.head.state.Fork()
}
// Participation returns the participation stats of a given epoch.

View File

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
@@ -58,9 +59,15 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// If the head state is not available, just return nil.
// There's nothing to cache
_, cached := s.initSyncState[headRoot]
if !cached && !s.beaconDB.HasState(ctx, headRoot) {
return nil
if featureconfig.Get().NewStateMgmt {
if !s.stateGen.StateSummaryExists(ctx, headRoot) {
return nil
}
} else {
_, cached := s.initSyncState[headRoot]
if !cached && !s.beaconDB.HasState(ctx, headRoot) {
return nil
}
}
// Get the new head block from DB.
@@ -74,15 +81,19 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// Get the new head state from cached state or DB.
var newHeadState *state.BeaconState
var exists bool
newHeadState, exists = s.initSyncState[headRoot]
if !exists {
newHeadState, err = s.beaconDB.State(ctx, headRoot)
if featureconfig.Get().NewStateMgmt {
newHeadState, err = s.stateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil {
return errors.New("cannot save nil head state")
} else {
var exists bool
newHeadState, exists = s.initSyncState[headRoot]
if !exists {
newHeadState, err = s.beaconDB.State(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
}
}
if newHeadState == nil {
@@ -108,19 +119,27 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock,
return errors.New("cannot save nil head block")
}
headState, err := s.beaconDB.State(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if headState == nil {
s.initSyncStateLock.RLock()
cachedHeadState, ok := s.initSyncState[r]
if ok {
headState = cachedHeadState
var headState *state.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
headState, err = s.stateGen.StateByRoot(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
} else {
headState, err = s.beaconDB.State(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if headState == nil {
s.initSyncStateLock.RLock()
cachedHeadState, ok := s.initSyncState[r]
if ok {
headState = cachedHeadState
}
s.initSyncStateLock.RUnlock()
}
s.initSyncStateLock.RUnlock()
}
if headState == nil {
return errors.New("nil head state")
}

View File

@@ -11,11 +11,12 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"gopkg.in/d4l3k/messagediff.v1"
messagediff "gopkg.in/d4l3k/messagediff.v1"
)
func TestFilterBoundaryCandidates_FilterCorrect(t *testing.T) {
@@ -209,7 +210,7 @@ func TestPruneNonBoundary_CanPrune(t *testing.T) {
func TestGenerateState_CorrectlyGenerated(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: db, StateGen: stategen.New(db)}
service, err := NewService(context.Background(), cfg)
if err != nil {
t.Fatal(err)

View File

@@ -30,30 +30,40 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
if cachedState != nil {
return cachedState, nil
}
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
var baseState *stateTrie.BeaconState
if featureconfig.Get().NewStateMgmt {
baseState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get head root")
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if bytes.Equal(headRoot, c.Root) {
st, err := s.HeadState(ctx)
} else {
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head state")
return nil, errors.Wrapf(err, "could not get head root")
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: st.Copy(),
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
if bytes.Equal(headRoot, c.Root) {
st, err := s.HeadState(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head state")
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: st.Copy(),
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return st, nil
}
return st, nil
}
baseState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
}
baseState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if baseState == nil {
return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
}
@@ -120,10 +130,18 @@ func (s *Service) verifyAttestation(ctx context.Context, baseState *stateTrie.Be
if err == blocks.ErrSigFailedToVerify {
// When sig fails to verify, check if there's a differences in committees due to
// different seeds.
aState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
var aState *stateTrie.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
aState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
return nil, err
}
aState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if err != nil {
return nil, err
}
epoch := helpers.SlotToEpoch(a.Data.Slot)
origSeed, err := helpers.Seed(baseState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {

View File

@@ -14,6 +14,7 @@ import (
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -88,8 +89,14 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
return nil, errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return nil, errors.Wrap(err, "could not save state")
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, root, postState); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
} else {
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
}
// Update justified check point.
@@ -105,18 +112,21 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
return nil, errors.Wrap(err, "could not save finalized checkpoint")
}
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return nil, errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
if !featureconfig.Get().NewStateMgmt {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return nil, errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
}
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
// Prune proto array fork choice nodes, all nodes before finalized check point will
// be pruned.
s.forkChoiceStore.Prune(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
s.forkChoiceStore.Prune(ctx, fRoot)
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
@@ -124,11 +134,21 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return nil, errors.Wrap(err, "could not save new justified")
}
if featureconfig.Get().NewStateMgmt {
finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
if err != nil {
return nil, err
}
if err := s.stateGen.MigrateToCold(ctx, finalizedState, fRoot); err != nil {
return nil, err
}
}
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return nil, errors.Wrap(err, "could not save finalized checkpoint")
return nil, errors.Wrap(err, "could not save new validators")
}
// Epoch boundary bookkeeping such as logging epoch summaries.
@@ -173,16 +193,18 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
b := signed.Block
s.initSyncStateLock.Lock()
defer s.initSyncStateLock.Unlock()
// Retrieve incoming block's pre state.
preState, err := s.verifyBlkPreState(ctx, b)
if err != nil {
return err
}
preStateValidatorCount := preState.NumValidators()
// Exit early if the pre state slot is higher than incoming block's slot.
if preState.Slot() >= signed.Block.Slot {
return nil
}
preStateValidatorCount := preState.NumValidators()
postState, err := state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
@@ -200,8 +222,16 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
s.initSyncState[root] = postState.Copy()
s.filterBoundaryCandidates(ctx, root, postState)
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, root, postState); err != nil {
return errors.Wrap(err, "could not save state")
}
} else {
s.initSyncStateLock.Lock()
defer s.initSyncStateLock.Unlock()
s.initSyncState[root] = postState.Copy()
s.filterBoundaryCandidates(ctx, root, postState)
}
if flags.Get().EnableArchive {
atts := signed.Block.Body.Attestations
@@ -219,17 +249,19 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
if !featureconfig.Get().NewStateMgmt {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
}
if err := s.saveInitState(ctx, postState); err != nil {
return errors.Wrap(err, "could not save init sync finalized state")
if err := s.saveInitState(ctx, postState); err != nil {
return errors.Wrap(err, "could not save init sync finalized state")
}
}
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
@@ -242,21 +274,35 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
if featureconfig.Get().NewStateMgmt {
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get state by root for migration")
}
if err := s.stateGen.MigrateToCold(ctx, finalizedState, fRoot); err != nil {
return errors.Wrap(err, "could not migrate with new finalized root")
}
}
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return errors.Wrap(err, "could not save finalized checkpoint")
return errors.Wrap(err, "could not save new validators")
}
numOfStates := len(s.boundaryRoots)
if numOfStates > initialSyncCacheSize {
if err = s.persistCachedStates(ctx, numOfStates); err != nil {
return err
if !featureconfig.Get().NewStateMgmt {
numOfStates := len(s.boundaryRoots)
if numOfStates > initialSyncCacheSize {
if err = s.persistCachedStates(ctx, numOfStates); err != nil {
return err
}
}
if len(s.initSyncState) > maxCacheSize {
s.pruneOldNonFinalizedStates()
}
}
if len(s.initSyncState) > maxCacheSize {
s.pruneOldNonFinalizedStates()
}
// Epoch boundary bookkeeping such as logging epoch summaries.
@@ -272,7 +318,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
return err
}
if helpers.IsEpochStart(postState.Slot()) {
if !featureconfig.Get().NewStateMgmt && helpers.IsEpochStart(postState.Slot()) {
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return errors.Wrap(err, "could not save state")
}

View File

@@ -58,6 +58,20 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "chainService.verifyBlkPreState")
defer span.End()
if featureconfig.Get().NewStateMgmt {
preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
if preState == nil {
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot)
}
return preState, nil // No copy needed from newly hydrated state gen object.
}
preState := s.initSyncState[bytesutil.ToBytes32(b.ParentRoot)]
var err error
if preState == nil {
@@ -258,24 +272,26 @@ func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconSt
s.justifiedCheckpt = cpt
}
justifiedRoot := bytesutil.ToBytes32(cpt.Root)
if !featureconfig.Get().NewStateMgmt {
justifiedRoot := bytesutil.ToBytes32(cpt.Root)
justifiedState := s.initSyncState[justifiedRoot]
// If justified state is nil, resume back to normal syncing process and save
// justified check point.
if justifiedState == nil {
if s.beaconDB.HasState(ctx, justifiedRoot) {
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
justifiedState := s.initSyncState[justifiedRoot]
// If justified state is nil, resume back to normal syncing process and save
// justified check point.
if justifiedState == nil {
if s.beaconDB.HasState(ctx, justifiedRoot) {
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
justifiedState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), justifiedRoot)
if err != nil {
log.Error(err)
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
}
justifiedState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), justifiedRoot)
if err != nil {
log.Error(err)
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
if err := s.beaconDB.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
return errors.Wrap(err, "could not save justified state")
}
}
if err := s.beaconDB.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
return errors.Wrap(err, "could not save justified state")
}
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}

View File

@@ -90,7 +90,13 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
ctx := context.Background()
atts := s.attPool.ForkchoiceAttestations()
for _, a := range atts {
hasState := s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root))
var hasState bool
if featureconfig.Get().NewStateMgmt {
hasState = s.stateGen.StateSummaryExists(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
} else {
hasState = s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root))
}
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue

View File

@@ -33,6 +33,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -73,6 +74,7 @@ type Service struct {
checkpointState *cache.CheckpointStateCache
checkpointStateLock sync.Mutex
stateGen *stategen.State
opsService *attestations.Service
}
// Config options for the service.
@@ -88,6 +90,8 @@ type Config struct {
MaxRoutines int64
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
OpsService *attestations.Service
StateGen *stategen.State
}
// NewService instantiates a new block service instance that will
@@ -111,7 +115,8 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
initSyncState: make(map[[32]byte]*stateTrie.BeaconState),
boundaryRoots: [][32]byte{},
checkpointState: cache.NewCheckpointStateCache(),
stateGen: stategen.New(cfg.BeaconDB),
opsService: cfg.OpsService,
stateGen: cfg.StateGen,
}, nil
}
@@ -130,10 +135,18 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if beaconState == nil {
beaconState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
if featureconfig.Get().NewStateMgmt {
beaconState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
}
} else {
beaconState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
}
}
}
@@ -144,6 +157,7 @@ func (s *Service) Start() {
if beaconState != nil {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
s.opsService.SetGenesisTime(beaconState.GenesisTime())
if err := s.initializeChainInfo(ctx); err != nil {
log.Fatalf("Could not set up chain info: %v", err)
}
@@ -260,6 +274,8 @@ func (s *Service) initializeBeaconChain(
return err
}
s.opsService.SetGenesisTime(genesisState.GenesisTime())
return nil
}
@@ -311,8 +327,21 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.B
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, genesisBlkRoot, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: 0,
Root: genesisBlkRoot[:],
BoundaryRoot: genesisBlkRoot[:],
}); err != nil {
return err
}
} else {
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
}
if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
@@ -388,11 +417,25 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
// would be the genesis state and block.
return errors.New("no finalized epoch in the database")
}
finalizedState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
var finalizedState *stateTrie.BeaconState
if featureconfig.Get().NewStateMgmt {
finalizedRoot = s.beaconDB.LastArchivedIndexRoot(ctx)
finalizedState, err = s.stateGen.Resume(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
if finalizedRoot == params.BeaconConfig().ZeroHash {
finalizedRoot = bytesutil.ToBytes32(finalized.Root)
}
} else {
finalizedState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
}
finalizedBlock, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(finalized.Root))
finalizedBlock, err := s.beaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
}
@@ -400,8 +443,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
if finalizedState == nil || finalizedBlock == nil {
return errors.New("finalized state and block can't be nil")
}
s.setHead(bytesutil.ToBytes32(finalized.Root), finalizedBlock, finalizedState)
s.setHead(finalizedRoot, finalizedBlock, finalizedState)
return nil
}

View File

@@ -133,6 +133,10 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
t.Fatalf("unable to set up web3 service: %v", err)
}
opsService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
if err != nil {
t.Fatal(err)
}
cfg := &Config{
BeaconBlockBuf: 0,
BeaconDB: beaconDB,
@@ -142,10 +146,12 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
StateNotifier: &mockBeaconNode{},
AttPool: attestations.NewPool(),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
OpsService: opsService,
}
if err != nil {
t.Fatalf("could not register blockchain service: %v", err)
}
chainService, err := NewService(ctx, cfg)
if err != nil {
t.Fatalf("unable to setup chain service: %v", err)

View File

@@ -219,7 +219,7 @@ func (ms *ChainService) GenesisTime() time.Time {
// CurrentSlot mocks the same method in the chain service.
func (ms *ChainService) CurrentSlot() uint64 {
return 0
return ms.HeadSlot()
}
// Participation mocks the same method in the chain service.

View File

@@ -6,6 +6,7 @@ go_library(
"attestation_data.go",
"checkpoint_state.go",
"committee.go",
"committee_ids.go",
"common.go",
"eth1_data.go",
"hot_state_cache.go",

View File

@@ -96,7 +96,7 @@ func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]
indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
start, end := startEndIndices(item, indexOffSet)
if int(end) > len(item.ShuffledIndices) {
if int(end) > len(item.ShuffledIndices) || end < start {
return nil, errors.New("requested index out of bound")
}

44
beacon-chain/cache/committee_ids.go vendored Normal file
View File

@@ -0,0 +1,44 @@
package cache
import (
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
// committeeIDs caches attestation committee indices keyed by epoch.
// The underlying lru.Cache maps epoch (uint64) -> committee indices
// ([]uint64); the RWMutex serializes the read-modify-write in AddIDs.
type committeeIDs struct {
	cache *lru.Cache
	lock  sync.RWMutex
}

// CommitteeIDs is the shared, process-wide cache of attestation committee
// indices per epoch.
var CommitteeIDs = newCommitteeIDs()
// newCommitteeIDs constructs the committee-ID cache with capacity for eight
// epochs. lru.New only fails for a non-positive size, so an error here is a
// programmer bug and panicking during package initialization is deliberate.
func newCommitteeIDs() *committeeIDs {
	c, err := lru.New(8)
	if err != nil {
		panic(err)
	}
	return &committeeIDs{cache: c}
}
// AddIDs records committee indices for the given epoch, merging them with any
// indices already cached for that epoch (set union, so duplicates collapse).
func (t *committeeIDs) AddIDs(indices []uint64, epoch uint64) {
	t.lock.Lock()
	defer t.lock.Unlock()

	merged := indices
	// Fold in the previously cached indices, if any, before storing.
	if prev, ok := t.cache.Get(epoch); ok {
		merged = sliceutil.UnionUint64(append(merged, prev.([]uint64)...))
	}
	t.cache.Add(epoch, merged)
}
// GetIDs from the cache for attestation committees by epoch.
func (t *committeeIDs) GetIDs(epoch uint64) []uint64 {
val, exists := t.cache.Get(epoch)
if !exists {
return []uint64{}
}
return val.([]uint64)
}

View File

@@ -1,6 +1,7 @@
package cache
import (
"math"
"reflect"
"sort"
"strconv"
@@ -172,3 +173,19 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
t.Error("incorrect key received for slot 199")
}
}
func TestCommitteeCacheOutOfRange(t *testing.T) {
cache := NewCommitteesCache()
seed := bytesutil.ToBytes32([]byte("foo"))
cache.CommitteeCache.Add(&Committees{
CommitteeCount: 1,
Seed: seed,
ShuffledIndices: []uint64{0},
SortedIndices: []uint64{},
ProposerIndices: []uint64{},
})
_, err := cache.Committee(0, seed, math.MaxUint64) // Overflow!
if err == nil {
t.Fatal("Did not fail as expected")
}
}

View File

@@ -170,7 +170,7 @@ func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, err
// a callback is used here to apply the following actions to all validators
// below equally.
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) error {
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) (bool, error) {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
minSlashing := mathutil.Min(totalSlashing*3, totalBalance)
@@ -178,10 +178,11 @@ func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, err
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty := penaltyNumerator / totalBalance * increment
if err := helpers.DecreaseBalance(state, uint64(idx), penalty); err != nil {
return err
return false, err
}
return true, nil
}
return nil
return false, nil
})
return state, err
}
@@ -235,12 +236,12 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState,
bals := state.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val *ethpb.Validator) error {
validatorFunc := func(idx int, val *ethpb.Validator) (bool, error) {
if val == nil {
return fmt.Errorf("validator %d is nil in state", idx)
return false, fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
return fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
return false, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
}
balance := bals[idx]
halfInc := params.BeaconConfig().EffectiveBalanceIncrement / 2
@@ -249,8 +250,9 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState,
if val.EffectiveBalance > balance-balance%params.BeaconConfig().EffectiveBalanceIncrement {
val.EffectiveBalance = balance - balance%params.BeaconConfig().EffectiveBalanceIncrement
}
return true, nil
}
return nil
return false, nil
}
if err := state.ApplyToEveryValidator(validatorFunc); err != nil {

View File

@@ -72,7 +72,7 @@ func attestationDeltas(state *stateTrie.BeaconState, bp *Balance, vp []*Validato
func attestationDelta(state *stateTrie.BeaconState, bp *Balance, v *Validator) (uint64, uint64) {
eligible := v.IsActivePrevEpoch || (v.IsSlashed && !v.IsWithdrawableCurrentEpoch)
if !eligible {
if !eligible || bp.CurrentEpoch == 0 {
return 0, 0
}

View File

@@ -146,6 +146,47 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
}
}
func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
e := params.BeaconConfig().SlotsPerEpoch
validatorCount := uint64(2048)
base := buildState(e+2, validatorCount)
atts := make([]*pb.PendingAttestation, 3)
var emptyRoot [32]byte
for i := 0; i < len(atts); i++ {
atts[i] = &pb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Root: emptyRoot[:],
},
Source: &ethpb.Checkpoint{
Root: emptyRoot[:],
},
BeaconBlockRoot: emptyRoot[:],
},
AggregationBits: bitfield.Bitlist{0xC0, 0xC0, 0xC0, 0xC0, 0x01},
InclusionDelay: 1,
}
}
base.PreviousEpochAttestations = atts
state, err := state.InitializeFromProto(base)
if err != nil {
t.Fatal(err)
}
vp, bp := New(context.Background(), state)
vp, bp, err = ProcessAttestations(context.Background(), state, vp, bp)
if err != nil {
t.Fatal(err)
}
bp.CurrentEpoch = 0 // Could cause a divide by zero panic.
_, _, err = attestationDeltas(state, bp, vp)
if err != nil {
t.Fatal(err)
}
}
func buildState(slot uint64, validatorCount uint64) *pb.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ {

View File

@@ -21,7 +21,7 @@ func ProcessSlashingsPrecompute(state *stateTrie.BeaconState, p *Balance) error
totalSlashing += slashing
}
validatorFunc := func(idx int, val *ethpb.Validator) error {
validatorFunc := func(idx int, val *ethpb.Validator) (bool, error) {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
minSlashing := mathutil.Min(totalSlashing*3, p.CurrentEpoch)
@@ -29,10 +29,11 @@ func ProcessSlashingsPrecompute(state *stateTrie.BeaconState, p *Balance) error
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty := penaltyNumerator / p.CurrentEpoch * increment
if err := helpers.DecreaseBalance(state, uint64(idx), penalty); err != nil {
return err
return false, err
}
return true, nil
}
return nil
return false, nil
}
return state.ApplyToEveryValidator(validatorFunc)

View File

@@ -34,8 +34,8 @@ go_test(
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)
@@ -68,8 +68,8 @@ go_test(
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)

View File

@@ -166,7 +166,7 @@ func SlashValidator(state *stateTrie.BeaconState, slashedIdx uint64, whistleBlow
return state, nil
}
// ActivatedValidatorIndices determines the indices activated during the current epoch.
// ActivatedValidatorIndices determines the indices activated during the given epoch.
func ActivatedValidatorIndices(epoch uint64, validators []*ethpb.Validator) []uint64 {
activations := make([]uint64, 0)
delayedActivationEpoch := helpers.ActivationExitEpoch(epoch)
@@ -179,7 +179,7 @@ func ActivatedValidatorIndices(epoch uint64, validators []*ethpb.Validator) []ui
return activations
}
// SlashedValidatorIndices determines the indices slashed during the current epoch.
// SlashedValidatorIndices determines the indices slashed during the given epoch.
func SlashedValidatorIndices(epoch uint64, validators []*ethpb.Validator) []uint64 {
slashed := make([]uint64, 0)
for i := 0; i < len(validators); i++ {
@@ -225,9 +225,51 @@ func ExitedValidatorIndices(epoch uint64, validators []*ethpb.Validator, activeV
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
exited = append(exited, uint64(i))
}
}
return exited, nil
}
// EjectedValidatorIndices determines the indices ejected during the given epoch.
func EjectedValidatorIndices(epoch uint64, validators []*ethpb.Validator, activeValidatorCount uint64) ([]uint64, error) {
ejected := make([]uint64, 0)
exitEpochs := make([]uint64, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch)
}
}
exitQueueEpoch := uint64(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}
// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := 0
for _, val := range validators {
if val.ExitEpoch == exitQueueEpoch {
exitQueueChurn++
}
}
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
if err != nil {
return nil, errors.Wrap(err, "could not get churn limit")
}
if churn < uint64(exitQueueChurn) {
exitQueueEpoch++
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
ejected = append(ejected, uint64(i))
}
}
return ejected, nil
}

View File

@@ -344,14 +344,17 @@ func TestExitedValidatorIndices(t *testing.T) {
Slot: helpers.SlotToEpoch(1),
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
@@ -364,6 +367,7 @@ func TestExitedValidatorIndices(t *testing.T) {
Slot: helpers.SlotToEpoch(1),
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
@@ -376,6 +380,7 @@ func TestExitedValidatorIndices(t *testing.T) {
Slot: helpers.SlotToEpoch(1),
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},

View File

@@ -55,6 +55,8 @@ type ReadOnlyDatabase interface {
ArchivedPointState(ctx context.Context, index uint64) (*state.BeaconState, error)
ArchivedPointRoot(ctx context.Context, index uint64) [32]byte
HasArchivedPoint(ctx context.Context, index uint64) bool
LastArchivedIndexRoot(ctx context.Context) [32]byte
LastArchivedIndexState(ctx context.Context) (*state.BeaconState, error)
// Deposit contract related handlers.
DepositContractAddress(ctx context.Context) ([]byte, error)
// Powchain operations.
@@ -104,6 +106,7 @@ type NoHeadAccessDatabase interface {
SaveArchivedValidatorParticipation(ctx context.Context, epoch uint64, part *eth.ValidatorParticipation) error
SaveArchivedPointState(ctx context.Context, state *state.BeaconState, index uint64) error
SaveArchivedPointRoot(ctx context.Context, blockRoot [32]byte, index uint64) error
SaveLastArchivedIndex(ctx context.Context, index uint64) error
// Deposit contract related handlers.
SaveDepositContractAddress(ctx context.Context, addr common.Address) error
// Powchain operations.

View File

@@ -352,3 +352,18 @@ func (e Exporter) ArchivedPointRoot(ctx context.Context, index uint64) [32]byte
func (e Exporter) HasArchivedPoint(ctx context.Context, index uint64) bool {
return e.db.HasArchivedPoint(ctx, index)
}
// LastArchivedIndexRoot -- passthrough
func (e Exporter) LastArchivedIndexRoot(ctx context.Context) [32]byte {
return e.db.LastArchivedIndexRoot(ctx)
}
// LastArchivedIndexState -- passthrough
func (e Exporter) LastArchivedIndexState(ctx context.Context) (*state.BeaconState, error) {
return e.db.LastArchivedIndexState(ctx)
}
// SaveLastArchivedIndex -- passthrough
func (e Exporter) SaveLastArchivedIndex(ctx context.Context, index uint64) error {
return e.db.SaveLastArchivedIndex(ctx, index)
}

View File

@@ -32,6 +32,7 @@ go_library(
"//proto/beacon/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"//shared/traceutil:go_default_library",

View File

@@ -41,6 +41,67 @@ func (k *Store) SaveArchivedPointRoot(ctx context.Context, blockRoot [32]byte, i
})
}
// SaveLastArchivedIndex to the db.
func (k *Store) SaveLastArchivedIndex(ctx context.Context, index uint64) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(archivedIndexRootBucket)
return bucket.Put(lastArchivedIndexKey, uint64ToBytes(index))
})
}
// LastArchivedIndexRoot from the db.
func (k *Store) LastArchivedIndexRoot(ctx context.Context) [32]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedIndexRoot")
defer span.End()
var blockRoot []byte
// #nosec G104. Always returns nil.
k.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(archivedIndexRootBucket)
lastArchivedIndex := bucket.Get(lastArchivedIndexKey)
if lastArchivedIndex == nil {
return nil
}
blockRoot = bucket.Get(lastArchivedIndex)
return nil
})
return bytesutil.ToBytes32(blockRoot)
}
// LastArchivedIndexState from the db.
func (k *Store) LastArchivedIndexState(ctx context.Context) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedIndexState")
defer span.End()
var s *pb.BeaconState
err := k.db.View(func(tx *bolt.Tx) error {
indexRootBucket := tx.Bucket(archivedIndexRootBucket)
lastArchivedIndex := indexRootBucket.Get(lastArchivedIndexKey)
if lastArchivedIndex == nil {
return nil
}
indexStateBucket := tx.Bucket(archivedIndexStateBucket)
enc := indexStateBucket.Get(lastArchivedIndex)
if enc == nil {
return nil
}
var err error
s, err = createState(enc)
return err
})
if err != nil {
return nil, err
}
if s == nil {
return nil, nil
}
return state.InitializeFromProtoUnsafe(s)
}
// ArchivedPointState returns the state of an archived point from the DB.
// This is essential for cold state management and to restore a cold state.
func (k *Store) ArchivedPointState(ctx context.Context, index uint64) (*state.BeaconState, error) {

View File

@@ -5,6 +5,7 @@ import (
"reflect"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
@@ -90,3 +91,62 @@ func TestArchivedPointIndexHas_CanRetrieve(t *testing.T) {
t.Fatal("Should have an archived point")
}
}
func TestLastArchivedPoint_CanRetrieve(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
slot1 := uint64(100)
s1 := &pb.BeaconState{Slot: slot1}
st1, err := state.InitializeFromProto(s1)
if err != nil {
t.Fatal(err)
}
if err := db.SaveArchivedPointState(ctx, st1, 1); err != nil {
t.Fatal(err)
}
if err := db.SaveArchivedPointRoot(ctx, [32]byte{'A'}, 1); err != nil {
t.Fatal(err)
}
slot2 := uint64(200)
s2 := &pb.BeaconState{Slot: slot2}
st2, err := state.InitializeFromProto(s2)
if err != nil {
t.Fatal(err)
}
if err := db.SaveArchivedPointState(ctx, st2, 3); err != nil {
t.Fatal(err)
}
if err := db.SaveArchivedPointRoot(ctx, [32]byte{'B'}, 3); err != nil {
t.Fatal(err)
}
if err := db.SaveLastArchivedIndex(ctx, 1); err != nil {
t.Fatal(err)
}
lastSaved, err := db.LastArchivedIndexState(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(lastSaved.InnerStateUnsafe(), st1.InnerStateUnsafe()) {
t.Error("Did not get wanted saved state")
}
if db.LastArchivedIndexRoot(ctx) != [32]byte{'A'} {
t.Error("Did not get wanted root")
}
if err := db.SaveLastArchivedIndex(ctx, 3); err != nil {
t.Fatal(err)
}
lastSaved, err = db.LastArchivedIndexState(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(lastSaved.InnerStateUnsafe(), st2.InnerStateUnsafe()) {
t.Error("Did not get wanted saved state")
}
if db.LastArchivedIndexRoot(ctx) != [32]byte{'B'} {
t.Error("Did not get wanted root")
}
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
log "github.com/sirupsen/logrus"
@@ -320,9 +321,16 @@ func (k *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
return k.db.Update(func(tx *bolt.Tx) error {
if tx.Bucket(stateBucket).Get(blockRoot[:]) == nil {
return errors.New("no state found with head block root")
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil {
return errors.New("no state summary found with head block root")
}
} else {
if tx.Bucket(stateBucket).Get(blockRoot[:]) == nil {
return errors.New("no state found with head block root")
}
}
bucket := tx.Bucket(blocksBucket)
return bucket.Put(headBlockRootKey, blockRoot[:])
})

View File

@@ -6,6 +6,7 @@ import (
"github.com/boltdb/bolt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"go.opencensus.io/trace"
)
@@ -63,12 +64,18 @@ func (k *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.C
}
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the justified beaconState is missing. This may be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) == nil {
return errors.New("missing state summary for finalized root")
}
} else {
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the justified beaconState is missing. This may be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
}
}
return bucket.Put(justifiedCheckpointKey, enc)
})
@@ -85,12 +92,18 @@ func (k *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
}
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the finalized beaconState is missing. This would be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) == nil {
return errors.New("missing state summary for finalized root")
}
} else {
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the finalized beaconState is missing. This would be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
}
}
if err := bucket.Put(finalizedCheckpointKey, enc); err != nil {

View File

@@ -42,6 +42,7 @@ var (
justifiedCheckpointKey = []byte("justified-checkpoint")
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastArchivedIndexKey = []byte("last-archived")
// Migration bucket.
migrationBucket = []byte("migrations")

View File

@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"go.opencensus.io/trace"
)
@@ -184,9 +185,15 @@ func (k *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(blocksBucket)
headBlkRoot := bkt.Get(headBlockRootKey)
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil {
return errors.New("cannot delete state without state summary")
}
} else {
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
}
}
bkt = tx.Bucket(stateBucket)
@@ -229,9 +236,15 @@ func (k *Store) DeleteStates(ctx context.Context, blockRoots [][32]byte) error {
for blockRoot, _ := c.First(); blockRoot != nil; blockRoot, _ = c.Next() {
if rootMap[bytesutil.ToBytes32(blockRoot)] {
// Safe guard against deleting genesis, finalized, or head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("could not delete genesis, finalized, or head state")
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil {
return errors.New("cannot delete state without state summary")
}
} else {
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
}
}
if err := c.Delete(); err != nil {
return err

View File

@@ -13,6 +13,6 @@ go_library(
deps = [
"//shared/cmd:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
],
)

View File

@@ -1,31 +1,31 @@
package flags
import (
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
var (
// ArchiveEnableFlag defines whether or not the beacon chain should archive
// historical blocks, attestations, and validator set changes.
ArchiveEnableFlag = cli.BoolFlag{
ArchiveEnableFlag = &cli.BoolFlag{
Name: "archive",
Usage: "Whether or not beacon chain should archive historical data including blocks, attestations, and validator set changes",
}
// ArchiveValidatorSetChangesFlag defines whether or not the beacon chain should archive
// historical validator set changes in persistent storage.
ArchiveValidatorSetChangesFlag = cli.BoolFlag{
ArchiveValidatorSetChangesFlag = &cli.BoolFlag{
Name: "archive-validator-set-changes",
Usage: "Whether or not beacon chain should archive historical validator set changes",
}
// ArchiveBlocksFlag defines whether or not the beacon chain should archive
// historical block data in persistent storage.
ArchiveBlocksFlag = cli.BoolFlag{
ArchiveBlocksFlag = &cli.BoolFlag{
Name: "archive-blocks",
Usage: "Whether or not beacon chain should archive historical blocks",
}
// ArchiveAttestationsFlag defines whether or not the beacon chain should archive
// historical attestation data in persistent storage.
ArchiveAttestationsFlag = cli.BoolFlag{
ArchiveAttestationsFlag = &cli.BoolFlag{
Name: "archive-attestations",
Usage: "Whether or not beacon chain should archive historical blocks",
}

View File

@@ -1,100 +1,100 @@
package flags
import (
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
var (
// HTTPWeb3ProviderFlag provides an HTTP access endpoint to an ETH 1.0 RPC.
HTTPWeb3ProviderFlag = cli.StringFlag{
HTTPWeb3ProviderFlag = &cli.StringFlag{
Name: "http-web3provider",
Usage: "A mainchain web3 provider string http endpoint",
Value: "https://goerli.prylabs.net",
}
// Web3ProviderFlag defines a flag for a mainchain RPC endpoint.
Web3ProviderFlag = cli.StringFlag{
Web3ProviderFlag = &cli.StringFlag{
Name: "web3provider",
Usage: "A mainchain web3 provider string endpoint. Can either be an IPC file string or a WebSocket endpoint. Cannot be an HTTP endpoint.",
Value: "wss://goerli.prylabs.net/websocket",
}
// DepositContractFlag defines a flag for the deposit contract address.
DepositContractFlag = cli.StringFlag{
DepositContractFlag = &cli.StringFlag{
Name: "deposit-contract",
Usage: "Deposit contract address. Beacon chain node will listen logs coming from the deposit contract to determine when validator is eligible to participate.",
Value: "0x4689a3C63CE249355C8a573B5974db21D2d1b8Ef",
}
// RPCHost defines the host on which the RPC server should listen.
RPCHost = cli.StringFlag{
RPCHost = &cli.StringFlag{
Name: "rpc-host",
Usage: "Host on which the RPC server should listen",
Value: "0.0.0.0",
}
// RPCPort defines a beacon node RPC port to open.
RPCPort = cli.IntFlag{
RPCPort = &cli.IntFlag{
Name: "rpc-port",
Usage: "RPC port exposed by a beacon node",
Value: 4000,
}
// RPCMaxPageSize defines the maximum numbers per page returned in RPC responses from this
// beacon node (default: 500).
RPCMaxPageSize = cli.IntFlag{
RPCMaxPageSize = &cli.IntFlag{
Name: "rpc-max-page-size",
Usage: "Max number of items returned per page in RPC responses for paginated endpoints (default: 500)",
Usage: "Max number of items returned per page in RPC responses for paginated endpoints.",
Value: 500,
}
// CertFlag defines a flag for the node's TLS certificate.
CertFlag = cli.StringFlag{
CertFlag = &cli.StringFlag{
Name: "tls-cert",
Usage: "Certificate for secure gRPC. Pass this and the tls-key flag in order to use gRPC securely.",
}
// KeyFlag defines a flag for the node's TLS key.
KeyFlag = cli.StringFlag{
KeyFlag = &cli.StringFlag{
Name: "tls-key",
Usage: "Key for secure gRPC. Pass this and the tls-cert flag in order to use gRPC securely.",
}
// GRPCGatewayPort enables a gRPC gateway to be exposed for Prysm.
GRPCGatewayPort = cli.IntFlag{
GRPCGatewayPort = &cli.IntFlag{
Name: "grpc-gateway-port",
Usage: "Enable gRPC gateway for JSON requests",
}
// MinSyncPeers specifies the required number of successful peer handshakes in order
// to start syncing with external peers.
MinSyncPeers = cli.IntFlag{
MinSyncPeers = &cli.IntFlag{
Name: "min-sync-peers",
Usage: "The required number of valid peers to connect with before syncing.",
Value: 3,
}
// ContractDeploymentBlock is the block in which the eth1 deposit contract was deployed.
ContractDeploymentBlock = cli.IntFlag{
ContractDeploymentBlock = &cli.IntFlag{
Name: "contract-deployment-block",
Usage: "The eth1 block in which the deposit contract was deployed.",
Value: 1960177,
}
// SetGCPercent is the percentage of current live allocations at which the garbage collector is to run.
SetGCPercent = cli.IntFlag{
SetGCPercent = &cli.IntFlag{
Name: "gc-percent",
Usage: "The percentage of freshly allocated data to live data on which the gc will be run again.",
Value: 100,
}
// UnsafeSync starts the beacon node from the previously saved head state and syncs from there.
UnsafeSync = cli.BoolFlag{
UnsafeSync = &cli.BoolFlag{
Name: "unsafe-sync",
Usage: "Starts the beacon node with the previously saved head state instead of finalized state.",
}
// SlasherCertFlag defines a flag for the slasher TLS certificate.
SlasherCertFlag = cli.StringFlag{
SlasherCertFlag = &cli.StringFlag{
Name: "slasher-tls-cert",
Usage: "Certificate for secure slasher gRPC connection. Pass this in order to use slasher gRPC securely.",
}
// SlasherProviderFlag defines a flag for a slasher RPC provider.
SlasherProviderFlag = cli.StringFlag{
SlasherProviderFlag = &cli.StringFlag{
Name: "slasher-provider",
Usage: "A slasher provider string endpoint. Can either be an grpc server endpoint.",
Value: "127.0.0.1:5000",
}
// SlotsPerArchivedPoint specifies the number of slots between the archived points, to save beacon state in the cold
// section of DB.
SlotsPerArchivedPoint = cli.IntFlag{
SlotsPerArchivedPoint = &cli.IntFlag{
Name: "slots-per-archive-point",
Usage: "The slot durations of when an archived state gets saved in the DB.",
Value: 128,

View File

@@ -3,7 +3,7 @@ package flags
import (
"github.com/prysmaticlabs/prysm/shared/cmd"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
// GlobalFlags specifies all the global flags for the
@@ -38,31 +38,31 @@ func Init(c *GlobalFlags) {
// based on the provided cli context.
func ConfigureGlobalFlags(ctx *cli.Context) {
cfg := &GlobalFlags{}
if ctx.GlobalBool(ArchiveEnableFlag.Name) {
if ctx.Bool(ArchiveEnableFlag.Name) {
cfg.EnableArchive = true
}
if ctx.GlobalBool(ArchiveValidatorSetChangesFlag.Name) {
if ctx.Bool(ArchiveValidatorSetChangesFlag.Name) {
cfg.EnableArchivedValidatorSetChanges = true
}
if ctx.GlobalBool(ArchiveBlocksFlag.Name) {
if ctx.Bool(ArchiveBlocksFlag.Name) {
cfg.EnableArchivedBlocks = true
}
if ctx.GlobalBool(ArchiveAttestationsFlag.Name) {
if ctx.Bool(ArchiveAttestationsFlag.Name) {
cfg.EnableArchivedAttestations = true
}
if ctx.GlobalBool(UnsafeSync.Name) {
if ctx.Bool(UnsafeSync.Name) {
cfg.UnsafeSync = true
}
cfg.MaxPageSize = ctx.GlobalInt(RPCMaxPageSize.Name)
cfg.DeploymentBlock = ctx.GlobalInt(ContractDeploymentBlock.Name)
cfg.MaxPageSize = ctx.Int(RPCMaxPageSize.Name)
cfg.DeploymentBlock = ctx.Int(ContractDeploymentBlock.Name)
configureMinimumPeers(ctx, cfg)
Init(cfg)
}
func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
cfg.MinimumSyncPeers = ctx.GlobalInt(MinSyncPeers.Name)
maxPeers := int(ctx.GlobalInt64(cmd.P2PMaxPeers.Name))
cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
maxPeers := int(ctx.Int64(cmd.P2PMaxPeers.Name))
if cfg.MinimumSyncPeers > maxPeers {
log.Warnf("Changing Minimum Sync Peers to %d", maxPeers)
cfg.MinimumSyncPeers = maxPeers

View File

@@ -1,29 +1,29 @@
package flags
import (
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
var (
// InteropGenesisStateFlag defines a flag for the beacon node to load genesis state via file.
InteropGenesisStateFlag = cli.StringFlag{
InteropGenesisStateFlag = &cli.StringFlag{
Name: "interop-genesis-state",
Usage: "The genesis state file (.SSZ) to load from",
}
// InteropMockEth1DataVotesFlag enables mocking the eth1 proof-of-work chain data put into blocks by proposers.
InteropMockEth1DataVotesFlag = cli.BoolFlag{
InteropMockEth1DataVotesFlag = &cli.BoolFlag{
Name: "interop-eth1data-votes",
Usage: "Enable mocking of eth1 data votes for proposers to package into blocks",
}
// InteropGenesisTimeFlag specifies genesis time for state generation.
InteropGenesisTimeFlag = cli.Uint64Flag{
InteropGenesisTimeFlag = &cli.Uint64Flag{
Name: "interop-genesis-time",
Usage: "Specify the genesis time for interop genesis state generation. Must be used with " +
"--interop-num-validators",
}
// InteropNumValidatorsFlag specifies number of genesis validators for state generation.
InteropNumValidatorsFlag = cli.Uint64Flag{
InteropNumValidatorsFlag = &cli.Uint64Flag{
Name: "interop-num-validators",
Usage: "Specify number of genesis validators to generate for interop. Must be used with --interop-genesis-time",
}

View File

@@ -17,10 +17,11 @@ import (
"github.com/prysmaticlabs/prysm/shared/logutil"
"github.com/prysmaticlabs/prysm/shared/version"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
gologging "github.com/whyrusleeping/go-logging"
prefixed "github.com/x-cray/logrus-prefixed-formatter"
_ "go.uber.org/automaxprocs"
"gopkg.in/urfave/cli.v2/altsrc"
)
var appFlags = []cli.Flag{
@@ -79,15 +80,16 @@ var appFlags = []cli.Flag{
debug.TraceFlag,
cmd.LogFileName,
cmd.EnableUPnPFlag,
cmd.ConfigFileFlag,
}
func init() {
appFlags = append(appFlags, featureconfig.BeaconChainFlags...)
appFlags = cmd.WrapFlags(append(appFlags, featureconfig.BeaconChainFlags...))
}
func main() {
log := logrus.WithField("prefix", "main")
app := cli.NewApp()
app := cli.App{}
app.Name = "beacon-chain"
app.Usage = "this is a beacon chain implementation for Ethereum 2.0"
app.Action = startNode
@@ -96,7 +98,14 @@ func main() {
app.Flags = appFlags
app.Before = func(ctx *cli.Context) error {
format := ctx.GlobalString(cmd.LogFormat.Name)
// Load any flags from file, if specified.
if ctx.IsSet(cmd.ConfigFileFlag.Name) {
if err := altsrc.InitInputSourceWithContext(appFlags, altsrc.NewYamlSourceFromFlagFunc(cmd.ConfigFileFlag.Name))(ctx); err != nil {
return err
}
}
format := ctx.String(cmd.LogFormat.Name)
switch format {
case "text":
formatter := new(prefixed.TextFormatter)
@@ -104,7 +113,7 @@ func main() {
formatter.FullTimestamp = true
// If persistent log files are written - we disable the log messages coloring because
// the colors are ANSI codes and seen as gibberish in the log files.
formatter.DisableColors = ctx.GlobalString(cmd.LogFileName.Name) != ""
formatter.DisableColors = ctx.String(cmd.LogFileName.Name) != ""
logrus.SetFormatter(formatter)
break
case "fluentd":
@@ -121,7 +130,7 @@ func main() {
return fmt.Errorf("unknown log format %s", format)
}
logFileName := ctx.GlobalString(cmd.LogFileName.Name)
logFileName := ctx.String(cmd.LogFileName.Name)
if logFileName != "" {
if err := logutil.ConfigurePersistentLogging(logFileName); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
@@ -129,7 +138,7 @@ func main() {
}
if ctx.IsSet(flags.SetGCPercent.Name) {
runtimeDebug.SetGCPercent(ctx.GlobalInt(flags.SetGCPercent.Name))
runtimeDebug.SetGCPercent(ctx.Int(flags.SetGCPercent.Name))
}
runtime.GOMAXPROCS(runtime.NumCPU())
return debug.Setup(ctx)
@@ -149,7 +158,7 @@ func main() {
}
func startNode(ctx *cli.Context) error {
verbosity := ctx.GlobalString(cmd.VerbosityFlag.Name)
verbosity := ctx.String(cmd.VerbosityFlag.Name)
level, err := logrus.ParseLevel(verbosity)
if err != nil {
return err

View File

@@ -21,8 +21,10 @@ go_library(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/rpc:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",
"//beacon-chain/sync/initial-sync-old:go_default_library",
"//shared:go_default_library",
"//shared/cmd:go_default_library",
"//shared/debug:go_default_library",
@@ -36,7 +38,7 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
],
)
@@ -49,6 +51,6 @@ go_test(
"//beacon-chain/core/feed/state:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
],
)

View File

@@ -31,8 +31,10 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
prysmsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
initialsync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync"
initialsyncold "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync-old"
"github.com/prysmaticlabs/prysm/shared"
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/debug"
@@ -44,7 +46,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/tracing"
"github.com/prysmaticlabs/prysm/shared/version"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
var log = logrus.WithField("prefix", "node")
@@ -69,6 +71,7 @@ type BeaconNode struct {
blockFeed *event.Feed
opFeed *event.Feed
forkChoiceStore forkchoice.ForkChoicer
stateGen *stategen.State
}
// NewBeaconNode creates a new node instance, sets up configuration options, and registers
@@ -76,10 +79,10 @@ type BeaconNode struct {
func NewBeaconNode(ctx *cli.Context) (*BeaconNode, error) {
if err := tracing.Setup(
"beacon-chain", // service name
ctx.GlobalString(cmd.TracingProcessNameFlag.Name),
ctx.GlobalString(cmd.TracingEndpointFlag.Name),
ctx.GlobalFloat64(cmd.TraceSampleFractionFlag.Name),
ctx.GlobalBool(cmd.EnableTracingFlag.Name),
ctx.String(cmd.TracingProcessNameFlag.Name),
ctx.String(cmd.TracingEndpointFlag.Name),
ctx.Float64(cmd.TraceSampleFractionFlag.Name),
ctx.Bool(cmd.EnableTracingFlag.Name),
); err != nil {
return nil, err
}
@@ -104,6 +107,8 @@ func NewBeaconNode(ctx *cli.Context) (*BeaconNode, error) {
return nil, err
}
beacon.startStateGen()
if err := beacon.registerP2P(ctx); err != nil {
return nil, err
}
@@ -112,7 +117,7 @@ func NewBeaconNode(ctx *cli.Context) (*BeaconNode, error) {
return nil, err
}
if err := beacon.registerAttestationPool(ctx); err != nil {
if err := beacon.registerAttestationPool(); err != nil {
return nil, err
}
@@ -146,7 +151,7 @@ func NewBeaconNode(ctx *cli.Context) (*BeaconNode, error) {
return nil, err
}
if !ctx.GlobalBool(cmd.DisableMonitoringFlag.Name) {
if !ctx.Bool(cmd.DisableMonitoringFlag.Name) {
if err := beacon.registerPrometheusService(ctx); err != nil {
return nil, err
}
@@ -223,10 +228,10 @@ func (b *BeaconNode) startForkChoice() {
}
func (b *BeaconNode) startDB(ctx *cli.Context) error {
baseDir := ctx.GlobalString(cmd.DataDirFlag.Name)
baseDir := ctx.String(cmd.DataDirFlag.Name)
dbPath := path.Join(baseDir, beaconChainDBName)
clearDB := ctx.GlobalBool(cmd.ClearDB.Name)
forceClearDB := ctx.GlobalBool(cmd.ForceClearDB.Name)
clearDB := ctx.Bool(cmd.ClearDB.Name)
forceClearDB := ctx.Bool(cmd.ForceClearDB.Name)
d, err := db.NewDB(dbPath)
if err != nil {
@@ -258,9 +263,13 @@ func (b *BeaconNode) startDB(ctx *cli.Context) error {
return nil
}
func (b *BeaconNode) startStateGen() {
b.stateGen = stategen.New(b.db)
}
func (b *BeaconNode) registerP2P(ctx *cli.Context) error {
// Bootnode ENR may be a filepath to an ENR file.
bootnodeAddrs := strings.Split(ctx.GlobalString(cmd.BootstrapNode.Name), ",")
bootnodeAddrs := strings.Split(ctx.String(cmd.BootstrapNode.Name), ",")
for i, addr := range bootnodeAddrs {
if filepath.Ext(addr) == ".enr" {
b, err := ioutil.ReadFile(addr)
@@ -271,22 +280,27 @@ func (b *BeaconNode) registerP2P(ctx *cli.Context) error {
}
}
datadir := ctx.String(cmd.DataDirFlag.Name)
if datadir == "" {
datadir = cmd.DefaultDataDir()
}
svc, err := p2p.NewService(&p2p.Config{
NoDiscovery: ctx.GlobalBool(cmd.NoDiscovery.Name),
StaticPeers: sliceutil.SplitCommaSeparated(ctx.GlobalStringSlice(cmd.StaticPeers.Name)),
NoDiscovery: ctx.Bool(cmd.NoDiscovery.Name),
StaticPeers: sliceutil.SplitCommaSeparated(ctx.StringSlice(cmd.StaticPeers.Name)),
BootstrapNodeAddr: bootnodeAddrs,
RelayNodeAddr: ctx.GlobalString(cmd.RelayNode.Name),
DataDir: ctx.GlobalString(cmd.DataDirFlag.Name),
LocalIP: ctx.GlobalString(cmd.P2PIP.Name),
HostAddress: ctx.GlobalString(cmd.P2PHost.Name),
HostDNS: ctx.GlobalString(cmd.P2PHostDNS.Name),
PrivateKey: ctx.GlobalString(cmd.P2PPrivKey.Name),
TCPPort: ctx.GlobalUint(cmd.P2PTCPPort.Name),
UDPPort: ctx.GlobalUint(cmd.P2PUDPPort.Name),
MaxPeers: ctx.GlobalUint(cmd.P2PMaxPeers.Name),
WhitelistCIDR: ctx.GlobalString(cmd.P2PWhitelist.Name),
EnableUPnP: ctx.GlobalBool(cmd.EnableUPnPFlag.Name),
Encoding: ctx.GlobalString(cmd.P2PEncoding.Name),
RelayNodeAddr: ctx.String(cmd.RelayNode.Name),
DataDir: datadir,
LocalIP: ctx.String(cmd.P2PIP.Name),
HostAddress: ctx.String(cmd.P2PHost.Name),
HostDNS: ctx.String(cmd.P2PHostDNS.Name),
PrivateKey: ctx.String(cmd.P2PPrivKey.Name),
TCPPort: ctx.Uint(cmd.P2PTCPPort.Name),
UDPPort: ctx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: ctx.Uint(cmd.P2PMaxPeers.Name),
WhitelistCIDR: ctx.String(cmd.P2PWhitelist.Name),
EnableUPnP: ctx.Bool(cmd.EnableUPnPFlag.Name),
Encoding: ctx.String(cmd.P2PEncoding.Name),
})
if err != nil {
return err
@@ -302,13 +316,28 @@ func (b *BeaconNode) fetchP2P(ctx *cli.Context) p2p.P2P {
return p
}
func (b *BeaconNode) registerAttestationPool() error {
s, err := attestations.NewService(context.Background(), &attestations.Config{
Pool: b.attestationPool,
})
if err != nil {
return errors.Wrap(err, "could not register atts pool service")
}
return b.services.RegisterService(s)
}
func (b *BeaconNode) registerBlockchainService(ctx *cli.Context) error {
var web3Service *powchain.Service
if err := b.services.FetchService(&web3Service); err != nil {
return err
}
maxRoutines := ctx.GlobalInt64(cmd.MaxGoroutines.Name)
var opsService *attestations.Service
if err := b.services.FetchService(&opsService); err != nil {
return err
}
maxRoutines := ctx.Int64(cmd.MaxGoroutines.Name)
blockchainService, err := blockchain.NewService(context.Background(), &blockchain.Config{
BeaconDB: b.db,
DepositCache: b.depositCache,
@@ -320,6 +349,8 @@ func (b *BeaconNode) registerBlockchainService(ctx *cli.Context) error {
MaxRoutines: maxRoutines,
StateNotifier: b,
ForkChoiceStore: b.forkChoiceStore,
OpsService: opsService,
StateGen: b.stateGen,
})
if err != nil {
return errors.Wrap(err, "could not register blockchain service")
@@ -327,21 +358,11 @@ func (b *BeaconNode) registerBlockchainService(ctx *cli.Context) error {
return b.services.RegisterService(blockchainService)
}
func (b *BeaconNode) registerAttestationPool(ctx *cli.Context) error {
attPoolService, err := attestations.NewService(context.Background(), &attestations.Config{
Pool: b.attestationPool,
})
if err != nil {
return err
}
return b.services.RegisterService(attPoolService)
}
func (b *BeaconNode) registerPOWChainService(cliCtx *cli.Context) error {
if cliCtx.GlobalBool(testSkipPowFlag) {
if cliCtx.Bool(testSkipPowFlag) {
return b.services.RegisterService(&powchain.Service{})
}
depAddress := cliCtx.GlobalString(flags.DepositContractFlag.Name)
depAddress := cliCtx.String(flags.DepositContractFlag.Name)
if depAddress == "" {
log.Fatal(fmt.Sprintf("%s is required", flags.DepositContractFlag.Name))
}
@@ -352,8 +373,8 @@ func (b *BeaconNode) registerPOWChainService(cliCtx *cli.Context) error {
ctx := context.Background()
cfg := &powchain.Web3ServiceConfig{
ETH1Endpoint: cliCtx.GlobalString(flags.Web3ProviderFlag.Name),
HTTPEndPoint: cliCtx.GlobalString(flags.HTTPWeb3ProviderFlag.Name),
ETH1Endpoint: cliCtx.String(flags.Web3ProviderFlag.Name),
HTTPEndPoint: cliCtx.String(flags.HTTPWeb3ProviderFlag.Name),
DepositContract: common.HexToAddress(depAddress),
BeaconDB: b.db,
DepositCache: b.depositCache,
@@ -389,9 +410,19 @@ func (b *BeaconNode) registerSyncService(ctx *cli.Context) error {
return err
}
var initSync *initialsync.Service
if err := b.services.FetchService(&initSync); err != nil {
return err
var initSync prysmsync.Checker
if cfg := featureconfig.Get(); cfg.EnableInitSyncQueue {
var initSyncTmp *initialsync.Service
if err := b.services.FetchService(&initSyncTmp); err != nil {
return err
}
initSync = initSyncTmp
} else {
var initSyncTmp *initialsyncold.Service
if err := b.services.FetchService(&initSyncTmp); err != nil {
return err
}
initSync = initSyncTmp
}
rs := prysmsync.NewRegularSync(&prysmsync.Config{
@@ -404,6 +435,7 @@ func (b *BeaconNode) registerSyncService(ctx *cli.Context) error {
AttestationNotifier: b,
AttPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingPool: b.slashingsPool,
})
return b.services.RegisterService(rs)
@@ -415,16 +447,25 @@ func (b *BeaconNode) registerInitialSyncService(ctx *cli.Context) error {
return err
}
is := initialsync.NewInitialSync(&initialsync.Config{
if cfg := featureconfig.Get(); cfg.EnableInitSyncQueue {
is := initialsync.NewInitialSync(&initialsync.Config{
DB: b.db,
Chain: chainService,
P2P: b.fetchP2P(ctx),
StateNotifier: b,
BlockNotifier: b,
})
return b.services.RegisterService(is)
}
is := initialsyncold.NewInitialSync(&initialsyncold.Config{
DB: b.db,
Chain: chainService,
P2P: b.fetchP2P(ctx),
StateNotifier: b,
BlockNotifier: b,
})
return b.services.RegisterService(is)
}
func (b *BeaconNode) registerRPCService(ctx *cli.Context) error {
@@ -438,13 +479,23 @@ func (b *BeaconNode) registerRPCService(ctx *cli.Context) error {
return err
}
var syncService *initialsync.Service
if err := b.services.FetchService(&syncService); err != nil {
return err
var syncService prysmsync.Checker
if cfg := featureconfig.Get(); cfg.EnableInitSyncQueue {
var initSyncTmp *initialsync.Service
if err := b.services.FetchService(&initSyncTmp); err != nil {
return err
}
syncService = initSyncTmp
} else {
var initSyncTmp *initialsyncold.Service
if err := b.services.FetchService(&initSyncTmp); err != nil {
return err
}
syncService = initSyncTmp
}
genesisValidators := ctx.GlobalUint64(flags.InteropNumValidatorsFlag.Name)
genesisStatePath := ctx.GlobalString(flags.InteropGenesisStateFlag.Name)
genesisValidators := ctx.Uint64(flags.InteropNumValidatorsFlag.Name)
genesisStatePath := ctx.String(flags.InteropGenesisStateFlag.Name)
var depositFetcher depositcache.DepositFetcher
var chainStartFetcher powchain.ChainStartFetcher
if genesisValidators > 0 || genesisStatePath != "" {
@@ -459,14 +510,14 @@ func (b *BeaconNode) registerRPCService(ctx *cli.Context) error {
chainStartFetcher = web3Service
}
host := ctx.GlobalString(flags.RPCHost.Name)
port := ctx.GlobalString(flags.RPCPort.Name)
cert := ctx.GlobalString(flags.CertFlag.Name)
key := ctx.GlobalString(flags.KeyFlag.Name)
slasherCert := ctx.GlobalString(flags.SlasherCertFlag.Name)
slasherProvider := ctx.GlobalString(flags.SlasherProviderFlag.Name)
host := ctx.String(flags.RPCHost.Name)
port := ctx.String(flags.RPCPort.Name)
cert := ctx.String(flags.CertFlag.Name)
key := ctx.String(flags.KeyFlag.Name)
slasherCert := ctx.String(flags.SlasherCertFlag.Name)
slasherProvider := ctx.String(flags.SlasherProviderFlag.Name)
mockEth1DataVotes := ctx.GlobalBool(flags.InteropMockEth1DataVotesFlag.Name)
mockEth1DataVotes := ctx.Bool(flags.InteropMockEth1DataVotesFlag.Name)
rpcService := rpc.NewService(context.Background(), &rpc.Config{
Host: host,
Port: port,
@@ -496,6 +547,7 @@ func (b *BeaconNode) registerRPCService(ctx *cli.Context) error {
OperationNotifier: b,
SlasherCert: slasherCert,
SlasherProvider: slasherProvider,
StateGen: b.stateGen,
})
return b.services.RegisterService(rpcService)
@@ -521,7 +573,7 @@ func (b *BeaconNode) registerPrometheusService(ctx *cli.Context) error {
additionalHandlers = append(additionalHandlers, prometheus.Handler{Path: "/tree", Handler: c.TreeHandler})
service := prometheus.NewPrometheusService(
fmt.Sprintf(":%d", ctx.GlobalInt64(cmd.MonitoringPortFlag.Name)),
fmt.Sprintf(":%d", ctx.Int64(cmd.MonitoringPortFlag.Name)),
b.services,
additionalHandlers...,
)
@@ -531,9 +583,9 @@ func (b *BeaconNode) registerPrometheusService(ctx *cli.Context) error {
}
func (b *BeaconNode) registerGRPCGateway(ctx *cli.Context) error {
gatewayPort := ctx.GlobalInt(flags.GRPCGatewayPort.Name)
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
if gatewayPort > 0 {
selfAddress := fmt.Sprintf("127.0.0.1:%d", ctx.GlobalInt(flags.RPCPort.Name))
selfAddress := fmt.Sprintf("127.0.0.1:%d", ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("0.0.0.0:%d", gatewayPort)
return b.services.RegisterService(gateway.New(context.Background(), selfAddress, gatewayAddress, nil /*optional mux*/))
}
@@ -541,9 +593,9 @@ func (b *BeaconNode) registerGRPCGateway(ctx *cli.Context) error {
}
func (b *BeaconNode) registerInteropServices(ctx *cli.Context) error {
genesisTime := ctx.GlobalUint64(flags.InteropGenesisTimeFlag.Name)
genesisValidators := ctx.GlobalUint64(flags.InteropNumValidatorsFlag.Name)
genesisStatePath := ctx.GlobalString(flags.InteropGenesisStateFlag.Name)
genesisTime := ctx.Uint64(flags.InteropGenesisTimeFlag.Name)
genesisValidators := ctx.Uint64(flags.InteropNumValidatorsFlag.Name)
genesisStatePath := ctx.String(flags.InteropGenesisStateFlag.Name)
if genesisValidators > 0 || genesisStatePath != "" {
svc := interopcoldstart.NewColdStartService(context.Background(), &interopcoldstart.Config{

View File

@@ -9,7 +9,7 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/shared/testutil"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
// Ensure BeaconNode implements interfaces.
@@ -22,7 +22,7 @@ func TestNodeClose_OK(t *testing.T) {
tmp := fmt.Sprintf("%s/datadirtest2", testutil.TempDir())
os.RemoveAll(tmp)
app := cli.NewApp()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.String("web3provider", "ws//127.0.0.1:8546", "web3 provider ws or IPC endpoint")
set.Bool("test-skip-pow", true, "skip pow dial")
@@ -31,7 +31,7 @@ func TestNodeClose_OK(t *testing.T) {
set.Bool("demo-config", true, "demo configuration")
set.String("deposit-contract", "0x0000000000000000000000000000000000000000", "deposit contract address")
context := cli.NewContext(app, set, nil)
context := cli.NewContext(&app, set, nil)
node, err := NewBeaconNode(context)
if err != nil {

View File

@@ -8,6 +8,7 @@ go_library(
"metrics.go",
"pool.go",
"prepare_forkchoice.go",
"prune_expired.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations",
@@ -18,7 +19,8 @@ go_library(
"//beacon-chain/state:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"//shared/roughtime:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
@@ -35,6 +37,7 @@ go_test(
"aggregate_test.go",
"pool_test.go",
"prepare_forkchoice_test.go",
"prune_expired_test.go",
"service_test.go",
],
embed = [":go_default_library"],
@@ -42,6 +45,8 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//shared/bls:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

View File

@@ -13,8 +13,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//shared/params:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"//beacon-chain/state:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
@@ -31,10 +30,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//shared/params:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
],
)

View File

@@ -1,13 +1,11 @@
package kv
import (
"time"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// SaveAggregatedAttestation saves an aggregated attestation in cache.
@@ -20,33 +18,21 @@ func (p *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
d, expTime, ok := p.aggregatedAtt.GetWithExpiration(string(r[:]))
// If we have not seen the attestation data before, store in in cache with
// the default expiration time out.
copiedAtt := stateTrie.CopyAttestation(att)
p.aggregatedAttLock.Lock()
defer p.aggregatedAttLock.Unlock()
atts, ok := p.aggregatedAtt[r]
if !ok {
atts := []*ethpb.Attestation{att}
p.aggregatedAtt.Set(string(r[:]), atts, cache.DefaultExpiration)
atts := []*ethpb.Attestation{copiedAtt}
p.aggregatedAtt[r] = atts
return nil
}
atts, ok := d.([]*ethpb.Attestation)
if !ok {
return errors.New("cached value is not of type []*ethpb.Attestation")
}
atts, err = helpers.AggregateAttestations(append(atts, att))
atts, err = helpers.AggregateAttestations(append(atts, copiedAtt))
if err != nil {
return err
}
// Delete attestation if the current time has passed the expiration time.
if time.Now().Unix() >= expTime.Unix() {
p.aggregatedAtt.Delete(string(r[:]))
return nil
}
// Reset expiration time given how much time has passed.
expDuration := time.Duration(expTime.Unix() - time.Now().Unix())
p.aggregatedAtt.Set(string(r[:]), atts, expDuration*time.Second)
p.aggregatedAtt[r] = atts
return nil
}
@@ -63,17 +49,11 @@ func (p *AttCaches) SaveAggregatedAttestations(atts []*ethpb.Attestation) error
// AggregatedAttestations returns the aggregated attestations in cache.
func (p *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
// Delete all expired aggregated attestations before returning them.
p.aggregatedAtt.DeleteExpired()
atts := make([]*ethpb.Attestation, 0)
atts := make([]*ethpb.Attestation, 0, p.aggregatedAtt.ItemCount())
for s, i := range p.aggregatedAtt.Items() {
// Type assertion for the worst case. This shouldn't happen.
a, ok := i.Object.([]*ethpb.Attestation)
if !ok {
p.aggregatedAtt.Delete(s)
continue
}
p.aggregatedAttLock.RLock()
defer p.aggregatedAttLock.RUnlock()
for _, a := range p.aggregatedAtt {
atts = append(atts, a...)
}
@@ -83,19 +63,11 @@ func (p *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (p *AttCaches) AggregatedAttestationsBySlotIndex(slot uint64, committeeIndex uint64) []*ethpb.Attestation {
// Delete all expired aggregated attestations before returning them.
p.aggregatedAtt.DeleteExpired()
atts := make([]*ethpb.Attestation, 0, p.aggregatedAtt.ItemCount())
for s, i := range p.aggregatedAtt.Items() {
// Type assertion for the worst case. This shouldn't happen.
a, ok := i.Object.([]*ethpb.Attestation)
if !ok {
p.aggregatedAtt.Delete(s)
continue
}
atts := make([]*ethpb.Attestation, 0)
p.aggregatedAttLock.RLock()
defer p.aggregatedAttLock.RUnlock()
for _, a := range p.aggregatedAtt {
if slot == a[0].Data.Slot && committeeIndex == a[0].Data.CommitteeIndex {
atts = append(atts, a...)
}
@@ -113,31 +85,24 @@ func (p *AttCaches) DeleteAggregatedAttestation(att *ethpb.Attestation) error {
if err != nil {
return errors.Wrap(err, "could not tree hash attestation data")
}
a, expTime, ok := p.aggregatedAtt.GetWithExpiration(string(r[:]))
p.aggregatedAttLock.Lock()
defer p.aggregatedAttLock.Unlock()
attList, ok := p.aggregatedAtt[r]
if !ok {
return nil
}
atts, ok := a.([]*ethpb.Attestation)
if !ok {
return errors.New("cached value is not of type []*ethpb.Attestation")
}
filtered := make([]*ethpb.Attestation, 0)
for _, a := range atts {
for _, a := range attList {
if !att.AggregationBits.Contains(a.AggregationBits) {
filtered = append(filtered, a)
}
}
if len(filtered) == 0 {
p.aggregatedAtt.Delete(string(r[:]))
delete(p.aggregatedAtt, r)
} else {
// Delete attestation if the current time has passed the expiration time.
if time.Now().Unix() >= expTime.Unix() {
p.aggregatedAtt.Delete(string(r[:]))
return nil
}
// Reset expiration time given how much time has passed.
expDuration := time.Duration(expTime.Unix() - time.Now().Unix())
p.aggregatedAtt.Set(string(r[:]), filtered, expDuration*time.Second)
p.aggregatedAtt[r] = filtered
}
return nil
@@ -150,16 +115,20 @@ func (p *AttCaches) HasAggregatedAttestation(att *ethpb.Attestation) (bool, erro
return false, errors.Wrap(err, "could not tree hash attestation")
}
if atts, ok := p.aggregatedAtt.Get(string(r[:])); ok {
for _, a := range atts.([]*ethpb.Attestation) {
p.aggregatedAttLock.RLock()
defer p.aggregatedAttLock.RUnlock()
if atts, ok := p.aggregatedAtt[r]; ok {
for _, a := range atts {
if a.AggregationBits.Contains(att.AggregationBits) {
return true, nil
}
}
}
if atts, ok := p.blockAtt.Get(string(r[:])); ok {
for _, a := range atts.([]*ethpb.Attestation) {
p.blockAttLock.RLock()
defer p.blockAttLock.RUnlock()
if atts, ok := p.blockAtt[r]; ok {
for _, a := range atts {
if a.AggregationBits.Contains(att.AggregationBits) {
return true, nil
}
@@ -171,5 +140,7 @@ func (p *AttCaches) HasAggregatedAttestation(att *ethpb.Attestation) (bool, erro
// AggregatedAttestationCount returns the number of aggregated attestations key in the pool.
func (p *AttCaches) AggregatedAttestationCount() int {
return p.aggregatedAtt.ItemCount()
p.aggregatedAttLock.RLock()
defer p.aggregatedAttLock.RUnlock()
return len(p.aggregatedAtt)
}

View File

@@ -1,18 +1,13 @@
package kv
import (
"math"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestKV_Aggregated_NotAggregated(t *testing.T) {
@@ -51,64 +46,6 @@ func TestKV_Aggregated_CanSaveRetrieve(t *testing.T) {
}
}
func TestKV_Aggregated_SaveAndVerifyExpireTime(t *testing.T) {
cache := NewAttCaches()
d := &ethpb.AttestationData{Slot: 1}
att1 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b11100}}
att2 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b10110}}
att3 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b11011}}
r, err := ssz.HashTreeRoot(d)
if err != nil {
t.Fatal(err)
}
if err := cache.SaveAggregatedAttestation(att1); err != nil {
t.Fatal(err)
}
a, expTime, ok := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if !ok {
t.Fatal("Did not save attestations")
}
if len(a.([]*ethpb.Attestation)) != 1 {
t.Fatal("Did not save attestations")
}
// Let time pass by one second to test expiration time.
time.Sleep(1 * time.Second)
// Save attestation 2 too the pool, the expiration time should not change.
if err := cache.SaveAggregatedAttestation(att2); err != nil {
t.Fatal(err)
}
newA, newExpTime, ok := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if !ok {
t.Fatal("Did not save attestations")
}
if len(newA.([]*ethpb.Attestation)) != 2 {
t.Fatal("Did not delete attestations")
}
if expTime.Unix() != newExpTime.Unix() {
t.Error("Expiration time should not change")
}
// Let time pass by another second to test expiration time.
time.Sleep(1 * time.Second)
// Save attestation 3 too the pool, the expiration time should not change.
if err := cache.SaveAggregatedAttestation(att3); err != nil {
t.Fatal(err)
}
newA, newExpTime, _ = cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if len(newA.([]*ethpb.Attestation)) != 3 {
t.Fatal("Did not delete attestations")
}
if expTime.Unix() != newExpTime.Unix() {
t.Error("Expiration time should not change")
}
}
func TestKV_Aggregated_CanDelete(t *testing.T) {
cache := NewAttCaches()
@@ -138,90 +75,6 @@ func TestKV_Aggregated_CanDelete(t *testing.T) {
}
}
func TestKV_Aggregated_DeleteAndVerifyExpireTime(t *testing.T) {
cache := NewAttCaches()
d := &ethpb.AttestationData{Slot: 1}
att1 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b11100}}
att2 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b10110}}
att3 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b11011}}
atts := []*ethpb.Attestation{att1, att2, att3}
for _, att := range atts {
if err := cache.SaveAggregatedAttestation(att); err != nil {
t.Fatal(err)
}
}
r, err := ssz.HashTreeRoot(d)
if err != nil {
t.Fatal(err)
}
a, expTime, ok := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if !ok {
t.Fatal("Did not save attestations")
}
if len(a.([]*ethpb.Attestation)) != 3 {
t.Fatal("Did not save attestations")
}
// Let time pass by one second to test expiration time.
time.Sleep(1 * time.Second)
// Delete attestation 1 from the pool, the expiration time should not change.
if err := cache.DeleteAggregatedAttestation(att1); err != nil {
t.Fatal(err)
}
newA, newExpTime, _ := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if len(newA.([]*ethpb.Attestation)) != 2 {
t.Fatal("Did not delete attestations")
}
if expTime.Unix() != newExpTime.Unix() {
t.Error("Expiration time should not change")
}
// Let time pass by another second to test expiration time.
time.Sleep(1 * time.Second)
// Delete attestation 1 from the pool, the expiration time should not change.
if err := cache.DeleteAggregatedAttestation(att2); err != nil {
t.Fatal(err)
}
newA, newExpTime, _ = cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if len(newA.([]*ethpb.Attestation)) != 1 {
t.Fatal("Did not delete attestations")
}
if expTime.Unix() != newExpTime.Unix() {
t.Error("Expiration time should not change")
}
}
func TestKV_Aggregated_CheckExpTime(t *testing.T) {
cache := NewAttCaches()
att := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b111}}
r, _ := ssz.HashTreeRoot(att.Data)
if err := cache.SaveAggregatedAttestation(att); err != nil {
t.Fatal(err)
}
item, exp, exists := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if !exists {
t.Error("Saved att does not exist")
}
receivedAtt := item.([]*ethpb.Attestation)[0]
if !proto.Equal(att, receivedAtt) {
t.Error("Did not receive correct aggregated att")
}
wanted := float64(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
if math.RoundToEven(exp.Sub(time.Now()).Seconds()) != wanted {
t.Errorf("Did not receive correct exp time. Wanted: %f, got: %f", wanted,
math.RoundToEven(exp.Sub(time.Now()).Seconds()))
}
}
func TestKV_HasAggregatedAttestation(t *testing.T) {
tests := []struct {
name string

View File

@@ -1,10 +1,10 @@
package kv
import (
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// SaveBlockAttestation saves an block attestation in cache.
@@ -14,15 +14,11 @@ func (p *AttCaches) SaveBlockAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
var atts []*ethpb.Attestation
d, ok := p.blockAtt.Get(string(r[:]))
p.blockAttLock.Lock()
defer p.blockAttLock.Unlock()
atts, ok := p.blockAtt[r]
if !ok {
atts = make([]*ethpb.Attestation, 0)
} else {
atts, ok = d.([]*ethpb.Attestation)
if !ok {
return errors.New("cached value is not of type []*ethpb.Attestation")
}
}
// Ensure that this attestation is not already fully contained in an existing attestation.
@@ -31,11 +27,8 @@ func (p *AttCaches) SaveBlockAttestation(att *ethpb.Attestation) error {
return nil
}
}
atts = append(atts, att)
// DefaultExpiration is set to what was given to New(). In this case
// it's one epoch.
p.blockAtt.Set(string(r[:]), atts, cache.DefaultExpiration)
p.blockAtt[r] = append(atts, stateTrie.CopyAttestation(att))
return nil
}
@@ -53,14 +46,11 @@ func (p *AttCaches) SaveBlockAttestations(atts []*ethpb.Attestation) error {
// BlockAttestations returns the block attestations in cache.
func (p *AttCaches) BlockAttestations() []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, 0, p.blockAtt.ItemCount())
for s, i := range p.blockAtt.Items() {
// Type assertion for the worst case. This shouldn't happen.
att, ok := i.Object.([]*ethpb.Attestation)
if !ok {
p.blockAtt.Delete(s)
continue
}
atts := make([]*ethpb.Attestation, 0)
p.blockAttLock.RLock()
defer p.blockAttLock.RUnlock()
for _, att := range p.blockAtt {
atts = append(atts, att...)
}
@@ -74,7 +64,9 @@ func (p *AttCaches) DeleteBlockAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
p.blockAtt.Delete(string(r[:]))
p.blockAttLock.Lock()
defer p.blockAttLock.Unlock()
delete(p.blockAtt, r)
return nil
}

View File

@@ -1,17 +1,12 @@
package kv
import (
"math"
"reflect"
"sort"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestKV_BlockAttestation_CanSaveRetrieve(t *testing.T) {
@@ -67,30 +62,3 @@ func TestKV_BlockAttestation_CanDelete(t *testing.T) {
t.Error("Did not receive correct aggregated atts")
}
}
func TestKV_BlockAttestation_CheckExpTime(t *testing.T) {
cache := NewAttCaches()
att := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b111}}
r, _ := ssz.HashTreeRoot(att.Data)
if err := cache.SaveBlockAttestation(att); err != nil {
t.Fatal(err)
}
item, exp, exists := cache.blockAtt.GetWithExpiration(string(r[:]))
if !exists {
t.Error("Saved att does not exist")
}
receivedAtt := item.([]*ethpb.Attestation)[0]
if !proto.Equal(att, receivedAtt) {
t.Error("Did not receive correct aggregated att")
}
wanted := float64(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
if math.RoundToEven(exp.Sub(time.Now()).Seconds()) != wanted {
t.Errorf("Did not receive correct exp time. Wanted: %f, got: %f", wanted,
math.RoundToEven(exp.Sub(time.Now()).Seconds()))
}
}

View File

@@ -1,10 +1,10 @@
package kv
import (
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// SaveForkchoiceAttestation saves an forkchoice attestation in cache.
@@ -14,9 +14,9 @@ func (p *AttCaches) SaveForkchoiceAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
// DefaultExpiration is set to what was given to New(). In this case
// it's one epoch.
p.forkchoiceAtt.Set(string(r[:]), att, cache.DefaultExpiration)
p.forkchoiceAttLock.Lock()
defer p.forkchoiceAttLock.Unlock()
p.forkchoiceAtt[r] = stateTrie.CopyAttestation(att) // Copied.
return nil
}
@@ -34,15 +34,12 @@ func (p *AttCaches) SaveForkchoiceAttestations(atts []*ethpb.Attestation) error
// ForkchoiceAttestations returns the forkchoice attestations in cache.
func (p *AttCaches) ForkchoiceAttestations() []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, 0, p.forkchoiceAtt.ItemCount())
for s, i := range p.forkchoiceAtt.Items() {
// Type assertion for the worst case. This shouldn't happen.
att, ok := i.Object.(*ethpb.Attestation)
if !ok {
p.forkchoiceAtt.Delete(s)
continue
}
atts = append(atts, att)
atts := make([]*ethpb.Attestation, 0)
p.forkchoiceAttLock.RLock()
defer p.forkchoiceAttLock.RUnlock()
for _, att := range p.forkchoiceAtt {
atts = append(atts, stateTrie.CopyAttestation(att) /* Copied */)
}
return atts
@@ -55,7 +52,9 @@ func (p *AttCaches) DeleteForkchoiceAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
p.forkchoiceAtt.Delete(string(r[:]))
p.forkchoiceAttLock.Lock()
defer p.forkchoiceAttLock.Unlock()
delete(p.forkchoiceAtt, r)
return nil
}

View File

@@ -1,17 +1,12 @@
package kv
import (
"math"
"reflect"
"sort"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestKV_Forkchoice_CanSaveRetrieve(t *testing.T) {
@@ -67,30 +62,3 @@ func TestKV_Forkchoice_CanDelete(t *testing.T) {
t.Error("Did not receive correct aggregated atts")
}
}
func TestKV_Forkchoice_CheckExpTime(t *testing.T) {
cache := NewAttCaches()
att := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b111}}
r, _ := ssz.HashTreeRoot(att)
if err := cache.SaveForkchoiceAttestation(att); err != nil {
t.Fatal(err)
}
item, exp, exists := cache.forkchoiceAtt.GetWithExpiration(string(r[:]))
if !exists {
t.Error("Saved att does not exist")
}
receivedAtt := item.(*ethpb.Attestation)
if !proto.Equal(att, receivedAtt) {
t.Error("Did not receive correct aggregated att")
}
wanted := float64(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
if math.RoundToEven(exp.Sub(time.Now()).Seconds()) != wanted {
t.Errorf("Did not receive correct exp time. Wanted: %f, got: %f", wanted,
math.RoundToEven(exp.Sub(time.Now()).Seconds()))
}
}

View File

@@ -1,34 +1,33 @@
package kv
import (
"time"
"sync"
"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/shared/params"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
)
// AttCaches defines the caches used to satisfy attestation pool interface.
// These caches are KV store for various attestations
// such are unaggregated, aggregated or attestations within a block.
type AttCaches struct {
aggregatedAtt *cache.Cache
unAggregatedAtt *cache.Cache
forkchoiceAtt *cache.Cache
blockAtt *cache.Cache
aggregatedAttLock sync.RWMutex
aggregatedAtt map[[32]byte][]*ethpb.Attestation
unAggregateAttLock sync.RWMutex
unAggregatedAtt map[[32]byte]*ethpb.Attestation
forkchoiceAttLock sync.RWMutex
forkchoiceAtt map[[32]byte]*ethpb.Attestation
blockAttLock sync.RWMutex
blockAtt map[[32]byte][]*ethpb.Attestation
}
// NewAttCaches initializes a new attestation pool consists of multiple KV store in cache for
// various kind of attestations.
func NewAttCaches() *AttCaches {
secsInEpoch := time.Duration(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
// Create caches with default expiration time of one epoch and which
// purges expired items every epoch.
pool := &AttCaches{
unAggregatedAtt: cache.New(secsInEpoch*time.Second, secsInEpoch*time.Second),
aggregatedAtt: cache.New(secsInEpoch*time.Second, secsInEpoch*time.Second),
forkchoiceAtt: cache.New(secsInEpoch*time.Second, secsInEpoch*time.Second),
blockAtt: cache.New(secsInEpoch*time.Second, secsInEpoch*time.Second),
unAggregatedAtt: make(map[[32]byte]*ethpb.Attestation),
aggregatedAtt: make(map[[32]byte][]*ethpb.Attestation),
forkchoiceAtt: make(map[[32]byte]*ethpb.Attestation),
blockAtt: make(map[[32]byte][]*ethpb.Attestation),
}
return pool

View File

@@ -1,11 +1,11 @@
package kv
import (
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// SaveUnaggregatedAttestation saves an unaggregated attestation in cache.
@@ -19,9 +19,9 @@ func (p *AttCaches) SaveUnaggregatedAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
// DefaultExpiration is set to what was given to New(). In this case
// it's one epoch.
p.unAggregatedAtt.Set(string(r[:]), att, cache.DefaultExpiration)
p.unAggregateAttLock.Lock()
defer p.unAggregateAttLock.Unlock()
p.unAggregatedAtt[r] = stateTrie.CopyAttestation(att) // Copied.
return nil
}
@@ -39,17 +39,12 @@ func (p *AttCaches) SaveUnaggregatedAttestations(atts []*ethpb.Attestation) erro
// UnaggregatedAttestations returns all the unaggregated attestations in cache.
func (p *AttCaches) UnaggregatedAttestations() []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, 0, p.unAggregatedAtt.ItemCount())
for s, i := range p.unAggregatedAtt.Items() {
atts := make([]*ethpb.Attestation, 0)
// Type assertion for the worst case. This shouldn't happen.
att, ok := i.Object.(*ethpb.Attestation)
if !ok {
p.unAggregatedAtt.Delete(s)
continue
}
atts = append(atts, att)
p.unAggregateAttLock.RLock()
defer p.unAggregateAttLock.RUnlock()
for _, att := range p.unAggregatedAtt {
atts = append(atts, stateTrie.CopyAttestation(att) /* Copied */)
}
return atts
@@ -66,12 +61,16 @@ func (p *AttCaches) DeleteUnaggregatedAttestation(att *ethpb.Attestation) error
return errors.Wrap(err, "could not tree hash attestation")
}
p.unAggregatedAtt.Delete(string(r[:]))
p.unAggregateAttLock.Lock()
defer p.unAggregateAttLock.Unlock()
delete(p.unAggregatedAtt, r)
return nil
}
// UnaggregatedAttestationCount returns the number of unaggregated attestations key in the pool.
func (p *AttCaches) UnaggregatedAttestationCount() int {
return p.unAggregatedAtt.ItemCount()
p.unAggregateAttLock.RLock()
defer p.unAggregateAttLock.RUnlock()
return len(p.unAggregatedAtt)
}

View File

@@ -1,17 +1,12 @@
package kv
import (
"math"
"reflect"
"strings"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestKV_Unaggregated_AlreadyAggregated(t *testing.T) {
@@ -55,30 +50,3 @@ func TestKV_Unaggregated_CanDelete(t *testing.T) {
t.Error("Did not receive correct aggregated atts")
}
}
func TestKV_Unaggregated_CheckExpTime(t *testing.T) {
cache := NewAttCaches()
att := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11}}
r, _ := ssz.HashTreeRoot(att)
if err := cache.SaveUnaggregatedAttestation(att); err != nil {
t.Fatal(err)
}
item, exp, exists := cache.unAggregatedAtt.GetWithExpiration(string(r[:]))
if !exists {
t.Error("Saved att does not exist")
}
receivedAtt := item.(*ethpb.Attestation)
if !proto.Equal(att, receivedAtt) {
t.Error("Did not receive correct unaggregated att")
}
wanted := float64(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
if math.RoundToEven(exp.Sub(time.Now()).Seconds()) != wanted {
t.Errorf("Did not receive correct exp time. Wanted: %f, got: %f", wanted,
math.RoundToEven(exp.Sub(time.Now()).Seconds()))
}
}

View File

@@ -8,16 +8,28 @@ import (
var (
aggregatedAttsCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "aggregated_attestations_in_pool_count",
Name: "aggregated_attestations_in_pool_total",
Help: "The number of aggregated attestations in the pool.",
},
)
unaggregatedAttsCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "unaggregated_attestations_in_pool_count",
Name: "unaggregated_attestations_in_pool_total",
Help: "The number of unaggregated attestations in the pool.",
},
)
expiredAggregatedAtts = promauto.NewCounter(prometheus.CounterOpts{
Name: "expired_aggregated_atts_total",
Help: "The number of expired and deleted aggregated attestations in the pool.",
})
expiredUnaggregatedAtts = promauto.NewCounter(prometheus.CounterOpts{
Name: "expired_unaggregated_atts_total",
Help: "The number of expired and deleted unaggregated attestations in the pool.",
})
expiredBlockAtts = promauto.NewCounter(prometheus.CounterOpts{
Name: "expired_block_atts_total",
Help: "The number of expired and deleted block attestations in the pool.",
})
)
func (s *Service) updateMetrics() {

View File

@@ -1,6 +1,7 @@
package attestations
import (
"bytes"
"context"
"errors"
"time"
@@ -108,7 +109,7 @@ func (s *Service) seen(att *ethpb.Attestation) (bool, error) {
return false, err
}
incomingBits := att.AggregationBits
savedBits, ok := s.forkChoiceProcessedRoots.Get(string(attRoot[:]))
savedBits, ok := s.forkChoiceProcessedRoots.Get(attRoot)
if ok {
savedBitlist, ok := savedBits.(bitfield.Bitlist)
if !ok {
@@ -116,7 +117,7 @@ func (s *Service) seen(att *ethpb.Attestation) (bool, error) {
}
if savedBitlist.Len() == incomingBits.Len() {
// Returns true if the node has seen all the bits in the new bit field of the incoming attestation.
if savedBitlist.Contains(incomingBits) {
if bytes.Equal(savedBitlist, incomingBits) || savedBitlist.Contains(incomingBits) {
return true, nil
}
// Update the bit fields by Or'ing them with the new ones.
@@ -124,6 +125,6 @@ func (s *Service) seen(att *ethpb.Attestation) (bool, error) {
}
}
s.forkChoiceProcessedRoots.Set(string(attRoot[:]), incomingBits, 1 /*cost*/)
s.forkChoiceProcessedRoots.Add(attRoot, incomingBits)
return false, nil
}

View File

@@ -5,7 +5,6 @@ import (
"reflect"
"sort"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -172,7 +171,7 @@ func TestBatchAttestations_Single(t *testing.T) {
t.Fatal(err)
}
wanted, err := helpers.AggregateAttestations(append(unaggregatedAtts, aggregatedAtts...))
wanted, err := helpers.AggregateAttestations(append(aggregatedAtts, unaggregatedAtts...))
if err != nil {
t.Fatal(err)
}
@@ -182,7 +181,8 @@ func TestBatchAttestations_Single(t *testing.T) {
t.Fatal(err)
}
if !reflect.DeepEqual(wanted, s.pool.ForkchoiceAttestations()) {
got := s.pool.ForkchoiceAttestations()
if !reflect.DeepEqual(wanted, got) {
t.Error("Did not aggregate and save for batch")
}
}
@@ -296,8 +296,6 @@ func TestSeenAttestations_PresentInCache(t *testing.T) {
t.Error("Wanted false, got true")
}
time.Sleep(100 * time.Millisecond)
att2 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, Signature: []byte{'A'}, AggregationBits: bitfield.Bitlist{0x17} /* 0b00010111 */}
got, err = s.seen(att2)
if err != nil {
@@ -307,8 +305,6 @@ func TestSeenAttestations_PresentInCache(t *testing.T) {
t.Error("Wanted false, got true")
}
time.Sleep(100 * time.Millisecond)
att3 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, Signature: []byte{'A'}, AggregationBits: bitfield.Bitlist{0x17} /* 0b00010111 */}
got, err = s.seen(att3)
if err != nil {
@@ -382,6 +378,5 @@ func TestService_seen(t *testing.T) {
if got != tt.want {
t.Errorf("Test %d failed. Got=%v want=%v", i, got, tt.want)
}
time.Sleep(10) // Sleep briefly for cache to routine to buffer.
}
}

View File

@@ -0,0 +1,71 @@
package attestations
import (
"time"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
)
// Prune expired attestations from the pool every slot interval.
var pruneExpiredAttsPeriod = time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
// This prunes attestations pool by running pruneExpiredAtts
// at every pruneExpiredAttsPeriod.
func (s *Service) pruneAttsPool() {
ticker := time.NewTicker(pruneExpiredAttsPeriod)
for {
select {
case <-ticker.C:
s.pruneExpiredAtts()
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}
// This prunes expired attestations from the pool.
func (s *Service) pruneExpiredAtts() {
aggregatedAtts := s.pool.AggregatedAttestations()
for _, att := range aggregatedAtts {
if s.expired(att.Data.Slot) {
if err := s.pool.DeleteAggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not delete expired aggregated attestation")
}
expiredAggregatedAtts.Inc()
}
}
unAggregatedAtts := s.pool.UnaggregatedAttestations()
for _, att := range unAggregatedAtts {
if s.expired(att.Data.Slot) {
if err := s.pool.DeleteUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not delete expired unaggregated attestation")
}
expiredUnaggregatedAtts.Inc()
}
}
blockAtts := s.pool.BlockAttestations()
for _, att := range blockAtts {
if s.expired(att.Data.Slot) {
if err := s.pool.DeleteBlockAttestation(att); err != nil {
log.WithError(err).Error("Could not delete expired block attestation")
}
}
expiredBlockAtts.Inc()
}
}
// Return true if the input slot has been expired.
// Expired is defined as one epoch behind than current time.
func (s *Service) expired(slot uint64) bool {
expirationSlot := slot + params.BeaconConfig().SlotsPerEpoch
expirationTime := s.genesisTime + expirationSlot*params.BeaconConfig().SecondsPerSlot
currentTime := uint64(roughtime.Now().Unix())
if currentTime >= expirationTime {
return true
}
return false
}

View File

@@ -0,0 +1,62 @@
package attestations
import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
)
func TestPruneExpiredAtts_CanPrune(t *testing.T) {
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
if err != nil {
t.Fatal(err)
}
att1 := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 0}, AggregationBits: bitfield.Bitlist{0b1101}}
att2 := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 0}, AggregationBits: bitfield.Bitlist{0b1111}}
att3 := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}}
att4 := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1110}}
atts := []*ethpb.Attestation{att1, att2, att3, att4}
if err := s.pool.SaveAggregatedAttestations(atts); err != nil {
t.Fatal(err)
}
if err := s.pool.SaveBlockAttestations(atts); err != nil {
t.Fatal(err)
}
// Rewind back one epoch worth of time.
s.genesisTime = uint64(roughtime.Now().Unix()) - params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot
s.pruneExpiredAtts()
// All the attestations on slot 0 should be pruned.
for _, attestation := range s.pool.AggregatedAttestations() {
if attestation.Data.Slot == 0 {
t.Error("Should be pruned")
}
}
for _, attestation := range s.pool.BlockAttestations() {
if attestation.Data.Slot == 0 {
t.Error("Should be pruned")
}
}
}
func TestExpired_AttsCanExpire(t *testing.T) {
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
if err != nil {
t.Fatal(err)
}
// Rewind back one epoch worth of time.
s.genesisTime = uint64(roughtime.Now().Unix()) - params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot
if !s.expired(0) {
t.Error("Should expired")
}
if s.expired(1) {
t.Error("Should not expired")
}
}

View File

@@ -3,10 +3,10 @@ package attestations
import (
"context"
"github.com/dgraph-io/ristretto"
lru "github.com/hashicorp/golang-lru"
)
var forkChoiceProcessedRootsSize = int64(1 << 16)
var forkChoiceProcessedRootsSize = 1 << 16
// Service of attestation pool operations.
type Service struct {
@@ -14,7 +14,8 @@ type Service struct {
cancel context.CancelFunc
pool Pool
err error
forkChoiceProcessedRoots *ristretto.Cache
forkChoiceProcessedRoots *lru.Cache
genesisTime uint64
}
// Config options for the service.
@@ -25,11 +26,7 @@ type Config struct {
// NewService instantiates a new attestation pool service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
cache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: forkChoiceProcessedRootsSize,
MaxCost: forkChoiceProcessedRootsSize,
BufferItems: 64,
})
cache, err := lru.New(forkChoiceProcessedRootsSize)
if err != nil {
return nil, err
}
@@ -47,6 +44,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
func (s *Service) Start() {
go s.prepareForkChoiceAtts()
go s.aggregateRoutine()
go s.pruneAttsPool()
}
// Stop the beacon block attestation pool service's main event loop
@@ -63,3 +61,8 @@ func (s *Service) Status() error {
}
return nil
}
// SetGenesisTime sets genesis time for operation service to use.
func (s *Service) SetGenesisTime(t uint64) {
s.genesisTime = t
}

View File

@@ -4,17 +4,23 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"metrics.go",
"service.go",
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -27,9 +33,9 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
],

View File

@@ -0,0 +1,57 @@
package slashings
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
numPendingAttesterSlashingFailedSigVerify = promauto.NewCounter(
prometheus.CounterOpts{
Name: "pending_attester_slashing_fail_sig_verify_total",
Help: "Times an pending attester slashing fails sig verification",
},
)
numPendingAttesterSlashings = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "num_pending_attester_slashings",
Help: "Number of pending attester slashings in the pool",
},
)
numAttesterSlashingsIncluded = promauto.NewCounter(
prometheus.CounterOpts{
Name: "attester_slashings_included_total",
Help: "Number of attester slashings included in blocks",
},
)
attesterSlashingReattempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "attester_slashing_reattempts_total",
Help: "Times an attester slashing for an already slashed validator is received",
},
)
numPendingProposerSlashingFailedSigVerify = promauto.NewCounter(
prometheus.CounterOpts{
Name: "pending_proposer_slashing_fail_sig_verify_total",
Help: "Times an pending proposer slashing fails sig verification",
},
)
numPendingProposerSlashings = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "num_pending_proposer_slashings",
Help: "Number of pending proposer slashings in the pool",
},
)
numProposerSlashingsIncluded = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proposer_slashings_included_total",
Help: "Number of proposer slashings included in blocks",
},
)
proposerSlashingReattempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proposer_slashing_reattempts_total",
Help: "Times a proposer slashing for an already slashed validator is received",
},
)
)

View File

@@ -1,15 +1,18 @@
package slashings
import (
"errors"
"context"
"fmt"
"sort"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"go.opencensus.io/trace"
)
// NewPool returns an initialized attester slashing and proposer slashing pool.
@@ -23,9 +26,14 @@ func NewPool() *Pool {
// PendingAttesterSlashings returns attester slashings that are able to be included into a block.
// This method will not return more than the block enforced MaxAttesterSlashings.
func (p *Pool) PendingAttesterSlashings() []*ethpb.AttesterSlashing {
func (p *Pool) PendingAttesterSlashings(ctx context.Context) []*ethpb.AttesterSlashing {
p.lock.RLock()
defer p.lock.RUnlock()
ctx, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
defer span.End()
// Update prom metric.
numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))
included := make(map[uint64]bool)
pending := make([]*ethpb.AttesterSlashing, 0, params.BeaconConfig().MaxAttesterSlashings)
@@ -34,6 +42,7 @@ func (p *Pool) PendingAttesterSlashings() []*ethpb.AttesterSlashing {
break
}
if included[slashing.validatorToSlash] {
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
continue
}
attSlashing := slashing.attesterSlashing
@@ -41,6 +50,7 @@ func (p *Pool) PendingAttesterSlashings() []*ethpb.AttesterSlashing {
for _, idx := range slashedVal {
included[idx] = true
}
pending = append(pending, attSlashing)
}
@@ -49,9 +59,15 @@ func (p *Pool) PendingAttesterSlashings() []*ethpb.AttesterSlashing {
// PendingProposerSlashings returns proposer slashings that are able to be included into a block.
// This method will not return more than the block enforced MaxProposerSlashings.
func (p *Pool) PendingProposerSlashings() []*ethpb.ProposerSlashing {
func (p *Pool) PendingProposerSlashings(ctx context.Context) []*ethpb.ProposerSlashing {
p.lock.RLock()
defer p.lock.RUnlock()
ctx, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
defer span.End()
// Update prom metric.
numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))
pending := make([]*ethpb.ProposerSlashing, 0, params.BeaconConfig().MaxProposerSlashings)
for i, slashing := range p.pendingProposerSlashing {
if i >= int(params.BeaconConfig().MaxProposerSlashings) {
@@ -64,9 +80,20 @@ func (p *Pool) PendingProposerSlashings() []*ethpb.ProposerSlashing {
// InsertAttesterSlashing into the pool. This method is a no-op if the attester slashing already exists in the pool,
// has been included into a block recently, or the validator is already exited.
func (p *Pool) InsertAttesterSlashing(state *beaconstate.BeaconState, slashing *ethpb.AttesterSlashing) error {
func (p *Pool) InsertAttesterSlashing(
ctx context.Context,
state *beaconstate.BeaconState,
slashing *ethpb.AttesterSlashing,
) error {
p.lock.Lock()
defer p.lock.Unlock()
ctx, span := trace.StartSpan(ctx, "operations.InsertAttesterSlashing")
defer span.End()
if err := blocks.VerifyAttesterSlashing(ctx, state, slashing); err != nil {
numPendingAttesterSlashingFailedSigVerify.Inc()
return errors.Wrap(err, "could not verify attester slashing")
}
slashedVal := sliceutil.IntersectionUint64(slashing.Attestation_1.AttestingIndices, slashing.Attestation_2.AttestingIndices)
for _, val := range slashedVal {
@@ -79,6 +106,7 @@ func (p *Pool) InsertAttesterSlashing(state *beaconstate.BeaconState, slashing *
// has been recently included in the pool of slashings, do not process this new
// slashing.
if !ok {
attesterSlashingReattempts.Inc()
return fmt.Errorf("validator at index %d cannot be slashed", val)
}
@@ -107,9 +135,21 @@ func (p *Pool) InsertAttesterSlashing(state *beaconstate.BeaconState, slashing *
// InsertProposerSlashing into the pool. This method is a no-op if the pending slashing already exists,
// has been included recently, the validator is already exited, or the validator was already slashed.
func (p *Pool) InsertProposerSlashing(state *beaconstate.BeaconState, slashing *ethpb.ProposerSlashing) error {
func (p *Pool) InsertProposerSlashing(
ctx context.Context,
state *beaconstate.BeaconState,
slashing *ethpb.ProposerSlashing,
) error {
p.lock.Lock()
defer p.lock.Unlock()
ctx, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
defer span.End()
if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {
numPendingAttesterSlashingFailedSigVerify.Inc()
return errors.Wrap(err, "could not verify proposer slashing")
}
idx := slashing.ProposerIndex
ok, err := p.validatorSlashingPreconditionCheck(state, idx)
if err != nil {
@@ -119,6 +159,7 @@ func (p *Pool) InsertProposerSlashing(state *beaconstate.BeaconState, slashing *
// has been recently included in the pool of slashings, do not process this new
// slashing.
if !ok {
proposerSlashingReattempts.Inc()
return fmt.Errorf("validator at index %d cannot be slashed", idx)
}
@@ -154,6 +195,7 @@ func (p *Pool) MarkIncludedAttesterSlashing(as *ethpb.AttesterSlashing) {
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
}
p.included[val] = true
numAttesterSlashingsIncluded.Inc()
}
}
@@ -170,6 +212,7 @@ func (p *Pool) MarkIncludedProposerSlashing(ps *ethpb.ProposerSlashing) {
p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
}
p.included[ps.ProposerIndex] = true
numProposerSlashingsIncluded.Inc()
}
// this function checks a few items about a validator before proceeding with inserting

View File

@@ -1,15 +1,16 @@
package slashings
import (
"context"
"reflect"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func attesterSlashingForValIdx(valIdx ...uint64) *ethpb.AttesterSlashing {
@@ -30,25 +31,6 @@ func pendingSlashingForValIdx(valIdx ...uint64) *PendingAttesterSlashing {
}
}
func generateNPendingSlashings(n uint64) []*PendingAttesterSlashing {
pendingAttSlashings := make([]*PendingAttesterSlashing, n)
for i := uint64(0); i < n; i++ {
pendingAttSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: attesterSlashingForValIdx(i),
validatorToSlash: i,
}
}
return pendingAttSlashings
}
func generateNAttSlashings(n uint64) []*ethpb.AttesterSlashing {
attSlashings := make([]*ethpb.AttesterSlashing, n)
for i := uint64(0); i < n; i++ {
attSlashings[i] = attesterSlashingForValIdx(i)
}
return attSlashings
}
func TestPool_InsertAttesterSlashing(t *testing.T) {
type fields struct {
pending []*PendingAttesterSlashing
@@ -57,8 +39,44 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
err string
}
type args struct {
slashing *ethpb.AttesterSlashing
slashings []*ethpb.AttesterSlashing
}
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
pendingSlashings := make([]*PendingAttesterSlashing, 20)
slashings := make([]*ethpb.AttesterSlashing, 20)
for i := 0; i < len(pendingSlashings); i++ {
sl, err := testutil.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
pendingSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: sl,
validatorToSlash: uint64(i),
}
slashings[i] = sl
}
if err := beaconState.SetSlot(helpers.StartSlot(1)); err != nil {
t.Fatal(err)
}
// We mark the following validators with some preconditions.
exitedVal, _ := beaconState.ValidatorAtIndex(uint64(2))
exitedVal.ExitEpoch = 0
futureExitedVal, _ := beaconState.ValidatorAtIndex(uint64(4))
futureExitedVal.ExitEpoch = 17
slashedVal, _ := beaconState.ValidatorAtIndex(uint64(5))
slashedVal.Slashed = true
if err := beaconState.UpdateValidatorAtIndex(uint64(2), exitedVal); err != nil {
t.Fatal(err)
}
if err := beaconState.UpdateValidatorAtIndex(uint64(4), futureExitedVal); err != nil {
t.Fatal(err)
}
if err := beaconState.UpdateValidatorAtIndex(uint64(5), slashedVal); err != nil {
t.Fatal(err)
}
tests := []struct {
name string
fields fields
@@ -73,12 +91,12 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(1),
slashings: slashings[0:1],
},
want: []*PendingAttesterSlashing{
{
attesterSlashing: attesterSlashingForValIdx(1),
validatorToSlash: 1,
attesterSlashing: slashings[0],
validatorToSlash: 0,
},
},
},
@@ -89,97 +107,33 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
},
},
want: []*PendingAttesterSlashing{
{
attesterSlashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
},
validatorToSlash: 0,
},
{
attesterSlashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
},
},
validatorToSlash: 1,
},
},
},
{
name: "Empty list two validators slashed out of three",
fields: fields{
pending: make([]*PendingAttesterSlashing, 0),
included: make(map[uint64]bool),
},
args: args{
slashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 2, 3},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 3},
},
},
},
want: []*PendingAttesterSlashing{
{
attesterSlashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 2, 3},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 3},
},
},
validatorToSlash: 1,
},
{
attesterSlashing: &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 2, 3},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 3},
},
},
validatorToSlash: 3,
},
slashings: slashings[0:2],
},
want: pendingSlashings[0:2],
},
{
name: "Duplicate identical slashing",
fields: fields{
pending: []*PendingAttesterSlashing{
pendingSlashingForValIdx(1),
pendingSlashings[1],
},
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: []*PendingAttesterSlashing{
pendingSlashingForValIdx(1),
want: pendingSlashings[1:2],
},
{
name: "Slashing for already slashed validator",
fields: fields{
pending: []*PendingAttesterSlashing{},
included: make(map[uint64]bool),
},
args: args{
slashings: slashings[5:6],
},
want: []*PendingAttesterSlashing{},
},
{
name: "Slashing for exited validator",
@@ -188,7 +142,7 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(2),
slashings: slashings[2:3],
},
want: []*PendingAttesterSlashing{},
},
@@ -199,24 +153,9 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(4),
slashings: slashings[4:5],
},
want: []*PendingAttesterSlashing{
pendingSlashingForValIdx(4),
},
},
{
name: "Slashing for slashed validator",
fields: fields{
pending: []*PendingAttesterSlashing{},
included: make(map[uint64]bool),
wantErr: true,
err: "cannot be slashed",
},
args: args{
slashing: attesterSlashingForValIdx(5),
},
want: []*PendingAttesterSlashing{},
want: pendingSlashings[4:5],
},
{
name: "Already included",
@@ -227,7 +166,7 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
},
},
args: args{
slashing: attesterSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: []*PendingAttesterSlashing{},
},
@@ -235,36 +174,15 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
name: "Maintains sorted order",
fields: fields{
pending: []*PendingAttesterSlashing{
pendingSlashingForValIdx(0),
pendingSlashingForValIdx(2),
pendingSlashings[0],
pendingSlashings[2],
},
included: make(map[uint64]bool),
},
args: args{
slashing: attesterSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: generateNPendingSlashings(3),
},
}
validators := []*ethpb.Validator{
{ // 0
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 1
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 2 - Already exited.
ExitEpoch: 15,
},
{ // 3
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 4 - Will be exited.
ExitEpoch: 17,
},
{ // 5 - Slashed.
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
want: pendingSlashings[0:3],
},
}
for _, tt := range tests {
@@ -273,19 +191,19 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
pendingAttesterSlashing: tt.fields.pending,
included: tt.fields.included,
}
s, err := beaconstate.InitializeFromProtoUnsafe(&p2ppb.BeaconState{
Slot: 16 * params.BeaconConfig().SlotsPerEpoch,
Validators: validators,
})
if err != nil {
t.Fatal(err)
var err error
for i := 0; i < len(tt.args.slashings); i++ {
err = p.InsertAttesterSlashing(context.Background(), beaconState, tt.args.slashings[i])
}
err = p.InsertAttesterSlashing(s, tt.args.slashing)
if err != nil && tt.fields.wantErr && !strings.Contains(err.Error(), tt.fields.err) {
t.Fatalf("Wanted err: %v, received %v", tt.fields.err, err)
}
if len(p.pendingAttesterSlashing) != len(tt.want) {
t.Fatalf("Mismatched lengths of pending list. Got %d, wanted %d.", len(p.pendingAttesterSlashing), len(tt.want))
t.Fatalf(
"Mismatched lengths of pending list. Got %d, wanted %d.",
len(p.pendingAttesterSlashing),
len(tt.want),
)
}
for i := range p.pendingAttesterSlashing {
if p.pendingAttesterSlashing[i].validatorToSlash != tt.want[i].validatorToSlash {
@@ -309,6 +227,52 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
}
}
func TestPool_InsertAttesterSlashing_SigFailsVerify_ClearPool(t *testing.T) {
conf := params.BeaconConfig()
conf.MaxAttesterSlashings = 2
params.OverrideBeaconConfig(conf)
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
pendingSlashings := make([]*PendingAttesterSlashing, 2)
slashings := make([]*ethpb.AttesterSlashing, 2)
for i := 0; i < 2; i++ {
sl, err := testutil.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
pendingSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: sl,
validatorToSlash: uint64(i),
}
slashings[i] = sl
}
// We mess up the signature of the second slashing.
badSig := make([]byte, 96)
copy(badSig, "muahaha")
pendingSlashings[1].attesterSlashing.Attestation_1.Signature = badSig
slashings[1].Attestation_1.Signature = badSig
p := &Pool{
pendingAttesterSlashing: make([]*PendingAttesterSlashing, 0),
}
if err := p.InsertAttesterSlashing(
context.Background(),
beaconState,
slashings[0],
); err != nil {
t.Fatal(err)
}
if err := p.InsertAttesterSlashing(
context.Background(),
beaconState,
slashings[1],
); err == nil {
t.Error("Expected error when inserting slashing with bad sig, got nil")
}
// We expect to only have 1 pending attester slashing in the pool.
if len(p.pendingAttesterSlashing) != 1 {
t.Error("Expected failed attester slashing to have been cleared from pool")
}
}
func TestPool_MarkIncludedAttesterSlashing(t *testing.T) {
type fields struct {
pending []*PendingAttesterSlashing
@@ -450,6 +414,23 @@ func TestPool_PendingAttesterSlashings(t *testing.T) {
type fields struct {
pending []*PendingAttesterSlashing
}
conf := params.BeaconConfig()
conf.MaxAttesterSlashings = 1
params.OverrideBeaconConfig(conf)
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
pendingSlashings := make([]*PendingAttesterSlashing, 20)
slashings := make([]*ethpb.AttesterSlashing, 20)
for i := 0; i < len(pendingSlashings); i++ {
sl, err := testutil.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
pendingSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: sl,
validatorToSlash: uint64(i),
}
slashings[i] = sl
}
tests := []struct {
name string
fields fields
@@ -465,34 +446,16 @@ func TestPool_PendingAttesterSlashings(t *testing.T) {
{
name: "All eligible",
fields: fields{
pending: generateNPendingSlashings(1),
pending: pendingSlashings,
},
want: generateNAttSlashings(1),
want: slashings[0:1],
},
{
name: "Multiple indices",
fields: fields{
pending: []*PendingAttesterSlashing{
pendingSlashingForValIdx(1, 5, 8),
},
pending: pendingSlashings[3:6],
},
want: []*ethpb.AttesterSlashing{
attesterSlashingForValIdx(1, 5, 8),
},
},
{
name: "All eligible, over max",
fields: fields{
pending: generateNPendingSlashings(6),
},
want: generateNAttSlashings(1),
},
{
name: "No duplicate slashings for grouped",
fields: fields{
pending: generateNPendingSlashings(16),
},
want: generateNAttSlashings(1),
want: slashings[3:4],
},
}
for _, tt := range tests {
@@ -500,70 +463,43 @@ func TestPool_PendingAttesterSlashings(t *testing.T) {
p := &Pool{
pendingAttesterSlashing: tt.fields.pending,
}
if got := p.PendingAttesterSlashings(); !reflect.DeepEqual(tt.want, got) {
if got := p.PendingAttesterSlashings(
context.Background(),
); !reflect.DeepEqual(tt.want, got) {
t.Errorf("Unexpected return from PendingAttesterSlashings, wanted %v, received %v", tt.want, got)
}
})
}
}
func TestPool_PendingAttesterSlashings_2Max(t *testing.T) {
func TestPool_PendingAttesterSlashings_NoDuplicates(t *testing.T) {
conf := params.BeaconConfig()
conf.MaxAttesterSlashings = 2
params.OverrideBeaconConfig(conf)
type fields struct {
pending []*PendingAttesterSlashing
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
pendingSlashings := make([]*PendingAttesterSlashing, 3)
slashings := make([]*ethpb.AttesterSlashing, 3)
for i := 0; i < 2; i++ {
sl, err := testutil.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
pendingSlashings[i] = &PendingAttesterSlashing{
attesterSlashing: sl,
validatorToSlash: uint64(i),
}
slashings[i] = sl
}
tests := []struct {
name string
fields fields
want []*ethpb.AttesterSlashing
}{
{
name: "No duplicates with grouped att slashings",
fields: fields{
pending: []*PendingAttesterSlashing{
{
attesterSlashing: attesterSlashingForValIdx(4, 12, 40),
validatorToSlash: 4,
},
{
attesterSlashing: attesterSlashingForValIdx(6, 8, 24),
validatorToSlash: 6,
},
{
attesterSlashing: attesterSlashingForValIdx(6, 8, 24),
validatorToSlash: 8,
},
{
attesterSlashing: attesterSlashingForValIdx(4, 12, 40),
validatorToSlash: 12,
},
{
attesterSlashing: attesterSlashingForValIdx(6, 8, 24),
validatorToSlash: 24,
},
{
attesterSlashing: attesterSlashingForValIdx(4, 12, 40),
validatorToSlash: 40,
},
},
},
want: []*ethpb.AttesterSlashing{
attesterSlashingForValIdx(4, 12, 40),
attesterSlashingForValIdx(6, 8, 24),
},
},
// We duplicate the last slashing.
pendingSlashings[2] = pendingSlashings[1]
slashings[2] = slashings[1]
p := &Pool{
pendingAttesterSlashing: pendingSlashings,
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &Pool{
pendingAttesterSlashing: tt.fields.pending,
}
if got := p.PendingAttesterSlashings(); !reflect.DeepEqual(tt.want, got) {
t.Errorf("Unexpected return from PendingAttesterSlashings, wanted %v, received %v", tt.want, got)
}
})
want := slashings[0:2]
if got := p.PendingAttesterSlashings(
context.Background(),
); !reflect.DeepEqual(want, got) {
t.Errorf("Unexpected return from PendingAttesterSlashings, wanted %v, received %v", want, got)
}
}

View File

@@ -1,15 +1,16 @@
package slashings
import (
"context"
"reflect"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func proposerSlashingForValIdx(valIdx uint64) *ethpb.ProposerSlashing {
@@ -18,14 +19,6 @@ func proposerSlashingForValIdx(valIdx uint64) *ethpb.ProposerSlashing {
}
}
func generateNProposerSlashings(n uint64) []*ethpb.ProposerSlashing {
proposerSlashings := make([]*ethpb.ProposerSlashing, n)
for i := uint64(0); i < n; i++ {
proposerSlashings[i] = proposerSlashingForValIdx(i)
}
return proposerSlashings
}
func TestPool_InsertProposerSlashing(t *testing.T) {
type fields struct {
wantErr bool
@@ -34,8 +27,40 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
included map[uint64]bool
}
type args struct {
slashing *ethpb.ProposerSlashing
slashings []*ethpb.ProposerSlashing
}
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
slashings := make([]*ethpb.ProposerSlashing, 20)
for i := 0; i < len(slashings); i++ {
sl, err := testutil.GenerateProposerSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
slashings[i] = sl
}
if err := beaconState.SetSlot(helpers.StartSlot(1)); err != nil {
t.Fatal(err)
}
// We mark the following validators with some preconditions.
exitedVal, _ := beaconState.ValidatorAtIndex(uint64(2))
exitedVal.ExitEpoch = 0
futureExitedVal, _ := beaconState.ValidatorAtIndex(uint64(4))
futureExitedVal.ExitEpoch = 17
slashedVal, _ := beaconState.ValidatorAtIndex(uint64(5))
slashedVal.Slashed = true
if err := beaconState.UpdateValidatorAtIndex(uint64(2), exitedVal); err != nil {
t.Fatal(err)
}
if err := beaconState.UpdateValidatorAtIndex(uint64(4), futureExitedVal); err != nil {
t.Fatal(err)
}
if err := beaconState.UpdateValidatorAtIndex(uint64(5), slashedVal); err != nil {
t.Fatal(err)
}
tests := []struct {
name string
fields fields
@@ -49,22 +74,22 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: proposerSlashingForValIdx(0),
slashings: slashings[0:1],
},
want: generateNProposerSlashings(1),
want: slashings[0:1],
},
{
name: "Duplicate identical slashing",
fields: fields{
pending: generateNProposerSlashings(1),
pending: slashings[0:1],
included: make(map[uint64]bool),
wantErr: true,
err: "slashing object already exists in pending proposer slashings",
},
args: args{
slashing: proposerSlashingForValIdx(0),
slashings: slashings[0:1],
},
want: generateNProposerSlashings(1),
want: slashings[0:1],
},
{
name: "Slashing for exited validator",
@@ -75,7 +100,7 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
err: "cannot be slashed",
},
args: args{
slashing: proposerSlashingForValIdx(2),
slashings: slashings[2:3],
},
want: []*ethpb.ProposerSlashing{},
},
@@ -86,11 +111,9 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
included: make(map[uint64]bool),
},
args: args{
slashing: proposerSlashingForValIdx(4),
},
want: []*ethpb.ProposerSlashing{
proposerSlashingForValIdx(4),
slashings: slashings[4:5],
},
want: slashings[4:5],
},
{
name: "Slashing for slashed validator",
@@ -98,10 +121,10 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
pending: []*ethpb.ProposerSlashing{},
included: make(map[uint64]bool),
wantErr: true,
err: "cannot be slashed",
err: "not slashable",
},
args: args{
slashing: proposerSlashingForValIdx(5),
slashings: slashings[5:6],
},
want: []*ethpb.ProposerSlashing{},
},
@@ -116,7 +139,7 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
err: "cannot be slashed",
},
args: args{
slashing: proposerSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: []*ethpb.ProposerSlashing{},
},
@@ -124,56 +147,31 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
name: "Maintains sorted order",
fields: fields{
pending: []*ethpb.ProposerSlashing{
proposerSlashingForValIdx(0),
proposerSlashingForValIdx(4),
slashings[0],
slashings[2],
},
included: make(map[uint64]bool),
},
args: args{
slashing: proposerSlashingForValIdx(1),
slashings: slashings[1:2],
},
want: []*ethpb.ProposerSlashing{
proposerSlashingForValIdx(0),
proposerSlashingForValIdx(1),
proposerSlashingForValIdx(4),
slashings[0],
slashings[1],
slashings[2],
},
},
}
validators := []*ethpb.Validator{
{ // 0
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 1
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 2 - Already exited.
ExitEpoch: 15,
},
{ // 3
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{ // 4 - Will be exited.
ExitEpoch: 17,
},
{ // 5 - Slashed.
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &Pool{
pendingProposerSlashing: tt.fields.pending,
included: tt.fields.included,
}
beaconState, err := beaconstate.InitializeFromProtoUnsafe(&p2ppb.BeaconState{
Slot: 16 * params.BeaconConfig().SlotsPerEpoch,
Validators: validators,
})
if err != nil {
t.Fatal(err)
var err error
for i := 0; i < len(tt.args.slashings); i++ {
err = p.InsertProposerSlashing(context.Background(), beaconState, tt.args.slashings[i])
}
err = p.InsertProposerSlashing(beaconState, tt.args.slashing)
if err != nil && tt.fields.wantErr && !strings.Contains(err.Error(), tt.fields.err) {
t.Fatalf("Wanted err: %v, received %v", tt.fields.err, err)
}
@@ -200,6 +198,47 @@ func TestPool_InsertProposerSlashing(t *testing.T) {
}
}
func TestPool_InsertProposerSlashing_SigFailsVerify_ClearPool(t *testing.T) {
conf := params.BeaconConfig()
conf.MaxAttesterSlashings = 2
params.OverrideBeaconConfig(conf)
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
slashings := make([]*ethpb.ProposerSlashing, 2)
for i := 0; i < 2; i++ {
sl, err := testutil.GenerateProposerSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
slashings[i] = sl
}
// We mess up the signature of the second slashing.
badSig := make([]byte, 96)
copy(badSig, "muahaha")
slashings[1].Header_1.Signature = badSig
p := &Pool{
pendingProposerSlashing: make([]*ethpb.ProposerSlashing, 0),
}
// We only want a single slashing to remain.
if err := p.InsertProposerSlashing(
context.Background(),
beaconState,
slashings[0],
); err != nil {
t.Fatal(err)
}
if err := p.InsertProposerSlashing(
context.Background(),
beaconState,
slashings[1],
); err == nil {
t.Error("Expected slashing with bad signature to fail, received nil")
}
// We expect to only have 1 pending proposer slashing in the pool.
if len(p.pendingProposerSlashing) != 1 {
t.Error("Expected failed proposer slashing to have been cleared from pool")
}
}
func TestPool_MarkIncludedProposerSlashing(t *testing.T) {
type fields struct {
pending []*ethpb.ProposerSlashing
@@ -336,8 +375,14 @@ func TestPool_PendingProposerSlashings(t *testing.T) {
type fields struct {
pending []*ethpb.ProposerSlashing
}
type args struct {
validatorToSlash uint64
beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
slashings := make([]*ethpb.ProposerSlashing, 20)
for i := 0; i < len(slashings); i++ {
sl, err := testutil.GenerateProposerSlashingForValidator(beaconState, privKeys[i], uint64(i))
if err != nil {
t.Fatal(err)
}
slashings[i] = sl
}
tests := []struct {
name string
@@ -354,16 +399,16 @@ func TestPool_PendingProposerSlashings(t *testing.T) {
{
name: "All eligible",
fields: fields{
pending: generateNProposerSlashings(6),
pending: slashings[:params.BeaconConfig().MaxProposerSlashings],
},
want: generateNProposerSlashings(6),
want: slashings[:params.BeaconConfig().MaxProposerSlashings],
},
{
name: "All eligible, more than max",
name: "Multiple indices",
fields: fields{
pending: generateNProposerSlashings(24),
pending: slashings[3:6],
},
want: generateNProposerSlashings(16),
want: slashings[3:6],
},
}
for _, tt := range tests {
@@ -371,8 +416,10 @@ func TestPool_PendingProposerSlashings(t *testing.T) {
p := &Pool{
pendingProposerSlashing: tt.fields.pending,
}
if got := p.PendingProposerSlashings(); !reflect.DeepEqual(got, tt.want) {
t.Errorf("PendingProposerSlashings() = %v, want %v", got, tt.want)
if got := p.PendingProposerSlashings(
context.Background(),
); !reflect.DeepEqual(tt.want, got) {
t.Errorf("Unexpected return from PendingProposerSlashings, wanted %v, received %v", tt.want, got)
}
})
}

View File

@@ -132,7 +132,7 @@ func TestMultiAddrConversion_OK(t *testing.T) {
}
func TestStaticPeering_PeersAreAdded(t *testing.T) {
cfg := &Config{Encoding: "ssz"}
cfg := &Config{Encoding: "ssz", MaxPeers: 30}
port := 3000
var staticPeers []string
var hosts []host.Host

View File

@@ -26,11 +26,13 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/shared"
"github.com/prysmaticlabs/prysm/shared/runutil"
"github.com/sirupsen/logrus"
)
var _ = shared.Service(&Service{})
var pollingPeriod = 1 * time.Second
// Check local table every 5 seconds for newly added peers.
var pollingPeriod = 5 * time.Second
const prysmProtocolPrefix = "/prysm/0.0.0"
@@ -158,7 +160,7 @@ func (s *Service) Start() {
s.startupErr = err
return
}
err = s.addBootNodesToExclusionList()
err = s.connectToBootnodes()
if err != nil {
log.WithError(err).Error("Could not add bootnode to the exclusion list")
s.startupErr = err
@@ -293,12 +295,8 @@ func (s *Service) Peers() *peers.Status {
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
bootNode, err := enode.Parse(enode.ValidSchemes, s.cfg.Discv5BootStrapAddr[0])
if err != nil {
log.Fatal(err)
}
runutil.RunEvery(s.ctx, pollingPeriod, func() {
nodes := s.dv5Listener.Lookup(bootNode.ID())
nodes := s.dv5Listener.LookupRandom()
multiAddresses := convertToMultiAddr(nodes)
s.connectWithAllPeers(multiAddresses)
})
@@ -313,6 +311,11 @@ func (s *Service) connectWithAllPeers(multiAddrs []ma.Multiaddr) {
for _, info := range addrInfos {
// make each dial non-blocking
go func(info peer.AddrInfo) {
if len(s.Peers().Active()) >= int(s.cfg.MaxPeers) {
log.WithFields(logrus.Fields{"peer": info.ID.String(),
"reason": "at peer limit"}).Trace("Not dialing peer")
return
}
if info.ID == s.host.ID() {
return
}
@@ -327,24 +330,17 @@ func (s *Service) connectWithAllPeers(multiAddrs []ma.Multiaddr) {
}
}
func (s *Service) addBootNodesToExclusionList() error {
func (s *Service) connectToBootnodes() error {
nodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddr))
for _, addr := range s.cfg.Discv5BootStrapAddr {
bootNode, err := enode.Parse(enode.ValidSchemes, addr)
if err != nil {
return err
}
multAddr, err := convertToSingleMultiAddr(bootNode)
if err != nil {
return err
}
addrInfo, err := peer.AddrInfoFromP2pAddr(multAddr)
if err != nil {
return err
}
// bootnode is never dialled, so ttl is tentatively 1 year
s.exclusionList.Set(addrInfo.ID.String(), true, 1)
nodes = append(nodes, bootNode)
}
multiAddresses := convertToMultiAddr(nodes)
s.connectWithAllPeers(multiAddresses)
return nil
}

View File

@@ -130,12 +130,20 @@ func TestListenForNewNodes(t *testing.T) {
bootListener := createListener(ipAddr, pkey, cfg)
defer bootListener.Close()
// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
defer func() {
pollingPeriod = currentPeriod
}()
bootNode := bootListener.Self()
cfg = &Config{
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
Encoding: "ssz",
MaxPeers: 30,
}
var listeners []*discover.UDPv5
var hosts []host.Host

View File

@@ -30,6 +30,7 @@ go_library(
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind:go_default_library",

View File

@@ -31,6 +31,7 @@ import (
protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/trieutil"
"github.com/sirupsen/logrus"
)
@@ -523,7 +524,7 @@ func (s *Service) handleDelayTicker() {
// (analyzed the time of the block from 2018-09-01 to 2019-02-13)
fiveMinutesTimeout := time.Now().Add(-5 * time.Minute)
// check that web3 client is syncing
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) {
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) && roughtime.Now().Second()%15 == 0 {
log.Warn("eth1 client is not syncing")
}
if !s.chainStartData.Chainstarted {

View File

@@ -22,6 +22,7 @@ go_library(
"//beacon-chain/rpc/beacon:go_default_library",
"//beacon-chain/rpc/node:go_default_library",
"//beacon-chain/rpc/validator:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/beacon/rpc/v1:go_default_library",

View File

@@ -31,12 +31,15 @@ go_library(
"//beacon-chain/flags:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/pagination:go_default_library",
"//shared/params:go_default_library",
@@ -81,10 +84,13 @@ go_test(
"//beacon-chain/flags:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",

View File

@@ -4,6 +4,7 @@ import (
"context"
"sort"
"strconv"
"time"
ptypes "github.com/gogo/protobuf/types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -16,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/pagination"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -249,6 +251,7 @@ func (bs *Server) StreamIndexedAttestations(
attestationsChannel := make(chan *feed.Event, 1)
attSub := bs.AttestationNotifier.OperationFeed().Subscribe(attestationsChannel)
defer attSub.Unsubscribe()
go bs.collectReceivedAttestations(stream.Context())
for {
select {
case event := <-attestationsChannel:
@@ -262,21 +265,40 @@ func (bs *Server) StreamIndexedAttestations(
// One nil attestation shouldn't stop the stream.
continue
}
epoch := helpers.SlotToEpoch(bs.HeadFetcher.HeadSlot())
committeesBySlot, _, err := bs.retrieveCommitteesForEpoch(stream.Context(), epoch)
if err != nil {
return status.Errorf(
codes.Internal,
"Could not retrieve committees for epoch %d: %v",
epoch,
err,
)
}
// We use the retrieved committees for the epoch to convert all attestations
// into indexed form effectively.
startSlot := helpers.StartSlot(epoch)
endSlot := startSlot + params.BeaconConfig().SlotsPerEpoch
att := data.Attestation
bs.ReceivedAttestationsBuffer <- data.Attestation
}
case atts := <-bs.CollectedAttestationsBuffer:
// We aggregate the received attestations.
aggAtts, err := helpers.AggregateAttestations(atts)
if err != nil {
return status.Errorf(
codes.Internal,
"Could not aggregate attestations: %v",
err,
)
}
if len(aggAtts) == 0 {
continue
}
// All attestations we receive have the same target epoch given they
// have the same data root, so we just use the target epoch from
// the first one to determine committees for converting into indexed
// form.
epoch := aggAtts[0].Data.Target.Epoch
committeesBySlot, _, err := bs.retrieveCommitteesForEpoch(stream.Context(), epoch)
if err != nil {
return status.Errorf(
codes.Internal,
"Could not retrieve committees for epoch %d: %v",
epoch,
err,
)
}
// We use the retrieved committees for the epoch to convert all attestations
// into indexed form effectively.
startSlot := helpers.StartSlot(epoch)
endSlot := startSlot + params.BeaconConfig().SlotsPerEpoch
for _, att := range aggAtts {
// Out of range check, the attestation slot cannot be greater
// the last slot of the requested epoch or smaller than its start slot
// given committees are accessed as a map of slot -> commitees list, where there are
@@ -284,7 +306,6 @@ func (bs *Server) StreamIndexedAttestations(
if att.Data.Slot < startSlot || att.Data.Slot > endSlot {
continue
}
committeesForSlot, ok := committeesBySlot[att.Data.Slot]
if !ok || committeesForSlot.Committees == nil {
continue
@@ -311,6 +332,36 @@ func (bs *Server) StreamIndexedAttestations(
}
}
// TODO(#5031): Instead of doing aggregation here, leverage the aggregation
// already being done by the attestation pool in the operations service.
func (bs *Server) collectReceivedAttestations(ctx context.Context) {
attsByRoot := make(map[[32]byte][]*ethpb.Attestation)
halfASlot := time.Duration(params.BeaconConfig().SecondsPerSlot / 2)
ticker := time.NewTicker(time.Second * halfASlot)
for {
select {
case <-ticker.C:
for root, atts := range attsByRoot {
if len(atts) > 0 {
bs.CollectedAttestationsBuffer <- atts
attsByRoot[root] = make([]*ethpb.Attestation, 0)
}
}
case att := <-bs.ReceivedAttestationsBuffer:
attDataRoot, err := ssz.HashTreeRoot(att.Data)
if err != nil {
logrus.Errorf("Could not hash tree root data: %v", err)
continue
}
attsByRoot[attDataRoot] = append(attsByRoot[attDataRoot], att)
case <-ctx.Done():
return
case <-bs.Ctx.Done():
return
}
}
}
// AttestationPool retrieves pending attestations.
//
// The server returns a list of attestations that have been seen but not

View File

@@ -26,6 +26,7 @@ import (
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
@@ -883,7 +884,7 @@ func TestServer_StreamIndexedAttestations_ContextCanceled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mockRPC.NewMockBeaconChain_StreamIndexedAttestationsServer(ctrl)
mockStream.EXPECT().Context().Return(ctx)
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
go func(tt *testing.T) {
if err := server.StreamIndexedAttestations(
&ptypes.Empty{},
@@ -897,7 +898,7 @@ func TestServer_StreamIndexedAttestations_ContextCanceled(t *testing.T) {
exitRoutine <- true
}
func TestServer_StreamIndexedAttestations_OnSlotTick(t *testing.T) {
func TestServer_StreamIndexedAttestations_OK(t *testing.T) {
db := dbTest.SetupDB(t)
defer dbTest.TeardownDB(t, db)
exitRoutine := make(chan bool)
@@ -937,7 +938,7 @@ func TestServer_StreamIndexedAttestations_OnSlotTick(t *testing.T) {
for j := 0; j < numValidators; j++ {
attExample := &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: []byte("root"),
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Slot: i,
Target: &ethpb.Checkpoint{
Epoch: 0,
@@ -977,10 +978,16 @@ func TestServer_StreamIndexedAttestations_OnSlotTick(t *testing.T) {
atts = append(atts, attExample)
}
}
aggAtts, err := helpers.AggregateAttestations(atts)
if err != nil {
t.Fatal(err)
}
// Next up we convert the test attestations to indexed form.
indexedAtts := make([]*ethpb.IndexedAttestation, len(atts), len(atts))
indexedAtts := make([]*ethpb.IndexedAttestation, len(aggAtts), len(aggAtts))
for i := 0; i < len(indexedAtts); i++ {
att := atts[i]
att := aggAtts[i]
committee := committees[att.Data.Slot].Committees[att.Data.CommitteeIndex]
idxAtt, err := attestationutil.ConvertToIndexed(ctx, att, committee.ValidatorIndices)
if err != nil {
@@ -999,7 +1006,8 @@ func TestServer_StreamIndexedAttestations_OnSlotTick(t *testing.T) {
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now(),
},
AttestationNotifier: chainService.OperationNotifier(),
AttestationNotifier: chainService.OperationNotifier(),
CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 1),
}
mockStream := mockRPC.NewMockBeaconChain_StreamIndexedAttestationsServer(ctrl)
@@ -1020,15 +1028,7 @@ func TestServer_StreamIndexedAttestations_OnSlotTick(t *testing.T) {
}
}(t)
for i := 0; i < len(atts); i++ {
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = server.AttestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{Attestation: atts[i]},
})
}
}
server.CollectedAttestationsBuffer <- atts
<-exitRoutine
}

View File

@@ -22,7 +22,7 @@ func (bs *Server) ListBeaconCommittees(
var requestingGenesis bool
var startSlot uint64
headSlot := bs.HeadFetcher.HeadSlot()
headSlot := bs.GenesisTimeFetcher.CurrentSlot()
switch q := req.QueryFilter.(type) {
case *ethpb.ListCommitteesRequest_Epoch:
startSlot = helpers.StartSlot(q.Epoch)
@@ -58,8 +58,8 @@ func (bs *Server) retrieveCommitteesForEpoch(
var activeIndices []uint64
var err error
startSlot := helpers.StartSlot(epoch)
headEpoch := helpers.SlotToEpoch(bs.HeadFetcher.HeadSlot())
if helpers.SlotToEpoch(startSlot)+1 < headEpoch {
currentEpoch := helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
if helpers.SlotToEpoch(startSlot)+1 < currentEpoch {
activeIndices, err = bs.HeadFetcher.HeadValidatorsIndices(helpers.SlotToEpoch(startSlot))
if err != nil {
return nil, nil, status.Errorf(
@@ -86,7 +86,7 @@ func (bs *Server) retrieveCommitteesForEpoch(
)
}
attesterSeed = bytesutil.ToBytes32(archivedCommitteeInfo.AttesterSeed)
} else if helpers.SlotToEpoch(startSlot)+1 == headEpoch || helpers.SlotToEpoch(startSlot) == headEpoch {
} else if helpers.SlotToEpoch(startSlot)+1 == currentEpoch || helpers.SlotToEpoch(startSlot) == currentEpoch {
// Otherwise, we use current beacon state to calculate the committees.
requestedEpoch := helpers.SlotToEpoch(startSlot)
activeIndices, err = bs.HeadFetcher.HeadValidatorsIndices(requestedEpoch)
@@ -112,7 +112,7 @@ func (bs *Server) retrieveCommitteesForEpoch(
return nil, nil, status.Errorf(
codes.InvalidArgument,
"Cannot retrieve information about an epoch in the future, current epoch %d, requesting %d",
headEpoch,
currentEpoch,
helpers.SlotToEpoch(startSlot),
)
}

View File

@@ -34,10 +34,12 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) {
t.Fatal(err)
}
m := &mock.ChainService{
State: headState,
}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
HeadFetcher: m,
GenesisTimeFetcher: m,
}
activeIndices, err := helpers.ActiveValidatorIndices(headState, 0)
@@ -84,10 +86,12 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
headState.SetRandaoMixes(mixes)
headState.SetSlot(params.BeaconConfig().SlotsPerEpoch * 2)
m := &mock.ChainService{
State: headState,
}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
HeadFetcher: m,
GenesisTimeFetcher: m,
}
activeIndices, err := helpers.ActiveValidatorIndices(headState, 1)
@@ -183,11 +187,13 @@ func TestServer_ListBeaconCommittees_FromArchive(t *testing.T) {
t.Fatal(err)
}
m := &mock.ChainService{
State: headState,
}
bs := &Server{
BeaconDB: db,
HeadFetcher: &mock.ChainService{
State: headState,
},
BeaconDB: db,
HeadFetcher: m,
GenesisTimeFetcher: m,
}
activeIndices, err := helpers.ActiveValidatorIndices(headState, 0)

View File

@@ -4,6 +4,7 @@ import (
"context"
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
@@ -12,7 +13,9 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
@@ -20,20 +23,24 @@ import (
// providing RPC endpoints to access data relevant to the Ethereum 2.0 phase 0
// beacon chain.
type Server struct {
BeaconDB db.ReadOnlyDatabase
Ctx context.Context
ChainStartFetcher powchain.ChainStartFetcher
HeadFetcher blockchain.HeadFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ParticipationFetcher blockchain.ParticipationFetcher
DepositFetcher depositcache.DepositFetcher
BlockFetcher powchain.POWBlockFetcher
GenesisTimeFetcher blockchain.TimeFetcher
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
AttestationNotifier operation.Notifier
AttestationsPool attestations.Pool
SlashingsPool *slashings.Pool
CanonicalStateChan chan *pbp2p.BeaconState
ChainStartChan chan time.Time
BeaconDB db.ReadOnlyDatabase
Ctx context.Context
ChainStartFetcher powchain.ChainStartFetcher
HeadFetcher blockchain.HeadFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ParticipationFetcher blockchain.ParticipationFetcher
DepositFetcher depositcache.DepositFetcher
BlockFetcher powchain.POWBlockFetcher
GenesisTimeFetcher blockchain.TimeFetcher
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
AttestationNotifier operation.Notifier
Broadcaster p2p.Broadcaster
AttestationsPool attestations.Pool
SlashingsPool *slashings.Pool
CanonicalStateChan chan *pbp2p.BeaconState
ChainStartChan chan time.Time
ReceivedAttestationsBuffer chan *ethpb.Attestation
CollectedAttestationsBuffer chan []*ethpb.Attestation
StateGen *stategen.State
}

View File

@@ -4,6 +4,7 @@ import (
"context"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -20,9 +21,12 @@ func (bs *Server) SubmitProposerSlashing(
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
}
if err := bs.SlashingsPool.InsertProposerSlashing(beaconState, req); err != nil {
if err := bs.SlashingsPool.InsertProposerSlashing(ctx, beaconState, req); err != nil {
return nil, status.Errorf(codes.Internal, "Could not insert proposer slashing into pool: %v", err)
}
if featureconfig.Get().BroadcastSlashings {
bs.Broadcaster.Broadcast(ctx, req)
}
return &ethpb.SubmitSlashingResponse{
SlashedIndices: []uint64{req.ProposerIndex},
}, nil
@@ -39,9 +43,12 @@ func (bs *Server) SubmitAttesterSlashing(
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
}
if err := bs.SlashingsPool.InsertAttesterSlashing(beaconState, req); err != nil {
if err := bs.SlashingsPool.InsertAttesterSlashing(ctx, beaconState, req); err != nil {
return nil, status.Errorf(codes.Internal, "Could not insert attester slashing into pool: %v", err)
}
if featureconfig.Get().BroadcastSlashings {
bs.Broadcaster.Broadcast(ctx, req)
}
slashedIndices := sliceutil.IntersectionUint64(req.Attestation_1.AttestingIndices, req.Attestation_2.AttestingIndices)
return &ethpb.SubmitSlashingResponse{
SlashedIndices: slashedIndices,

View File

@@ -2,47 +2,37 @@ package beacon
import (
"context"
"strconv"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestServer_SubmitProposerSlashing(t *testing.T) {
ctx := context.Background()
vals := make([]*ethpb.Validator, 10)
for i := 0; i < len(vals); i++ {
key := make([]byte, 48)
copy(key, strconv.Itoa(i))
vals[i] = &ethpb.Validator{
PublicKey: key[:],
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
Slashed: false,
}
}
// We mark the validator at index 5 as already slashed.
vals[5].Slashed = true
st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
Slot: 0,
Validators: vals,
})
st, privs := testutil.DeterministicGenesisState(t, 64)
slashedVal, err := st.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
// We mark the validator at index 5 as already slashed.
slashedVal.Slashed = true
if err := st.UpdateValidatorAtIndex(5, slashedVal); err != nil {
t.Fatal(err)
}
mb := &mockp2p.MockBroadcaster{}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
SlashingsPool: slashings.NewPool(),
Broadcaster: mb,
}
// We want a proposer slashing for validator with index 2 to
@@ -50,27 +40,11 @@ func TestServer_SubmitProposerSlashing(t *testing.T) {
wanted := &ethpb.SubmitSlashingResponse{
SlashedIndices: []uint64{2},
}
slashing := &ethpb.ProposerSlashing{
ProposerIndex: 2,
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 0,
ParentRoot: nil,
StateRoot: nil,
BodyRoot: nil,
},
Signature: make([]byte, 96),
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 0,
ParentRoot: nil,
StateRoot: nil,
BodyRoot: nil,
},
Signature: make([]byte, 96),
},
slashing, err := testutil.GenerateProposerSlashingForValidator(st, privs[2], uint64(2))
if err != nil {
t.Fatal(err)
}
res, err := bs.SubmitProposerSlashing(ctx, slashing)
if err != nil {
t.Fatal(err)
@@ -79,59 +53,103 @@ func TestServer_SubmitProposerSlashing(t *testing.T) {
t.Errorf("Wanted %v, received %v", wanted, res)
}
if mb.BroadcastCalled {
t.Errorf("Expected broadcast not to be called by default")
}
slashing, err = testutil.GenerateProposerSlashingForValidator(st, privs[5], uint64(5))
if err != nil {
t.Fatal(err)
}
// We do not want a proposer slashing for an already slashed validator
// (the validator at index 5) to be included in the pool.
slashing.ProposerIndex = 5
if _, err := bs.SubmitProposerSlashing(ctx, slashing); err == nil {
t.Error("Expected including a proposer slashing for an already slashed validator to fail")
}
}
func TestServer_SubmitAttesterSlashing(t *testing.T) {
func TestServer_SubmitProposerSlashingBroadcast(t *testing.T) {
ctx := context.Background()
vals := make([]*ethpb.Validator, 10)
for i := 0; i < len(vals); i++ {
key := make([]byte, 48)
copy(key, strconv.Itoa(i))
vals[i] = &ethpb.Validator{
PublicKey: key[:],
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
Slashed: false,
}
}
// We mark the validators at index 5, 6, 7 as already slashed.
vals[5].Slashed = true
vals[6].Slashed = true
vals[7].Slashed = true
cfg := featureconfig.Get()
cfg.BroadcastSlashings = true
featureconfig.Init(cfg)
defer func() {
cfg.BroadcastSlashings = false
featureconfig.Init(cfg)
}()
st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
Slot: 0,
Validators: vals,
})
st, privs := testutil.DeterministicGenesisState(t, 64)
slashedVal, err := st.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
// We mark the validator at index 5 as already slashed.
slashedVal.Slashed = true
if err := st.UpdateValidatorAtIndex(5, slashedVal); err != nil {
t.Fatal(err)
}
mb := &mockp2p.MockBroadcaster{}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
SlashingsPool: slashings.NewPool(),
Broadcaster: mb,
}
slashing := &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 2, 3},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{2, 3, 4},
},
// We want a proposer slashing for validator with index 2 to
// be included in the pool.
slashing, err := testutil.GenerateProposerSlashingForValidator(st, privs[2], uint64(2))
if err != nil {
t.Fatal(err)
}
_, err = bs.SubmitProposerSlashing(ctx, slashing)
if err != nil {
t.Fatal(err)
}
if !mb.BroadcastCalled {
t.Errorf("Expected broadcast to be called")
}
}
func TestServer_SubmitAttesterSlashing(t *testing.T) {
ctx := context.Background()
// We mark the validators at index 5, 6 as already slashed.
st, privs := testutil.DeterministicGenesisState(t, 64)
slashedVal, err := st.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
// We mark the validator at index 5 as already slashed.
slashedVal.Slashed = true
if err := st.UpdateValidatorAtIndex(5, slashedVal); err != nil {
t.Fatal(err)
}
mb := &mockp2p.MockBroadcaster{}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
SlashingsPool: slashings.NewPool(),
Broadcaster: mb,
}
slashing, err := testutil.GenerateAttesterSlashingForValidator(st, privs[2], uint64(2))
if err != nil {
t.Fatal(err)
}
// We want the intersection of the slashing attesting indices
// to be slashed, so we expect validators 2 and 3 to be in the response
// slashed indices.
wanted := &ethpb.SubmitSlashingResponse{
SlashedIndices: []uint64{2, 3},
SlashedIndices: []uint64{2},
}
res, err := bs.SubmitAttesterSlashing(ctx, slashing)
if err != nil {
@@ -140,18 +158,65 @@ func TestServer_SubmitAttesterSlashing(t *testing.T) {
if !proto.Equal(wanted, res) {
t.Errorf("Wanted %v, received %v", wanted, res)
}
if mb.BroadcastCalled {
t.Errorf("Expected broadcast not to be called by default")
}
slashing, err = testutil.GenerateAttesterSlashingForValidator(st, privs[5], uint64(5))
if err != nil {
t.Fatal(err)
}
// If any of the attesting indices in the slashing object have already
// been slashed, we should fail to insert properly into the attester slashing pool.
slashing = &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{5, 6, 7},
},
Attestation_2: &ethpb.IndexedAttestation{
AttestingIndices: []uint64{6, 7, 8},
},
}
if _, err := bs.SubmitAttesterSlashing(ctx, slashing); err == nil {
t.Error("Expected including a attester slashing for an already slashed validator to fail")
}
}
func TestServer_SubmitAttesterSlashingBroadcast(t *testing.T) {
ctx := context.Background()
cfg := featureconfig.Get()
cfg.BroadcastSlashings = true
featureconfig.Init(cfg)
defer func() {
cfg.BroadcastSlashings = false
featureconfig.Init(cfg)
}()
// We mark the validators at index 5, 6 as already slashed.
st, privs := testutil.DeterministicGenesisState(t, 64)
slashedVal, err := st.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
// We mark the validator at index 5 as already slashed.
slashedVal.Slashed = true
if err := st.UpdateValidatorAtIndex(5, slashedVal); err != nil {
t.Fatal(err)
}
mb := &mockp2p.MockBroadcaster{}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
SlashingsPool: slashings.NewPool(),
Broadcaster: mb,
}
slashing, err := testutil.GenerateAttesterSlashingForValidator(st, privs[2], uint64(2))
if err != nil {
t.Fatal(err)
}
// We want the intersection of the slashing attesting indices
// to be slashed, so we expect validators 2 and 3 to be in the response
// slashed indices.
_, err = bs.SubmitAttesterSlashing(ctx, slashing)
if err != nil {
t.Fatal(err)
}
if !mb.BroadcastCalled {
t.Errorf("Expected broadcast to be called when flag is set")
}
}

View File

@@ -81,13 +81,10 @@ func (bs *Server) ListValidatorBalances(
if len(pubKey) == 0 {
continue
}
index, ok, err := bs.BeaconDB.ValidatorIndex(ctx, pubKey)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve validator index: %v", err)
}
pubkeyBytes := bytesutil.ToBytes48(pubKey)
index, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
if !ok {
return nil, status.Errorf(codes.NotFound, "Could not find validator index for public key %#x", pubKey)
return nil, status.Errorf(codes.NotFound, "Could not find validator index for public key %#x", pubkeyBytes)
}
filtered[index] = true
@@ -124,6 +121,10 @@ func (bs *Server) ListValidatorBalances(
}
balancesCount = len(res)
}
// Depending on the indices and public keys given, results might not be sorted.
sort.Slice(res, func(i, j int) bool {
return res[i].Index < res[j].Index
})
// If there are no balances, we simply return a response specifying this.
// Otherwise, attempting to paginate 0 balances below would result in an error.
@@ -199,16 +200,55 @@ func (bs *Server) ListValidators(
}
validatorList := make([]*ethpb.Validators_ValidatorContainer, 0)
for i := 0; i < headState.NumValidators(); i++ {
val, err := headState.ValidatorAtIndex(uint64(i))
for _, index := range req.Indices {
val, err := headState.ValidatorAtIndex(index)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get validator")
}
validatorList = append(validatorList, &ethpb.Validators_ValidatorContainer{
Index: uint64(i),
Index: index,
Validator: val,
})
}
for _, pubKey := range req.PublicKeys {
// Skip empty public key.
if len(pubKey) == 0 {
continue
}
pubkeyBytes := bytesutil.ToBytes48(pubKey)
index, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
if !ok {
continue
}
val, err := headState.ValidatorAtIndex(index)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get validator")
}
validatorList = append(validatorList, &ethpb.Validators_ValidatorContainer{
Index: index,
Validator: val,
})
}
// Depending on the indices and public keys given, results might not be sorted.
sort.Slice(validatorList, func(i, j int) bool {
return validatorList[i].Index < validatorList[j].Index
})
if len(req.PublicKeys) == 0 && len(req.Indices) == 0 {
for i := 0; i < headState.NumValidators(); i++ {
val, err := headState.ValidatorAtIndex(uint64(i))
if err != nil {
return nil, status.Error(codes.Internal, "Could not get validator")
}
validatorList = append(validatorList, &ethpb.Validators_ValidatorContainer{
Index: uint64(i),
Validator: val,
})
}
}
if requestedEpoch < currentEpoch {
stopIdx := len(validatorList)
for idx, item := range validatorList {
@@ -337,8 +377,9 @@ func (bs *Server) GetValidatorActiveSetChanges(
}
activatedIndices := make([]uint64, 0)
slashedIndices := make([]uint64, 0)
exitedIndices := make([]uint64, 0)
slashedIndices := make([]uint64, 0)
ejectedIndices := make([]uint64, 0)
if requestingGenesis || requestedEpoch < currentEpoch {
archivedChanges, err := bs.BeaconDB.ArchivedActiveValidatorChanges(ctx, requestedEpoch)
if err != nil {
@@ -352,8 +393,9 @@ func (bs *Server) GetValidatorActiveSetChanges(
)
}
activatedIndices = archivedChanges.Activated
slashedIndices = archivedChanges.Slashed
exitedIndices = archivedChanges.Exited
slashedIndices = archivedChanges.Slashed
ejectedIndices = archivedChanges.Ejected
} else if requestedEpoch == currentEpoch {
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, helpers.PrevEpoch(headState))
if err != nil {
@@ -361,11 +403,15 @@ func (bs *Server) GetValidatorActiveSetChanges(
}
vals := headState.Validators()
activatedIndices = validators.ActivatedValidatorIndices(helpers.PrevEpoch(headState), vals)
slashedIndices = validators.SlashedValidatorIndices(helpers.PrevEpoch(headState), vals)
exitedIndices, err = validators.ExitedValidatorIndices(helpers.PrevEpoch(headState), vals, activeValidatorCount)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine exited validator indices: %v", err)
}
slashedIndices = validators.SlashedValidatorIndices(helpers.PrevEpoch(headState), vals)
ejectedIndices, err = validators.EjectedValidatorIndices(helpers.PrevEpoch(headState), vals, activeValidatorCount)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine ejected validator indices: %v", err)
}
} else {
// We are requesting data from the future and we return an error.
return nil, status.Errorf(
@@ -378,25 +424,35 @@ func (bs *Server) GetValidatorActiveSetChanges(
// We retrieve the public keys for the indices.
activatedKeys := make([][]byte, len(activatedIndices))
slashedKeys := make([][]byte, len(slashedIndices))
exitedKeys := make([][]byte, len(exitedIndices))
slashedKeys := make([][]byte, len(slashedIndices))
ejectedKeys := make([][]byte, len(ejectedIndices))
for i, idx := range activatedIndices {
pubkey := headState.PubkeyAtIndex(idx)
activatedKeys[i] = pubkey[:]
}
for i, idx := range slashedIndices {
pubkey := headState.PubkeyAtIndex(idx)
slashedKeys[i] = pubkey[:]
}
for i, idx := range exitedIndices {
pubkey := headState.PubkeyAtIndex(idx)
exitedKeys[i] = pubkey[:]
}
for i, idx := range slashedIndices {
pubkey := headState.PubkeyAtIndex(idx)
slashedKeys[i] = pubkey[:]
}
for i, idx := range ejectedIndices {
pubkey := headState.PubkeyAtIndex(idx)
ejectedKeys[i] = pubkey[:]
}
return &ethpb.ActiveSetChanges{
Epoch: requestedEpoch,
ActivatedPublicKeys: activatedKeys,
ActivatedIndices: activatedIndices,
ExitedPublicKeys: exitedKeys,
ExitedIndices: exitedIndices,
SlashedPublicKeys: slashedKeys,
SlashedIndices: slashedIndices,
EjectedPublicKeys: ejectedKeys,
EjectedIndices: ejectedIndices,
}, nil
}
@@ -595,13 +651,15 @@ func (bs *Server) GetValidatorPerformance(
correctlyVotedHead := make([]bool, 0, reqPubKeysCount)
missingValidators := make([][]byte, 0, reqPubKeysCount)
headState, err := bs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get head state")
}
// Convert the list of validator public keys to list of validator indices.
// Also track missing validators using public keys.
for _, key := range req.PublicKeys {
idx, ok, err := bs.BeaconDB.ValidatorIndex(ctx, key)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not fetch validator idx for public key %#x: %v", key, err)
}
pubkeyBytes := bytesutil.ToBytes48(key)
idx, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
if !ok {
missingValidators = append(missingValidators, key)
continue

View File

@@ -5,6 +5,7 @@ import (
"encoding/binary"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"testing"
@@ -694,6 +695,59 @@ func TestServer_ListValidators_NoPagination(t *testing.T) {
}
}
func TestServer_ListValidators_IndicesPubKeys(t *testing.T) {
db := dbTest.SetupDB(t)
defer dbTest.TeardownDB(t, db)
validators, _ := setupValidators(t, db, 100)
indicesWanted := []uint64{2, 7, 11, 17}
pubkeyIndicesWanted := []uint64{3, 5, 9, 15}
allIndicesWanted := append(indicesWanted, pubkeyIndicesWanted...)
want := make([]*ethpb.Validators_ValidatorContainer, len(allIndicesWanted))
for i, idx := range allIndicesWanted {
want[i] = &ethpb.Validators_ValidatorContainer{
Index: idx,
Validator: validators[idx],
}
}
sort.Slice(want, func(i int, j int) bool {
return want[i].Index < want[j].Index
})
headState, err := db.HeadState(context.Background())
if err != nil {
t.Fatal(err)
}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
FinalizationFetcher: &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
},
},
}
pubKeysWanted := make([][]byte, len(pubkeyIndicesWanted))
for i, indice := range pubkeyIndicesWanted {
pubKeysWanted[i] = pubKey(indice)
}
req := &ethpb.ListValidatorsRequest{
Indices: indicesWanted,
PublicKeys: pubKeysWanted,
}
received, err := bs.ListValidators(context.Background(), req)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(want, received.ValidatorList) {
t.Fatal("Incorrect respond of validators")
}
}
func TestServer_ListValidators_Pagination(t *testing.T) {
db := dbTest.SetupDB(t)
defer dbTest.TeardownDB(t, db)
@@ -1106,7 +1160,7 @@ func TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing
func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
ctx := context.Background()
validators := make([]*ethpb.Validator, 6)
validators := make([]*ethpb.Validator, 8)
headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
Slot: 0,
Validators: validators,
@@ -1119,6 +1173,7 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
withdrawableEpoch := params.BeaconConfig().FarFutureEpoch
exitEpoch := params.BeaconConfig().FarFutureEpoch
slashed := false
balance := params.BeaconConfig().MaxEffectiveBalance
// Mark indices divisible by two as activated.
if i%2 == 0 {
activationEpoch = helpers.ActivationExitEpoch(0)
@@ -1130,10 +1185,16 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
// Mark indices divisible by 5 as exited.
exitEpoch = 0
withdrawableEpoch = params.BeaconConfig().MinValidatorWithdrawabilityDelay
} else if i%7 == 0 {
// Mark indices divisible by 7 as ejected.
exitEpoch = 0
withdrawableEpoch = params.BeaconConfig().MinValidatorWithdrawabilityDelay
balance = params.BeaconConfig().EjectionBalance
}
if err := headState.UpdateValidatorAtIndex(uint64(i), &ethpb.Validator{
ActivationEpoch: activationEpoch,
PublicKey: pubKey(uint64(i)),
EffectiveBalance: balance,
WithdrawalCredentials: make([]byte, 32),
WithdrawableEpoch: withdrawableEpoch,
Slashed: slashed,
@@ -1158,21 +1219,34 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
pubKey(0),
pubKey(2),
pubKey(4),
pubKey(6),
}
wantedSlashed := [][]byte{
pubKey(3),
}
wantedActiveIndices := []uint64{0, 2, 4, 6}
wantedExited := [][]byte{
pubKey(5),
}
wantedExitedIndices := []uint64{5}
wantedSlashed := [][]byte{
pubKey(3),
}
wantedSlashedIndices := []uint64{3}
wantedEjected := [][]byte{
pubKey(7),
}
wantedEjectedIndices := []uint64{7}
wanted := &ethpb.ActiveSetChanges{
Epoch: 0,
ActivatedPublicKeys: wantedActive,
ActivatedIndices: wantedActiveIndices,
ExitedPublicKeys: wantedExited,
ExitedIndices: wantedExitedIndices,
SlashedPublicKeys: wantedSlashed,
SlashedIndices: wantedSlashedIndices,
EjectedPublicKeys: wantedEjected,
EjectedIndices: wantedEjectedIndices,
}
if !proto.Equal(wanted, res) {
t.Errorf("Wanted %v, received %v", wanted, res)
t.Errorf("Wanted \n%v, received \n%v", wanted, res)
}
}
@@ -1180,7 +1254,7 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
db := dbTest.SetupDB(t)
defer dbTest.TeardownDB(t, db)
ctx := context.Background()
validators := make([]*ethpb.Validator, 6)
validators := make([]*ethpb.Validator, 8)
headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
Slot: helpers.StartSlot(100),
Validators: validators,
@@ -1189,8 +1263,9 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
t.Fatal(err)
}
activatedIndices := make([]uint64, 0)
slashedIndices := make([]uint64, 0)
exitedIndices := make([]uint64, 0)
slashedIndices := make([]uint64, 0)
ejectedIndices := make([]uint64, 0)
for i := 0; i < len(validators); i++ {
// Mark indices divisible by two as activated.
if i%2 == 0 {
@@ -1201,6 +1276,9 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
} else if i%5 == 0 {
// Mark indices divisible by 5 as exited.
exitedIndices = append(exitedIndices, uint64(i))
} else if i%7 == 0 {
// Mark indices divisible by 7 as ejected.
ejectedIndices = append(ejectedIndices, uint64(i))
}
key := make([]byte, 48)
copy(key, strconv.Itoa(i))
@@ -1214,6 +1292,7 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
Activated: activatedIndices,
Exited: exitedIndices,
Slashed: slashedIndices,
Ejected: ejectedIndices,
}
// We store the changes during the genesis epoch.
if err := db.SaveArchivedActiveValidatorChanges(ctx, 0, archivedChanges); err != nil {
@@ -1235,7 +1314,7 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
if err != nil {
t.Fatal(err)
}
wantedKeys := make([][]byte, 6)
wantedKeys := make([][]byte, 8)
for i := 0; i < len(wantedKeys); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
@@ -1245,21 +1324,34 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
wantedKeys[0],
wantedKeys[2],
wantedKeys[4],
wantedKeys[6],
}
wantedSlashed := [][]byte{
wantedKeys[3],
}
wantedActiveIndices := []uint64{0, 2, 4, 6}
wantedExited := [][]byte{
wantedKeys[5],
}
wantedExitedIndices := []uint64{5}
wantedSlashed := [][]byte{
wantedKeys[3],
}
wantedSlashedIndices := []uint64{3}
wantedEjected := [][]byte{
wantedKeys[7],
}
wantedEjectedIndices := []uint64{7}
wanted := &ethpb.ActiveSetChanges{
Epoch: 0,
ActivatedPublicKeys: wantedActive,
ActivatedIndices: wantedActiveIndices,
ExitedPublicKeys: wantedExited,
ExitedIndices: wantedExitedIndices,
SlashedPublicKeys: wantedSlashed,
SlashedIndices: wantedSlashedIndices,
EjectedPublicKeys: wantedEjected,
EjectedIndices: wantedEjectedIndices,
}
if !proto.Equal(wanted, res) {
t.Errorf("Wanted %v, received %v", wanted, res)
t.Errorf("Wanted \n%v, received \n%v", wanted, res)
}
res, err = bs.GetValidatorActiveSetChanges(ctx, &ethpb.GetValidatorActiveSetChangesRequest{
QueryFilter: &ethpb.GetValidatorActiveSetChangesRequest_Epoch{Epoch: 5},
@@ -1269,7 +1361,7 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
}
wanted.Epoch = 5
if !proto.Equal(wanted, res) {
t.Errorf("Wanted %v, received %v", wanted, res)
t.Errorf("Wanted \n%v, received \n%v", wanted, res)
}
}

View File

@@ -29,6 +29,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/beacon"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/node"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/validator"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/beacon-chain/sync"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
@@ -90,6 +91,7 @@ type Service struct {
slasherCert string
slasherCredentialError error
slasherClient slashpb.SlasherClient
stateGen *stategen.State
}
// Config options for the beacon node RPC server.
@@ -122,6 +124,7 @@ type Config struct {
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
OperationNotifier opfeed.Notifier
StateGen *stategen.State
}
// NewService instantiates a new RPC service instance that will
@@ -161,6 +164,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
operationNotifier: cfg.OperationNotifier,
slasherProvider: cfg.SlasherProvider,
slasherCert: cfg.SlasherCert,
stateGen: cfg.StateGen,
}
}
@@ -233,6 +237,7 @@ func (s *Service) Start() {
PendingDepositsFetcher: s.pendingDepositFetcher,
GenesisTime: genesisTime,
SlashingsPool: s.slashingsPool,
StateGen: s.stateGen,
}
nodeServer := &node.Server{
BeaconDB: s.beaconDB,
@@ -242,21 +247,24 @@ func (s *Service) Start() {
PeersFetcher: s.peersFetcher,
}
beaconChainServer := &beacon.Server{
Ctx: s.ctx,
BeaconDB: s.beaconDB,
AttestationsPool: s.attestationsPool,
SlashingsPool: s.slashingsPool,
HeadFetcher: s.headFetcher,
FinalizationFetcher: s.finalizationFetcher,
ParticipationFetcher: s.participationFetcher,
ChainStartFetcher: s.chainStartFetcher,
DepositFetcher: s.depositFetcher,
BlockFetcher: s.powChainService,
CanonicalStateChan: s.canonicalStateChan,
GenesisTimeFetcher: s.genesisTimeFetcher,
StateNotifier: s.stateNotifier,
BlockNotifier: s.blockNotifier,
AttestationNotifier: s.operationNotifier,
Ctx: s.ctx,
BeaconDB: s.beaconDB,
AttestationsPool: s.attestationsPool,
SlashingsPool: s.slashingsPool,
HeadFetcher: s.headFetcher,
FinalizationFetcher: s.finalizationFetcher,
ParticipationFetcher: s.participationFetcher,
ChainStartFetcher: s.chainStartFetcher,
DepositFetcher: s.depositFetcher,
BlockFetcher: s.powChainService,
CanonicalStateChan: s.canonicalStateChan,
GenesisTimeFetcher: s.genesisTimeFetcher,
StateNotifier: s.stateNotifier,
BlockNotifier: s.blockNotifier,
AttestationNotifier: s.operationNotifier,
Broadcaster: s.p2p,
ReceivedAttestationsBuffer: make(chan *ethpb.Attestation, 100),
CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 100),
}
aggregatorServer := &aggregator.Server{ValidatorServer: validatorServer}
pb.RegisterAggregatorServiceServer(s.grpcServer, aggregatorServer)

View File

@@ -32,12 +32,14 @@ go_library(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//proto/beacon/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/beacon/rpc/v1:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",

View File

@@ -4,8 +4,10 @@ import (
"context"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -33,12 +35,18 @@ func (vs *Server) GetDuties(ctx context.Context, req *ethpb.DutiesRequest) (*eth
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", epochStartSlot, err)
}
}
committeeAssignments, proposerIndexToSlot, err := helpers.CommitteeAssignments(s, req.Epoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
}
// Query the next epoch assignments for committee subnet subscriptions.
nextCommitteeAssignments, _, err := helpers.CommitteeAssignments(s, req.Epoch+1)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
}
var committeeIDs []uint64
var nextCommitteeIDs []uint64
var validatorAssignments []*ethpb.DutiesResponse_Duty
for _, pubKey := range req.PublicKeys {
if ctx.Err() != nil {
@@ -63,10 +71,25 @@ func (vs *Server) GetDuties(ctx context.Context, req *ethpb.DutiesRequest) (*eth
assignment.AttesterSlot = ca.AttesterSlot
assignment.ProposerSlot = proposerIndexToSlot[idx]
assignment.CommitteeIndex = ca.CommitteeIndex
committeeIDs = append(committeeIDs, ca.CommitteeIndex)
}
// Save the next epoch assignments.
ca, ok = nextCommitteeAssignments[idx]
if ok {
nextCommitteeIDs = append(nextCommitteeIDs, ca.CommitteeIndex)
}
}
} else {
vs := vs.validatorStatus(ctx, pubKey, s)
assignment.Status = vs.Status
}
validatorAssignments = append(validatorAssignments, assignment)
}
if featureconfig.Get().EnableDynamicCommitteeSubnets {
cache.CommitteeIDs.AddIDs(committeeIDs, req.Epoch)
cache.CommitteeIDs.AddIDs(nextCommitteeIDs, req.Epoch+1)
}
return &ethpb.DutiesResponse{

View File

@@ -6,13 +6,16 @@ import (
"fmt"
"strings"
"testing"
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
blk "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -67,10 +70,19 @@ func TestGetDuties_NextEpoch_CantFindValidatorIdx(t *testing.T) {
t.Fatalf("Could not get signing root %v", err)
}
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
0: uint64(height),
},
}
vs := &Server{
BeaconDB: db,
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BeaconDB: db,
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
Eth1InfoFetcher: p,
DepositFetcher: depositcache.NewDepositCache(),
}
pubKey := pubKey(99999)

View File

@@ -15,6 +15,7 @@ import (
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/slotutil"
@@ -33,6 +34,12 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
trace.Int64Attribute("committeeIndex", int64(req.CommitteeIndex)),
)
// If attestation committee subnets are enabled, we track the committee
// index into a cache.
if featureconfig.Get().EnableDynamicCommitteeSubnets {
cache.CommitteeIDs.AddIDs([]uint64{req.CommitteeIndex}, helpers.SlotToEpoch(req.Slot))
}
if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
@@ -123,6 +130,12 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
return nil, status.Error(codes.InvalidArgument, "Incorrect attestation signature")
}
// If attestation committee subnets are enabled, we track the committee
// index into a cache.
if featureconfig.Get().EnableDynamicCommitteeSubnets {
cache.CommitteeIDs.AddIDs([]uint64{att.Data.CommitteeIndex}, helpers.SlotToEpoch(att.Data.Slot))
}
root, err := ssz.HashTreeRoot(att.Data)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not tree hash attestation: %v", err)

View File

@@ -18,6 +18,7 @@ import (
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/trieutil"
@@ -89,8 +90,8 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
Deposits: deposits,
Attestations: atts,
RandaoReveal: req.RandaoReveal,
ProposerSlashings: vs.SlashingsPool.PendingProposerSlashings(),
AttesterSlashings: vs.SlashingsPool.PendingAttesterSlashings(),
ProposerSlashings: vs.SlashingsPool.PendingProposerSlashings(ctx),
AttesterSlashings: vs.SlashingsPool.PendingAttesterSlashings(ctx),
VoluntaryExits: vs.ExitPool.PendingExits(head, req.Slot),
Graffiti: graffiti[:],
},
@@ -211,9 +212,18 @@ func (vs *Server) randomETH1DataVote(ctx context.Context) (*ethpb.Eth1Data, erro
// computeStateRoot computes the state root after a block has been processed through a state transition and
// returns it to the validator client.
func (vs *Server) computeStateRoot(ctx context.Context, block *ethpb.SignedBeaconBlock) ([]byte, error) {
beaconState, err := vs.BeaconDB.State(ctx, bytesutil.ToBytes32(block.Block.ParentRoot))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve beacon state")
var beaconState *stateTrie.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
beaconState, err = vs.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(block.Block.ParentRoot))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve beacon state")
}
} else {
beaconState, err = vs.BeaconDB.State(ctx, bytesutil.ToBytes32(block.Block.ParentRoot))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve beacon state")
}
}
root, err := state.CalculateStateRoot(

View File

@@ -99,7 +99,7 @@ func TestGetBlock_OK(t *testing.T) {
privKeys[0],
0, /* validator index */
)
if err := proposerServer.SlashingsPool.InsertProposerSlashing(beaconState, proposerSlashing); err != nil {
if err := proposerServer.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing); err != nil {
t.Fatal(err)
}
@@ -109,7 +109,7 @@ func TestGetBlock_OK(t *testing.T) {
privKeys[1],
1, /* validator index */
)
if err := proposerServer.SlashingsPool.InsertAttesterSlashing(beaconState, attesterSlashing); err != nil {
if err := proposerServer.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing); err != nil {
t.Fatal(err)
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"time"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
ptypes "github.com/gogo/protobuf/types"
@@ -67,6 +68,7 @@ type Server struct {
PendingDepositsFetcher depositcache.PendingDepositsFetcher
OperationNotifier opfeed.Notifier
GenesisTime time.Time
StateGen *stategen.State
}
// WaitForActivation checks if a validator public key exists in the active validator registry of the current

View File

@@ -4,8 +4,10 @@ go_library(
name = "go_default_library",
srcs = [
"cloners.go",
"field_trie.go",
"getters.go",
"setters.go",
"state_trie.go",
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state",
@@ -20,9 +22,11 @@ go_library(
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/memorypool:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_protolambda_zssz//merkle:go_default_library",
@@ -34,6 +38,8 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"field_trie_test.go",
"getters_test.go",
"references_test.go",
"types_test.go",
],
@@ -44,6 +50,7 @@ go_test(
"//shared/bytesutil:go_default_library",
"//shared/interop:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",

View File

@@ -0,0 +1,289 @@
package state
import (
"reflect"
"sync"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/memorypool"
)
// FieldTrie is the representation of the representative
// trie of the particular field. It caches the Merkle layers of one
// beacon-state field so the field root can be recomputed incrementally
// for changed indices rather than from scratch.
type FieldTrie struct {
	*sync.Mutex // guards fieldLayers during recomputation.
	*reference  // reference count for copy-on-write sharing.
	// fieldLayers holds the trie layer by layer: fieldLayers[0] is the leaf
	// layer and the last layer holds the single root node (see TrieRoot).
	fieldLayers [][]*[32]byte
	// field identifies which beacon-state field this trie represents.
	field fieldIndex
}
// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
// trie according to the given parameters. Depending on whether the field is a basic/composite array
// which is either fixed/variable length, it will appropriately determine the trie.
func NewFieldTrie(field fieldIndex, elements interface{}, length uint64) (*FieldTrie, error) {
	// With no elements there is nothing to hash yet: hand back an empty shell.
	if elements == nil {
		return &FieldTrie{
			field:     field,
			reference: &reference{1},
			Mutex:     new(sync.Mutex),
		}, nil
	}
	dataType, ok := fieldMap[field]
	if !ok {
		return nil, errors.Errorf("unrecognized field in trie")
	}
	roots, err := fieldConverters(field, []uint64{}, elements, true)
	if err != nil {
		return nil, err
	}
	// Build the layer stack appropriate for the field's array kind.
	var layers [][]*[32]byte
	switch dataType {
	case basicArray:
		layers = stateutil.ReturnTrieLayer(roots, length)
	case compositeArray:
		layers = stateutil.ReturnTrieLayerVariable(roots, length)
	default:
		return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(dataType).Name())
	}
	return &FieldTrie{
		fieldLayers: layers,
		field:       field,
		reference:   &reference{1},
		Mutex:       new(sync.Mutex),
	}, nil
}
// RecomputeTrie rebuilds the affected branches in the trie according to the provided
// changed indices and elements. This recomputes the trie according to the particular
// field the trie is based on.
//
// The trie's layers are mutated in place under the embedded mutex, and the new
// field root is returned. For variable-length (composite) fields the element
// count is mixed into the returned root.
func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]byte, error) {
	f.Lock()
	defer f.Unlock()
	var fieldRoot [32]byte
	datType, ok := fieldMap[f.field]
	if !ok {
		return [32]byte{}, errors.Errorf("unrecognized field in trie")
	}
	// convertAll=false: only the elements at the changed indices are hashed.
	fieldRoots, err := fieldConverters(f.field, indices, elements, false)
	if err != nil {
		return [32]byte{}, err
	}
	switch datType {
	case basicArray:
		fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayer(fieldRoots, indices, f.fieldLayers)
		if err != nil {
			return [32]byte{}, err
		}
		return fieldRoot, nil
	case compositeArray:
		fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(fieldRoots, indices, f.fieldLayers)
		if err != nil {
			return [32]byte{}, err
		}
		// Mix the list length (leaf-layer size) into the root, per SSZ
		// hashing of variable-length lists.
		return stateutil.AddInMixin(fieldRoot, uint64(len(f.fieldLayers[0])))
	default:
		return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(datType).Name())
	}
}
// CopyTrie copies the references to the elements the trie
// is built on.
func (f *FieldTrie) CopyTrie() *FieldTrie {
	// An unbuilt trie has no layers to duplicate.
	if f.fieldLayers == nil {
		return &FieldTrie{
			field:     f.field,
			reference: &reference{1},
			Mutex:     new(sync.Mutex),
		}
	}
	// The well-known fixed-size fields draw their layer slices from pools.
	var copied [][]*[32]byte
	switch f.field {
	case randaoMixes:
		copied = memorypool.GetRandaoMixesTrie(len(f.fieldLayers))
	case blockRoots:
		copied = memorypool.GetBlockRootsTrie(len(f.fieldLayers))
	case stateRoots:
		copied = memorypool.GetStateRootsTrie(len(f.fieldLayers))
	default:
		copied = make([][]*[32]byte, len(f.fieldLayers))
	}
	for i := range f.fieldLayers {
		src := f.fieldLayers[i]
		// Pooled slices may be shorter than needed; grow, then clamp to the
		// source length before copying the node pointers.
		if missing := len(src) - len(copied[i]); missing > 0 {
			copied[i] = append(copied[i], make([]*[32]byte, missing)...)
		}
		copied[i] = copied[i][:len(src)]
		copy(copied[i], src)
	}
	return &FieldTrie{
		fieldLayers: copied,
		field:       f.field,
		reference:   &reference{1},
		Mutex:       new(sync.Mutex),
	}
}
// TrieRoot returns the corresponding root of the trie.
//
// Returns an error (instead of panicking on an out-of-range layer access)
// when the trie has not been built yet — e.g. it was constructed via
// NewFieldTrie with nil elements, which leaves fieldLayers nil.
func (f *FieldTrie) TrieRoot() ([32]byte, error) {
	datType, ok := fieldMap[f.field]
	if !ok {
		return [32]byte{}, errors.Errorf("unrecognized field in trie")
	}
	// Guard: indexing f.fieldLayers[len(f.fieldLayers)-1] below panics with
	// index -1 when no layers exist.
	if len(f.fieldLayers) == 0 {
		return [32]byte{}, errors.Errorf("no layers exist in field trie")
	}
	switch datType {
	case basicArray:
		return *f.fieldLayers[len(f.fieldLayers)-1][0], nil
	case compositeArray:
		// Variable-length lists mix the element count (leaf-layer size)
		// into the root, per SSZ hashing rules.
		trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
		return stateutil.AddInMixin(trieRoot, uint64(len(f.fieldLayers[0])))
	default:
		return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(datType).Name())
	}
}
// fieldConverters converts the corresponding field and the provided elements
// to the appropriate 32-byte roots. When convertAll is true every element is
// converted; otherwise only the elements at the provided indices are.
//
// Fix: the type-mismatch errors previously formatted reflect types with
// Name(), which is the empty string for unnamed types such as slices, so the
// messages rendered as "Wanted type of  but got ". Formatting the
// reflect.Type value itself uses String() and prints the full type.
func fieldConverters(field fieldIndex, indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
	switch field {
	case blockRoots, stateRoots, randaoMixes:
		val, ok := elements.([][]byte)
		if !ok {
			return nil, errors.Errorf("Wanted type of %v but got %v",
				reflect.TypeOf([][]byte{}), reflect.TypeOf(elements))
		}
		return handleByteArrays(val, indices, convertAll)
	case eth1DataVotes:
		val, ok := elements.([]*ethpb.Eth1Data)
		if !ok {
			return nil, errors.Errorf("Wanted type of %v but got %v",
				reflect.TypeOf([]*ethpb.Eth1Data{}), reflect.TypeOf(elements))
		}
		return handleEth1DataSlice(val, indices, convertAll)
	case validators:
		val, ok := elements.([]*ethpb.Validator)
		if !ok {
			return nil, errors.Errorf("Wanted type of %v but got %v",
				reflect.TypeOf([]*ethpb.Validator{}), reflect.TypeOf(elements))
		}
		return handleValidatorSlice(val, indices, convertAll)
	case previousEpochAttestations, currentEpochAttestations:
		val, ok := elements.([]*pb.PendingAttestation)
		if !ok {
			return nil, errors.Errorf("Wanted type of %v but got %v",
				reflect.TypeOf([]*pb.PendingAttestation{}), reflect.TypeOf(elements))
		}
		return handlePendingAttestation(val, indices, convertAll)
	default:
		return [][32]byte{}, errors.Errorf("got unsupported type of %v", reflect.TypeOf(elements))
	}
}
// handleByteArrays converts byte slices into their fixed 32-byte root form.
// When convertAll is true every element is converted; otherwise only the
// elements at the provided indices are.
//
// Improvements: the result slice is pre-sized (the original grew it append
// by append), and an out-of-range index now returns an error instead of
// panicking on val[idx].
func handleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
	if convertAll {
		roots := make([][32]byte, 0, len(val))
		for i := range val {
			roots = append(roots, bytesutil.ToBytes32(val[i]))
		}
		return roots, nil
	}
	roots := make([][32]byte, 0, len(indices))
	for _, idx := range indices {
		// Defensive bound check on dirty indices supplied by the caller.
		if idx >= uint64(len(val)) {
			return nil, errors.Errorf("index %d out of range for list of length %d", idx, len(val))
		}
		roots = append(roots, bytesutil.ToBytes32(val[idx]))
	}
	return roots, nil
}
// handleEth1DataSlice computes the hash tree root of each requested eth1 data
// element. With convertAll set, all elements are hashed; otherwise only the
// ones at the supplied indices.
func handleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
	roots := [][32]byte{}
	if convertAll {
		for _, d := range val {
			r, err := stateutil.Eth1Root(d)
			if err != nil {
				return nil, err
			}
			roots = append(roots, r)
		}
		return roots, nil
	}
	for _, idx := range indices {
		r, err := stateutil.Eth1Root(val[idx])
		if err != nil {
			return nil, err
		}
		roots = append(roots, r)
	}
	return roots, nil
}
// handleValidatorSlice computes the hash tree root of each requested
// validator record. With convertAll set, all validators are hashed; otherwise
// only the ones at the supplied indices.
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
	roots := [][32]byte{}
	if convertAll {
		for _, v := range val {
			r, err := stateutil.ValidatorRoot(v)
			if err != nil {
				return nil, err
			}
			roots = append(roots, r)
		}
		return roots, nil
	}
	for _, idx := range indices {
		r, err := stateutil.ValidatorRoot(val[idx])
		if err != nil {
			return nil, err
		}
		roots = append(roots, r)
	}
	return roots, nil
}
// handlePendingAttestation computes the hash tree root of each requested
// pending attestation. With convertAll set, all elements are hashed;
// otherwise only the ones at the supplied indices.
func handlePendingAttestation(val []*pb.PendingAttestation, indices []uint64, convertAll bool) ([][32]byte, error) {
	roots := [][32]byte{}
	if convertAll {
		for _, att := range val {
			r, err := stateutil.PendingAttestationRoot(att)
			if err != nil {
				return nil, err
			}
			roots = append(roots, r)
		}
		return roots, nil
	}
	for _, idx := range indices {
		r, err := stateutil.PendingAttestationRoot(val[idx])
		if err != nil {
			return nil, err
		}
		roots = append(roots, r)
	}
	return roots, nil
}

View File

@@ -0,0 +1,98 @@
package state_test
import (
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
// TestFieldTrie_NewTrie checks that a trie freshly built from the state roots
// field reproduces the reference hash-tree-root.
func TestFieldTrie_NewTrie(t *testing.T) {
	newState, _ := testutil.DeterministicGenesisState(t, 40)

	// 5 represents the enum value of state roots
	trie, err := state.NewFieldTrie(5, newState.StateRoots(), params.BeaconConfig().SlotsPerHistoricalRoot)
	if err != nil {
		t.Fatal(err)
	}
	root, err := stateutil.RootsArrayHashTreeRoot(newState.StateRoots(), params.BeaconConfig().SlotsPerHistoricalRoot, "StateRoots")
	if err != nil {
		t.Fatal(err)
	}
	newRoot, err := trie.TrieRoot()
	// Fix: the error from TrieRoot was previously assigned but never checked.
	if err != nil {
		t.Fatal(err)
	}
	if newRoot != root {
		t.Errorf("Wanted root of %#x but got %#x", root, newRoot)
	}
}
// TestFieldTrie_RecomputeTrie checks that recomputing only the changed
// validator indices yields the same root as a full registry re-hash.
func TestFieldTrie_RecomputeTrie(t *testing.T) {
	newState, _ := testutil.DeterministicGenesisState(t, 32)

	// 10 represents the enum value of validators
	trie, err := state.NewFieldTrie(10, newState.Validators(), params.BeaconConfig().ValidatorRegistryLimit)
	if err != nil {
		t.Fatal(err)
	}

	changedIdx := []uint64{2, 29}
	val1, err := newState.ValidatorAtIndex(10)
	if err != nil {
		t.Fatal(err)
	}
	val2, err := newState.ValidatorAtIndex(11)
	if err != nil {
		t.Fatal(err)
	}
	val1.Slashed = true
	val1.ExitEpoch = 20
	val2.Slashed = true
	val2.ExitEpoch = 40

	changedVals := []*ethpb.Validator{val1, val2}
	// Fix: the errors from these state mutations were previously discarded.
	if err := newState.UpdateValidatorAtIndex(changedIdx[0], changedVals[0]); err != nil {
		t.Fatal(err)
	}
	if err := newState.UpdateValidatorAtIndex(changedIdx[1], changedVals[1]); err != nil {
		t.Fatal(err)
	}

	expectedRoot, err := stateutil.ValidatorRegistryRoot(newState.Validators())
	if err != nil {
		t.Fatal(err)
	}
	root, err := trie.RecomputeTrie(changedIdx, newState.Validators())
	if err != nil {
		t.Fatal(err)
	}
	if root != expectedRoot {
		t.Errorf("Wanted root of %#x but got %#x", expectedRoot, root)
	}
}
// TestFieldTrie_CopyTrieImmutable checks that mutating the original trie does
// not change the root of a previously taken copy.
func TestFieldTrie_CopyTrieImmutable(t *testing.T) {
	newState, _ := testutil.DeterministicGenesisState(t, 32)

	// 12 represents the enum value of randao mixes.
	trie, err := state.NewFieldTrie(12, newState.RandaoMixes(), params.BeaconConfig().EpochsPerHistoricalVector)
	if err != nil {
		t.Fatal(err)
	}

	newTrie := trie.CopyTrie()

	changedIdx := []uint64{2, 29}
	changedVals := [][32]byte{{'A', 'B'}, {'C', 'D'}}
	// Fix: the errors from these state mutations were previously discarded.
	if err := newState.UpdateRandaoMixesAtIndex(changedVals[0][:], changedIdx[0]); err != nil {
		t.Fatal(err)
	}
	if err := newState.UpdateRandaoMixesAtIndex(changedVals[1][:], changedIdx[1]); err != nil {
		t.Fatal(err)
	}

	root, err := trie.RecomputeTrie(changedIdx, newState.RandaoMixes())
	if err != nil {
		t.Fatal(err)
	}
	newRoot, err := newTrie.TrieRoot()
	if err != nil {
		t.Fatal(err)
	}
	if root == newRoot {
		t.Errorf("Wanted roots to be different, but they are the same: %#x", root)
	}
}

View File

@@ -134,6 +134,9 @@ func (b *BeaconState) Slot() uint64 {
if !b.HasInnerState() {
return 0
}
b.lock.RLock()
defer b.lock.RUnlock()
return b.state.Slot
}

View File

@@ -0,0 +1,25 @@
package state
import (
"sync"
"testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
// TestBeaconState_SlotDataRace runs SetSlot and Slot concurrently; execute
// with -race to surface unsynchronized access to the inner state.
func TestBeaconState_SlotDataRace(t *testing.T) {
	headState, err := InitializeFromProto(&pb.BeaconState{Slot: 1})
	// Fix: the constructor error was previously discarded with _.
	if err != nil {
		t.Fatal(err)
	}

	wg := sync.WaitGroup{}
	wg.Add(2)
	go func() {
		defer wg.Done()
		// t.Error, not t.Fatal: FailNow must not run outside the test goroutine.
		if err := headState.SetSlot(uint64(0)); err != nil {
			t.Error(err)
		}
	}()
	go func() {
		defer wg.Done()
		headState.Slot()
	}()
	wg.Wait()
}

View File

@@ -11,36 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/shared/hashutil"
)
type fieldIndex int
// Below we define a set of useful enum values for the field
// indices of the beacon state. For example, genesisTime is the
// 0th field of the beacon state. This is helpful when we are
// updating the Merkle branches up the trie representation
// of the beacon state.
const (
genesisTime fieldIndex = iota
slot
fork
latestBlockHeader
blockRoots
stateRoots
historicalRoots
eth1Data
eth1DataVotes
eth1DepositIndex
validators
balances
randaoMixes
slashings
previousEpochAttestations
currentEpochAttestations
justificationBits
previousJustifiedCheckpoint
currentJustifiedCheckpoint
finalizedCheckpoint
)
// SetGenesisTime for the beacon state.
func (b *BeaconState) SetGenesisTime(val uint64) error {
b.lock.Lock()
@@ -123,7 +93,7 @@ func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) err
// Copy on write since this is a shared array.
r = b.BlockRoots()
ref.refs--
ref.MinusRef()
b.sharedFieldReferences[blockRoots] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -136,6 +106,7 @@ func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) err
b.state.BlockRoots = r
b.markFieldAsDirty(blockRoots)
b.AddDirtyIndices(blockRoots, []uint64{idx})
return nil
}
@@ -153,6 +124,7 @@ func (b *BeaconState) SetStateRoots(val [][]byte) error {
b.state.StateRoots = val
b.markFieldAsDirty(stateRoots)
b.rebuildTrie[stateRoots] = true
return nil
}
@@ -173,7 +145,7 @@ func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) err
// Perform a copy since this is a shared reference and we don't want to mutate others.
r = b.StateRoots()
ref.refs--
ref.MinusRef()
b.sharedFieldReferences[stateRoots] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -186,6 +158,7 @@ func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) err
b.state.StateRoots = r
b.markFieldAsDirty(stateRoots)
b.AddDirtyIndices(stateRoots, []uint64{idx})
return nil
}
@@ -233,6 +206,7 @@ func (b *BeaconState) SetEth1DataVotes(val []*ethpb.Eth1Data) error {
b.state.Eth1DataVotes = val
b.markFieldAsDirty(eth1DataVotes)
b.rebuildTrie[eth1DataVotes] = true
return nil
}
@@ -246,7 +220,7 @@ func (b *BeaconState) AppendEth1DataVotes(val *ethpb.Eth1Data) error {
votes := b.state.Eth1DataVotes
if b.sharedFieldReferences[eth1DataVotes].refs > 1 {
votes = b.Eth1DataVotes()
b.sharedFieldReferences[eth1DataVotes].refs--
b.sharedFieldReferences[eth1DataVotes].MinusRef()
b.sharedFieldReferences[eth1DataVotes] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -256,6 +230,7 @@ func (b *BeaconState) AppendEth1DataVotes(val *ethpb.Eth1Data) error {
b.state.Eth1DataVotes = append(votes, val)
b.markFieldAsDirty(eth1DataVotes)
b.AddDirtyIndices(eth1DataVotes, []uint64{uint64(len(b.state.Eth1DataVotes) - 1)})
return nil
}
@@ -285,12 +260,13 @@ func (b *BeaconState) SetValidators(val []*ethpb.Validator) error {
b.sharedFieldReferences[validators].refs--
b.sharedFieldReferences[validators] = &reference{refs: 1}
b.markFieldAsDirty(validators)
b.rebuildTrie[validators] = true
return nil
}
// ApplyToEveryValidator applies the provided callback function to each validator in the
// validator registry.
func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator) error) error {
func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator) (bool, error)) error {
if !b.HasInnerState() {
return ErrNilInnerState
}
@@ -300,16 +276,19 @@ func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator
// Perform a copy since this is a shared reference and we don't want to mutate others.
v = b.Validators()
ref.refs--
ref.MinusRef()
b.sharedFieldReferences[validators] = &reference{refs: 1}
}
b.lock.RUnlock()
changedVals := []uint64{}
for i, val := range v {
err := f(i, val)
changed, err := f(i, val)
if err != nil {
return err
}
if changed {
changedVals = append(changedVals, uint64(i))
}
}
b.lock.Lock()
@@ -317,6 +296,8 @@ func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator
b.state.Validators = v
b.markFieldAsDirty(validators)
b.AddDirtyIndices(validators, changedVals)
return nil
}
@@ -336,7 +317,7 @@ func (b *BeaconState) UpdateValidatorAtIndex(idx uint64, val *ethpb.Validator) e
// Perform a copy since this is a shared reference and we don't want to mutate others.
v = b.Validators()
ref.refs--
ref.MinusRef()
b.sharedFieldReferences[validators] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -347,6 +328,8 @@ func (b *BeaconState) UpdateValidatorAtIndex(idx uint64, val *ethpb.Validator) e
v[idx] = val
b.state.Validators = v
b.markFieldAsDirty(validators)
b.AddDirtyIndices(validators, []uint64{idx})
return nil
}
@@ -394,7 +377,7 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx uint64, val uint64) error {
bals := b.state.Balances
if b.sharedFieldReferences[balances].refs > 1 {
bals = b.Balances()
b.sharedFieldReferences[balances].refs--
b.sharedFieldReferences[balances].MinusRef()
b.sharedFieldReferences[balances] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -422,6 +405,7 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
b.state.RandaoMixes = val
b.markFieldAsDirty(randaoMixes)
b.rebuildTrie[randaoMixes] = true
return nil
}
@@ -439,7 +423,7 @@ func (b *BeaconState) UpdateRandaoMixesAtIndex(val []byte, idx uint64) error {
mixes := b.state.RandaoMixes
if refs := b.sharedFieldReferences[randaoMixes].refs; refs > 1 {
mixes = b.RandaoMixes()
b.sharedFieldReferences[randaoMixes].refs--
b.sharedFieldReferences[randaoMixes].MinusRef()
b.sharedFieldReferences[randaoMixes] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -450,6 +434,8 @@ func (b *BeaconState) UpdateRandaoMixesAtIndex(val []byte, idx uint64) error {
mixes[idx] = val
b.state.RandaoMixes = mixes
b.markFieldAsDirty(randaoMixes)
b.AddDirtyIndices(randaoMixes, []uint64{idx})
return nil
}
@@ -484,7 +470,7 @@ func (b *BeaconState) UpdateSlashingsAtIndex(idx uint64, val uint64) error {
if b.sharedFieldReferences[slashings].refs > 1 {
s = b.Slashings()
b.sharedFieldReferences[slashings].refs--
b.sharedFieldReferences[slashings].MinusRef()
b.sharedFieldReferences[slashings] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -514,6 +500,7 @@ func (b *BeaconState) SetPreviousEpochAttestations(val []*pbp2p.PendingAttestati
b.state.PreviousEpochAttestations = val
b.markFieldAsDirty(previousEpochAttestations)
b.rebuildTrie[previousEpochAttestations] = true
return nil
}
@@ -531,6 +518,7 @@ func (b *BeaconState) SetCurrentEpochAttestations(val []*pbp2p.PendingAttestatio
b.state.CurrentEpochAttestations = val
b.markFieldAsDirty(currentEpochAttestations)
b.rebuildTrie[currentEpochAttestations] = true
return nil
}
@@ -544,7 +532,7 @@ func (b *BeaconState) AppendHistoricalRoots(root [32]byte) error {
roots := b.state.HistoricalRoots
if b.sharedFieldReferences[historicalRoots].refs > 1 {
roots = b.HistoricalRoots()
b.sharedFieldReferences[historicalRoots].refs--
b.sharedFieldReferences[historicalRoots].MinusRef()
b.sharedFieldReferences[historicalRoots] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -568,7 +556,7 @@ func (b *BeaconState) AppendCurrentEpochAttestations(val *pbp2p.PendingAttestati
atts := b.state.CurrentEpochAttestations
if b.sharedFieldReferences[currentEpochAttestations].refs > 1 {
atts = b.CurrentEpochAttestations()
b.sharedFieldReferences[currentEpochAttestations].refs--
b.sharedFieldReferences[currentEpochAttestations].MinusRef()
b.sharedFieldReferences[currentEpochAttestations] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -578,6 +566,7 @@ func (b *BeaconState) AppendCurrentEpochAttestations(val *pbp2p.PendingAttestati
b.state.CurrentEpochAttestations = append(atts, val)
b.markFieldAsDirty(currentEpochAttestations)
b.dirtyIndices[currentEpochAttestations] = append(b.dirtyIndices[currentEpochAttestations], uint64(len(b.state.CurrentEpochAttestations)-1))
return nil
}
@@ -591,7 +580,7 @@ func (b *BeaconState) AppendPreviousEpochAttestations(val *pbp2p.PendingAttestat
atts := b.state.PreviousEpochAttestations
if b.sharedFieldReferences[previousEpochAttestations].refs > 1 {
atts = b.PreviousEpochAttestations()
b.sharedFieldReferences[previousEpochAttestations].refs--
b.sharedFieldReferences[previousEpochAttestations].MinusRef()
b.sharedFieldReferences[previousEpochAttestations] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -601,6 +590,8 @@ func (b *BeaconState) AppendPreviousEpochAttestations(val *pbp2p.PendingAttestat
b.state.PreviousEpochAttestations = append(atts, val)
b.markFieldAsDirty(previousEpochAttestations)
b.AddDirtyIndices(previousEpochAttestations, []uint64{uint64(len(b.state.PreviousEpochAttestations) - 1)})
return nil
}
@@ -614,7 +605,7 @@ func (b *BeaconState) AppendValidator(val *ethpb.Validator) error {
vals := b.state.Validators
if b.sharedFieldReferences[validators].refs > 1 {
vals = b.Validators()
b.sharedFieldReferences[validators].refs--
b.sharedFieldReferences[validators].MinusRef()
b.sharedFieldReferences[validators] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -624,6 +615,7 @@ func (b *BeaconState) AppendValidator(val *ethpb.Validator) error {
b.state.Validators = append(vals, val)
b.markFieldAsDirty(validators)
b.AddDirtyIndices(validators, []uint64{uint64(len(b.state.Validators) - 1)})
return nil
}
@@ -638,7 +630,7 @@ func (b *BeaconState) AppendBalance(bal uint64) error {
bals := b.state.Balances
if b.sharedFieldReferences[balances].refs > 1 {
bals = b.Balances()
b.sharedFieldReferences[balances].refs--
b.sharedFieldReferences[balances].MinusRef()
b.sharedFieldReferences[balances] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -745,3 +737,9 @@ func (b *BeaconState) markFieldAsDirty(field fieldIndex) {
}
// do nothing if field already exists
}
// AddDirtyIndices adds the relevant dirty field indices, so that they
// can be recomputed.
func (b *BeaconState) AddDirtyIndices(index fieldIndex, indices []uint64) {
b.dirtyIndices[index] = append(b.dirtyIndices[index], indices...)
}

View File

@@ -0,0 +1,403 @@
package state
import (
"runtime"
"sort"
"sync"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/protolambda/zssz/merkle"
coreutils "github.com/prysmaticlabs/prysm/beacon-chain/core/state/stateutils"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/memorypool"
"github.com/prysmaticlabs/prysm/shared/params"
)
// InitializeFromProto the beacon state from a protobuf representation.
// The input is deep-cloned first, so the caller's protobuf is never shared
// with (or mutated through) the returned BeaconState; see
// InitializeFromProtoUnsafe for the zero-copy variant.
func InitializeFromProto(st *pbp2p.BeaconState) (*BeaconState, error) {
	return InitializeFromProtoUnsafe(proto.Clone(st).(*pbp2p.BeaconState))
}
// InitializeFromProtoUnsafe directly uses the beacon state protobuf pointer
// and sets it as the inner state of the BeaconState type.
//
// "Unsafe" because the input is not cloned: external mutation of st remains
// visible through the returned BeaconState.
func InitializeFromProtoUnsafe(st *pbp2p.BeaconState) (*BeaconState, error) {
	b := &BeaconState{
		state:                 st,
		dirtyFields:           make(map[fieldIndex]interface{}, 20),
		dirtyIndices:          make(map[fieldIndex][]uint64, 20),
		stateFieldLeaves:      make(map[fieldIndex]*FieldTrie, 20),
		sharedFieldReferences: make(map[fieldIndex]*reference, 10),
		rebuildTrie:           make(map[fieldIndex]bool, 20),
		valIdxMap:             coreutils.ValidatorIndexMap(st.Validators),
	}

	// 20 is the number of beacon-state fields (the fieldIndex enum runs from
	// genesisTime through finalizedCheckpoint). Every field starts dirty and
	// every trie starts marked for rebuild, so the first hashing pass
	// computes everything from scratch.
	for i := 0; i < 20; i++ {
		b.dirtyFields[fieldIndex(i)] = true
		b.rebuildTrie[fieldIndex(i)] = true
		b.dirtyIndices[fieldIndex(i)] = []uint64{}
		b.stateFieldLeaves[fieldIndex(i)] = &FieldTrie{
			field:     fieldIndex(i),
			reference: &reference{1},
			Mutex:     new(sync.Mutex),
		}
	}

	// Initialize field reference tracking for shared data.
	b.sharedFieldReferences[randaoMixes] = &reference{refs: 1}
	b.sharedFieldReferences[stateRoots] = &reference{refs: 1}
	b.sharedFieldReferences[blockRoots] = &reference{refs: 1}
	b.sharedFieldReferences[previousEpochAttestations] = &reference{refs: 1}
	b.sharedFieldReferences[currentEpochAttestations] = &reference{refs: 1}
	b.sharedFieldReferences[slashings] = &reference{refs: 1}
	b.sharedFieldReferences[eth1DataVotes] = &reference{refs: 1}
	b.sharedFieldReferences[validators] = &reference{refs: 1}
	b.sharedFieldReferences[balances] = &reference{refs: 1}
	b.sharedFieldReferences[historicalRoots] = &reference{refs: 1}

	return b, nil
}
// Copy returns a deep copy of the beacon state.
//
// Scalar fields are copied by value. Large reference-typed fields (roots,
// attestations, validators, balances, ...) are shared with the source and
// guarded by reference counters in sharedFieldReferences, so they are only
// duplicated lazily when either side mutates them (copy-on-write). Returns
// nil when no inner protobuf state is set.
func (b *BeaconState) Copy() *BeaconState {
	if !b.HasInnerState() {
		return nil
	}
	b.lock.RLock()
	defer b.lock.RUnlock()
	dst := &BeaconState{
		state: &pbp2p.BeaconState{
			// Primitive types, safe to copy.
			GenesisTime:      b.state.GenesisTime,
			Slot:             b.state.Slot,
			Eth1DepositIndex: b.state.Eth1DepositIndex,
			// Large arrays, infrequently changed, constant size.
			// Slice headers are shared, not cloned — see sharedFieldReferences below.
			RandaoMixes:               b.state.RandaoMixes,
			StateRoots:                b.state.StateRoots,
			BlockRoots:                b.state.BlockRoots,
			PreviousEpochAttestations: b.state.PreviousEpochAttestations,
			CurrentEpochAttestations:  b.state.CurrentEpochAttestations,
			Slashings:                 b.state.Slashings,
			Eth1DataVotes:             b.state.Eth1DataVotes,
			// Large arrays, increases over time.
			Validators:      b.state.Validators,
			Balances:        b.state.Balances,
			HistoricalRoots: b.state.HistoricalRoots,
			// Everything else, too small to be concerned about, constant size.
			// The getters below return copies of the inner values.
			Fork:                        b.Fork(),
			LatestBlockHeader:           b.LatestBlockHeader(),
			Eth1Data:                    b.Eth1Data(),
			JustificationBits:           b.JustificationBits(),
			PreviousJustifiedCheckpoint: b.PreviousJustifiedCheckpoint(),
			CurrentJustifiedCheckpoint:  b.CurrentJustifiedCheckpoint(),
			FinalizedCheckpoint:         b.FinalizedCheckpoint(),
		},
		dirtyFields:           make(map[fieldIndex]interface{}, 20),
		dirtyIndices:          make(map[fieldIndex][]uint64, 20),
		rebuildTrie:           make(map[fieldIndex]bool, 20),
		sharedFieldReferences: make(map[fieldIndex]*reference, 10),
		stateFieldLeaves:      make(map[fieldIndex]*FieldTrie, 20),

		// Copy on write validator index map.
		valIdxMap: b.valIdxMap,
	}

	// Bump the refcount on every shared field: both src and dst now point at
	// the same backing arrays.
	for field, ref := range b.sharedFieldReferences {
		ref.AddRef()
		dst.sharedFieldReferences[field] = ref
	}

	// The copy inherits the source's pending-recompute bookkeeping.
	for i := range b.dirtyFields {
		dst.dirtyFields[i] = true
	}

	for i := range b.dirtyIndices {
		indices := make([]uint64, len(b.dirtyIndices[i]))
		copy(indices, b.dirtyIndices[i])
		dst.dirtyIndices[i] = indices
	}

	for i := range b.rebuildTrie {
		dst.rebuildTrie[i] = true
	}

	// Field tries are shared too; take a ref under the trie's own mutex.
	for fldIdx, fieldTrie := range b.stateFieldLeaves {
		dst.stateFieldLeaves[fldIdx] = fieldTrie
		if fieldTrie.reference != nil {
			fieldTrie.Lock()
			fieldTrie.AddRef()
			fieldTrie.Unlock()
		}
	}

	// Merkle layers are deep-copied so dst can recompute roots independently.
	if b.merkleLayers != nil {
		dst.merkleLayers = make([][][]byte, len(b.merkleLayers))
		for i, layer := range b.merkleLayers {
			dst.merkleLayers[i] = make([][]byte, len(layer))
			for j, content := range layer {
				dst.merkleLayers[i][j] = make([]byte, len(content))
				copy(dst.merkleLayers[i][j], content)
			}
		}
	}

	// Finalizer runs when dst is being destroyed in garbage collection.
	// It drops dst's references and returns fully-released large buffers to
	// the memory pools for reuse.
	// NOTE(review): refs is decremented directly here rather than via
	// MinusRef — presumably safe because finalizers for a given object run
	// once; confirm there is no concurrent refcount access.
	runtime.SetFinalizer(dst, func(b *BeaconState) {
		for field, v := range b.sharedFieldReferences {
			v.refs--
			if b.stateFieldLeaves[field].reference != nil {
				b.stateFieldLeaves[field].MinusRef()
			}
			if field == randaoMixes && v.refs == 0 {
				memorypool.PutDoubleByteSlice(b.state.RandaoMixes)
				if b.stateFieldLeaves[field].refs == 0 {
					memorypool.PutRandaoMixesTrie(b.stateFieldLeaves[randaoMixes].fieldLayers)
				}
			}
			if field == blockRoots && v.refs == 0 && b.stateFieldLeaves[field].refs == 0 {
				memorypool.PutBlockRootsTrie(b.stateFieldLeaves[blockRoots].fieldLayers)
			}
			if field == stateRoots && v.refs == 0 && b.stateFieldLeaves[field].refs == 0 {
				memorypool.PutStateRootsTrie(b.stateFieldLeaves[stateRoots].fieldLayers)
			}
		}
	})

	return dst
}
// HashTreeRoot of the beacon state retrieves the Merkle root of the trie
// representation of the beacon state based on the eth2 Simple Serialize
// specification. The first call builds the full layer cache; subsequent
// calls only re-root the fields recorded in dirtyFields and bubble those
// changes up the trie.
func (b *BeaconState) HashTreeRoot() ([32]byte, error) {
	b.lock.Lock()
	defer b.lock.Unlock()

	// len() of a nil slice is 0, so the former `== nil ||` guard was
	// redundant (staticcheck S1009).
	if len(b.merkleLayers) == 0 {
		fieldRoots, err := stateutil.ComputeFieldRoots(b.state)
		if err != nil {
			return [32]byte{}, err
		}
		layers := merkleize(fieldRoots)
		b.merkleLayers = layers
		// A fresh merkleization incorporates every field; nothing is dirty.
		b.dirtyFields = make(map[fieldIndex]interface{})
	}

	for field := range b.dirtyFields {
		root, err := b.rootSelector(field)
		if err != nil {
			return [32]byte{}, err
		}
		b.merkleLayers[0][field] = root[:]
		b.recomputeRoot(int(field))
		delete(b.dirtyFields, field)
	}
	// The topmost layer holds the single state root.
	return bytesutil.ToBytes32(b.merkleLayers[len(b.merkleLayers)-1][0]), nil
}
// merkleize hashes 32-byte leaves into a Merkle trie for its adequate depth,
// returning the resulting layers of the trie. The leaf layer is padded with
// zero chunks up to 32 entries (the beacon state currently has 20 top-level
// field roots).
func merkleize(leaves [][]byte) [][][]byte {
	hashFunc := hashutil.CustomSHA256Hasher()
	layers := make([][][]byte, merkle.GetDepth(uint64(len(leaves)))+1)
	// Pad up to 32 leaves. Using `<` instead of the previous `!=` guards
	// against an infinite append loop should this ever be called with more
	// than 32 leaves.
	for len(leaves) < 32 {
		leaves = append(leaves, make([]byte, 32))
	}
	currentLayer := leaves
	layers[0] = currentLayer

	// We keep track of the hash layers of a Merkle trie until we reach
	// the top layer of length 1, which contains the single root element.
	//        [Root]      -> Top layer has length 1.
	//    [E]       [F]   -> This layer has length 2.
	// [A]  [B]  [C]  [D] -> The bottom layer has length 4 (needs to be a power of two).
	i := 1
	for len(currentLayer) > 1 && i < len(layers) {
		// j replaces the old inner `i`, which shadowed the layer counter.
		layer := make([][]byte, 0, len(currentLayer)/2)
		for j := 0; j < len(currentLayer); j += 2 {
			hashedChunk := hashFunc(append(currentLayer[j], currentLayer[j+1]...))
			layer = append(layer, hashedChunk[:])
		}
		currentLayer = layer
		layers[i] = currentLayer
		i++
	}
	return layers
}
// rootSelector computes the Merkle root of one top-level beacon state field,
// identified by its field index. Trie-backed fields (roots, votes,
// validators, attestations) go through rootViaFieldTrie when the field-trie
// feature is enabled; otherwise the full array is re-hashed via stateutil.
// Returns an error for an unknown field index.
func (b *BeaconState) rootSelector(field fieldIndex) ([32]byte, error) {
	switch field {
	case genesisTime:
		return stateutil.Uint64Root(b.state.GenesisTime), nil
	case slot:
		return stateutil.Uint64Root(b.state.Slot), nil
	case eth1DepositIndex:
		return stateutil.Uint64Root(b.state.Eth1DepositIndex), nil
	case fork:
		return stateutil.ForkRoot(b.state.Fork)
	case latestBlockHeader:
		return stateutil.BlockHeaderRoot(b.state.LatestBlockHeader)
	case blockRoots:
		if featureconfig.Get().EnableFieldTrie {
			return b.rootViaFieldTrie(field, b.state.BlockRoots, params.BeaconConfig().SlotsPerHistoricalRoot)
		}
		return stateutil.RootsArrayHashTreeRoot(b.state.BlockRoots, params.BeaconConfig().SlotsPerHistoricalRoot, "BlockRoots")
	case stateRoots:
		if featureconfig.Get().EnableFieldTrie {
			return b.rootViaFieldTrie(field, b.state.StateRoots, params.BeaconConfig().SlotsPerHistoricalRoot)
		}
		return stateutil.RootsArrayHashTreeRoot(b.state.StateRoots, params.BeaconConfig().SlotsPerHistoricalRoot, "StateRoots")
	case historicalRoots:
		return stateutil.HistoricalRootsRoot(b.state.HistoricalRoots)
	case eth1Data:
		return stateutil.Eth1Root(b.state.Eth1Data)
	case eth1DataVotes:
		if featureconfig.Get().EnableFieldTrie {
			return b.rootViaFieldTrie(field, b.state.Eth1DataVotes, params.BeaconConfig().SlotsPerEth1VotingPeriod)
		}
		return stateutil.Eth1DataVotesRoot(b.state.Eth1DataVotes)
	case validators:
		if featureconfig.Get().EnableFieldTrie {
			return b.rootViaFieldTrie(field, b.state.Validators, params.BeaconConfig().ValidatorRegistryLimit)
		}
		return stateutil.ValidatorRegistryRoot(b.state.Validators)
	case balances:
		return stateutil.ValidatorBalancesRoot(b.state.Balances)
	case randaoMixes:
		if featureconfig.Get().EnableFieldTrie {
			return b.rootViaFieldTrie(field, b.state.RandaoMixes, params.BeaconConfig().EpochsPerHistoricalVector)
		}
		return stateutil.RootsArrayHashTreeRoot(b.state.RandaoMixes, params.BeaconConfig().EpochsPerHistoricalVector, "RandaoMixes")
	case slashings:
		return stateutil.SlashingsRoot(b.state.Slashings)
	case previousEpochAttestations:
		if featureconfig.Get().EnableFieldTrie {
			return b.rootViaFieldTrie(field, b.state.PreviousEpochAttestations, params.BeaconConfig().MaxAttestations*params.BeaconConfig().SlotsPerEpoch)
		}
		return stateutil.EpochAttestationsRoot(b.state.PreviousEpochAttestations)
	case currentEpochAttestations:
		if featureconfig.Get().EnableFieldTrie {
			return b.rootViaFieldTrie(field, b.state.CurrentEpochAttestations, params.BeaconConfig().MaxAttestations*params.BeaconConfig().SlotsPerEpoch)
		}
		return stateutil.EpochAttestationsRoot(b.state.CurrentEpochAttestations)
	case justificationBits:
		return bytesutil.ToBytes32(b.state.JustificationBits), nil
	case previousJustifiedCheckpoint:
		return stateutil.CheckpointRoot(b.state.PreviousJustifiedCheckpoint)
	case currentJustifiedCheckpoint:
		return stateutil.CheckpointRoot(b.state.CurrentJustifiedCheckpoint)
	case finalizedCheckpoint:
		return stateutil.CheckpointRoot(b.state.FinalizedCheckpoint)
	}
	return [32]byte{}, errors.New("invalid field index provided")
}

// rootViaFieldTrie returns the root of a field backed by a field trie.
// When the trie is marked for rebuild it is reconstructed from scratch with
// the given elements and max length; otherwise only the dirty leaves are
// recomputed. This factors out the identical branch previously duplicated
// for every trie-backed case in rootSelector.
func (b *BeaconState) rootViaFieldTrie(field fieldIndex, elements interface{}, length uint64) ([32]byte, error) {
	if b.rebuildTrie[field] {
		if err := b.resetFieldTrie(field, elements, length); err != nil {
			return [32]byte{}, err
		}
		b.dirtyIndices[field] = []uint64{}
		delete(b.rebuildTrie, field)
		return b.stateFieldLeaves[field].TrieRoot()
	}
	return b.recomputeFieldTrie(field, elements)
}
// recomputeFieldTrie incrementally recomputes the root of a single field's
// trie, touching only the leaves recorded in dirtyIndices[index]. When the
// trie is shared with other state copies (refs > 1) it is cloned first so
// the mutation never leaks into siblings (copy-on-write).
//
// NOTE(review): fTrie.refs is read before fTrie.Lock() is taken — presumably
// callers already hold b.lock so the count cannot race; confirm.
func (b *BeaconState) recomputeFieldTrie(index fieldIndex, elements interface{}) ([32]byte, error) {
	fTrie := b.stateFieldLeaves[index]
	if fTrie.refs > 1 {
		fTrie.Lock()
		defer fTrie.Unlock()
		fTrie.MinusRef()
		newTrie := fTrie.CopyTrie()
		b.stateFieldLeaves[index] = newTrie
		fTrie = newTrie
	}
	// remove duplicate indexes (union with the empty set dedupes in place)
	b.dirtyIndices[index] = sliceutil.UnionUint64(b.dirtyIndices[index], []uint64{})
	// sort indexes again — RecomputeTrie expects them in ascending order
	sort.Slice(b.dirtyIndices[index], func(i int, j int) bool {
		return b.dirtyIndices[index][i] < b.dirtyIndices[index][j]
	})
	root, err := fTrie.RecomputeTrie(b.dirtyIndices[index], elements)
	if err != nil {
		return [32]byte{}, err
	}
	// All pending leaf updates are now folded in; clear the dirty set.
	b.dirtyIndices[index] = []uint64{}
	return root, nil
}
// resetFieldTrie rebuilds the trie for the given field from scratch out of
// the provided elements and maximum length, replacing whatever trie was
// previously cached, and clears the field's dirty indices.
func (b *BeaconState) resetFieldTrie(index fieldIndex, elements interface{}, length uint64) error {
	// The previous version first read b.stateFieldLeaves[index] into fTrie
	// and immediately overwrote it — a dead assignment (staticcheck SA4006).
	fTrie, err := NewFieldTrie(index, elements, length)
	if err != nil {
		return err
	}
	b.stateFieldLeaves[index] = fTrie
	b.dirtyIndices[index] = []uint64{}
	return nil
}

View File

@@ -3,19 +3,27 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"cold.go",
"epoch_boundary_root.go",
"errors.go",
"getter.go",
"hot.go",
"log.go",
"migrate.go",
"replay.go",
"service.go",
"setter.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
@@ -30,9 +38,14 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"cold_test.go",
"epoch_boundary_root_test.go",
"getter_test.go",
"hot_test.go",
"migrate_test.go",
"replay_test.go",
"service_test.go",
"setter_test.go",
],
embed = [":go_default_library"],
deps = [
@@ -44,7 +57,9 @@ go_test(
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -0,0 +1,221 @@
package stategen
import (
"context"
"encoding/hex"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// saveColdState persists a pre-finalized beacon state in the cold section of
// the DB. It returns an error and stores nothing when the state's slot does
// not lie on an archived point boundary.
func (s *State) saveColdState(ctx context.Context, blockRoot [32]byte, state *state.BeaconState) error {
	ctx, span := trace.StartSpan(ctx, "stateGen.saveColdState")
	defer span.End()

	if state.Slot()%s.slotsPerArchivedPoint != 0 {
		return errSlotNonArchivedPoint
	}

	idx := state.Slot() / s.slotsPerArchivedPoint
	if err := s.beaconDB.SaveArchivedPointState(ctx, state, idx); err != nil {
		return err
	}
	if err := s.beaconDB.SaveArchivedPointRoot(ctx, blockRoot, idx); err != nil {
		return err
	}

	log.WithFields(logrus.Fields{
		"slot":      state.Slot(),
		"blockRoot": hex.EncodeToString(bytesutil.Trunc(blockRoot[:]))}).Info("Saved full state on archived point")

	return nil
}
// loadColdStateByRoot loads the cold state by block root. It decides whether
// to load from an archived point (fast) or from between archived points
// (slower, requires replaying blocks). This method is more efficient than
// loading a cold state by slot alone.
func (s *State) loadColdStateByRoot(ctx context.Context, blockRoot [32]byte) (*state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.loadColdStateByRoot")
	defer span.End()

	summary, err := s.beaconDB.StateSummary(ctx, blockRoot)
	if err != nil {
		return nil, err
	}
	if summary == nil {
		return nil, errUnknownStateSummary
	}

	// Use the archived point state if the summary slot lies on top of the archived point.
	if summary.Slot%s.slotsPerArchivedPoint == 0 {
		archivedPoint := summary.Slot / s.slotsPerArchivedPoint
		// Renamed from `s`, which shadowed the method receiver.
		archivedState, err := s.loadColdStateByArchivedPoint(ctx, archivedPoint)
		if err != nil {
			return nil, errors.Wrap(err, "could not get cold state using archived index")
		}
		if archivedState == nil {
			return nil, errUnknownArchivedState
		}
		return archivedState, nil
	}

	return s.loadColdIntermediateStateByRoot(ctx, summary.Slot, blockRoot)
}
// loadColdStateByArchivedPoint fetches the cold state stored for the given
// archived point index directly from the DB.
func (s *State) loadColdStateByArchivedPoint(ctx context.Context, archivedPoint uint64) (*state.BeaconState, error) {
	return s.beaconDB.ArchivedPointState(ctx, archivedPoint)
}
// loadColdIntermediateStateByRoot loads a cold state that lies between
// archived points, by replaying blocks on top of the nearest lower archived
// point state. Given a block root, this is faster than the slot-only variant
// since the replay target is known exactly.
func (s *State) loadColdIntermediateStateByRoot(ctx context.Context, slot uint64, blockRoot [32]byte) (*state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.loadColdIntermediateStateByRoot")
	defer span.End()

	// Load the archive point for lower side of the intermediate state.
	lowArchivedPointIdx := slot / s.slotsPerArchivedPoint
	lowArchivedPointState, err := s.archivedPointByIndex(ctx, lowArchivedPointIdx)
	if err != nil {
		return nil, errors.Wrap(err, "could not get lower archived state using index")
	}
	if lowArchivedPointState == nil {
		return nil, errUnknownArchivedState
	}

	replayBlks, err := s.LoadBlocks(ctx, lowArchivedPointState.Slot()+1, slot, blockRoot)
	if err != nil {
		// Message fixed: previously read "could not get load blocks ...".
		return nil, errors.Wrap(err, "could not load blocks for cold state using slot")
	}

	return s.ReplayBlocks(ctx, lowArchivedPointState, replayBlks, slot)
}
// loadColdIntermediateStateBySlot loads a cold state by slot where the slot
// lies between archived points. This is slower than the by-root variant:
// with only a slot as input, all blocks between the surrounding archival
// points must be fetched and replayed.
func (s *State) loadColdIntermediateStateBySlot(ctx context.Context, slot uint64) (*state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.loadColdIntermediateStateBySlot")
	defer span.End()

	// Load the archive point for lower and high side of the intermediate state.
	lowArchivedPointIdx := slot / s.slotsPerArchivedPoint
	highArchivedPointIdx := lowArchivedPointIdx + 1

	lowArchivedPointState, err := s.archivedPointByIndex(ctx, lowArchivedPointIdx)
	if err != nil {
		return nil, errors.Wrap(err, "could not get lower bound archived state using index")
	}
	if lowArchivedPointState == nil {
		return nil, errUnknownArchivedState
	}

	// If the slot of the high archived point lies outside of the split slot, use the split slot and root
	// for the upper archived point.
	var highArchivedPointRoot [32]byte
	highArchivedPointSlot := highArchivedPointIdx * s.slotsPerArchivedPoint
	if highArchivedPointSlot >= s.splitInfo.slot {
		highArchivedPointRoot = s.splitInfo.root
		highArchivedPointSlot = s.splitInfo.slot
	} else {
		// Ensure the upper archived point exists (regenerating it if needed)
		// before resolving its root and slot.
		if _, err := s.archivedPointByIndex(ctx, highArchivedPointIdx); err != nil {
			return nil, errors.Wrap(err, "could not get upper bound archived state using index")
		}
		highArchivedPointRoot = s.beaconDB.ArchivedPointRoot(ctx, highArchivedPointIdx)
		// Renamed from `slot`, which shadowed this function's parameter.
		archivedSlot, err := s.blockRootSlot(ctx, highArchivedPointRoot)
		if err != nil {
			return nil, errors.Wrap(err, "could not get high archived point slot")
		}
		highArchivedPointSlot = archivedSlot
	}

	replayBlks, err := s.LoadBlocks(ctx, lowArchivedPointState.Slot()+1, highArchivedPointSlot, highArchivedPointRoot)
	if err != nil {
		return nil, errors.Wrap(err, "could not load block for cold state using slot")
	}

	return s.ReplayBlocks(ctx, lowArchivedPointState, replayBlks, slot)
}
// archivedPointByIndex returns the archived cold state stored in the DB for
// the given archive index. If the archived state is absent, it is recomputed
// via playback and saved before being returned.
func (s *State) archivedPointByIndex(ctx context.Context, archiveIndex uint64) (*state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.loadArchivedPointByIndex")
	defer span.End()

	if !s.beaconDB.HasArchivedPoint(ctx, archiveIndex) {
		// The archived point can be missing for certain reasons (e.g. a
		// toggled feature flag); regenerate it and persist it.
		return s.recoverArchivedPointByIndex(ctx, archiveIndex)
	}
	return s.beaconDB.ArchivedPointState(ctx, archiveIndex)
}
// recoverArchivedPointByIndex regenerates an archived point state via block
// playback and saves the recovered root/state pair to the DB. An archived
// point may be absent from the DB for certain reasons (e.g. a user toggled a
// feature flag).
func (s *State) recoverArchivedPointByIndex(ctx context.Context, archiveIndex uint64) (*state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.recoverArchivedPointByIndex")
	defer span.End()

	targetSlot := archiveIndex * s.slotsPerArchivedPoint
	recovered, err := s.ComputeStateUpToSlot(ctx, targetSlot)
	if err != nil {
		return nil, errors.Wrap(err, "could not compute state up to archived index slot")
	}
	if recovered == nil {
		return nil, errUnknownArchivedState
	}

	lastRoot, _, err := s.lastSavedBlock(ctx, targetSlot)
	if err != nil {
		return nil, errors.Wrap(err, "could not get last valid block up to archived index slot")
	}

	if err := s.beaconDB.SaveArchivedPointRoot(ctx, lastRoot, archiveIndex); err != nil {
		return nil, err
	}
	if err := s.beaconDB.SaveArchivedPointState(ctx, recovered, archiveIndex); err != nil {
		return nil, err
	}

	return recovered, nil
}
// blockRootSlot returns the slot of the given block root using a state
// summary lookup in the DB. If no state summary exists, it falls back to the
// block bucket, recovers the summary, and saves it to the DB. This covers
// corner cases where users toggle the new state service's feature flag.
func (s *State) blockRootSlot(ctx context.Context, blockRoot [32]byte) (uint64, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.blockRootSlot")
	defer span.End()

	if s.beaconDB.HasStateSummary(ctx, blockRoot) {
		summary, err := s.beaconDB.StateSummary(ctx, blockRoot)
		if err != nil {
			// Bug fix: this previously returned `0, nil`, silently swallowing
			// the DB error and reporting slot 0 as a valid result.
			return 0, err
		}
		if summary == nil {
			return 0, errUnknownStateSummary
		}
		return summary.Slot, nil
	}

	// Couldn't find state summary in DB. Retry with block bucket to get block slot.
	b, err := s.beaconDB.Block(ctx, blockRoot)
	if err != nil {
		return 0, err
	}
	if b == nil || b.Block == nil {
		return 0, errUnknownBlock
	}

	if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: blockRoot[:], Slot: b.Block.Slot}); err != nil {
		return 0, errors.Wrap(err, "could not save state summary")
	}

	return b.Block.Slot, nil
}

View File

@@ -0,0 +1,358 @@
package stategen
import (
"context"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
// TestSaveColdState_NonArchivedPoint verifies that saving a state whose slot
// is not on an archived point boundary is rejected.
func TestSaveColdState_NonArchivedPoint(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	svc.slotsPerArchivedPoint = 2
	st, _ := testutil.DeterministicGenesisState(t, 32)
	st.SetSlot(1)

	err := svc.saveColdState(ctx, [32]byte{}, st)
	if err != errSlotNonArchivedPoint {
		t.Error("Did not get wanted error")
	}
}
// TestSaveColdState_CanSave verifies a state on an archived point boundary is
// persisted with its root and retrievable unchanged.
func TestSaveColdState_CanSave(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	svc.slotsPerArchivedPoint = 1
	st, _ := testutil.DeterministicGenesisState(t, 32)
	st.SetSlot(1)

	root := [32]byte{'a'}
	if err := svc.saveColdState(ctx, root, st); err != nil {
		t.Fatal(err)
	}

	if !svc.beaconDB.HasArchivedPoint(ctx, 1) {
		t.Error("Did not save cold state")
	}
	if svc.beaconDB.ArchivedPointRoot(ctx, 1) != root {
		t.Error("Did not get wanted root")
	}
	got, err := svc.beaconDB.ArchivedPointState(ctx, 1)
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(got.InnerStateUnsafe(), st.InnerStateUnsafe()) {
		t.Error("Did not get wanted state")
	}
}
// TestLoadColdStateByRoot_NoStateSummary verifies the lookup fails with the
// dedicated error when no state summary exists for the root.
func TestLoadColdStateByRoot_NoStateSummary(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	_, err := svc.loadColdStateByRoot(ctx, [32]byte{'a'})
	if err != errUnknownStateSummary {
		t.Fatal("Did not get correct error")
	}
}
// TestLoadColdStateByRoot_ByArchivedPoint verifies a root whose summary slot
// sits exactly on an archived point loads the archived state directly.
func TestLoadColdStateByRoot_ByArchivedPoint(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	svc.slotsPerArchivedPoint = 1
	st, _ := testutil.DeterministicGenesisState(t, 32)
	if err := svc.beaconDB.SaveArchivedPointState(ctx, st, 1); err != nil {
		t.Fatal(err)
	}

	root := [32]byte{'a'}
	summary := &pb.StateSummary{
		Root: root[:],
		Slot: 1,
	}
	if err := svc.beaconDB.SaveStateSummary(ctx, summary); err != nil {
		t.Fatal(err)
	}

	got, err := svc.loadColdStateByRoot(ctx, root)
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(got.InnerStateUnsafe(), st.InnerStateUnsafe()) {
		t.Error("Did not correctly save state")
	}
}
// TestLoadColdStateByRoot_IntermediatePlayback verifies a state between
// archived points is regenerated via block playback to the summary slot.
func TestLoadColdStateByRoot_IntermediatePlayback(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	svc.slotsPerArchivedPoint = 2
	st, _ := testutil.DeterministicGenesisState(t, 32)
	if err := svc.beaconDB.SaveArchivedPointState(ctx, st, 1); err != nil {
		t.Fatal(err)
	}
	if err := svc.beaconDB.SaveArchivedPointRoot(ctx, [32]byte{}, 1); err != nil {
		t.Fatal(err)
	}

	root := [32]byte{'a'}
	wantSlot := uint64(3)
	summary := &pb.StateSummary{
		Root: root[:],
		Slot: wantSlot,
	}
	if err := svc.beaconDB.SaveStateSummary(ctx, summary); err != nil {
		t.Fatal(err)
	}

	got, err := svc.loadColdStateByRoot(ctx, root)
	if err != nil {
		t.Fatal(err)
	}
	if got.Slot() != wantSlot {
		t.Error("Did not correctly save state")
	}
}
// TestLoadColdStateBySlotIntermediatePlayback_BeforeCutoff verifies playback
// by slot when the target lies below the hot/cold split cutoff.
func TestLoadColdStateBySlotIntermediatePlayback_BeforeCutoff(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	svc.slotsPerArchivedPoint = params.BeaconConfig().SlotsPerEpoch * 2
	st, _ := testutil.DeterministicGenesisState(t, 32)
	// Seed both surrounding archived points.
	for _, idx := range []uint64{0, 1} {
		if err := svc.beaconDB.SaveArchivedPointState(ctx, st, idx); err != nil {
			t.Fatal(err)
		}
		if err := svc.beaconDB.SaveArchivedPointRoot(ctx, [32]byte{}, idx); err != nil {
			t.Fatal(err)
		}
	}

	wantSlot := uint64(20)
	got, err := svc.loadColdIntermediateStateBySlot(ctx, wantSlot)
	if err != nil {
		t.Fatal(err)
	}
	if got.Slot() != wantSlot {
		t.Error("Did not correctly save state")
	}
}
// TestLoadColdStateBySlotIntermediatePlayback_AfterCutoff verifies playback
// by slot when the upper archived point lies beyond the split cutoff.
func TestLoadColdStateBySlotIntermediatePlayback_AfterCutoff(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	svc.slotsPerArchivedPoint = params.BeaconConfig().SlotsPerEpoch
	st, _ := testutil.DeterministicGenesisState(t, 32)
	if err := svc.beaconDB.SaveArchivedPointState(ctx, st, 0); err != nil {
		t.Fatal(err)
	}
	if err := svc.beaconDB.SaveArchivedPointRoot(ctx, [32]byte{}, 0); err != nil {
		t.Fatal(err)
	}

	wantSlot := uint64(10)
	got, err := svc.loadColdIntermediateStateBySlot(ctx, wantSlot)
	if err != nil {
		t.Fatal(err)
	}
	if got.Slot() != wantSlot {
		t.Error("Did not correctly save state")
	}
}
// TestLoadColdStateByRoot_UnknownArchivedState verifies the lookup surfaces
// errUnknownArchivedState when no archived state exists.
func TestLoadColdStateByRoot_UnknownArchivedState(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	service := New(db)
	service.slotsPerArchivedPoint = 1
	_, err := service.loadColdIntermediateStateBySlot(ctx, 0)
	// Guard err == nil first: the original called err.Error() directly,
	// which panics with a nil-pointer dereference if the call unexpectedly
	// succeeds, instead of failing the test cleanly.
	if err == nil || !strings.Contains(err.Error(), errUnknownArchivedState.Error()) {
		t.Log(err)
		t.Error("Did not get wanted error")
	}
}
// TestArchivedPointByIndex_HasPoint verifies an existing archived point is
// returned from the DB unchanged.
func TestArchivedPointByIndex_HasPoint(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	st, _ := testutil.DeterministicGenesisState(t, 32)
	idx := uint64(999)
	if err := svc.beaconDB.SaveArchivedPointState(ctx, st, idx); err != nil {
		t.Fatal(err)
	}
	if err := svc.beaconDB.SaveArchivedPointRoot(ctx, [32]byte{'A'}, idx); err != nil {
		t.Fatal(err)
	}

	got, err := svc.archivedPointByIndex(ctx, idx)
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(st.InnerStateUnsafe(), got.InnerStateUnsafe()) {
		t.Error("Diff saved state")
	}
}
// TestArchivedPointByIndex_DoesntHavePoint verifies a missing archived point
// is regenerated via playback and persisted back to the DB.
func TestArchivedPointByIndex_DoesntHavePoint(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	genesisBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
	genesisRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := svc.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
		t.Fatal(err)
	}
	st, _ := testutil.DeterministicGenesisState(t, 32)
	if err := svc.beaconDB.SaveState(ctx, st, genesisRoot); err != nil {
		t.Fatal(err)
	}
	svc.slotsPerArchivedPoint = 32

	recovered, err := svc.archivedPointByIndex(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	if recovered.Slot() != svc.slotsPerArchivedPoint*2 {
		t.Error("Diff state slot")
	}

	persisted, err := svc.beaconDB.ArchivedPointState(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(recovered.InnerStateUnsafe(), persisted.InnerStateUnsafe()) {
		t.Error("Diff saved archived state")
	}
}
// TestRecoverArchivedPointByIndex_CanRecover verifies an archived point can
// be regenerated via playback and that the recovered state is persisted.
func TestRecoverArchivedPointByIndex_CanRecover(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	service := New(db)
	gBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
	gRoot, err := ssz.HashTreeRoot(gBlk.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := service.beaconDB.SaveBlock(ctx, gBlk); err != nil {
		t.Fatal(err)
	}
	beaconState, _ := testutil.DeterministicGenesisState(t, 32)
	if err := service.beaconDB.SaveState(ctx, beaconState, gRoot); err != nil {
		t.Fatal(err)
	}
	service.slotsPerArchivedPoint = 32

	recoveredState, err := service.recoverArchivedPointByIndex(ctx, 1)
	if err != nil {
		t.Fatal(err)
	}
	if recoveredState.Slot() != service.slotsPerArchivedPoint {
		t.Error("Diff state slot")
	}

	savedArchivedState, err := service.beaconDB.ArchivedPointState(ctx, 1)
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(recoveredState.InnerStateUnsafe(), savedArchivedState.InnerStateUnsafe()) {
		// Typo fixed: the failure message previously read "Diff savled state".
		t.Error("Diff saved state")
	}
}
// TestBlockRootSlot_Exists verifies the slot is resolved straight from an
// existing state summary.
func TestBlockRootSlot_Exists(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	svc := New(db)
	wantRoot := [32]byte{'A'}
	wantSlot := uint64(100)
	summary := &pb.StateSummary{
		Slot: wantSlot,
		Root: wantRoot[:],
	}
	if err := svc.beaconDB.SaveStateSummary(ctx, summary); err != nil {
		t.Fatal(err)
	}

	gotSlot, err := svc.blockRootSlot(ctx, wantRoot)
	if err != nil {
		t.Fatal(err)
	}
	if gotSlot != wantSlot {
		t.Error("Did not get correct block root slot")
	}
}
// TestBlockRootSlot_CanRecoverAndSave verifies the slot is recovered from the
// block bucket when no summary exists, and that a summary is saved back.
func TestBlockRootSlot_CanRecoverAndSave(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	service := New(db)
	bSlot := uint64(100)
	b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: bSlot}}
	// Check the hashing error instead of discarding it with `_` as before —
	// a failed root would otherwise surface as a confusing lookup failure.
	bRoot, err := ssz.HashTreeRoot(b.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := service.beaconDB.SaveBlock(ctx, b); err != nil {
		t.Fatal(err)
	}

	slot, err := service.blockRootSlot(ctx, bRoot)
	if err != nil {
		t.Fatal(err)
	}
	if slot != bSlot {
		t.Error("Did not get correct block root slot")
	}

	// Verify state summary is saved.
	if !service.beaconDB.HasStateSummary(ctx, bRoot) {
		t.Error("State summary not saved")
	}
}

Some files were not shown because too many files have changed in this diff Show More