Compare commits

...

168 Commits

Author SHA1 Message Date
Preston Van Loon
0fff07a93b Create CODEOWNERS (#5330)
* Create CODEOWNERS
* Move codeowners file to proper location
2020-04-07 03:22:43 +00:00
Preston Van Loon
70e64be8d6 Remove old cross compile starlark rules (#5329)
* Add buildbuddy BES (#5325)

* Add buildbuddy BES
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* remove old cross compile rules
2020-04-07 03:01:20 +00:00
Preston Van Loon
33ffa34ea7 Use less goroutines in validator runner (#5328)
* Add buildbuddy BES (#5325)

* Add buildbuddy BES
* Use less goroutines when running validator
* per-role based goroutines
* Merge branch 'master' into validator-issue-4702
2020-04-07 01:34:01 +00:00
Preston Van Loon
bcebf63cab Add buildbuddy BES (#5325)
* Add buildbuddy BES
2020-04-07 00:54:21 +00:00
terence tsao
d6f7d67ee9 Interop batch save validator indices (#5320)
* Batch save indices

* Update beacon-chain/interop-cold-start/service.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

* Update beacon-chain/interop-cold-start/service.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

* Update service.go

Co-authored-by: shayzluf <thezluf@gmail.com>
2020-04-06 13:40:42 -05:00
Chris Hobcroft
b7afc90266 Fixed typo in stdout log (#5317)
"Round robin**g** sync request failed"

changed to

"Round robin sync request failed"
2020-04-06 20:34:24 +08:00
Ivan Martinez
4b64a75c77 Remove unused validator protos (#5304)
* Remove unneeded protos

* Remove unused api point

* Gazelle

* Fix visibility

* Rename

* Change type

* Use iota for validator role
2020-04-06 11:24:24 +08:00
Ivan Martinez
fcf131412f Fix cluster in bazel and remove unused file (#5316)
* Fix cluster bazel

* remove unneeded file
2020-04-06 10:23:05 +08:00
Mattia
279dd5ac8d fix broken links in readme (#5313)
* fix broken links in readme

The "Activating a validator" section referenced a broken link. This commit points it to the new location of the documentation.
* Merge branch 'master' into master
2020-04-05 20:19:36 +00:00
Jim McDonald
c7a4fcd098 Reduce noise in validator logs (#5307)
* Reduce number of info-level messages on start of validator

* Test service, not log entry

* Gazelle

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-04-05 15:36:18 -04:00
Ivan Martinez
07753189fd Remove unused slashing protos (#5308) 2020-04-05 12:50:52 -04:00
terence tsao
e162d27634 Save init synced cached blocks to db (#5309) 2020-04-04 16:25:04 -07:00
Victor Farazdagi
f440c815f9 Init sync update highest slot (#5298)
* updates highest slot before wrapping up
* more verbose error message
* error w/o stack
* revert back
2020-04-04 17:11:38 +03:00
Preston Van Loon
9aac572c21 Update @terencechain public key. (#5301)
* Update @terencechain public key.
2020-04-03 22:01:44 +00:00
Preston Van Loon
7bdd1355b8 Add maligned struct static check (#5296)
* Add maligned static check
* Add file, oops
* lint
2020-04-03 05:09:15 +00:00
Preston Van Loon
477b014bd1 Set a max limit for decoding ssz objects from p2p (#5295)
* Set a max limit for decoding ssz objects from p2p
2020-04-02 23:51:54 +00:00
terence tsao
ec7f7aebdc Clear init sync blocks on the correct line (#5294)
* Add disable-init-sync-batch-save-blocks
* Fix test
* Remove flag
* Merge branch 'master' into disable-init-sync-batch-save
* Quick fix
* Quick fix
* Merge branch 'master' of github.com:prysmaticlabs/prysm into disable-init-sync-batch-save
* Merge branch 'disable-init-sync-batch-save' of github.com:prysmaticlabs/prysm into disable-init-sync-batch-save
* Clear init sync blocks at the right place
2020-04-02 22:27:51 +00:00
terence tsao
3544ed2818 Invert init-sync-batch-save-blocks flag for v0.11 (#5293) 2020-04-02 14:46:14 -07:00
Victor Farazdagi
b43e43b4a9 Init sync release queue (#5286)
* fix naming slot -> epoch

* better handling of long periods w/o finality

* bazel

* fixes issue with pointer going too far ahead

* adds func comment

* hides original sync behind --disable-init-sync-queue

* adds func comment

* deprecated

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-04-02 11:06:32 -05:00
Victor Farazdagi
c26a492225 Init sync optimizations (#5284)
* fix naming slot -> epoch
* better handling of long periods w/o finality
* fixes issue with pointer going too far ahead
2020-04-02 06:54:05 +03:00
shayzluf
0df12261a1 slasher retrieve and cache validator public key (#5220)
* cache and retrieval of validator public keys

* fix comments

* fix comment

* fix variables

* gaz

* ivan feedback fixes

* goimports

* fix test

* comments on in line slice update

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-04-02 06:08:23 +03:00
Raul Jordan
f385a1dea6 Release Skip Slot Cache to All (#5280)
* no more skip slot cache

* imports

* deprecated

* fix flakeyness

* disable in e2e

* build

* fix viz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-04-01 19:09:54 -07:00
Preston Van Loon
8376fb36ca Enable slashing protection in validator by default (#5278)
* Invert slashing protection validator flags for issue #5267
* remove from e2e flags
* Make error level
* Merge refs/heads/master into flip-propose
2020-04-01 23:17:32 +00:00
Jim McDonald
02b238bda2 Add latch for proposer warnings (#5258)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-04-01 17:28:02 -05:00
Ivan Martinez
3dd5576e33 Improvement, flake fixes (#5263) 2020-04-01 10:23:23 -05:00
terence tsao
df9a534826 Regen historical states for new-state-mgmt compatibility (#5261) 2020-03-31 16:54:24 -07:00
Nishant Das
7e50c36725 Add Hasher To State Data Types (#5244)
* add hasher
* re-order stuff
* Merge refs/heads/master into addHasher
* Merge refs/heads/master into addHasher
* Merge refs/heads/master into addHasher
* Merge refs/heads/master into addHasher
* Merge refs/heads/master into addHasher
* Merge refs/heads/master into addHasher
* Merge refs/heads/master into addHasher
* Merge refs/heads/master into addHasher
* Merge refs/heads/master into addHasher
* Merge refs/heads/master into addHasher
2020-03-31 18:57:19 +00:00
terence tsao
e22365c4a8 Uncomment out cold state tests (#5252)
* Fixed most of the tests

* All tests passing

* All tests passing

* Fix merge conflict

* Fixed error test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-31 11:23:39 -05:00
Nishant Das
c8f8e3f1e0 Unmarshal Block instead of State (#5246)
* unmarshal block instead of state
* add fallback
* Merge refs/heads/master into dontUnmarshal
* Merge refs/heads/master into dontUnmarshal
* Merge refs/heads/master into dontUnmarshal
* Merge refs/heads/master into dontUnmarshal
* Merge refs/heads/master into dontUnmarshal
* Merge refs/heads/master into dontUnmarshal
* Merge refs/heads/master into dontUnmarshal
* Merge refs/heads/master into dontUnmarshal
2020-03-31 15:25:58 +00:00
Ivan Martinez
3e81afd7ab Skip anti-flake E2E tests (#5257)
* Skip anti-flake

* Log out the shard index to see it per shard

* Attempt fixes

* Remove unneeded log

* Change eth1 ports

* Remove skips

* Remove log

* Attempt local build

* Fix formatting

* Formatting

* Skip anti flake tests
2020-03-31 23:15:33 +08:00
Ivan Martinez
404a0f6bda Attempt E2E flaking fix (#5256)
* Fix test sharding

* Attempt fix
2020-03-31 12:39:56 +08:00
Preston Van Loon
00ef08b3dc Debug: add cgo symbolizer (#5255)
* Add cgo_symbolizer config

* Add comment

* use import block
2020-03-30 20:20:27 -07:00
Preston Van Loon
6edb3018f9 Add configurations for BLS builds (#5254)
* Add configurations for BLS builds
* Merge refs/heads/master into bls-configurations
2020-03-31 01:58:27 +00:00
Preston Van Loon
17516b625e Use math.Sqrt for IntegerSquareRoot (#5253)
* use std
* Merge refs/heads/master into faster-sqrt
2020-03-31 01:27:37 +00:00
terence tsao
7f7866ff2a Micro optimizations on new-state-mgmt service for initial syncing (#5241)
* Starting a quick PoC

* Rate limit to one epoch worth of blocks in memory

* Proof of concept working

* Quick comment out

* Save previous finalized checkpoint

* Test

* Minor fixes

* More run time fixes

* Remove panic

* Feature flag

* Removed unused methods

* Fixed tests

* E2e test

* comment

* Compatible with current initial sync

* Starting

* New cache

* Cache getters and setters

* It should be part of state gen

* Need to use cache for DB

* Don't have to use finalized state

* Rm unused file

* some changes to memory mgmt when using mempool

* More run time fixes

* Can sync to head

* Feedback

* Revert "some changes to memory mgmt when using mempool"

This reverts commit f5b3e7ff47.

* Fixed sync tests

* Fixed existing tests

* Test for state summary getter

* Gaz

* Fix kafka passthrough

* Fixed inputs

* Gaz

* Fixed build

* Fixed visibility

* Trying without the ignore

* Didn't work..

* Fix kafka

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-03-30 17:10:45 -05:00
terence tsao
c5f186d56f Batch save blocks for initial sync. 80% faster BPS (#5215)
* Starting a quick PoC
* Rate limit to one epoch worth of blocks in memory
* Proof of concept working
* Quick comment out
* Save previous finalized checkpoint
* Merge branch 'master' of github.com:prysmaticlabs/prysm into batch-save
* Test
* Merge branch 'prev-finalized-getter' into batch-save
* Minor fixes
* Use a map
* More run time fixes
* Remove panic
* Feature flag
* Removed unused methods
* Fixed tests
* E2e test
* Merge branch 'master' into batch-save
* comment
* Merge branch 'master' into batch-save
* Compatible with current initial sync
* Merge branch 'batch-save' of github.com:prysmaticlabs/prysm into batch-save
* Merge refs/heads/master into batch-save
* Merge refs/heads/master into batch-save
* Merge refs/heads/master into batch-save
* Merge branch 'master' of github.com:prysmaticlabs/prysm into batch-save
* Feedback
* Merge branch 'batch-save' of github.com:prysmaticlabs/prysm into batch-save
* Merge refs/heads/master into batch-save
2020-03-30 18:04:10 +00:00
Ivan Martinez
0982ff124e Fix E2E test sharding (#5248) 2020-03-30 12:10:00 -04:00
Nishant Das
63df1d0b8d Add Merkleize With Customized Hasher (#5234)
* add buffer for merkleizer
* add comment
* Merge refs/heads/master into merkleize
* Merge refs/heads/master into merkleize
* Merge refs/heads/master into merkleize
* Merge refs/heads/master into merkleize
* Merge refs/heads/master into merkleize
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into merkleize
* Merge branch 'merkleize' of https://github.com/prysmaticlabs/geth-sharding into merkleize
* lint
* Merge refs/heads/master into merkleize
2020-03-29 06:13:24 +00:00
Ivan Martinez
cb9ac6282f Separate anti flakes to prevent E2E issues (#5238)
* Separate anti flakes

* Gaz
2020-03-29 13:54:13 +08:00
terence tsao
c67b01e5d3 Check new state mgmt service is compatible with DB (#5231) 2020-03-28 18:07:51 -07:00
terence tsao
b40e6db1e5 Fix save blocks return nil (#5237)
* Fixed save blocks return nil
* Merge refs/heads/master into fix-batch-save-blocks
* Merge refs/heads/master into fix-batch-save-blocks
2020-03-28 19:05:56 +00:00
Preston Van Loon
f89d753275 Add configurable e2e epochs (#5235)
* Add configurable e2e epochs
* Merge refs/heads/master into configurable-test-epochs
* Merge refs/heads/master into configurable-test-epochs
2020-03-28 18:47:31 +00:00
Preston Van Loon
a24546152b HashProto: Use fastssz when available (#5218)
* Use fastssz when available
* fix tests
* fix most tests
* Merge branch 'master' into faster-hash-proto
* Merge refs/heads/master into faster-hash-proto
* Merge refs/heads/master into faster-hash-proto
* Merge refs/heads/master into faster-hash-proto
* fix last test
* Merge branch 'faster-hash-proto' of github.com:prysmaticlabs/prysm into faster-hash-proto-2
* lint
* fix last test
* fix again
* Update beacon-chain/cache/checkpoint_state_test.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Merge refs/heads/master into faster-hash-proto
2020-03-28 18:32:11 +00:00
Preston Van Loon
6bc70e228f Prevent panic for different size bitlists (#5233)
* Fix #5232
* Merge branch 'master' into bugfix-5232
2020-03-28 06:25:49 +00:00
terence tsao
f2a3fadda7 Productionization new state service part 1 (#5230)
* Fixed last play methods

* Fixed a regression. Genesis case for state gen

* Comment

* Starting

* Update proto

* Remove boundary root usages

* Update migrate

* Clean up

* Remove unused db methods

* Kafka

* Kafka

* Update tests

* Comments

* Fix state summary tests

* Missed one pass through for kafka
2020-03-27 13:28:38 -07:00
terence tsao
6a4b17f237 Prune garbage state is not for new state mgmt (#5225)
* Prune garbage state is not for new state mgmt
* Merge branch 'master' into state-mgmt-pruning
* Merge branch 'master' into state-mgmt-pruning
* Merge branch 'master' into state-mgmt-pruning
2020-03-27 14:30:24 +00:00
Victor Farazdagi
7ebb3c1784 init-sync revamp (#5148)
* fix issue with rate limiting
* force fetcher to wait for minimum peers
* adds backoff interval
* cap the max blocks requested from a peer
* queue rewritten
* adds docs to fsm
* fix visibility
* updates fsm
* fsm tests added
* optimizes queue resource allocations
* removes debug log
* replace auto-fixed comment
* fixes typo
* better handling of evil peers
* fixes test
* minor fixes to fsm
* better interface for findEpochState func
2020-03-27 09:54:57 +03:00
Nishant Das
33f6c22607 Revert "Add Fast Copy of Trie" (#5228)
* Revert "new fixes (#5221)"

This reverts commit 4118fa5242.
2020-03-27 01:06:30 +00:00
terence tsao
1a0a399bed Handle genesis case for blocks/states at slot index (#5224)
* Handle highest slot = 0
* TestStore_GenesisState_CanGetHighestBelow
* TestStore_GenesisBlock_CanGetHighestAt
* Merge refs/heads/master into handle-genesis
2020-03-27 00:09:14 +00:00
Preston Van Loon
c4c9a8465a Faster hashing for attestation pool (#5217)
* use faster hash proto
* Merge branch 'master' into faster-att-pool
* gaz
* Merge branch 'faster-att-pool' of github.com:prysmaticlabs/prysm into faster-att-pool
* nil checks and failing tests
* Merge refs/heads/master into faster-att-pool
* Merge refs/heads/master into faster-att-pool
* Merge refs/heads/master into faster-att-pool
* Merge refs/heads/master into faster-att-pool
* Merge refs/heads/master into faster-att-pool
* Fix
* Merge branch 'faster-att-pool' of github.com:prysmaticlabs/prysm into faster-att-pool
* Fix tests
2020-03-26 23:55:25 +00:00
terence tsao
5e2faf1a9d Short circuit genesis condition for new state mgmt (#5223)
* Fixed a regression. Genesis case for state gen

* Comment

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-26 14:13:45 -05:00
shayzluf
93e68db5e6 is slashable attestation endpoint implementation (#5209)
* is slashable attestation endpoint implementation
* fix todo
* comment
* Merge refs/heads/master into is_slashable_attestation
* Merge refs/heads/master into is_slashable_attestation
* Merge refs/heads/master into is_slashable_attestation
* Update slasher/rpc/server.go
* Update slasher/rpc/server.go
* Update slasher/rpc/service.go
2020-03-26 18:31:20 +00:00
Nishant Das
cdac3d61ea Custom Block HTR (#5219)
* add custom htr

* fix root

* fix everything

* Apply suggestions from code review

* Update beacon-chain/state/stateutil/blocks.go

* Update beacon-chain/blockchain/receive_block.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* Update beacon-chain/blockchain/process_block.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* terence's review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-26 13:10:22 -05:00
Nishant Das
4118fa5242 new fixes (#5221)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-26 11:21:02 -05:00
terence tsao
2df76798bc Add HighestSlotStatesBelow DB getter (#5213)
* Add HighestSlotStatesBelow
* Tests for HighestSlotStatesBelow
* Typos
* Comment
* Merge refs/heads/master into states-slots-saved-at
* Quick fix
* Merge branch 'states-slots-saved-at' of github.com:prysmaticlabs/prysm into states-slots-saved-at
* Prevent underflow for real, thanks nishant!
2020-03-26 15:37:40 +00:00
Preston Van Loon
3792bf67b6 Add alpine based docker images for validator and beacon chain (#5214)
* Add alpine based images for validator and beacon chain

* Use an alpine image with glibc

* manual tags on transitional targets

* poke buildkite

* poke buildkite
2020-03-25 19:36:28 -05:00
Nishant Das
e077d3ddc9 Fix Incorrect Logging for IPV6 Addresses (#5204)
* fix ipv6 issues
* Merge branch 'master' into fixIPV6
* imports
* Merge branch 'fixIPV6' of https://github.com/prysmaticlabs/geth-sharding into fixIPV6
* Merge branch 'master' into fixIPV6
2020-03-25 17:19:11 +00:00
Preston Van Loon
2ad5cec56c Add gRPC headers flag support for validator client side (#5203)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-25 11:29:04 -05:00
terence tsao
6b1e60c404 Remove extra udp port log (#5205)
* Remove extra udp port log
* Merge branch 'master' into rm-log
2020-03-25 15:28:29 +00:00
terence tsao
48e984f526 Add HighestSlotBlocksBelow getter for db (#5195)
* Add HighestSlotBlockAt

* Start testing

* Apply fixes

* Typo

* Test

* Rename for clarity

* Use length

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-25 08:41:48 -05:00
Preston Van Loon
fbee94a0e9 Remove deprecated aggregator (#5200) 2020-03-25 06:14:21 -07:00
Preston Van Loon
9740245ca5 Add enable-state-field-trie for e2e (#5198)
* Add enable-state-field-trie for e2e
* Merge refs/heads/master into e2e-enable-state-field-trie
* Merge refs/heads/master into e2e-enable-state-field-trie
* fix all this
* Update shared/sliceutil/slice.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* terence's review
* comment
* Merge branch 'e2e-enable-state-field-trie' of https://github.com/prysmaticlabs/geth-sharding into e2e-enable-state-field-trie
2020-03-25 06:54:56 +00:00
Preston Van Loon
48d4a8655a Add ipv6 multiaddr support (#5199)
* Add ipv6 multiaddr support
* allow ipv6 for discv5
2020-03-25 04:03:51 +00:00
terence tsao
e15d92df06 Apply fixes to block slots methods in DB (#5194)
* Apply fixes
* Typo
* Merge refs/heads/master into fix-slots-saved-for-blocks
2020-03-25 03:05:20 +00:00
Preston Van Loon
729bd83734 Add span to HTR and skip slot cache (#5197)
* Add a bit more span data
* missing import
* Merge branch 'master' into more-spans
2020-03-25 01:15:00 +00:00
terence tsao
c63fb2cd44 Add HighestSlotState Getter for db (#5192) 2020-03-24 14:51:24 -07:00
terence tsao
78a865eb0b Replace boltdb imports with bbolt import (#5193)
* Replaced. Debugging missing strict dependencies...
* Merge branch 'master' into bbolt-import
* Update import path
* Merge branch 'bbolt-import' of github.com:prysmaticlabs/prysm into bbolt-import
* use forked prombbolt
* Merge branch 'bbolt-import' of github.com:prysmaticlabs/prysm into bbolt-import
* fix
* remove old boltdb reference
* Use correct bolt for pk manager
* Merge branch 'bbolt-import' of github.com:prysmaticlabs/prysm into bbolt-import
* fix for docker build
* gaz, oops
2020-03-24 20:00:54 +00:00
shayzluf
6e516dabf9 Setup Slasher RPC server (#5190)
* slasher rpc server

* fix comment

* fix comment

* remove server implementation from pr

* Apply suggestions from code review

* Gazelle

* Update slasher/rpc/service.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>

* Update slasher/detection/detect.go

* Update slasher/detection/detect.go

* Update slasher/detection/detect.go

* Update slasher/detection/detect.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-24 14:30:21 -04:00
Nishant Das
454e02ac4f Add Improvements to State Benchmarks (#5187)
* add improvements

* clean up
2020-03-24 09:16:07 -05:00
Preston Van Loon
35d74981a0 Correctly return attestation data for late requests (#5183)
* Add functionality to support attestation requests that are older than the head state

* lint

* gaz

* Handle nil state case

* handle underflow of first epoch

* Remove redundant and possibly wrong genesisTime struct field

* fix remaining tests

* gofmt

* remove debug comment

* use stategen.StateByRoot interchangably with beaconDB.State

* gofmt

* goimports

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-23 21:30:28 -07:00
Ivan Martinez
d8bcd891c4 Change E2E config ports to be non-commonly used (#5184)
* Change config ports to be non-commonly used
2020-03-24 03:20:38 +00:00
terence tsao
2e0158d7c5 Add HighestSlotBlock Getter for db (#5182)
* Starting

* Update block getters in db

* New test

* New test for save blocks as well

* Delete blocks can clear bits tests

* Fmt
2020-03-23 18:42:41 -05:00
Ivan Martinez
bdb80f4639 Change ListAttestations to get attestations from blocks (#5145)
* Start fixing api to get from blocks

* Fix listatts tests

* Fix slasher

* Improve blocks

* Change att grouping

* Use faster att concat

* Try to fix e2e

* Change back time

* tiny e2e fix

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-23 16:22:37 -04:00
Preston Van Loon
0043fb0441 Shard //beacon-chain/core/state:go_default_test (#5180)
* Add shards to core state tests, some of the fuzzing tests can be very slow
* Merge refs/heads/master into shard-core-state-tests
2020-03-23 19:08:39 +00:00
Preston Van Loon
f520472fc6 Buildkite changes (#5178)
* Do not override jobs, dont print colors
* Merge branch 'master' of github.com:prysmaticlabs/prysm into buildkite-changes
* use composite flag for minimal downloads
* Add repository cache
* use hardlinks
* repository cache common
* query and build repo cache
2020-03-23 19:00:37 +00:00
Preston Van Loon
5241582ece Add CORS preflight support (#5177)
* Add CORS preflight support

* lint

* clarify description
2020-03-23 13:17:17 -05:00
Nishant Das
b0128ad894 Add Attestation Subnet Bitfield (#4989)
* bump bitfield dep

* add new methods

* get it working

* add nil check

* add check

* one more check

* add flag

* everything works local run

* add debug log

* more changes

* ensuring p2p interface works enough for tests to pass

* all tests pass

* include proper naming and comments to fix lint

* Apply suggestions from code review

* discover by peers

* cannot figure out why 0 peers

* remove keys

* fix test

* fix it

* fix again

* remove log

* change back

* gaz

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-23 09:41:47 -05:00
Preston Van Loon
5d1c3da85c BLS: some minor improvements (#5161)
* some improvements
* gofmt
* Merge refs/heads/master into bls-improvements
* Merge refs/heads/master into bls-improvements
* Merge refs/heads/master into bls-improvements
2020-03-22 23:40:39 +00:00
terence tsao
301c2a1448 New byteutils for state gen (#5163)
* New byteutils for state gen
* Added empty slice and nil checks
* Merge branch 'master' into bit-utils
* SetBit to extend bit
* Merge branch 'bit-utils' of github.com:prysmaticlabs/prysm into bit-utils
* Comment
* Add HighestBitIndexBelow
* Test for HighestBitIndexBelow
* another test and better test fail output
* Feedback
* Merge branch 'bit-utils' of github.com:prysmaticlabs/prysm into bit-utils
* Feedback
* Preston's feedback, thanks!
* Use var
* Use var
* Merge refs/heads/master into bit-utils
2020-03-22 23:19:38 +00:00
Ivan Martinez
bc3d673ea4 Parallelize E2E Testing (#5168)
* Begin cleanup for E2E

* Parallelize testing

* Add comments

* Add comment
2020-03-22 19:04:23 -04:00
Preston Van Loon
3d092d3eed Update go-bitfield (#5162)
* Update go-bitfield from https://github.com/prysmaticlabs/go-bitfield/pull/28
2020-03-22 04:51:06 +00:00
Preston Van Loon
4df5c042d9 Use faster bitfield BitIndices to build attesting indices (#5160)
* Refactor AttestingIndices to not return any error. Add tests. Add shortcut for fully attested attestation
* attestationutil.ConvertToIndexed never returned error either
* Working with benchmark:
* fix test
* Merge branch 'attestationutil-improvements-0' into attestationutil-improvements-1
* out of bounds check
* Update after merge of https://github.com/prysmaticlabs/go-bitfield/pull/26
* remove shortcut
* Merge refs/heads/attestationutil-improvements-0 into attestationutil-improvements-1
* Merge branch 'attestationutil-improvements-0' into attestationutil-improvements-1
* Merge branch 'attestationutil-improvements-1' of github.com:prysmaticlabs/prysm into attestationutil-improvements-1
* revert test...
* Merge refs/heads/attestationutil-improvements-0 into attestationutil-improvements-1
* Merge branch 'master' of github.com:prysmaticlabs/prysm into attestationutil-improvements-1
* Merge branch 'attestationutil-improvements-1' of github.com:prysmaticlabs/prysm into attestationutil-improvements-1
* Update go-bitfield after https://github.com/prysmaticlabs/go-bitfield/pull/27
2020-03-22 01:42:51 +00:00
Preston Van Loon
d06b0e8a86 Refactor attestationutil.AttestingIndices (#5159)
* Refactor AttestingIndices to not return any error. Add tests. Add shortcut for fully attested attestation
* attestationutil.ConvertToIndexed never returned error either
* fix test
* remove shortcut
* revert test...
2020-03-22 00:23:37 +00:00
Jim McDonald
4f8d9c59dd Replace default value for datadir (#5147) 2020-03-21 23:30:51 +08:00
Ivan Martinez
021d777b5e Add Anti-Flake test for E2E (#5149)
* Add antiflake test

* Respond to comments

* Comment

* Change issue num
2020-03-21 14:42:51 +08:00
terence tsao
dc3fb018fe Fix new state mgmt sync stuck in a loop (#5142) 2020-03-19 18:46:35 -07:00
Preston Van Loon
2ab4b86f9b Allow setting flags via yaml config file. (#4878) 2020-03-19 14:46:44 -07:00
Ivan Martinez
b30a089548 Add fetching validators by indices and public keys (#5141)
* update ethereumapis with patch
* Add indices and pubkeys to ListValidators request
* Add sorting
* Merge branch 'master' into validators-by-keys-indices
* Rename to index
* Merge branch 'validators-by-keys-indices' of https://github.com/prysmaticlabs/prysm into validators-by-keys-indices
* Add comment
2020-03-19 20:30:40 +00:00
Ivan Martinez
271938202e Improve validator logs (#5140)
* Improve validator logging

* Update validator/client/validator_log.go
2020-03-19 13:34:50 -04:00
shayzluf
6fe814c5aa double proposal detector (#5120)
* proposal detector

* comment fixes

* comment fixes

* raul feedback

* fix todo

* gaz

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-19 17:29:35 +05:30
Preston Van Loon
a9f4d1d02d Attestation: Add a check for overflow (#5136)
* Add a check for overflow
* gofmt beacon-chain/cache/committee_test.go
2020-03-19 04:41:05 +00:00
Preston Van Loon
7c110e54f0 Add ssz marshal and unmarshal for most data structures (#5121)
* Add ssz marshal and unmarshal for most data structures
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Merge refs/heads/master into ssz-stuff
* Update ferran SSZ
* Update ferran's SSZ
* Merge refs/heads/master into ssz-stuff
* fix tests
* Merge branch 'ssz-stuff' of github.com:prysmaticlabs/prysm into ssz-stuff
* gaz
2020-03-19 02:39:23 +00:00
Raul Jordan
3043d4722f Attestation Dynamic Committee Subnets (#5123)
* initiate cache
* imports fix
* add in feature config flag
* utilize a dynamic set of subnets
* Merge branch 'master' into att-subnets
* add in feature config flag
* Merge branch 'att-subnets' of github.com:prysmaticlabs/prysm into att-subnets
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into att-subnets
* shift
* more changes
* gaz
* Update beacon-chain/rpc/validator/assignments.go
* Update beacon-chain/rpc/validator/assignments.go
* add flag
* Merge branch 'att-subnets' of https://github.com/prysmaticlabs/geth-sharding into att-subnets
* Merge branch 'master' into att-subnets
* Merge refs/heads/master into att-subnets
* no double flag
* Merge branch 'att-subnets' of github.com:prysmaticlabs/prysm into att-subnets
* amend committee ids to better name
* gaz
2020-03-18 23:13:37 +00:00
Ivan Martinez
c96c8b4aa3 Minor slasher fixes (#5129)
* Minor fixes

* Change log
2020-03-18 14:49:20 -05:00
Nishant Das
9f46000dba change to batch size (#5128)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-18 17:57:20 +08:00
Nishant Das
5450b3155e Integrate Field Tries into Current State (#5082)
* add new methods
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* new field trie
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* perform better copying
* fix bug
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* add support for variable length arrays
* get it running
* save all new progress
* more fixes
* more fixes
* more cleanup
* some more clean up
* new memory pool
* remove lock
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* use wrapper
* remove redundant methods
* cleanup
* cleanup
* remove unused method
* change field
* Update beacon-chain/state/types.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/types.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Apply suggestions from code review

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
2020-03-18 04:52:08 +00:00
Nishant Das
1bb12c3568 Add Field Trie to State (#5118)
* add new helpers

* make zerohash public

* remove unused method

* add more tests

* cleanup

* add in new tests

* fix all tests

* Apply suggestions from code review

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-18 10:09:31 +08:00
Ivan Martinez
1be8b3aa5e Slasher lag fix (#5124)
* Slasher fixes

* fix
2020-03-17 16:53:08 -05:00
Nishant Das
431762164e Add New State Utils (#5117)
* add new helpers

* make zerohash public

* remove unused method

* add more tests

* cleanup

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-17 14:25:17 -05:00
Victor Farazdagi
3ec2a0f9e0 Refactoring of initial sync (#5096)
* implements blocks queue

* refactors updateCounter method

* fixes deadlock on stop w/o start

* refactors updateSchedulerState

* more tests on scheduler

* parseFetchResponse tests

* wraps up tests for blocks queue

* eod commit

* fixes data race in round robin

* revamps fetcher

* fixes race conditions + livelocks + deadlocks

* less verbose output

* fixes data race, by isolating critical sections

* minor refactoring: resolves blocking calls

* implements init-sync queue

* update fetch/send buffers in blocks fetcher

* blockState enum-like type alias

* refactors common code into releaseTicket()

* better gc

* linter

* minor fix to round robin

* moves original round robin into its own package

* adds enableInitSyncQueue flag

* fixes issue with init-sync service selection

* Update beacon-chain/sync/initial-sync/round_robin.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* initsyncv1 -> initsyncold

* adds span

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-17 12:27:18 -05:00
Victor Farazdagi
e96b45b29c asserts non-nil state (#5115) 2020-03-17 07:58:16 -07:00
terence tsao
e529f5b1d6 Part 1 of integrating new state mgmt to run time (#5108) 2020-03-16 12:07:07 -07:00
Victor Farazdagi
f18bada8c9 Init sync blocks queue (#5064)
* fixes data race, by isolating critical sections

* minor refactoring: resolves blocking calls

* implements init-sync queue

* update fetch/send buffers in blocks fetcher

* blockState enum-like type alias

* refactors common code into releaseTicket()

* better gc

* linter

* Update beacon-chain/sync/initial-sync/blocks_queue.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

* Update beacon-chain/sync/initial-sync/blocks_queue_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-16 18:21:36 +03:00
terence tsao
5657535c52 Fixed saveNewValidators error log (#5109) 2020-03-15 16:21:56 -07:00
Preston Van Loon
9da9fbdfba Fix reward and penalty zero epoch bug (#5107)
* Fix reward and penalty bug https://github.com/prysmaticlabs/prysm/issues/5105
* Merge branch 'master' into fuzz-fix-attestationDelta
2020-03-15 19:14:52 +00:00
Ivan Martinez
de2ec8e575 Update README for Slasher (#5106)
* Add readme
2020-03-15 18:46:21 +00:00
terence tsao
3660732f44 Resume new state mgmt (#5102) 2020-03-15 09:47:49 -07:00
Jim McDonald
8e6c16d416 Tweak validator logging (#5103)
* Tidy up logging
2020-03-15 15:46:22 +00:00
Ivan Martinez
8143cc36bc Add Slasher to E2E (#5061)
* Start adding "inject slashing into pool"

* Attempt at slashing

* Remove unneded

* Fix

* Begin adding slasher client to e2e

* Start slasher in e2e

* Get slashing detection working

* Get slashing evaluators working

* Progress on e2e

* Cleanup e2e

* Fix slasher e2e!

* lint

* Comment

* Fixes

* Improve accuracy of balance check

* REmove extra

* Remove extra

* Make more accurate
2020-03-15 01:09:23 -04:00
terence tsao
eeffa4fb30 New state getter (#5101)
* getter.go
* getter_test.go
* fixed a cold bug
* fmt gaz
* All tests pass
* Merge branch 'master' into new-state-getter
* Merge refs/heads/master into new-state-getter
2020-03-14 18:39:23 +00:00
Victor Farazdagi
1137403e4b Init sync pre queue (#5098)
* fixes data race, by isolating critical sections

* minor refactoring: resolves blocking calls

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-14 13:21:07 -05:00
terence tsao
f17818b1c0 New state setter (#5100)
* setter.go
* tests
* fmt
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-state-setter
* Merge refs/heads/master into new-state-setter
2020-03-14 16:31:21 +00:00
Nishant Das
691f0bba70 Minor Improvements (#5099)
* add fixes
2020-03-14 16:12:22 +00:00
terence tsao
b024191887 Get cold intermediate state with slot (#5097)
* loadColdIntermediateStateWithSlot

* Starting test

* Tests
2020-03-14 10:34:37 -05:00
Raul Jordan
1f87cb11fc Use Current Time Slot for Fetching Committees in RPC (#5094)
* use genesis time fetcher
* Merge branch 'master' into use-time-fetcher
* fix breaking
* Merge branch 'use-time-fetcher' of github.com:prysmaticlabs/prysm into use-time-fetcher
* list beacon committees tests fixed
* Merge branch 'master' into use-time-fetcher
* Merge branch 'master' into use-time-fetcher
* Merge refs/heads/master into use-time-fetcher
* Update beacon-chain/rpc/beacon/committees_test.go
2020-03-14 03:32:51 +00:00
Preston Van Loon
a0b142a26c Update to go 1.14 (#4947)
* Update to go 1.14
* Update with fix from https://github.com/bazelbuild/rules_go/pull/2388
* Merge branch 'master' into go-1.14
* Merge refs/heads/master into go-1.14
* Merge branch 'master' of github.com:prysmaticlabs/prysm into go-1.14
* Update gRPC
* Merge branch 'go-1.14' of github.com:prysmaticlabs/prysm into go-1.14
* Update golang.org/x/crypto
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Merge refs/heads/master into go-1.14
* Committing gc_goopts for issue repro
* Fix race and msan builds
* Merge branch 'master' of github.com:prysmaticlabs/prysm into go-1.14
* Merge refs/heads/master into go-1.14
* switch to LRU
* Merge branch 'go-1.14' of github.com:prysmaticlabs/prysm into go-1.14
* Fixed, but dont feel good about this
* Switch append ordering
2020-03-14 00:12:52 +00:00
shayzluf
035eaffd9d handle slashing from p2p (#5047)
* handle slashing from p2p

* gaz

* remove unneeded check

* add tests

* gaz  goimports

* text update

* Apply suggestions from code review

* add proto.equal

* fix test

* add context to call

* fix state bug found by terence

* fix tests add error type handling

* nil checks

* nil head state check

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-13 16:47:27 -05:00
Ivan Martinez
c41244ad34 Make spanner tests more thorough, fixes (#5093)
* Fix tests for spanner

* Start change to indexed atts

* Improve tests for spanner

* Fix tests

* Remove extra
2020-03-13 14:04:22 -04:00
terence tsao
c20d9ccbb3 Better attestation pool with map instead of expiration cache (#5087)
* update aggregated

* update block att

* update forkchoice att

* update unaggregated att

* gazelle

* Use copy

* Locks

* Genesis time

* Fixed tests

* Comments

* Fixed tests
2020-03-13 12:35:28 -05:00
Ivan Martinez
3380d14475 Include ejected indices in ActiveSetChanges endpoint (#5066)
* Fix ActiveSetChanges

* Include ejected indices in ActiveSetChanges RPC

* Fix test fails

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-13 12:23:19 -04:00
shayzluf
4f031d1988 fix slasher rpc disconnect on error (#5092) 2020-03-13 10:59:14 -05:00
Jim McDonald
02afb53ea4 Remove spurious error messages in wallet keymanager (#5090)
* Handle multiple passphrases

* Add tests
2020-03-13 05:26:10 -07:00
terence tsao
0974c02a00 Load cold state by root (#5086) 2020-03-12 15:27:55 -07:00
Raul Jordan
c6acf0a28c Use Target Epoch to Determine Indexed Attestations for Slasher (#5085)
* no more head fetchre

* no need for head fetcher

* nil checks
2020-03-12 17:02:12 -05:00
terence tsao
ed7ad4525e Method to retrieve block slot via block root (#5084)
* blockRootSlot

* Tests

* Gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-12 16:04:24 -05:00
terence tsao
7fcc07fb45 Save hot state (#5083)
* loadEpochBoundaryRoot
* Tests
* Span
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into save-hot-state
* Starting test
* Tests
* Merge refs/heads/master into save-hot-state
* Merge branch 'master' into save-hot-state
* Use copy
* Merge branch 'save-hot-state' of https://github.com/prysmaticlabs/prysm into save-hot-state
* Merge refs/heads/master into save-hot-state
2020-03-12 20:48:07 +00:00
shayzluf
f937713fe9 Broadcast slashing (#5073)
* add flag
* broadcast slashings
* Merge branch 'master' of github.com:prysmaticlabs/prysm into broadcast_slashing

# Conflicts:
#	beacon-chain/rpc/beacon/slashings_test.go
* fix tests
* goimports
* goimports
* Merge branch 'master' into broadcast_slashing
* Merge branch 'master' into broadcast_slashing
* Merge branch 'master' into broadcast_slashing
2020-03-12 20:29:23 +00:00
terence tsao
359e0abe1d Load epoch boundary root (#5079)
* loadEpochBoundaryRoot

* Tests

* Span

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-12 15:00:37 -05:00
tzapu
0704ba685a Return statuses on duties (#5069)
* try to return something for everything
* default to unknown
* debug
* moar debug
* move else to outer check
* working
* reorder imports
* cleanup
* fix TestGetDuties_NextEpoch_CantFindValidatorIdx
* Merge branch 'master' into return-statuses-on-duties
* Update validator/client/validator.go
* Merge branch 'master' into return-statuses-on-duties
* Merge branch 'master' into return-statuses-on-duties
2020-03-12 19:07:37 +00:00
shayzluf
0f95b797af Save slashings to slasher DB (#5081)
* fix tests add error type handling

* Update slasher/detection/detect_test.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>

* goimports

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-03-12 22:08:58 +05:30
terence tsao
43722e45f4 Save cold state (#5077) 2020-03-12 05:58:06 -07:00
terence tsao
ff4ed413a3 State migration from hot to cold (archived) (#5076)
* Starting

* Test

* Tests

* comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-11 21:27:16 -05:00
Raul Jordan
f1a42eb589 Verify Slashing Signatures Before Putting Into Blocks (#5071)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* verify slashing
* added in test for pending att slashing
* tests starting to apss
* sig failed verify regression test
* tests passing for ops pool
* Update beacon-chain/operations/slashings/service.go
* Merge refs/heads/master into verify-slash-sig
* verify on insert
* tests starting to pass
* all code paths fixed
* imports
* fix build
* fix rpc errors
* Merge refs/heads/master into verify-slash-sig
2020-03-12 01:16:55 +00:00
terence tsao
a90ffaba49 Archived point retrieval and recovery (#5075) 2020-03-11 17:38:30 -07:00
Raul Jordan
663d919b6f Include Bazel Genrule for Fast SSZ (#5070)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* included new ssz bzl rule
* Merge branch 'master' into add-in-starlark-rule
* Update tools/ssz.bzl

Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
2020-03-11 19:50:22 +00:00
Victor Farazdagi
7b30845c01 fixes races in blocks fetcher (#5068) 2020-03-11 14:21:41 +03:00
Victor Farazdagi
46eb228379 fixes data race in state.Slot (#5067)
* fixes data race in state/getters
2020-03-11 09:11:07 +00:00
Raul Jordan
8d3fc1ad3e Add in Slasher Metrics (#5060)
* added in slasher metrics
* Merge branch 'master' into slasher-metrics
* add in prom bolt metrics for slasher
* Merge branch 'slasher-metrics' of github.com:prysmaticlabs/prysm into slasher-metrics
* imports
* include all metrics
* no dup bolt collector
* Update slasher/detection/attestations/spanner.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* naming best practices for prom, thx Terence
* Merge branch 'slasher-metrics' of github.com:prysmaticlabs/prysm into slasher-metrics
2020-03-10 19:41:55 +00:00
Nishant Das
93195b762b Improve HTR of State (#5058)
* add cache
* Update beacon-chain/state/stateutil/blocks.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/stateutil/blocks.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/stateutil/hash_function.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Merge branch 'master' into improveHTR
* add back string casting
* fix imports
2020-03-10 16:26:54 +00:00
Jim McDonald
f0abf0d7d5 Reduce frequency of 'eth1 client not syncing' messages (#5057) 2020-03-10 09:51:41 -05:00
Nishant Das
9d27449212 Discovery Fixes (#5050)
* connect to dv5 bootnodes

* fix test

* change polling period

* ignore

* Update beacon-chain/p2p/service.go

* Update beacon-chain/p2p/service_test.go

* fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-09 19:53:37 -07:00
Preston Van Loon
edb6590764 Build herumi's BLS from source (#5055)
* Build herumi from source. Working so far on linux_amd64 for compile, but tests fail to initialize the curve appropriately

* Add copts to go_default_library

* llvm toolchain, still WIP

* Fixes, make llvm a config flag

* fix gazelle resolution

* comment

* comment

* update herumi to the v0.9.4 version

* Apply @nisdas patch from https://github.com/herumi/bls-eth-go-binary/pull/5
2020-03-09 21:22:41 -05:00
Raul Jordan
e77cf724b8 Better Nil Check in Slasher (#5053)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* some nil checks in slasher
2020-03-09 21:21:39 +00:00
Ivan Martinez
b633dfe880 Change detection and updating in Slasher to per attestation (#5043)
* Change span updates to update multiple validators at once

* Change detection to perform on multiple validators at once

* Fix minspan issue

* Fix indices

* Fix test

* Remove logs

* Remove more logs

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/detect.go

* nil check

* fix ununsed import

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-09 13:14:19 -05:00
Ivan Martinez
8334aac111 Batch saving of attestations from stream for slasher (#5041)
* Batch saving of attestations from stream for slasher

* Progress on test

* Fixes

* Fix test

* Rename

* Modify logs and timing

* Change

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-09 12:49:40 -05:00
Preston Van Loon
4c1e2ba196 Add prysm.sh script (#5042)
* Add prysm.sh script

* Add dist to gitignore

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-09 12:19:53 -05:00
terence tsao
25c13663d2 Add hot state by slot retrieval (#5052)
* Update replay conditions

* loadHotStateBySlot

* Tests and gaz

* Tests
2020-03-09 11:22:45 -05:00
Jim McDonald
0c3af32274 Use BeaconBlockHeader in place of BeaconBlock (#5049) 2020-03-09 21:08:30 +08:00
shayzluf
01cb01a8f2 On eviction test fix (#5046) 2020-03-09 01:35:39 -04:00
Raul Jordan
0c9e99e04a Aggregate Attestations Before Streaming to Slasher (#5029)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* aggregate before streaming
* Merge branch 'agg-idx-atts' of github.com:prysmaticlabs/prysm into agg-idx-atts
* collect atts and increase buffer size
* fix test for func
* Merge refs/heads/master into agg-idx-atts
* Update beacon-chain/rpc/beacon/attestations.go
* Merge refs/heads/master into agg-idx-atts
* naming
* Merge branch 'agg-idx-atts' of github.com:prysmaticlabs/prysm into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* comment terence feedback
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Merge refs/heads/master into agg-idx-atts
* Fix tests
2020-03-08 21:39:54 +00:00
Ivan Martinez
d4cd51f23e Change slasher cache to LRU cache (#5037)
* Change cache to LRU cache

* fixes

* REduce db usage

* Fix function name

* Merge issues

* Save on eviction

* Fixes

* Fix

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-08 17:11:59 -04:00
terence tsao
962fe8552d Compute state up to slot (#5035) 2020-03-08 21:41:24 +01:00
Raul Jordan
eddaea869b Prepare Slasher for Production (#5020)
* rem slasher proto
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* add a bit more better logging
* Empty db fix
* Improve logs
* Fix small issues in spanner, improvements
* Change costs back to 1 for now
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Change the cache back to 0
* Cleanup
* Merge branch 'master' into cleanup-slasher
* lint
* added in better spans
* log
* rem spanner in super intensive operation
* Merge branch 'master' into cleanup-slasher
* add todo
* Merge branch 'cleanup-slasher' of github.com:prysmaticlabs/prysm into cleanup-slasher
* Merge branch 'master' into cleanup-slasher
* Apply suggestions from code review
* no logrus
* Merge branch 'master' into cleanup-slasher
* Merge branch 'cleanup-slasher' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* Remove spammy logs
* Merge branch 'master' of https://github.com/prysmaticlabs/Prysm into cleanup-slasher
* gaz
* Rename func
* Add back needed code
* Add todo
* Add span to cache func
2020-03-08 17:56:43 +00:00
Nishant Das
300d072456 Add Config Change for Validator (#5038)
* add config for validator
* gaz
* Merge refs/heads/master into configureValidator
* Merge refs/heads/master into configureValidator
2020-03-08 06:45:36 +00:00
Nishant Das
ac1c92e241 Add Prometheus Service for Slasher (#5039)
* add prometheus service
* Update slasher/node/node.go

Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Merge refs/heads/master into addPromServiceSlasher
2020-03-08 06:35:37 +00:00
terence tsao
2452c7403b Load hot state by root (#5034)
* Add loadHotStateByRoot

* Touchup loadHotStateByRoot

* Tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-08 14:24:57 +08:00
Preston Van Loon
b97e22107c Update rbe_autoconf (#5036)
* Update rbe_autoconf
* Update timestamps
2020-03-07 21:18:16 +00:00
Preston Van Loon
98faf95943 Define -c opt for release builds (#5033)
* define -c opt for release builds
* Merge branch 'master' into c-opt
2020-03-07 05:50:26 +00:00
Preston Van Loon
af28862e94 Add sha256 to external dependency librdkafka (#5032)
* Add sha256 to external dependency librdkafka
2020-03-07 05:31:07 +00:00
Jim McDonald
b133eb6c4a Warn rather than fail on incorrect keystore password (#5024)
* Warn on failure to decrypt a keystore validator

* Update test

* Update tools

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-06 23:05:48 -06:00
Nishant Das
345ec1bf8c Fix Custom Delay Flag (#5026)
* fix flag
* Merge refs/heads/master into fixFlag
* Merge refs/heads/master into fixFlag
* Merge refs/heads/master into fixFlag
* Merge refs/heads/master into fixFlag
* Merge refs/heads/master into fixFlag
* fix config
* Merge branch 'fixFlag' of https://github.com/prysmaticlabs/geth-sharding into fixFlag
2020-03-07 03:52:40 +00:00
Nishant Das
d1fea430d6 change limit (#5027)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-06 17:26:08 -06:00
terence tsao
054b15bc45 Add SlotsPerArchivedPoint flag and a check (#5023)
* Flag

* Service

* Tests

* Tests and comments

* Lint

* Add to usages

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-06 17:06:01 -06:00
Preston Van Loon
6a2955d43c Update bazel.sh (#5028)
* Add google auth creds as environment variable for CI. Add a comment why this script is helpful
* Add google auth creds as environment variable for CI. Add a comment why this script is helpful
2020-03-06 17:43:06 +00:00
Jim McDonald
0ecd83afbb Avoid crash due to invalid index (#5025)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-06 09:38:43 -06:00
Nishant Das
069f2c5fb6 Asynchronous Dials To Peers (#5021)
* make dial non-blocking

* add sleep

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-06 22:57:47 +08:00
Raul Jordan
acb15a1f04 Report Validator Status in Duties (#5017)
* fix up status reporting
* Merge refs/heads/master into properly-report-status
* Merge refs/heads/master into properly-report-status
* Merge refs/heads/master into properly-report-status
2020-03-06 06:18:14 +00:00
Preston Van Loon
e2af70f692 Run buildifer, remove duplicated WORKSPACE entries (#5018)
* Buildifier, add release config
* Merge branch 'master' into bazel-stuff
* Merge refs/heads/master into bazel-stuff
* Merge refs/heads/master into bazel-stuff
* revert gnostic
* Set kafka for CI tests only
* add bazel.sh script
* set home
2020-03-06 04:42:27 +00:00
460 changed files with 18024 additions and 10379 deletions


@@ -30,6 +30,20 @@ build --define kafka_enabled=false
test --define kafka_enabled=false
run --define kafka_enabled=false
# Release flags
build:release --workspace_status_command=./scripts/workspace_status.sh
build:release --stamp
build:release --compilation_mode=opt
# LLVM compiler for building C/C++ dependencies.
build:llvm --crosstool_top=@llvm_toolchain//:toolchain
build:llvm --define compiler=llvm
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --config=llvm
build:cgo_symbolizer --copt=-g
build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
# multi-arch cross-compiling toolchain configs:
# -----------------------------------------------


@@ -20,15 +20,16 @@ build:remote-cache --strategy=Genrule=standalone
build:remote-cache --disk_cache=
build:remote-cache --host_platform_remote_properties_override='properties:{name:\"cache-silo-key\" value:\"prysm\"}'
build:remote-cache --remote_instance_name=projects/prysmaticlabs/instances/default_instance
build:remote-cache --experimental_remote_download_outputs=minimal
build:remote-cache --experimental_inmemory_jdeps_files
build:remote-cache --experimental_inmemory_dotd_files
build:remote-cache --remote_download_minimal
# Import workspace options.
import %workspace%/.bazelrc
startup --host_jvm_args=-Xmx1000m --host_jvm_args=-Xms1000m
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
build --experimental_repository_cache_hardlinks
build --experimental_strict_action_env
build --disk_cache=/tmp/bazelbuilds
build --experimental_multi_threaded_digest
@@ -36,17 +37,15 @@ build --sandbox_tmpfs_path=/tmp
build --verbose_failures
build --announce_rc
build --show_progress_rate_limit=5
build --curses=yes --color=yes
build --curses=yes --color=no
build --keep_going
build --test_output=errors
build --flaky_test_attempts=5
build --jobs=50
build --stamp
test --local_test_jobs=2
# Disabled race detection due to unstable test results under constrained environment build kite
# build --features=race
# Enable kafka for CI and docker images
build --define kafka_enabled=true
# Enable kafka for CI tests only.
test --define kafka_enabled=true
run --define kafka_enabled=true
build --bes_backend=grpcs://builds.prylabs.net:1985
build --bes_results_url=https://builds.prylabs.net/invocation/

.github/CODEOWNERS (vendored, new file, 2 changed lines)

@@ -0,0 +1,2 @@
# Automatically require code review from core-team.
* @prysmaticlabs/core-team

.gitignore (vendored, 4 changed lines)

@@ -17,6 +17,7 @@ bazel-*
# Coverage outputs
coverage.txt
profile.out
profile.grind
# Nodejs
node_modules
@@ -29,3 +30,6 @@ password.txt
# go dependency
/go.mod
/go.sum
# Dist files
dist


@@ -4,23 +4,23 @@ Hash: SHA512
Contact: mailto:security@prysmaticlabs.com
Encryption: openpgp4fpr:0AE0051D647BA3C1A917AF4072E33E4DF1A5036E
Encryption: openpgp4fpr:341396BAFACC28C5082327F889725027FC8EC0D4
Encryption: openpgp4fpr:8B7814F1B221A8E8AA465FC7BDBF744ADE1A0033
Encryption: openpgp4fpr:FEE44615A19049DF0CA0C2735E2B7E5734DFADCB
Preferred-Languages: en
Canonical: https://github.com/prysmaticlabs/prysm/tree/master/.well-known/security.txt
-----BEGIN PGP SIGNATURE-----
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAlzi0WgACgkQcuM+TfGl
A241pw/+Ks3Hxx8eGbjRIeuncuK811FkCiofNJS+MY2p4W2/tIrk48DtLRx8/k5L
Dh1QyypZsqUgofrK7PbGVdEin6oEb2jYbTWUarAVTbhlsUdM4YcxwpgmGVslW7+C
Hm8wMasQZhCkFfakzhfKX5hIQoFaFI/OvtVKIQsodP8dAieCDaGmtfq1Bs1LgFqi
KrpeEdC2XbBQs33ADheC5SdGT1mnatP3VX8cOhLsfoPksYgTSpwK0clkoWs1eZOQ
l1ImfW/FJCpSndBWgBR503ZgaU3Ic+5qxmAIuUP4chl0DFRMlPFEM5OWC6JkkCOd
5kKrXGRmrhgtQg+pA3zqJnFItRj7gxPBA/ypxCkKPrLEkRvbdpdZEl5vAlYkeBL6
iKSLHnMswGKldiYxy7ofam5bM3myhYYNFb25boV5pRptrnoUmWOACHioBGQHwWNt
B0XktD0j7+pCCiJyyYxmOnElsk/Y/u4Tv5pYWvfFuxTF2XOg+P/EH64AIFLWgB1U
VnITxhakxqejCBxZkuVCFNSzt+TXG0NS9EIj/UOYBY+wxrBZ62ITjdA16RS/3n3z
DuIDtxOOwUumbOO32+a5zIb+ARmnocYJviI7FuENb01/U6qb+nm9hQI6oIpSCNsv
Pb4O/ZlOx70U/7mt4Xn/dTKH9bnKOOVhOw00KJWFfAce73AVnLA=
=Uhqg
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAl6HrcwACgkQcuM+TfGl
A26voQ/8DFB5wUHP0uyY8k7FGbxhLzSeImxomnUHJaUGfdczbCdYPMEHc9zI1iZP
6LRiy9wS6qhqj/GSKVwvDPr+ZymXuV3L22GOP2lRhl7Z9Mm21ZJNOcoQBFOZnyHu
DAy9HeTmeuJxYkf8weqZYXyzEoKJBDmfuWmEFjrtMcFXUfT3aJn1E2A/AQdcVQIC
9L+iGWwFwjsPhcfaMuwcB7QMheDO6KSB7XPPCbrZ036Np8UTZ4qbZ5y73tlfkcOc
tYTrMSPtS4eNutiDOP5Np36cLzRtNpm/BziAK+7ZKiYY0HI5h9IkCTLO4x2UmAMX
sPoeaAB5z2QLIwmU9J2NhJrwiNMGTpJ+0bowy8U4cgzAX20CXVjRqGhy+cir8Ewg
DjEGjWINUw6W0yzJp0mKSKzuOhdTTmzIYBeMBsyce+pgN1KGFCxeIwxGxyJzADdw
mYQdljRXn4yEYP/KEpu/F2o8L4ptRO2jZWKvTvdzSSGGSyKyF4HsIRJ7m98DaB6S
0oGq1KpbKKTbQi5g8UShGV2gPeMCs5ZIIqK2b/cRzUet18aUuofLmR4lkKZa9yEG
rbzuJq/gB2vgQwExUEgVQ3/DfVc+y80e3YZ5s+rzV0vbLxl4Gh4yExpLo7hRf9iY
EFvMzH+BEEb5VfCwByZyV1BmesZVIosr7K6UmVtPe0bZGvv3uIg=
=5qpD
-----END PGP SIGNATURE-----


@@ -4,7 +4,6 @@ load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("//tools:binary_targets.bzl", "binary_targets", "determine_targets")
prefix = "github.com/prysmaticlabs/prysm"
@@ -104,40 +103,15 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/assign:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
"//tools/analyzers/maligned:go_tool_library",
],
)
assemble_versioned(
name = "assemble-versioned-all",
tags = ["manual"],
targets = [
":assemble-{}-{}-targz".format(
pair[0],
pair[1],
)
for pair in binary_targets
],
version_file = "//:VERSION",
)
common_files = {
"//:LICENSE.md": "LICENSE.md",
"//:README.md": "README.md",
}
[assemble_targz(
name = "assemble-{}-{}-targz".format(
pair[0],
pair[1],
),
additional_files = determine_targets(pair, common_files),
output_filename = "prysm-{}-{}".format(
pair[0],
pair[1],
),
tags = ["manual"],
) for pair in binary_targets]
toolchain(
name = "built_cmake_toolchain",
toolchain = "@rules_foreign_cc//tools/build_defs/native_tools:built_cmake",


@@ -173,7 +173,7 @@ Each validator represents 3.2 Goerli ETH being staked in the system, and it is p
### Activating your validator: depositing 3.2 Göerli ETH
To begin setting up a validator, follow the instructions found on [prylabs.net](https://prylabs.net) to use the Göerli ETH faucet and make a deposit. For step-by-step assistance with the deposit page, see the [Activating a Validator ](https://docs.prylabs.network/docs/activating-a-validator)section of this documentation.
To begin setting up a validator, follow the instructions found on [prylabs.net](https://prylabs.net) to use the Göerli ETH faucet and make a deposit. For step-by-step assistance with the deposit page, see the [Activating a Validator ](https://docs.prylabs.network/docs/prysm-usage/activating-a-validator)section of this documentation.
It will take a while for the nodes in the network to process a deposit. Once the node is active, the validator will immediately begin performing its responsibilities.

WORKSPACE (219 changed lines)

@@ -13,6 +13,28 @@ http_archive(
],
)
http_archive(
name = "com_grail_bazel_toolchain",
sha256 = "0bec89e35d8a141c87f28cfc506d6d344785c8eb2ff3a453140a1fe972ada79d",
strip_prefix = "bazel-toolchain-77a87103145f86f03f90475d19c2c8854398a444",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/77a87103145f86f03f90475d19c2c8854398a444.tar.gz"],
)
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
bazel_toolchain_dependencies()
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "9.0.0",
)
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
llvm_register_toolchains()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
configure_prysm_toolchains()
@@ -30,10 +52,10 @@ http_archive(
http_archive(
name = "bazel_gazelle",
sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f",
sha256 = "d8c45ee70ec39a57e7a05e5027c32b1576cc7f16d9dd37135b0eddde45cf1b10",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
],
)
@@ -53,10 +75,10 @@ http_archive(
http_archive(
name = "io_bazel_rules_go",
sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b",
sha256 = "e6a6c016b0663e06fa5fccf1cd8152eab8aa8180c583ec20c872f4f9953a7ac5",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.22.1/rules_go-v0.22.1.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.22.1/rules_go-v0.22.1.tar.gz",
],
)
@@ -70,7 +92,7 @@ git_repository(
name = "graknlabs_bazel_distribution",
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
remote = "https://github.com/graknlabs/bazel-distribution",
shallow_since = "1563544980 +0300",
shallow_since = "1569509514 +0300",
)
# Override default import in rules_go with special patch until
@@ -84,7 +106,7 @@ git_repository(
"//third_party:com_github_gogo_protobuf-equal.patch",
],
remote = "https://github.com/gogo/protobuf",
shallow_since = "1567336231 +0200",
shallow_since = "1571033717 +0200",
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
@@ -95,6 +117,22 @@ load(
container_repositories()
load(
"@io_bazel_rules_docker//container:container.bzl",
"container_pull",
)
container_pull(
name = "alpine_cc_linux_amd64",
digest = "sha256:d5cee45549351be7a03a96c7b319b9c1808979b10888b79acca4435cc068005e",
registry = "index.docker.io",
repository = "frolvlad/alpine-glibc",
)
load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
bls_dependencies()
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
go_rules_dependencies()
@@ -114,15 +152,8 @@ load(
_go_image_repos = "repositories",
)
_go_image_repos()
# Golang images
# This is using gcr.io/distroless/base
load(
"@io_bazel_rules_docker//go:image.bzl",
_go_image_repos = "repositories",
)
_go_image_repos()
# CC images
@@ -209,13 +240,6 @@ http_archive(
url = "https://github.com/bazelbuild/buildtools/archive/bf564b4925ab5876a3f64d8b90fab7f769013d42.zip",
)
http_archive(
name = "com_github_herumi_bls_eth_go_binary",
sha256 = "b5628a95bd1e6ff84f73d87c134bb1e7e9c1a5a2a10b831867d9dad7d8defc3e",
strip_prefix = "bls-go-binary-8ee33d1a2e8ba8dcf0c3d0b459d75d42d163339d",
url = "https://github.com/nisdas/bls-go-binary/archive/8ee33d1a2e8ba8dcf0c3d0b459d75d42d163339d.zip",
)
load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")
buildifier_dependencies()
@@ -242,9 +266,9 @@ all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//v
http_archive(
name = "rules_foreign_cc",
sha256 = "450563dc2938f38566a59596bb30a3e905fbbcc35b3fff5a1791b122bc140465",
strip_prefix = "rules_foreign_cc-456425521973736ef346d93d3d6ba07d807047df",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/456425521973736ef346d93d3d6ba07d807047df.zip",
sha256 = "450563dc2938f38566a59596bb30a3e905fbbcc35b3fff5a1791b122bc140465",
)
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
@@ -256,6 +280,7 @@ rules_foreign_cc_dependencies([
http_archive(
name = "librdkafka",
build_file_content = all_content,
sha256 = "f6be27772babfdacbbf2e4c5432ea46c57ef5b7d82e52a81b885e7b804781fd6",
strip_prefix = "librdkafka-1.2.1",
urls = ["https://github.com/edenhill/librdkafka/archive/v1.2.1.tar.gz"],
)
@@ -264,7 +289,7 @@ http_archive(
go_repository(
name = "com_github_ethereum_go_ethereum",
commit = "40beaeef26d5a2a0918dec2b960c2556c71a90a0",
commit = "861ae1b1875c17d86a6a5d68118708ab2b099658",
importpath = "github.com/ethereum/go-ethereum",
# Note: go-ethereum is not bazel-friendly with regards to cgo. We have a
# a fork that has resolved these issues by disabling HID/USB support and
@@ -279,12 +304,10 @@ go_repository(
name = "com_github_prysmaticlabs_go_ssz",
commit = "e24db4d9e9637cf88ee9e4a779e339a1686a84ee",
importpath = "github.com/prysmaticlabs/go-ssz",
)
go_repository(
name = "com_github_urfave_cli",
commit = "e6cf83ec39f6e1158ced1927d4ed14578fda8edb", # v1.21.0
importpath = "github.com/urfave/cli",
patch_args = ["-p1"],
patches = [
"//third_party:com_github_prysmaticlabs_go_ssz.patch",
],
)
go_repository(
@@ -615,8 +638,9 @@ go_repository(
go_repository(
name = "org_golang_x_crypto",
commit = "4def268fd1a49955bfb3dda92fe3db4f924f2285",
importpath = "golang.org/x/crypto",
sum = "h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=",
version = "v0.0.0-20200221231518-2aa609cf4a9d",
)
go_repository(
@@ -742,12 +766,6 @@ go_repository(
importpath = "github.com/matttproud/golang_protobuf_extensions",
)
go_repository(
name = "com_github_boltdb_bolt",
commit = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8", # v1.3.1
importpath = "github.com/boltdb/bolt",
)
go_repository(
name = "com_github_pborman_uuid",
commit = "8b1b92947f46224e3b97bb1a3a5b0382be00d31e", # v1.2.0
@@ -892,6 +910,13 @@ go_repository(
importpath = "k8s.io/client-go",
)
go_repository(
name = "io_etcd_go_bbolt",
importpath = "go.etcd.io/bbolt",
sum = "h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=",
version = "v1.3.4",
)
go_repository(
name = "io_k8s_apimachinery",
build_file_proto_mode = "disable_global",
@@ -1176,7 +1201,7 @@ go_repository(
go_repository(
name = "com_github_prysmaticlabs_go_bitfield",
commit = "dbb55b15e92f897ee230360c8d9695e2f224b117",
commit = "62c2aee7166951c456888f92237aee4303ba1b9d",
importpath = "github.com/prysmaticlabs/go-bitfield",
)
@@ -1187,8 +1212,9 @@ go_ssz_dependencies()
go_repository(
name = "org_golang_google_grpc",
build_file_proto_mode = "disable",
commit = "1d89a3c832915b2314551c1d2a506874d62e53f7", # v1.22.0
importpath = "google.golang.org/grpc",
sum = "h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=",
version = "v1.27.1",
)
go_repository(
@@ -1215,12 +1241,6 @@ go_repository(
importpath = "golang.org/x/time",
)
go_repository(
name = "in_gopkg_natefinch_npipe_v2",
commit = "c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6",
importpath = "gopkg.in/natefinch/npipe.v2",
)
go_repository(
name = "com_github_googleapis_gnostic",
commit = "896953e6749863beec38e27029c804e88c3144b8", # v0.4.1
@@ -1277,12 +1297,6 @@ go_repository(
importpath = "k8s.io/utils",
)
go_repository(
name = "com_github_googleapis_gnostic",
commit = "25d8b0b6698593f520d9d8dc5a88e6b16ca9ecc0",
importpath = "github.com/googleapis/gnostic",
)
go_repository(
name = "com_github_patrickmn_go_cache",
commit = "46f407853014144407b6c2ec7ccc76bf67958d93",
@@ -1291,7 +1305,7 @@ go_repository(
go_repository(
name = "com_github_prysmaticlabs_ethereumapis",
commit = "fca4d6f69bedb8615c2fc916d1a68f2692285caa",
commit = "62fd1d2ec119bc93b0473fde17426c63a85197ed",
importpath = "github.com/prysmaticlabs/ethereumapis",
patch_args = ["-p1"],
patches = [
@@ -1301,8 +1315,9 @@ go_repository(
go_repository(
name = "com_github_cloudflare_roughtime",
commit = "d41fdcee702eb3e5c3296288a453b9340184d37e",
importpath = "github.com/cloudflare/roughtime",
sum = "h1:jeSxE3fepJdhASERvBHI6RFkMhISv6Ir2JUybYLIVXs=",
version = "v0.0.0-20200205191924-a69ef1dab727",
)
go_repository(
@@ -1325,13 +1340,6 @@ go_repository(
version = "v0.0.4",
)
go_repository(
name = "com_github_mdlayher_prombolt",
importpath = "github.com/mdlayher/prombolt",
sum = "h1:N257g6TTx0LxYoskSDFxvkSJ3NOZpy9IF1xQ7Gu+K8I=",
version = "v0.0.0-20161005185022-dfcf01d20ee9",
)
go_repository(
name = "com_github_minio_highwayhash",
importpath = "github.com/minio/highwayhash",
@@ -1369,13 +1377,6 @@ go_repository(
version = "v0.10.5",
)
go_repository(
name = "in_gopkg_urfave_cli_v1",
importpath = "gopkg.in/urfave/cli.v1",
sum = "h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=",
version = "v1.20.0",
)
go_repository(
name = "com_github_naoina_go_stringutil",
importpath = "github.com/naoina/go-stringutil",
@@ -1453,9 +1454,9 @@ go_repository(
)
go_repository(
name = "com_github_emicklei_dot",
commit = "f4a04130244d60cef56086d2f649b4b55e9624aa",
importpath = "github.com/emicklei/dot",
name = "com_github_googleapis_gnostic",
commit = "25d8b0b6698593f520d9d8dc5a88e6b16ca9ecc0",
importpath = "github.com/googleapis/gnostic",
)
go_repository(
@@ -1498,18 +1499,6 @@ go_repository(
version = "v0.0.1",
)
go_repository(
name = "com_github_dgraph_io_ristretto",
commit = "99d1bbbf28e64530eb246be0568fc7709a35ebdd",
importpath = "github.com/dgraph-io/ristretto",
)
go_repository(
name = "com_github_cespare_xxhash",
commit = "d7df74196a9e781ede915320c11c378c1b2f3a1f",
importpath = "github.com/cespare/xxhash",
)
go_repository(
name = "com_github_kevinms_leakybucket_go",
importpath = "github.com/kevinms/leakybucket-go",
@@ -1601,3 +1590,69 @@ go_repository(
sum = "h1:J1gHJRNFEk7NdiaPQQqAvxEy+7hhCsVv3uzduWybmqY=",
version = "v0.0.0-20200302201340-8c54356e12c9",
)
go_repository(
name = "com_github_ferranbt_fastssz",
commit = "06015a5d84f9e4eefe2c21377ca678fa8f1a1b09",
importpath = "github.com/ferranbt/fastssz",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_cpuguy83_go_md2man_v2",
importpath = "github.com/cpuguy83/go-md2man/v2",
sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
version = "v2.0.0",
)
go_repository(
name = "com_github_russross_blackfriday_v2",
importpath = "github.com/russross/blackfriday/v2",
sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
version = "v2.0.1",
)
go_repository(
name = "com_github_shurcool_sanitized_anchor_name",
importpath = "github.com/shurcooL/sanitized_anchor_name",
sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
version = "v1.0.0",
)
go_repository(
name = "in_gopkg_urfave_cli_v2",
importpath = "gopkg.in/urfave/cli.v2",
sum = "h1:OvXt/p4cdwNl+mwcWMq/AxaKFkhdxcjx+tx+qf4EOvY=",
version = "v2.0.0-20190806201727-b62605953717",
)
go_repository(
name = "in_gopkg_urfave_cli_v1",
importpath = "gopkg.in/urfave/cli.v1",
sum = "h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=",
version = "v1.20.0",
)
go_repository(
name = "com_github_prysmaticlabs_prombbolt",
importpath = "github.com/prysmaticlabs/prombbolt",
sum = "h1:bVD46NhbqEE6bsIqj42TCS3ELUdumti3WfAw9DXNtkg=",
version = "v0.0.0-20200324184628-09789ef63796",
)
load("@com_github_prysmaticlabs_prombbolt//:repositories.bzl", "prombbolt_dependencies")
prombbolt_dependencies()
go_repository(
name = "com_github_ianlancetaylor_cgosymbolizer",
importpath = "github.com/ianlancetaylor/cgosymbolizer",
sum = "h1:GWsU1WjSE2rtvyTYGcndqmPPkQkBNV7pEuZdnGtwtu4=",
version = "v0.0.0-20200321040036-d43e30eacb43",
)

bazel.sh (new executable file, 11 changed lines)

@@ -0,0 +1,11 @@
#!/bin/bash
# This script serves as a wrapper around bazel to limit the scope of environment variables that
# may change the action output. Using this script should result in a higher cache hit ratio for
# cached actions with a more hermetic build.
env -i \
PATH=/usr/bin:/bin \
HOME=$HOME \
GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS \
bazel "$@"


@@ -1,7 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle")
load("//tools:binary_targets.bzl", "binary_targets", "go_image_debug")
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
go_library(
@@ -23,9 +23,10 @@ go_library(
"@com_github_ipfs_go_log//:go_default_library",
"@com_github_joonix_log//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@com_github_whyrusleeping_go_logging//:go_default_library",
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
"@in_gopkg_urfave_cli_v2//altsrc:go_default_library",
"@org_uber_go_automaxprocs//:go_default_library",
],
)
@@ -36,7 +37,11 @@ go_image(
"main.go",
"usage.go",
],
base = "//tools:cc_image",
base = select({
"//tools:base_image_alpine": "//tools:alpine_cc_image",
"//tools:base_image_cc": "//tools:cc_image",
"//conditions:default": "//tools:cc_image",
}),
goarch = "amd64",
goos = "linux",
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
@@ -55,9 +60,10 @@ go_image(
"@com_github_ipfs_go_log//:go_default_library",
"@com_github_joonix_log//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@com_github_whyrusleeping_go_logging//:go_default_library",
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
"@in_gopkg_urfave_cli_v2//altsrc:go_default_library",
"@org_uber_go_automaxprocs//:go_default_library",
],
)
@@ -74,6 +80,7 @@ container_bundle(
go_image_debug(
name = "image_debug",
image = ":image",
tags = ["manual"],
)
container_bundle(
@@ -85,6 +92,21 @@ container_bundle(
tags = ["manual"],
)
go_image_alpine(
name = "image_alpine",
image = ":image",
tags = ["manual"],
)
container_bundle(
name = "image_bundle_alpine",
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-alpine": ":image_alpine",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
},
tags = ["manual"],
)
docker_push(
name = "push_images",
bundle = ":image_bundle",
@@ -97,6 +119,12 @@ docker_push(
tags = ["manual"],
)
docker_push(
name = "push_images_alpine",
bundle = ":image_bundle_alpine",
tags = ["manual"],
)
go_binary(
name = "beacon-chain",
embed = [":go_default_library"],
@@ -111,17 +139,8 @@ go_test(
size = "small",
srcs = ["usage_test.go"],
embed = [":go_default_library"],
deps = ["@com_github_urfave_cli//:go_default_library"],
deps = [
"//shared/featureconfig:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
],
)
[go_binary(
name = "beacon-chain-{}-{}".format(
pair[0],
pair[1],
),
embed = [":go_default_library"],
goarch = pair[1],
goos = pair[0],
tags = ["manual"],
visibility = ["//visibility:public"],
) for pair in binary_targets]


@@ -40,11 +40,13 @@ go_library(
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/slotutil:go_default_library",
"//shared/traceutil:go_default_library",
"@com_github_emicklei_dot//:go_default_library",
@@ -115,6 +117,13 @@ go_test(
"service_norace_test.go",
],
embed = [":go_default_library"],
gc_goopts = [
# Go 1.14 enables checkptr by default when building with -race or -msan. There is a pointer
# issue in boltdb, so must disable checkptr at compile time. This flag can be removed once
# the project is migrated to etcd's version of boltdb and the issue has been fixed.
# See: https://github.com/etcd-io/bbolt/issues/187.
"-d=checkptr=0",
],
race = "on",
tags = ["race_on"],
deps = [


@@ -186,7 +186,7 @@ func (s *Service) CurrentFork() *pb.Fork {
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
}
}
return s.headState().Fork()
return s.head.state.Fork()
}
// Participation returns the participation stats of a given epoch.


@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
@@ -58,9 +59,15 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// If the head state is not available, just return nil.
// There's nothing to cache
_, cached := s.initSyncState[headRoot]
if !cached && !s.beaconDB.HasState(ctx, headRoot) {
return nil
if featureconfig.Get().NewStateMgmt {
if !s.stateGen.StateSummaryExists(ctx, headRoot) {
return nil
}
} else {
_, cached := s.initSyncState[headRoot]
if !cached && !s.beaconDB.HasState(ctx, headRoot) {
return nil
}
}
// Get the new head block from DB.
@@ -74,15 +81,19 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// Get the new head state from cached state or DB.
var newHeadState *state.BeaconState
var exists bool
newHeadState, exists = s.initSyncState[headRoot]
if !exists {
newHeadState, err = s.beaconDB.State(ctx, headRoot)
if featureconfig.Get().NewStateMgmt {
newHeadState, err = s.stateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil {
return errors.New("cannot save nil head state")
} else {
var exists bool
newHeadState, exists = s.initSyncState[headRoot]
if !exists {
newHeadState, err = s.beaconDB.State(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
}
}
if newHeadState == nil {
@@ -108,19 +119,27 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock,
return errors.New("cannot save nil head block")
}
headState, err := s.beaconDB.State(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if headState == nil {
s.initSyncStateLock.RLock()
cachedHeadState, ok := s.initSyncState[r]
if ok {
headState = cachedHeadState
var headState *state.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
headState, err = s.stateGen.StateByRoot(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
} else {
headState, err = s.beaconDB.State(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if headState == nil {
s.initSyncStateLock.RLock()
cachedHeadState, ok := s.initSyncState[r]
if ok {
headState = cachedHeadState
}
s.initSyncStateLock.RUnlock()
}
s.initSyncStateLock.RUnlock()
}
if headState == nil {
return errors.New("nil head state")
}
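The head.go changes above repeat a single pattern: every state read is gated on featureconfig.Get().NewStateMgmt, going through the state generator when the flag is on and falling back to the legacy cache/database path otherwise. A minimal sketch of that pattern, using only calls that appear in this change set; the helper name and its standalone form are hypothetical, not code from the diff:

func (s *Service) stateByRootSketch(ctx context.Context, root [32]byte) (*state.BeaconState, error) {
    if featureconfig.Get().NewStateMgmt {
        // New state management: resolve the state through stategen.
        return s.stateGen.StateByRoot(ctx, root)
    }
    // Legacy path: read the full state straight from the beacon database.
    return s.beaconDB.State(ctx, root)
}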


@@ -5,9 +5,11 @@ import (
"sort"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
)
@@ -168,12 +170,29 @@ func (s *Service) generateState(ctx context.Context, startRoot [32]byte, endRoot
return nil, err
}
if preState == nil {
return nil, errors.New("finalized state does not exist in db")
preState, err = s.stateGen.StateByRoot(ctx, startRoot)
if err != nil {
return nil, err
}
if preState == nil {
return nil, errors.New("finalized state does not exist in db")
}
}
endBlock, err := s.beaconDB.Block(ctx, endRoot)
if err != nil {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return nil, err
}
var endBlock *ethpb.SignedBeaconBlock
if !featureconfig.Get().NoInitSyncBatchSaveBlocks && s.hasInitSyncBlock(endRoot) {
endBlock = s.getInitSyncBlock(endRoot)
s.clearInitSyncBlocks()
} else {
endBlock, err = s.beaconDB.Block(ctx, endRoot)
if err != nil {
return nil, err
}
}
if endBlock == nil {
return nil, errors.New("provided block root does not have block saved in the db")
}
@@ -189,3 +208,48 @@ func (s *Service) generateState(ctx context.Context, startRoot [32]byte, endRoot
}
return postState, nil
}
// This saves a beacon block to the initial sync blocks cache.
func (s *Service) saveInitSyncBlock(r [32]byte, b *ethpb.SignedBeaconBlock) {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks[r] = b
}
// This checks if a beacon block exists in the initial sync blocks cache using the root
// of the block.
func (s *Service) hasInitSyncBlock(r [32]byte) bool {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
_, ok := s.initSyncBlocks[r]
return ok
}
// This retrieves a beacon block from the initial sync blocks cache using the root of
// the block.
func (s *Service) getInitSyncBlock(r [32]byte) *ethpb.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
b := s.initSyncBlocks[r]
return b
}
// This retrieves all the beacon blocks from the initial sync blocks cache, the returned
// blocks are unordered.
func (s *Service) getInitSyncBlocks() []*ethpb.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
blks := make([]*ethpb.SignedBeaconBlock, 0, len(s.initSyncBlocks))
for _, b := range s.initSyncBlocks {
blks = append(blks, b)
}
return blks
}
// This clears out the initial sync blocks cache.
func (s *Service) clearInitSyncBlocks() {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks = make(map[[32]byte]*ethpb.SignedBeaconBlock)
}
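The helpers above back the batch-save path that runs unless the NoInitSyncBatchSaveBlocks flag is set: during initial sync, blocks are parked in this in-memory map and flushed to the database in batches instead of being written one at a time. A rough composition of those calls, taken from the change itself with the enclosing function omitted; initialSyncBlockCacheSize is 2 * SlotsPerEpoch, so 64 blocks on mainnet:

root, err := stateutil.BlockRoot(signed.Block)
if err != nil {
    return errors.Wrapf(err, "could not get signing root of block %d", signed.Block.Slot)
}
// Park the block in memory rather than saving it immediately.
s.saveInitSyncBlock(root, signed)

// Once roughly two epochs of blocks have accumulated, persist them in one
// batch write and reset the cache.
if len(s.getInitSyncBlocks()) > int(initialSyncBlockCacheSize) {
    if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
        return err
    }
    s.clearInitSyncBlocks()
}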


@@ -6,16 +6,18 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"gopkg.in/d4l3k/messagediff.v1"
messagediff "gopkg.in/d4l3k/messagediff.v1"
)
func TestFilterBoundaryCandidates_FilterCorrect(t *testing.T) {
@@ -209,7 +211,7 @@ func TestPruneNonBoundary_CanPrune(t *testing.T) {
func TestGenerateState_CorrectlyGenerated(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: db, StateGen: stategen.New(db, cache.NewStateSummaryCache())}
service, err := NewService(context.Background(), cfg)
if err != nil {
t.Fatal(err)
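The test configuration reflects a wiring change that shows up again in service.go further down: the blockchain service no longer constructs its own stategen.State from the database, callers now hand one in through Config. A minimal construction sketch mirroring the test above, with the summary cache named for clarity:

summaryCache := cache.NewStateSummaryCache()
cfg := &Config{
    BeaconDB: db,
    StateGen: stategen.New(db, summaryCache),
}
service, err := NewService(context.Background(), cfg)
if err != nil {
    t.Fatal(err)
}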


@@ -30,30 +30,40 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
if cachedState != nil {
return cachedState, nil
}
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
var baseState *stateTrie.BeaconState
if featureconfig.Get().NewStateMgmt {
baseState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get head root")
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if bytes.Equal(headRoot, c.Root) {
st, err := s.HeadState(ctx)
} else {
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head state")
return nil, errors.Wrapf(err, "could not get head root")
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: st.Copy(),
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
if bytes.Equal(headRoot, c.Root) {
st, err := s.HeadState(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head state")
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: st.Copy(),
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return st, nil
}
return st, nil
}
baseState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
}
baseState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if baseState == nil {
return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
}
@@ -100,7 +110,7 @@ func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.Attestation
return fmt.Errorf("beacon block %#x does not exist", bytesutil.Trunc(data.BeaconBlockRoot))
}
if b.Block.Slot > data.Slot {
return fmt.Errorf("could not process attestation for future block, %d > %d", b.Block.Slot, data.Slot)
return fmt.Errorf("could not process attestation for future block, block.Slot=%d > attestation.Data.Slot=%d", b.Block.Slot, data.Slot)
}
return nil
}
@@ -111,19 +121,23 @@ func (s *Service) verifyAttestation(ctx context.Context, baseState *stateTrie.Be
if err != nil {
return nil, err
}
indexedAtt, err := attestationutil.ConvertToIndexed(ctx, a, committee)
if err != nil {
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
}
indexedAtt := attestationutil.ConvertToIndexed(ctx, a, committee)
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
if err == blocks.ErrSigFailedToVerify {
// When sig fails to verify, check if there's a differences in committees due to
// different seeds.
aState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
var aState *stateTrie.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
aState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
return nil, err
}
aState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if err != nil {
return nil, err
}
epoch := helpers.SlotToEpoch(a.Data.Slot)
origSeed, err := helpers.Seed(baseState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {


@@ -149,7 +149,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
LatestBlockHeader: &ethpb.BeaconBlockHeader{},
JustificationBits: []byte{0},
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
FinalizedCheckpoint: &ethpb.Checkpoint{},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)},
})
r := [32]byte{'g'}
if err := service.beaconDB.SaveState(ctx, s, r); err != nil {
@@ -160,7 +160,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'}))
s1, err := service.getAttPreState(ctx, cp1)
if err != nil {
@@ -170,7 +170,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
}
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'}))
s2, err := service.getAttPreState(ctx, cp2)
if err != nil {
@@ -209,7 +209,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'C'}}
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, 32)}
service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'}))
s3, err := service.getAttPreState(ctx, cp3)
if err != nil {


@@ -7,17 +7,22 @@ import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// This defines size of the upper bound for initial sync block cache.
var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
// onBlock is called when a gossip block is received. It runs regular state transition on the block.
//
// Spec pseudocode definition:
@@ -66,7 +71,7 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
}
preStateValidatorCount := preState.NumValidators()
root, err := ssz.HashTreeRoot(b)
root, err := stateutil.BlockRoot(b)
if err != nil {
return nil, errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
}
@@ -88,8 +93,14 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
return nil, errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return nil, errors.Wrap(err, "could not save state")
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, root, postState); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
} else {
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
}
// Update justified check point.
@@ -101,22 +112,32 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
if !featureconfig.Get().NoInitSyncBatchSaveBlocks {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return nil, err
}
s.clearInitSyncBlocks()
}
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
return nil, errors.Wrap(err, "could not save finalized checkpoint")
}
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return nil, errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
if !featureconfig.Get().NewStateMgmt {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return nil, errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
}
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
// Prune proto array fork choice nodes, all nodes before finalized check point will
// be pruned.
s.forkChoiceStore.Prune(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
s.forkChoiceStore.Prune(ctx, fRoot)
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
@@ -124,11 +145,22 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return nil, errors.Wrap(err, "could not save new justified")
}
if featureconfig.Get().NewStateMgmt {
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
fBlock, err := s.beaconDB.Block(ctx, fRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block to migrate")
}
if err := s.stateGen.MigrateToCold(ctx, fBlock.Block.Slot, fRoot); err != nil {
return nil, errors.Wrap(err, "could not migrate to cold")
}
}
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return nil, errors.Wrap(err, "could not save finalized checkpoint")
return nil, errors.Wrap(err, "could not save new validators")
}
// Epoch boundary bookkeeping such as logging epoch summaries.
@@ -173,35 +205,49 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
b := signed.Block
s.initSyncStateLock.Lock()
defer s.initSyncStateLock.Unlock()
// Retrieve incoming block's pre state.
preState, err := s.verifyBlkPreState(ctx, b)
if err != nil {
return err
}
preStateValidatorCount := preState.NumValidators()
// Exit early if the pre state slot is higher than incoming block's slot.
if preState.Slot() >= signed.Block.Slot {
return nil
}
preStateValidatorCount := preState.NumValidators()
postState, err := state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}
if err := s.beaconDB.SaveBlock(ctx, signed); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
}
root, err := ssz.HashTreeRoot(b)
root, err := stateutil.BlockRoot(b)
if err != nil {
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
}
if !featureconfig.Get().NoInitSyncBatchSaveBlocks {
s.saveInitSyncBlock(root, signed)
} else {
if err := s.beaconDB.SaveBlock(ctx, signed); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
}
}
if err := s.insertBlockToForkChoiceStore(ctx, b, root, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
s.initSyncState[root] = postState.Copy()
s.filterBoundaryCandidates(ctx, root, postState)
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, root, postState); err != nil {
return errors.Wrap(err, "could not save state")
}
} else {
s.initSyncStateLock.Lock()
defer s.initSyncStateLock.Unlock()
s.initSyncState[root] = postState.Copy()
s.filterBoundaryCandidates(ctx, root, postState)
}
if flags.Get().EnableArchive {
atts := signed.Block.Body.Attestations
@@ -217,19 +263,36 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
}
}
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
if len(s.getInitSyncBlocks()) > int(initialSyncBlockCacheSize) {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
if !featureconfig.Get().NewStateMgmt {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
if err := s.saveInitState(ctx, postState); err != nil {
return errors.Wrap(err, "could not save init sync finalized state")
}
}
if err := s.saveInitState(ctx, postState); err != nil {
return errors.Wrap(err, "could not save init sync finalized state")
if !featureconfig.Get().NoInitSyncBatchSaveBlocks {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
@@ -242,21 +305,34 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
if featureconfig.Get().NewStateMgmt {
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
fBlock, err := s.beaconDB.Block(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block to migrate")
}
if err := s.stateGen.MigrateToCold(ctx, fBlock.Block.Slot, fRoot); err != nil {
return errors.Wrap(err, "could not migrate to cold")
}
}
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return errors.Wrap(err, "could not save finalized checkpoint")
return errors.Wrap(err, "could not save new validators")
}
numOfStates := len(s.boundaryRoots)
if numOfStates > initialSyncCacheSize {
if err = s.persistCachedStates(ctx, numOfStates); err != nil {
return err
if !featureconfig.Get().NewStateMgmt {
numOfStates := len(s.boundaryRoots)
if numOfStates > initialSyncCacheSize {
if err = s.persistCachedStates(ctx, numOfStates); err != nil {
return err
}
}
if len(s.initSyncState) > maxCacheSize {
s.pruneOldNonFinalizedStates()
}
}
if len(s.initSyncState) > maxCacheSize {
s.pruneOldNonFinalizedStates()
}
// Epoch boundary bookkeeping such as logging epoch summaries.
@@ -272,7 +348,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
return err
}
if helpers.IsEpochStart(postState.Slot()) {
if !featureconfig.Get().NewStateMgmt && helpers.IsEpochStart(postState.Slot()) {
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return errors.Wrap(err, "could not save state")
}
@@ -303,10 +379,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.B
if err != nil {
return err
}
indices, err := attestationutil.AttestingIndices(a.AggregationBits, committee)
if err != nil {
return err
}
indices := attestationutil.AttestingIndices(a.AggregationBits, committee)
s.forkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
}
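Both onBlock and the init-sync transition above add the same finalization hook under the NewStateMgmt flag; it is collected here in one place for readability. The code is lifted from the change itself; reading MigrateToCold as a move into cold (finalized) storage is an interpretation of the name rather than something the diff states:

if featureconfig.Get().NewStateMgmt {
    fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
    fBlock, err := s.beaconDB.Block(ctx, fRoot)
    if err != nil {
        return errors.Wrap(err, "could not get finalized block to migrate")
    }
    // Hand the newly finalized slot and root to stategen so states up to this
    // point leave the hot path.
    if err := s.stateGen.MigrateToCold(ctx, fBlock.Block.Slot, fRoot); err != nil {
        return errors.Wrap(err, "could not migrate to cold")
    }
}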


@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
"time"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -15,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -22,7 +22,7 @@ import (
// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() uint64 {
return uint64(time.Now().Unix()-s.genesisTime.Unix()) / params.BeaconConfig().SecondsPerSlot
return uint64(roughtime.Now().Unix()-s.genesisTime.Unix()) / params.BeaconConfig().SecondsPerSlot
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
@@ -58,6 +58,20 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "chainService.verifyBlkPreState")
defer span.End()
if featureconfig.Get().NewStateMgmt {
preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
if preState == nil {
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot)
}
return preState, nil // No copy needed from newly hydrated state gen object.
}
preState := s.initSyncState[bytesutil.ToBytes32(b.ParentRoot)]
var err error
if preState == nil {
@@ -215,21 +229,36 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
if helpers.SlotsSinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
return true, nil
}
newJustifiedBlockSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(newJustifiedCheckpt.Root))
if err != nil {
return false, err
var newJustifiedBlockSigned *ethpb.SignedBeaconBlock
justifiedRoot := bytesutil.ToBytes32(newJustifiedCheckpt.Root)
var err error
if !featureconfig.Get().NoInitSyncBatchSaveBlocks && s.hasInitSyncBlock(justifiedRoot) {
newJustifiedBlockSigned = s.getInitSyncBlock(justifiedRoot)
} else {
newJustifiedBlockSigned, err = s.beaconDB.Block(ctx, justifiedRoot)
if err != nil {
return false, err
}
}
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.Block == nil {
return false, errors.New("nil new justified block")
}
newJustifiedBlock := newJustifiedBlockSigned.Block
if newJustifiedBlock.Slot <= helpers.StartSlot(s.justifiedCheckpt.Epoch) {
return false, nil
}
justifiedBlockSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if err != nil {
return false, err
var justifiedBlockSigned *ethpb.SignedBeaconBlock
cachedJustifiedRoot := bytesutil.ToBytes32(s.justifiedCheckpt.Root)
if !featureconfig.Get().NoInitSyncBatchSaveBlocks && s.hasInitSyncBlock(cachedJustifiedRoot) {
justifiedBlockSigned = s.getInitSyncBlock(cachedJustifiedRoot)
} else {
justifiedBlockSigned, err = s.beaconDB.Block(ctx, cachedJustifiedRoot)
if err != nil {
return false, err
}
}
if justifiedBlockSigned == nil || justifiedBlockSigned.Block == nil {
return false, errors.New("nil justified block")
}
@@ -253,29 +282,33 @@ func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconSt
if err != nil {
return err
}
if canUpdate {
s.prevJustifiedCheckpt = s.justifiedCheckpt
s.justifiedCheckpt = cpt
}
justifiedRoot := bytesutil.ToBytes32(cpt.Root)
if !featureconfig.Get().NewStateMgmt {
justifiedRoot := bytesutil.ToBytes32(cpt.Root)
justifiedState := s.initSyncState[justifiedRoot]
// If justified state is nil, resume back to normal syncing process and save
// justified check point.
if justifiedState == nil {
if s.beaconDB.HasState(ctx, justifiedRoot) {
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
justifiedState := s.initSyncState[justifiedRoot]
// If justified state is nil, resume back to normal syncing process and save
// justified check point.
var err error
if justifiedState == nil {
if s.beaconDB.HasState(ctx, justifiedRoot) {
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
justifiedState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), justifiedRoot)
if err != nil {
log.Error(err)
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
}
justifiedState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), justifiedRoot)
if err != nil {
log.Error(err)
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
if err := s.beaconDB.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
return errors.Wrap(err, "could not save justified state")
}
}
if err := s.beaconDB.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
return errors.Wrap(err, "could not save justified state")
}
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
@@ -360,6 +393,11 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor block")
}
if !featureconfig.Get().NoInitSyncBatchSaveBlocks && s.hasInitSyncBlock(bytesutil.ToBytes32(root)) {
signed = s.getInitSyncBlock(bytesutil.ToBytes32(root))
}
if signed == nil || signed.Block == nil {
return nil, errors.New("nil block")
}
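The CurrentSlot change near the top of this file swaps time.Now for roughtime.Now (a clock adjusted through the Roughtime protocol) while keeping the arithmetic unchanged. A self-contained worked example of that formula, with hypothetical timestamps and mainnet's 12-second slots:

package main

import "fmt"

func main() {
    genesis := int64(1578009600)  // hypothetical genesis time, Unix seconds
    now := genesis + 12*100 + 5   // 100 slots plus 5 extra seconds later
    secondsPerSlot := int64(12)   // params.BeaconConfig().SecondsPerSlot on mainnet
    slot := uint64(now-genesis) / uint64(secondsPerSlot)
    fmt.Println(slot) // 100; the leftover 5 seconds truncate away
}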


@@ -90,7 +90,13 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
ctx := context.Background()
atts := s.attPool.ForkchoiceAttestations()
for _, a := range atts {
hasState := s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root))
var hasState bool
if featureconfig.Get().NewStateMgmt {
hasState = s.stateGen.StateSummaryExists(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
} else {
hasState = s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root))
}
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue


@@ -13,6 +13,7 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
@@ -25,6 +26,7 @@ type BlockReceiver interface {
ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error
ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error
ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error
HasInitSyncBlock(root [32]byte) bool
}
// ReceiveBlock is a function that defines the operations that are preformed on
@@ -88,7 +90,7 @@ func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedB
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
root, err := ssz.HashTreeRoot(blockCopy.Block)
root, err := stateutil.BlockRoot(blockCopy.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received block")
}
@@ -139,7 +141,7 @@ func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *eth
return err
}
root, err := ssz.HashTreeRoot(blockCopy.Block)
root, err := stateutil.BlockRoot(blockCopy.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received block")
}
@@ -191,7 +193,7 @@ func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedB
return err
}
root, err := ssz.HashTreeRoot(blockCopy.Block)
root, err := stateutil.BlockRoot(blockCopy.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received blockCopy")
}
@@ -235,3 +237,8 @@ func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedB
return nil
}
// HasInitSyncBlock returns true if the block of the input root exists in initial sync blocks cache.
func (s *Service) HasInitSyncBlock(root [32]byte) bool {
return s.hasInitSyncBlock(root)
}
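HasInitSyncBlock is also added to the BlockReceiver interface above, so components that only hold the interface can ask whether a root is still held in the init-sync cache before reaching for the database. A hypothetical consumer sketch; only HasInitSyncBlock comes from this diff, while the hasBlocker interface and HasBlock signature below are assumptions standing in for the beacon DB:

type hasBlocker interface {
    HasBlock(ctx context.Context, root [32]byte) bool
}

func blockKnown(ctx context.Context, chain blockchain.BlockReceiver, db hasBlocker, root [32]byte) bool {
    if chain.HasInitSyncBlock(root) {
        return true // parked in the init-sync cache, not yet persisted
    }
    return db.HasBlock(ctx, root)
}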


@@ -33,6 +33,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -73,6 +74,9 @@ type Service struct {
checkpointState *cache.CheckpointStateCache
checkpointStateLock sync.Mutex
stateGen *stategen.State
opsService *attestations.Service
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
}
// Config options for the service.
@@ -88,6 +92,8 @@ type Config struct {
MaxRoutines int64
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
OpsService *attestations.Service
StateGen *stategen.State
}
// NewService instantiates a new block service instance that will
@@ -111,7 +117,9 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
initSyncState: make(map[[32]byte]*stateTrie.BeaconState),
boundaryRoots: [][32]byte{},
checkpointState: cache.NewCheckpointStateCache(),
stateGen: stategen.New(cfg.BeaconDB),
opsService: cfg.OpsService,
stateGen: cfg.StateGen,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
}, nil
}
@@ -130,10 +138,18 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if beaconState == nil {
beaconState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
if featureconfig.Get().NewStateMgmt {
beaconState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state by root: %v", err)
}
} else {
beaconState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
}
}
}
@@ -144,6 +160,7 @@ func (s *Service) Start() {
if beaconState != nil {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
s.opsService.SetGenesisTime(beaconState.GenesisTime())
if err := s.initializeChainInfo(ctx); err != nil {
log.Fatalf("Could not set up chain info: %v", err)
}
@@ -164,9 +181,11 @@ func (s *Service) Start() {
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)
if finalizedCheckpoint.Epoch > 1 {
if err := s.pruneGarbageState(ctx, helpers.StartSlot(finalizedCheckpoint.Epoch)-params.BeaconConfig().SlotsPerEpoch); err != nil {
log.WithError(err).Warn("Could not prune old states")
if !featureconfig.Get().NewStateMgmt {
if finalizedCheckpoint.Epoch > 1 {
if err := s.pruneGarbageState(ctx, helpers.StartSlot(finalizedCheckpoint.Epoch)-params.BeaconConfig().SlotsPerEpoch); err != nil {
log.WithError(err).Warn("Could not prune old states")
}
}
}
@@ -260,6 +279,8 @@ func (s *Service) initializeBeaconChain(
return err
}
s.opsService.SetGenesisTime(genesisState.GenesisTime())
return nil
}
@@ -298,7 +319,7 @@ func (s *Service) saveGenesisValidators(ctx context.Context, state *stateTrie.Be
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.BeaconState) error {
stateRoot, err := genesisState.HashTreeRoot()
stateRoot, err := genesisState.HashTreeRoot(ctx)
if err != nil {
return err
}
@@ -311,8 +332,20 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.B
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, genesisBlkRoot, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: 0,
Root: genesisBlkRoot[:],
}); err != nil {
return err
}
} else {
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
}
if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
@@ -388,11 +421,25 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
// would be the genesis state and block.
return errors.New("no finalized epoch in the database")
}
finalizedState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
var finalizedState *stateTrie.BeaconState
if featureconfig.Get().NewStateMgmt {
finalizedRoot = s.beaconDB.LastArchivedIndexRoot(ctx)
finalizedState, err = s.stateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
if finalizedRoot == params.BeaconConfig().ZeroHash {
finalizedRoot = bytesutil.ToBytes32(finalized.Root)
}
} else {
finalizedState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
}
finalizedBlock, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(finalized.Root))
finalizedBlock, err := s.beaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
}
@@ -400,8 +447,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
if finalizedState == nil || finalizedBlock == nil {
return errors.New("finalized state and block can't be nil")
}
s.setHead(bytesutil.ToBytes32(finalized.Root), finalizedBlock, finalizedState)
s.setHead(finalizedRoot, finalizedBlock, finalizedState)
return nil
}
@@ -422,6 +468,10 @@ func (s *Service) pruneGarbageState(ctx context.Context, slot uint64) error {
return err
}
if err := s.beaconDB.SaveLastArchivedIndex(ctx, 0); err != nil {
return err
}
return nil
}

View File

@@ -133,6 +133,10 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
t.Fatalf("unable to set up web3 service: %v", err)
}
opsService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
if err != nil {
t.Fatal(err)
}
cfg := &Config{
BeaconBlockBuf: 0,
BeaconDB: beaconDB,
@@ -142,10 +146,12 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
StateNotifier: &mockBeaconNode{},
AttPool: attestations.NewPool(),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
OpsService: opsService,
}
if err != nil {
t.Fatalf("could not register blockchain service: %v", err)
}
chainService, err := NewService(ctx, cfg)
if err != nil {
t.Fatalf("unable to setup chain service: %v", err)

View File

@@ -219,7 +219,7 @@ func (ms *ChainService) GenesisTime() time.Time {
// CurrentSlot mocks the same method in the chain service.
func (ms *ChainService) CurrentSlot() uint64 {
return 0
return uint64(time.Now().Unix()-ms.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot
}
// Participation mocks the same method in the chain service.
@@ -234,3 +234,8 @@ func (ms *ChainService) IsValidAttestation(ctx context.Context, att *ethpb.Attes
// ClearCachedStates does nothing.
func (ms *ChainService) ClearCachedStates() {}
// HasInitSyncBlock mocks the same method in the chain service.
func (ms *ChainService) HasInitSyncBlock(root [32]byte) bool {
return false
}

View File

@@ -6,15 +6,21 @@ go_library(
"attestation_data.go",
"checkpoint_state.go",
"committee.go",
"committee_ids.go",
"common.go",
"eth1_data.go",
"hot_state_cache.go",
"skip_slot_cache.go",
"state_summary.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
visibility = ["//beacon-chain:__subpackages__"],
visibility = [
"//beacon-chain:__subpackages__",
"//tools:__subpackages__",
],
deps = [
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
@@ -24,6 +30,7 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -4,6 +4,7 @@ import (
"reflect"
"testing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
@@ -11,7 +12,7 @@ import (
)
func TestCheckpointStateCacheKeyFn_OK(t *testing.T) {
cp := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
cp := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 64,
})
@@ -45,7 +46,7 @@ func TestCheckpointStateCacheKeyFn_InvalidObj(t *testing.T) {
func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
cache := NewCheckpointStateCache()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 64,
})
@@ -75,7 +76,7 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
t.Error("incorrectly cached state")
}
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
st2, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 128,
})

View File

@@ -96,7 +96,7 @@ func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]
indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
start, end := startEndIndices(item, indexOffSet)
if int(end) > len(item.ShuffledIndices) {
if int(end) > len(item.ShuffledIndices) || end < start {
return nil, errors.New("requested index out of bound")
}

beacon-chain/cache/committee_ids.go (new file, 44 lines)
View File

@@ -0,0 +1,44 @@
package cache
import (
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
type committeeIDs struct {
cache *lru.Cache
lock sync.RWMutex
}
// CommitteeIDs for attestations.
var CommitteeIDs = newCommitteeIDs()
func newCommitteeIDs() *committeeIDs {
cache, err := lru.New(8)
if err != nil {
panic(err)
}
return &committeeIDs{cache: cache}
}
// AddIDs to the cache for attestation committees by epoch.
func (t *committeeIDs) AddIDs(indices []uint64, epoch uint64) {
t.lock.Lock()
defer t.lock.Unlock()
val, exists := t.cache.Get(epoch)
if exists {
indices = sliceutil.UnionUint64(append(indices, val.([]uint64)...))
}
t.cache.Add(epoch, indices)
}
// GetIDs from the cache for attestation committees by epoch.
func (t *committeeIDs) GetIDs(epoch uint64) []uint64 {
val, exists := t.cache.Get(epoch)
if !exists {
return []uint64{}
}
return val.([]uint64)
}
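CommitteeIDs is a small package-level LRU keyed by epoch: AddIDs unions new committee indices into whatever is already cached for that epoch, and GetIDs returns an empty slice on a miss. A minimal usage sketch, assuming the beacon-chain/cache import path used elsewhere in this diff:
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

func main() {
	// Indices added for the same epoch are unioned, not replaced.
	cache.CommitteeIDs.AddIDs([]uint64{1, 2}, 10)
	cache.CommitteeIDs.AddIDs([]uint64{2, 3}, 10)
	fmt.Println(cache.CommitteeIDs.GetIDs(10)) // some ordering of 1, 2, 3
	fmt.Println(cache.CommitteeIDs.GetIDs(11)) // [] on a miss
}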

View File

@@ -1,6 +1,7 @@
package cache
import (
"math"
"reflect"
"sort"
"strconv"
@@ -172,3 +173,19 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
t.Error("incorrect key received for slot 199")
}
}
func TestCommitteeCacheOutOfRange(t *testing.T) {
cache := NewCommitteesCache()
seed := bytesutil.ToBytes32([]byte("foo"))
cache.CommitteeCache.Add(&Committees{
CommitteeCount: 1,
Seed: seed,
ShuffledIndices: []uint64{0},
SortedIndices: []uint64{},
ProposerIndices: []uint64{},
})
_, err := cache.Committee(0, seed, math.MaxUint64) // Overflow!
if err == nil {
t.Fatal("Did not fail as expected")
}
}
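The extra end < start guard matters because the index arithmetic above is done in uint64 and wraps silently; the new test drives it with math.MaxUint64. A tiny illustration of the wraparound, nothing Prysm-specific:
package main

import (
	"fmt"
	"math"
)

func main() {
	var index uint64 = math.MaxUint64
	offset := index + 2 // wraps around to 1 instead of failing loudly
	fmt.Println(offset) // 1
	// With a wrapped offset, a derived end can land below start, which the
	// committee cache now rejects instead of slicing out of bounds.
}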

View File

@@ -9,6 +9,7 @@ import (
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
)
var _ = PendingDepositsFetcher(&DepositCache{})
@@ -33,8 +34,12 @@ func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
func TestRemovePendingDeposit_OK(t *testing.T) {
db := DepositCache{}
depToRemove := &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}
otherDep := &ethpb.Deposit{Proof: [][]byte{[]byte("B")}}
proof1 := make([][]byte, 33)
proof1[0] = bytesutil.PadTo([]byte{'A'}, 32)
proof2 := make([][]byte, 33)
proof2[0] = bytesutil.PadTo([]byte{'A'}, 32)
depToRemove := &ethpb.Deposit{Proof: proof1}
otherDep := &ethpb.Deposit{Proof: proof2}
db.pendingDeposits = []*dbpb.DepositContainer{
{Deposit: depToRemove, Index: 1},
{Deposit: otherDep, Index: 5},
@@ -57,7 +62,9 @@ func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
func TestPendingDeposit_RoundTrip(t *testing.T) {
dc := DepositCache{}
dep := &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}
proof := make([][]byte, 33)
proof[0] = bytesutil.PadTo([]byte{'A'}, 32)
dep := &ethpb.Deposit{Proof: proof}
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
dc.RemovePendingDeposit(context.Background(), dep)
if len(dc.pendingDeposits) != 0 {

View File

@@ -10,7 +10,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"go.opencensus.io/trace"
)
var (
@@ -29,6 +29,7 @@ var (
type SkipSlotCache struct {
cache *lru.Cache
lock sync.RWMutex
disabled bool // Allow for programmatic toggling of the cache, useful during initial sync.
inProgress map[uint64]bool
}
@@ -44,10 +45,22 @@ func NewSkipSlotCache() *SkipSlotCache {
}
}
// Enable the skip slot cache.
func (c *SkipSlotCache) Enable() {
c.disabled = false
}
// Disable the skip slot cache.
func (c *SkipSlotCache) Disable() {
c.disabled = true
}
// Get waits for any in progress calculation to complete before returning a
// cached response, if any.
func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.BeaconState, error) {
if !featureconfig.Get().EnableSkipSlotsCache {
ctx, span := trace.StartSpan(ctx, "skipSlotCache.Get")
defer span.End()
if c.disabled {
// Return a miss result if cache is not enabled.
skipSlotCacheMiss.Inc()
return nil, nil
@@ -57,6 +70,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
// Another identical request may be in progress already. Let's wait until
// any in progress request resolves or our timeout is exceeded.
inProgress := false
for {
if ctx.Err() != nil {
return nil, ctx.Err()
@@ -67,6 +81,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
c.lock.RUnlock()
break
}
inProgress = true
c.lock.RUnlock()
// This increasing backoff is to decrease the CPU cycles while waiting
@@ -75,21 +90,24 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
delay *= delayFactor
delay = math.Min(delay, maxDelay)
}
span.AddAttributes(trace.BoolAttribute("inProgress", inProgress))
item, exists := c.cache.Get(slot)
if exists && item != nil {
skipSlotCacheHit.Inc()
span.AddAttributes(trace.BoolAttribute("hit", true))
return item.(*stateTrie.BeaconState).Copy(), nil
}
skipSlotCacheMiss.Inc()
span.AddAttributes(trace.BoolAttribute("hit", false))
return nil, nil
}
// MarkInProgress a request so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
if !featureconfig.Get().EnableSkipSlotsCache {
if c.disabled {
return nil
}
@@ -106,7 +124,7 @@ func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
if !featureconfig.Get().EnableSkipSlotsCache {
if c.disabled {
return nil
}
@@ -119,7 +137,7 @@ func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
// Put the response in the cache.
func (c *SkipSlotCache) Put(ctx context.Context, slot uint64, state *stateTrie.BeaconState) error {
if !featureconfig.Get().EnableSkipSlotsCache {
if c.disabled {
return nil
}
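With the featureconfig gate gone, the cache is toggled programmatically: the spectest and fuzz suites further down in this diff switch it off in init() or at the top of a test and re-enable it on the way out. The pattern looks roughly like this (test name is hypothetical, imports elided):
func TestSomething_WithoutSkipSlotCache(t *testing.T) {
	state.SkipSlotCache.Disable()
	defer state.SkipSlotCache.Enable()
	// ... run ProcessSlots / ExecuteStateTransition without cached skip-slot states ...
}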

View File

@@ -8,15 +8,11 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
)
func TestSkipSlotCache_RoundTrip(t *testing.T) {
ctx := context.Background()
c := cache.NewSkipSlotCache()
fc := featureconfig.Get()
fc.EnableSkipSlotsCache = true
featureconfig.Init(fc)
state, err := c.Get(ctx, 5)
if err != nil {

beacon-chain/cache/state_summary.go (new file, 65 lines)
View File

@@ -0,0 +1,65 @@
package cache
import (
"sync"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
// StateSummaryCache caches state summary objects.
type StateSummaryCache struct {
initSyncStateSummaries map[[32]byte]*pb.StateSummary
initSyncStateSummariesLock sync.RWMutex
}
// NewStateSummaryCache creates a new state summary cache.
func NewStateSummaryCache() *StateSummaryCache {
return &StateSummaryCache{
initSyncStateSummaries: make(map[[32]byte]*pb.StateSummary),
}
}
// Put saves a state summary to the initial sync state summaries cache.
func (s *StateSummaryCache) Put(r [32]byte, b *pb.StateSummary) {
s.initSyncStateSummariesLock.Lock()
defer s.initSyncStateSummariesLock.Unlock()
s.initSyncStateSummaries[r] = b
}
// Has checks if a state summary exists in the initial sync state summaries cache using the root
// of the block.
func (s *StateSummaryCache) Has(r [32]byte) bool {
s.initSyncStateSummariesLock.RLock()
defer s.initSyncStateSummariesLock.RUnlock()
_, ok := s.initSyncStateSummaries[r]
return ok
}
// Get retrieves a state summary from the initial sync state summaries cache using the root of
// the block.
func (s *StateSummaryCache) Get(r [32]byte) *pb.StateSummary {
s.initSyncStateSummariesLock.RLock()
defer s.initSyncStateSummariesLock.RUnlock()
b := s.initSyncStateSummaries[r]
return b
}
// GetAll retrieves all the beacon state summaries from the initial sync state summaries cache; the returned
// state summaries are unordered.
func (s *StateSummaryCache) GetAll() []*pb.StateSummary {
s.initSyncStateSummariesLock.RLock()
defer s.initSyncStateSummariesLock.RUnlock()
blks := make([]*pb.StateSummary, 0, len(s.initSyncStateSummaries))
for _, b := range s.initSyncStateSummaries {
blks = append(blks, b)
}
return blks
}
// Clear clears out the initial sync state summaries cache.
func (s *StateSummaryCache) Clear() {
s.initSyncStateSummariesLock.Lock()
defer s.initSyncStateSummariesLock.Unlock()
s.initSyncStateSummaries = make(map[[32]byte]*pb.StateSummary)
}
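The cache is a plain mutex-guarded map keyed by block root, so Get returns nil on a miss and GetAll makes no ordering promise. A minimal sketch, assuming the cache and proto import paths used elsewhere in this diff (imports elided):
summaries := cache.NewStateSummaryCache()
root := [32]byte{'a'}
summaries.Put(root, &pb.StateSummary{Slot: 5, Root: root[:]})
if summaries.Has(root) {
	s := summaries.Get(root) // nil if the root was never Put
	_ = s
}
all := summaries.GetAll() // unordered snapshot
summaries.Clear()         // drops everything, e.g. after flushing to the DB
_ = all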

View File

@@ -58,6 +58,25 @@ func verifySigningRoot(obj interface{}, pub []byte, signature []byte, domain uin
return nil
}
func verifyBlockRoot(blk *ethpb.BeaconBlock, pub []byte, signature []byte, domain uint64) error {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return errors.Wrap(err, "could not convert bytes to public key")
}
sig, err := bls.SignatureFromBytes(signature)
if err != nil {
return errors.Wrap(err, "could not convert bytes to signature")
}
root, err := stateutil.BlockRoot(blk)
if err != nil {
return errors.Wrap(err, "could not get signing root")
}
if !sig.Verify(root[:], publicKey, domain) {
return ErrSigFailedToVerify
}
return nil
}
// Deprecated: This method uses deprecated ssz.SigningRoot.
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature []byte, domain uint64) error {
publicKey, err := bls.PublicKeyFromBytes(pub)
@@ -223,7 +242,7 @@ func ProcessBlockHeader(
if err != nil {
return nil, err
}
if err := verifySigningRoot(block.Block, proposer.PublicKey, block.Signature, domain); err != nil {
if err := verifyBlockRoot(block.Block, proposer.PublicKey, block.Signature, domain); err != nil {
return nil, ErrSigFailedToVerify
}
@@ -286,7 +305,7 @@ func ProcessBlockHeaderNoVerify(
return nil, fmt.Errorf("proposer at index %d was previously slashed", idx)
}
bodyRoot, err := ssz.HashTreeRoot(block.Body)
bodyRoot, err := stateutil.BlockBodyRoot(block.Body)
if err != nil {
return nil, err
}
@@ -856,10 +875,7 @@ func VerifyAttestation(ctx context.Context, beaconState *stateTrie.BeaconState,
if err != nil {
return err
}
indexedAtt, err := attestationutil.ConvertToIndexed(ctx, att, committee)
if err != nil {
return errors.Wrap(err, "could not convert to indexed attestation")
}
indexedAtt := attestationutil.ConvertToIndexed(ctx, att, committee)
return VerifyIndexedAttestation(ctx, beaconState, indexedAtt)
}
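ConvertToIndexed and AttestingIndices now return their results directly instead of (value, error), which is why every call site in the tests that follow drops the second return value. The new call shape, with committee resolution elided and the unqualified VerifyIndexedAttestation call matching the in-package usage shown above:
// committee is assumed to already be resolved for att.Data; both helpers are
// now infallible, so there is no error to check on these two lines.
indices := attestationutil.AttestingIndices(att.AggregationBits, committee)
_ = indices // e.g. feed into reward bookkeeping
indexed := attestationutil.ConvertToIndexed(ctx, att, committee)
return VerifyIndexedAttestation(ctx, beaconState, indexed)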

View File

@@ -943,7 +943,7 @@ func TestProcessAttestations_OK(t *testing.T) {
if err != nil {
t.Error(err)
}
attestingIndices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
if err != nil {
t.Error(err)
}
@@ -1004,7 +1004,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
if err != nil {
t.Error(err)
}
attestingIndices1, err := attestationutil.AttestingIndices(att1.AggregationBits, committee)
attestingIndices1 := attestationutil.AttestingIndices(att1.AggregationBits, committee)
if err != nil {
t.Fatal(err)
}
@@ -1032,7 +1032,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
if err != nil {
t.Error(err)
}
attestingIndices2, err := attestationutil.AttestingIndices(att2.AggregationBits, committee)
attestingIndices2 := attestationutil.AttestingIndices(att2.AggregationBits, committee)
if err != nil {
t.Fatal(err)
}
@@ -1082,7 +1082,7 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
if err != nil {
t.Error(err)
}
attestingIndices1, err := attestationutil.AttestingIndices(att1.AggregationBits, committee)
attestingIndices1 := attestationutil.AttestingIndices(att1.AggregationBits, committee)
if err != nil {
t.Fatal(err)
}
@@ -1109,7 +1109,7 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
if err != nil {
t.Error(err)
}
attestingIndices2, err := attestationutil.AttestingIndices(att2.AggregationBits, committee)
attestingIndices2 := attestationutil.AttestingIndices(att2.AggregationBits, committee)
if err != nil {
t.Fatal(err)
}
@@ -1240,10 +1240,7 @@ func TestConvertToIndexed_OK(t *testing.T) {
if err != nil {
t.Error(err)
}
ia, err := attestationutil.ConvertToIndexed(context.Background(), attestation, committee)
if err != nil {
t.Errorf("failed to convert attestation to indexed attestation: %v", err)
}
ia := attestationutil.ConvertToIndexed(context.Background(), attestation, committee)
if !reflect.DeepEqual(wanted, ia) {
diff, _ := messagediff.PrettyDiff(ia, wanted)
t.Log(diff)

View File

@@ -21,6 +21,10 @@ import (
"gopkg.in/d4l3k/messagediff.v1"
)
func init() {
state.SkipSlotCache.Disable()
}
func runBlockProcessingTest(t *testing.T, config string) {
if err := spectest.SetConfig(config); err != nil {
t.Fatal(err)

View File

@@ -170,7 +170,7 @@ func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, err
// a callback is used here to apply the following actions to all validators
// below equally.
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) error {
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) (bool, error) {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
minSlashing := mathutil.Min(totalSlashing*3, totalBalance)
@@ -178,10 +178,11 @@ func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, err
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty := penaltyNumerator / totalBalance * increment
if err := helpers.DecreaseBalance(state, uint64(idx), penalty); err != nil {
return err
return false, err
}
return true, nil
}
return nil
return false, nil
})
return state, err
}
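ApplyToEveryValidator's callback changes from func(idx int, val *ethpb.Validator) error to returning (bool, error); judging by the call sites in this hunk and the ProcessFinalUpdates and precompute-slashings hunks that follow, the boolean reports whether the callback mutated the validator so the state knows to write it back. A hedged sketch of the new shape (the mutation itself is made up for illustration, imports elided):
err := state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) (bool, error) {
	if val == nil {
		return false, fmt.Errorf("validator %d is nil in state", idx)
	}
	if val.Slashed {
		val.ExitEpoch = 0 // hypothetical mutation, not what ProcessSlashings does
		return true, nil  // true: this validator changed and should be persisted
	}
	return false, nil // false: untouched, nothing to write back
})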
@@ -235,12 +236,12 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState,
bals := state.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val *ethpb.Validator) error {
validatorFunc := func(idx int, val *ethpb.Validator) (bool, error) {
if val == nil {
return fmt.Errorf("validator %d is nil in state", idx)
return false, fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
return fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
return false, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
}
balance := bals[idx]
halfInc := params.BeaconConfig().EffectiveBalanceIncrement / 2
@@ -249,8 +250,9 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState,
if val.EffectiveBalance > balance-balance%params.BeaconConfig().EffectiveBalanceIncrement {
val.EffectiveBalance = balance - balance%params.BeaconConfig().EffectiveBalanceIncrement
}
return true, nil
}
return nil
return false, nil
}
if err := state.ApplyToEveryValidator(validatorFunc); err != nil {
@@ -334,10 +336,7 @@ func unslashedAttestingIndices(state *stateTrie.BeaconState, atts []*pb.PendingA
if err != nil {
return nil, err
}
attestingIndices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
if err != nil {
return nil, errors.Wrap(err, "could not get attester indices")
}
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
// Create a set for attesting indices
set := make([]uint64, 0, len(attestingIndices))
for _, index := range attestingIndices {

View File

@@ -47,10 +47,7 @@ func ProcessAttestations(
if err != nil {
return nil, nil, err
}
indices, err := attestationutil.AttestingIndices(a.AggregationBits, committee)
if err != nil {
return nil, nil, err
}
indices := attestationutil.AttestingIndices(a.AggregationBits, committee)
vp = UpdateValidator(vp, v, indices, a, a.Data.Slot)
}

View File

@@ -219,7 +219,7 @@ func TestProcessAttestations(t *testing.T) {
if err != nil {
t.Error(err)
}
indices, _ := attestationutil.AttestingIndices(att1.AggregationBits, committee)
indices := attestationutil.AttestingIndices(att1.AggregationBits, committee)
for _, i := range indices {
if !vp[i].IsPrevEpochAttester {
t.Error("Not a prev epoch attester")
@@ -229,7 +229,7 @@ func TestProcessAttestations(t *testing.T) {
if err != nil {
t.Error(err)
}
indices, _ = attestationutil.AttestingIndices(att2.AggregationBits, committee)
indices = attestationutil.AttestingIndices(att2.AggregationBits, committee)
for _, i := range indices {
if !vp[i].IsPrevEpochAttester {
t.Error("Not a prev epoch attester")

View File

@@ -72,7 +72,7 @@ func attestationDeltas(state *stateTrie.BeaconState, bp *Balance, vp []*Validato
func attestationDelta(state *stateTrie.BeaconState, bp *Balance, v *Validator) (uint64, uint64) {
eligible := v.IsActivePrevEpoch || (v.IsSlashed && !v.IsWithdrawableCurrentEpoch)
if !eligible {
if !eligible || bp.CurrentEpoch == 0 {
return 0, 0
}
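bp.CurrentEpoch here is the current-epoch total effective balance, which the delta formulas divide by; the new test in the next file forces it to zero, and without this guard the division would panic. A generic illustration of the hazard, not the actual delta formula:
// Integer division by zero panics at runtime in Go, so any reward/penalty
// term of the form numerator/totalBalance needs the zero case handled first.
func safePortion(numerator, totalBalance uint64) uint64 {
	if totalBalance == 0 {
		return 0
	}
	return numerator / totalBalance
}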

View File

@@ -146,6 +146,47 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
}
}
func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
e := params.BeaconConfig().SlotsPerEpoch
validatorCount := uint64(2048)
base := buildState(e+2, validatorCount)
atts := make([]*pb.PendingAttestation, 3)
var emptyRoot [32]byte
for i := 0; i < len(atts); i++ {
atts[i] = &pb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Root: emptyRoot[:],
},
Source: &ethpb.Checkpoint{
Root: emptyRoot[:],
},
BeaconBlockRoot: emptyRoot[:],
},
AggregationBits: bitfield.Bitlist{0xC0, 0xC0, 0xC0, 0xC0, 0x01},
InclusionDelay: 1,
}
}
base.PreviousEpochAttestations = atts
state, err := state.InitializeFromProto(base)
if err != nil {
t.Fatal(err)
}
vp, bp := New(context.Background(), state)
vp, bp, err = ProcessAttestations(context.Background(), state, vp, bp)
if err != nil {
t.Fatal(err)
}
bp.CurrentEpoch = 0 // Could cause a divide by zero panic.
_, _, err = attestationDeltas(state, bp, vp)
if err != nil {
t.Fatal(err)
}
}
func buildState(slot uint64, validatorCount uint64) *pb.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ {

View File

@@ -21,7 +21,7 @@ func ProcessSlashingsPrecompute(state *stateTrie.BeaconState, p *Balance) error
totalSlashing += slashing
}
validatorFunc := func(idx int, val *ethpb.Validator) error {
validatorFunc := func(idx int, val *ethpb.Validator) (bool, error) {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
minSlashing := mathutil.Min(totalSlashing*3, p.CurrentEpoch)
@@ -29,10 +29,11 @@ func ProcessSlashingsPrecompute(state *stateTrie.BeaconState, p *Balance) error
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty := penaltyNumerator / p.CurrentEpoch * increment
if err := helpers.DecreaseBalance(state, uint64(idx), penalty); err != nil {
return err
return false, err
}
return true, nil
}
return nil
return false, nil
}
return state.ApplyToEveryValidator(validatorFunc)

View File

@@ -15,8 +15,8 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers",
visibility = [
"//beacon-chain:__subpackages__",
"//shared/testutil:__pkg__",
"//shared/benchutil/benchmark_files:__subpackages__",
"//shared/testutil:__pkg__",
"//slasher:__subpackages__",
"//tools:__subpackages__",
"//validator:__subpackages__",

View File

@@ -134,7 +134,7 @@ func TestAttestationParticipants_NoCommitteeCache(t *testing.T) {
if err != nil {
t.Error(err)
}
result, err := attestationutil.AttestingIndices(tt.bitfield, committee)
result := attestationutil.AttestingIndices(tt.bitfield, committee)
if err != nil {
t.Errorf("Failed to get attestation participants: %v", err)
}
@@ -167,7 +167,7 @@ func TestAttestationParticipants_EmptyBitfield(t *testing.T) {
if err != nil {
t.Fatal(err)
}
indices, err := attestationutil.AttestingIndices(bitfield.NewBitlist(128), committee)
indices := attestationutil.AttestingIndices(bitfield.NewBitlist(128), committee)
if err != nil {
t.Fatalf("attesting indices failed: %v", err)
}

View File

@@ -16,6 +16,7 @@ go_library(
"//shared/testutil:__pkg__",
"//tools/benchmark-files-gen:__pkg__",
"//tools/genesis-state-gen:__pkg__",
"//endtoend:__pkg__",
],
deps = [
"//beacon-chain/cache:go_default_library",
@@ -42,7 +43,6 @@ go_test(
name = "go_default_test",
size = "small",
srcs = [
"benchmarks_test.go",
"skip_slot_cache_test.go",
"state_fuzz_test.go",
"state_test.go",
@@ -51,15 +51,14 @@ go_test(
],
data = ["//shared/benchutil/benchmark_files:benchmark_data"],
embed = [":go_default_library"],
shard_count = 3,
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/benchutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
@@ -72,3 +71,31 @@ go_test(
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_benchmark_test",
size = "large",
srcs = ["benchmarks_test.go"],
args = [
"-test.bench=.",
"-test.benchmem",
"-test.v",
],
local = True,
tags = [
"benchmark",
"manual",
"no-cache",
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/benchutil:go_default_library",
"//shared/params:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
],
)

View File

@@ -1,12 +1,15 @@
package state
package state_benchmark_test
import (
"context"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/benchutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
@@ -24,7 +27,7 @@ func TestBenchmarkExecuteStateTransition(t *testing.T) {
t.Fatal(err)
}
if _, err := ExecuteStateTransition(context.Background(), beaconState, block); err != nil {
if _, err := state.ExecuteStateTransition(context.Background(), beaconState, block); err != nil {
t.Fatalf("failed to process block, benchmarks will fail: %v", err)
}
}
@@ -44,7 +47,7 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
b.N = runAmount
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := ExecuteStateTransition(context.Background(), cleanStates[i], block); err != nil {
if _, err := state.ExecuteStateTransition(context.Background(), cleanStates[i], block); err != nil {
b.Fatal(err)
}
}
@@ -72,14 +75,14 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
}
beaconState.SetSlot(currentSlot)
// Run the state transition once to populate the cache.
if _, err := ExecuteStateTransition(context.Background(), beaconState, block); err != nil {
if _, err := state.ExecuteStateTransition(context.Background(), beaconState, block); err != nil {
b.Fatalf("failed to process block, benchmarks will fail: %v", err)
}
b.N = runAmount
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := ExecuteStateTransition(context.Background(), cleanStates[i], block); err != nil {
if _, err := state.ExecuteStateTransition(context.Background(), cleanStates[i], block); err != nil {
b.Fatalf("failed to process block, benchmarks will fail: %v", err)
}
}
@@ -105,7 +108,7 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
for i := 0; i < b.N; i++ {
// ProcessEpochPrecompute is the optimized version of process epoch. It's enabled by default
// at run time.
if _, err := ProcessEpochPrecompute(context.Background(), beaconState.Copy()); err != nil {
if _, err := state.ProcessEpochPrecompute(context.Background(), beaconState.Copy()); err != nil {
b.Fatal(err)
}
}
@@ -133,19 +136,88 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
}
// Hydrate the HashTreeRootState cache.
if _, err := beaconState.HashTreeRoot(); err != nil {
if _, err := beaconState.HashTreeRoot(ctx); err != nil {
b.Fatal(err)
}
b.N = 50
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := beaconState.HashTreeRoot(); err != nil {
if _, err := beaconState.HashTreeRoot(ctx); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkMarshalState_FullState(b *testing.B) {
beaconState, err := benchutil.PreGenState2FullEpochs()
if err != nil {
b.Fatal(err)
}
natState := beaconState.InnerStateUnsafe()
b.Run("Proto_Marshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
if _, err := proto.Marshal(natState); err != nil {
b.Fatal(err)
}
}
})
b.Run("Fast_SSZ_Marshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
if _, err := natState.MarshalSSZ(); err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkUnmarshalState_FullState(b *testing.B) {
beaconState, err := benchutil.PreGenState2FullEpochs()
if err != nil {
b.Fatal(err)
}
natState := beaconState.InnerStateUnsafe()
protoObject, err := proto.Marshal(natState)
if err != nil {
b.Fatal(err)
}
sszObject, err := natState.MarshalSSZ()
if err != nil {
b.Fatal(err)
}
b.Run("Proto_Unmarshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
if err := proto.Unmarshal(protoObject, &pb.BeaconState{}); err != nil {
b.Fatal(err)
}
}
})
b.Run("Fast_SSZ_Unmarshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
sszState := &pb.BeaconState{}
if err := sszState.UnmarshalSSZ(sszObject); err != nil {
b.Fatal(err)
}
}
})
}
func clonedStates(beaconState *beaconstate.BeaconState) []*beaconstate.BeaconState {
clonedStates := make([]*beaconstate.BeaconState, runAmount)
for i := 0; i < runAmount; i++ {

View File

@@ -4,8 +4,8 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)
// skipSlotCache exists for the unlikely scenario that is a large gap between the head state and
// SkipSlotCache exists for the unlikely scenario that is a large gap between the head state and
// the current slot. If the beacon chain were ever to be stalled for several epochs, it may be
// difficult or impossible to compute the appropriate beacon state for assignments within a
// reasonable amount of time.
var skipSlotCache = cache.NewSkipSlotCache()
var SkipSlotCache = cache.NewSkipSlotCache()

View File

@@ -7,26 +7,19 @@ import (
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestSkipSlotCache_OK(t *testing.T) {
state.SkipSlotCache.Enable()
defer state.SkipSlotCache.Disable()
bState, privs := testutil.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
originalState, _ := beaconstate.InitializeFromProto(bState.CloneInnerState())
blkCfg := testutil.DefaultBlockGenConfig()
blkCfg.NumAttestations = 1
cfg := featureconfig.Get()
cfg.EnableSkipSlotsCache = true
featureconfig.Init(cfg)
defer func() {
cfg.EnableSkipSlotsCache = false
featureconfig.Init(cfg)
}()
// First transition will be with an empty cache, so the cache becomes populated
// with the state
blk, err := testutil.GenerateFullBlock(bState, privs, blkCfg, originalState.Slot()+10)

View File

@@ -34,8 +34,8 @@ go_test(
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)
@@ -68,8 +68,8 @@ go_test(
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)

View File

@@ -15,6 +15,10 @@ import (
"gopkg.in/d4l3k/messagediff.v1"
)
func init() {
state.SkipSlotCache.Disable()
}
func runSlotProcessingTests(t *testing.T, config string) {
if err := spectest.SetConfig(config); err != nil {
t.Fatal(err)

View File

@@ -9,6 +9,8 @@ import (
)
func TestGenesisBeaconState_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
deposits := make([]*ethpb.Deposit, 300000)
@@ -29,6 +31,8 @@ func TestGenesisBeaconState_1000(t *testing.T) {
}
func TestOptimizedGenesisBeaconState_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
var genesisTime uint64
@@ -49,6 +53,8 @@ func TestOptimizedGenesisBeaconState_1000(t *testing.T) {
}
func TestIsValidGenesisState_100000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
var chainStartDepositCount, currentTime uint64

View File

@@ -72,7 +72,7 @@ func ExecuteStateTransition(
interop.WriteBlockToDisk(signed, false)
interop.WriteStateToDisk(state)
postStateRoot, err := state.HashTreeRoot()
postStateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
return nil, err
}
@@ -181,7 +181,7 @@ func CalculateStateRoot(
return [32]byte{}, errors.Wrap(err, "could not process block")
}
return state.HashTreeRoot()
return state.HashTreeRoot(ctx)
}
// ProcessSlot happens every slot and focuses on the slot counter and block roots record updates.
@@ -205,7 +205,7 @@ func ProcessSlot(ctx context.Context, state *stateTrie.BeaconState) (*stateTrie.
defer span.End()
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot())))
prevStateRoot, err := state.HashTreeRoot()
prevStateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
return nil, err
}
@@ -274,7 +274,7 @@ func ProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64
key := state.Slot()
// Restart from cached value, if one exists.
cachedState, err := skipSlotCache.Get(ctx, key)
cachedState, err := SkipSlotCache.Get(ctx, key)
if err != nil {
return nil, err
}
@@ -283,8 +283,8 @@ func ProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64
highestSlot = cachedState.Slot()
state = cachedState
}
if err := skipSlotCache.MarkInProgress(key); err == cache.ErrAlreadyInProgress {
cachedState, err = skipSlotCache.Get(ctx, key)
if err := SkipSlotCache.MarkInProgress(key); err == cache.ErrAlreadyInProgress {
cachedState, err = SkipSlotCache.Get(ctx, key)
if err != nil {
return nil, err
}
@@ -295,14 +295,14 @@ func ProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64
} else if err != nil {
return nil, err
}
defer skipSlotCache.MarkNotInProgress(key)
defer SkipSlotCache.MarkNotInProgress(key)
for state.Slot() < slot {
if ctx.Err() != nil {
traceutil.AnnotateError(span, ctx.Err())
// Cache last best value.
if highestSlot < state.Slot() {
skipSlotCache.Put(ctx, key, state)
SkipSlotCache.Put(ctx, key, state)
}
return nil, ctx.Err()
}
@@ -322,7 +322,7 @@ func ProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64
}
if highestSlot < state.Slot() {
skipSlotCache.Put(ctx, key, state)
SkipSlotCache.Put(ctx, key, state)
}
return state, nil

View File

@@ -10,6 +10,8 @@ import (
)
func TestFuzzExecuteStateTransition_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
sb := &ethpb.SignedBeaconBlock{}
@@ -26,6 +28,8 @@ func TestFuzzExecuteStateTransition_1000(t *testing.T) {
}
func TestFuzzExecuteStateTransitionNoVerifyAttSigs_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
sb := &ethpb.SignedBeaconBlock{}
@@ -42,6 +46,8 @@ func TestFuzzExecuteStateTransitionNoVerifyAttSigs_1000(t *testing.T) {
}
func TestFuzzCalculateStateRoot_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
sb := &ethpb.SignedBeaconBlock{}
@@ -58,6 +64,8 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) {
}
func TestFuzzProcessSlot_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
fuzzer := fuzz.NewWithSeed(0)
@@ -72,6 +80,8 @@ func TestFuzzProcessSlot_1000(t *testing.T) {
}
func TestFuzzProcessSlots_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
slot := uint64(0)
@@ -88,6 +98,8 @@ func TestFuzzProcessSlots_1000(t *testing.T) {
}
func TestFuzzProcessBlock_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
sb := &ethpb.SignedBeaconBlock{}
@@ -104,6 +116,8 @@ func TestFuzzProcessBlock_1000(t *testing.T) {
}
func TestFuzzProcessBlockNoVerifyAttSigs_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
sb := &ethpb.SignedBeaconBlock{}
@@ -120,6 +134,8 @@ func TestFuzzProcessBlockNoVerifyAttSigs_1000(t *testing.T) {
}
func TestFuzzProcessOperations_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
bb := &ethpb.BeaconBlockBody{}
@@ -136,6 +152,8 @@ func TestFuzzProcessOperations_1000(t *testing.T) {
}
func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
bb := &ethpb.BeaconBlockBody{}
@@ -152,6 +170,8 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
}
func TestFuzzverifyOperationLengths_10000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
state := &stateTrie.BeaconState{}
bb := &ethpb.BeaconBlockBody{}
fuzzer := fuzz.NewWithSeed(0)
@@ -164,6 +184,8 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) {
}
func TestFuzzCanProcessEpoch_10000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
state := &stateTrie.BeaconState{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
@@ -174,6 +196,8 @@ func TestFuzzCanProcessEpoch_10000(t *testing.T) {
}
func TestFuzzProcessEpochPrecompute_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
fuzzer := fuzz.NewWithSeed(0)
@@ -188,6 +212,8 @@ func TestFuzzProcessEpochPrecompute_1000(t *testing.T) {
}
func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state := &stateTrie.BeaconState{}
sb := &ethpb.SignedBeaconBlock{}

View File

@@ -25,6 +25,10 @@ import (
"github.com/sirupsen/logrus"
)
func init() {
state.SkipSlotCache.Disable()
}
func TestExecuteStateTransition_IncorrectSlot(t *testing.T) {
base := &pb.BeaconState{
Slot: 5,
@@ -428,7 +432,7 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) {
if err != nil {
t.Error(err)
}
attestingIndices, err := attestationutil.AttestingIndices(blockAtt.AggregationBits, committee)
attestingIndices := attestationutil.AttestingIndices(blockAtt.AggregationBits, committee)
if err != nil {
t.Error(err)
}
@@ -743,7 +747,7 @@ func TestProcessBlk_AttsBasedOnValidatorCount(t *testing.T) {
if err != nil {
t.Error(err)
}
attestingIndices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
if err != nil {
t.Error(err)
}

View File

@@ -166,7 +166,7 @@ func SlashValidator(state *stateTrie.BeaconState, slashedIdx uint64, whistleBlow
return state, nil
}
// ActivatedValidatorIndices determines the indices activated during the current epoch.
// ActivatedValidatorIndices determines the indices activated during the given epoch.
func ActivatedValidatorIndices(epoch uint64, validators []*ethpb.Validator) []uint64 {
activations := make([]uint64, 0)
delayedActivationEpoch := helpers.ActivationExitEpoch(epoch)
@@ -179,7 +179,7 @@ func ActivatedValidatorIndices(epoch uint64, validators []*ethpb.Validator) []ui
return activations
}
// SlashedValidatorIndices determines the indices slashed during the current epoch.
// SlashedValidatorIndices determines the indices slashed during the given epoch.
func SlashedValidatorIndices(epoch uint64, validators []*ethpb.Validator) []uint64 {
slashed := make([]uint64, 0)
for i := 0; i < len(validators); i++ {
@@ -225,9 +225,51 @@ func ExitedValidatorIndices(epoch uint64, validators []*ethpb.Validator, activeV
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
exited = append(exited, uint64(i))
}
}
return exited, nil
}
// EjectedValidatorIndices determines the indices ejected during the given epoch.
func EjectedValidatorIndices(epoch uint64, validators []*ethpb.Validator, activeValidatorCount uint64) ([]uint64, error) {
ejected := make([]uint64, 0)
exitEpochs := make([]uint64, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch)
}
}
exitQueueEpoch := uint64(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}
// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := 0
for _, val := range validators {
if val.ExitEpoch == exitQueueEpoch {
exitQueueChurn++
}
}
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
if err != nil {
return nil, errors.Wrap(err, "could not get churn limit")
}
if churn < uint64(exitQueueChurn) {
exitQueueEpoch++
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
ejected = append(ejected, uint64(i))
}
}
return ejected, nil
}
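EjectedValidatorIndices is a near copy of ExitedValidatorIndices above; the only behavioral split is the effective-balance comparison, so for a given epoch a leaving validator lands in exactly one of the two slices. A sketch of calling both (the validators package qualifier and the epoch, vals, and activeCount variables are assumed):
exited, err := validators.ExitedValidatorIndices(epoch, vals, activeCount)
if err != nil {
	return err
}
ejected, err := validators.EjectedValidatorIndices(epoch, vals, activeCount)
if err != nil {
	return err
}
// exited: EffectiveBalance > EjectionBalance; ejected: EffectiveBalance <= EjectionBalance.
_ = append(exited, ejected...)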

View File

@@ -344,14 +344,17 @@ func TestExitedValidatorIndices(t *testing.T) {
Slot: helpers.SlotToEpoch(1),
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
@@ -364,6 +367,7 @@ func TestExitedValidatorIndices(t *testing.T) {
Slot: helpers.SlotToEpoch(1),
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
@@ -376,6 +380,7 @@ func TestExitedValidatorIndices(t *testing.T) {
Slot: helpers.SlotToEpoch(1),
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},

View File

@@ -26,6 +26,7 @@ go_library(
"//tools:__subpackages__",
],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -1,8 +1,11 @@
package db
import "github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
)
// NewDB initializes a new DB.
func NewDB(dirPath string) (Database, error) {
return kv.NewKVStore(dirPath)
func NewDB(dirPath string, stateSummaryCache *cache.StateSummaryCache) (Database, error) {
return kv.NewKVStore(dirPath, stateSummaryCache)
}
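NewDB (and the kafka-wrapped variant below) now threads a StateSummaryCache down into the key-value store, so callers construct the cache up front. A minimal wiring sketch, assuming the import paths used in this diff and a dataDir variable:
summaryCache := cache.NewStateSummaryCache()
beaconDB, err := db.NewDB(dataDir, summaryCache)
if err != nil {
	return err
}
// The same summaryCache instance can then be shared with any other component
// that needs access to the cached summaries.
_ = beaconDB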

View File

@@ -1,13 +1,14 @@
package db
import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kafka"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
)
// NewDB initializes a new DB with kafka wrapper.
func NewDB(dirPath string) (Database, error) {
db, err := kv.NewKVStore(dirPath)
func NewDB(dirPath string, stateSummaryCache *cache.StateSummaryCache) (Database, error) {
db, err := kv.NewKVStore(dirPath, stateSummaryCache)
if err != nil {
return nil, err
}

View File

@@ -27,6 +27,8 @@ type ReadOnlyDatabase interface {
HasBlock(ctx context.Context, blockRoot [32]byte) bool
GenesisBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error)
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
HighestSlotBlocks(ctx context.Context) ([]*ethpb.SignedBeaconBlock, error)
HighestSlotBlocksBelow(ctx context.Context, slot uint64) ([]*ethpb.SignedBeaconBlock, error)
// Validator related methods.
ValidatorIndex(ctx context.Context, publicKey []byte) (uint64, bool, error)
HasValidatorIndex(ctx context.Context, publicKey []byte) bool
@@ -36,6 +38,8 @@ type ReadOnlyDatabase interface {
HasState(ctx context.Context, blockRoot [32]byte) bool
StateSummary(ctx context.Context, blockRoot [32]byte) (*ethereum_beacon_p2p_v1.StateSummary, error)
HasStateSummary(ctx context.Context, blockRoot [32]byte) bool
HighestSlotStates(ctx context.Context) ([]*state.BeaconState, error)
HighestSlotStatesBelow(ctx context.Context, slot uint64) ([]*state.BeaconState, error)
// Slashing operations.
ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.ProposerSlashing, error)
AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.AttesterSlashing, error)
@@ -52,9 +56,9 @@ type ReadOnlyDatabase interface {
ArchivedCommitteeInfo(ctx context.Context, epoch uint64) (*ethereum_beacon_p2p_v1.ArchivedCommitteeInfo, error)
ArchivedBalances(ctx context.Context, epoch uint64) ([]uint64, error)
ArchivedValidatorParticipation(ctx context.Context, epoch uint64) (*eth.ValidatorParticipation, error)
ArchivedPointState(ctx context.Context, index uint64) (*state.BeaconState, error)
ArchivedPointRoot(ctx context.Context, index uint64) [32]byte
HasArchivedPoint(ctx context.Context, index uint64) bool
LastArchivedIndexRoot(ctx context.Context) [32]byte
// Deposit contract related handlers.
DepositContractAddress(ctx context.Context) ([]byte, error)
// Powchain operations.
@@ -86,6 +90,7 @@ type NoHeadAccessDatabase interface {
DeleteState(ctx context.Context, blockRoot [32]byte) error
DeleteStates(ctx context.Context, blockRoots [][32]byte) error
SaveStateSummary(ctx context.Context, summary *ethereum_beacon_p2p_v1.StateSummary) error
SaveStateSummaries(ctx context.Context, summaries []*ethereum_beacon_p2p_v1.StateSummary) error
// Slashing operations.
SaveProposerSlashing(ctx context.Context, slashing *eth.ProposerSlashing) error
SaveAttesterSlashing(ctx context.Context, slashing *eth.AttesterSlashing) error
@@ -102,8 +107,8 @@ type NoHeadAccessDatabase interface {
SaveArchivedCommitteeInfo(ctx context.Context, epoch uint64, info *ethereum_beacon_p2p_v1.ArchivedCommitteeInfo) error
SaveArchivedBalances(ctx context.Context, epoch uint64, balances []uint64) error
SaveArchivedValidatorParticipation(ctx context.Context, epoch uint64, part *eth.ValidatorParticipation) error
SaveArchivedPointState(ctx context.Context, state *state.BeaconState, index uint64) error
SaveArchivedPointRoot(ctx context.Context, blockRoot [32]byte, index uint64) error
SaveLastArchivedIndex(ctx context.Context, index uint64) error
// Deposit contract related handlers.
SaveDepositContractAddress(ctx context.Context, addr common.Address) error
// Powchain operations.

View File

@@ -238,6 +238,11 @@ func (e Exporter) SaveStateSummary(ctx context.Context, summary *pb.StateSummary
return e.db.SaveStateSummary(ctx, summary)
}
// SaveStateSummaries -- passthrough.
func (e Exporter) SaveStateSummaries(ctx context.Context, summaries []*pb.StateSummary) error {
return e.db.SaveStateSummaries(ctx, summaries)
}
// SaveStates -- passthrough.
func (e Exporter) SaveStates(ctx context.Context, states []*state.BeaconState, blockRoots [][32]byte) error {
return e.db.SaveStates(ctx, states, blockRoots)
@@ -328,21 +333,11 @@ func (e Exporter) SavePowchainData(ctx context.Context, data *db.ETH1ChainData)
return e.db.SavePowchainData(ctx, data)
}
// SaveArchivedPointState -- passthrough
func (e Exporter) SaveArchivedPointState(ctx context.Context, state *state.BeaconState, index uint64) error {
return e.db.SaveArchivedPointState(ctx, state, index)
}
// SaveArchivedPointRoot -- passthrough
func (e Exporter) SaveArchivedPointRoot(ctx context.Context, blockRoot [32]byte, index uint64) error {
return e.db.SaveArchivedPointRoot(ctx, blockRoot, index)
}
// ArchivedPointState -- passthrough
func (e Exporter) ArchivedPointState(ctx context.Context, index uint64) (*state.BeaconState, error) {
return e.db.ArchivedPointState(ctx, index)
}
// ArchivedPointRoot -- passthrough
func (e Exporter) ArchivedPointRoot(ctx context.Context, index uint64) [32]byte {
return e.db.ArchivedPointRoot(ctx, index)
@@ -352,3 +347,33 @@ func (e Exporter) ArchivedPointRoot(ctx context.Context, index uint64) [32]byte
func (e Exporter) HasArchivedPoint(ctx context.Context, index uint64) bool {
return e.db.HasArchivedPoint(ctx, index)
}
// LastArchivedIndexRoot -- passthrough
func (e Exporter) LastArchivedIndexRoot(ctx context.Context) [32]byte {
return e.db.LastArchivedIndexRoot(ctx)
}
// HighestSlotBlocks -- passthrough
func (e Exporter) HighestSlotBlocks(ctx context.Context) ([]*ethpb.SignedBeaconBlock, error) {
return e.db.HighestSlotBlocks(ctx)
}
// HighestSlotBlocksBelow -- passthrough
func (e Exporter) HighestSlotBlocksBelow(ctx context.Context, slot uint64) ([]*ethpb.SignedBeaconBlock, error) {
return e.db.HighestSlotBlocksBelow(ctx, slot)
}
// HighestSlotStates -- passthrough
func (e Exporter) HighestSlotStates(ctx context.Context) ([]*state.BeaconState, error) {
return e.db.HighestSlotStates(ctx)
}
// HighestSlotStatesBelow -- passthrough
func (e Exporter) HighestSlotStatesBelow(ctx context.Context, slot uint64) ([]*state.BeaconState, error) {
return e.db.HighestSlotStatesBelow(ctx, slot)
}
// SaveLastArchivedIndex -- passthrough
func (e Exporter) SaveLastArchivedIndex(ctx context.Context, index uint64) error {
return e.db.SaveLastArchivedIndex(ctx, index)
}

View File

@@ -8,6 +8,7 @@ go_library(
"attestations.go",
"backup.go",
"blocks.go",
"check_historical_state.go",
"checkpoint.go",
"deposit_contract.go",
"encoding.go",
@@ -15,6 +16,7 @@ go_library(
"kv.go",
"operations.go",
"powchain.go",
"regen_historical_states.go",
"schema.go",
"slashings.go",
"state.go",
@@ -25,27 +27,32 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/kv",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/cmd:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"//shared/traceutil:go_default_library",
"@com_github_boltdb_bolt//:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_golang_snappy//:go_default_library",
"@com_github_mdlayher_prombolt//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_prysmaticlabs_prombbolt//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_etcd_go_bbolt//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -71,6 +78,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",

View File

@@ -4,9 +4,9 @@ import (
"context"
"encoding/binary"
"github.com/boltdb/bolt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

View File

@@ -2,34 +2,13 @@ package kv
import (
"context"
"encoding/binary"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// SaveArchivedPointState saves an archived point state to the DB. This is used for cold state management.
// An archive point index is `slot / slots_per_archive_point`.
func (k *Store) SaveArchivedPointState(ctx context.Context, state *state.BeaconState, index uint64) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveArchivedPointState")
defer span.End()
if state == nil {
return errors.New("nil state")
}
enc, err := encode(state.InnerStateUnsafe())
if err != nil {
return err
}
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(archivedIndexStateBucket)
return bucket.Put(uint64ToBytes(index), enc)
})
}
// SaveArchivedPointRoot saves an archived point root to the DB. This is used for cold state management.
func (k *Store) SaveArchivedPointRoot(ctx context.Context, blockRoot [32]byte, index uint64) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveArchivedPointRoot")
@@ -41,30 +20,49 @@ func (k *Store) SaveArchivedPointRoot(ctx context.Context, blockRoot [32]byte, i
})
}
// ArchivedPointState returns the state of an archived point from the DB.
// This is essential for cold state management and to restore a cold state.
func (k *Store) ArchivedPointState(ctx context.Context, index uint64) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointState")
// SaveLastArchivedIndex to the db.
func (k *Store) SaveLastArchivedIndex(ctx context.Context, index uint64) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
var s *pb.BeaconState
err := k.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(archivedIndexStateBucket)
enc := bucket.Get(uint64ToBytes(index))
if enc == nil {
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(archivedIndexRootBucket)
return bucket.Put(lastArchivedIndexKey, uint64ToBytes(index))
})
}
// LastArchivedIndex from the db.
func (k *Store) LastArchivedIndex(ctx context.Context) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedIndex")
defer span.End()
var index uint64
err := k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(archivedIndexRootBucket)
b := bucket.Get(lastArchivedIndexKey)
index = binary.LittleEndian.Uint64(b)
return nil
})
return index, err
}
// LastArchivedIndexRoot from the db.
func (k *Store) LastArchivedIndexRoot(ctx context.Context) [32]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedIndexRoot")
defer span.End()
var blockRoot []byte
// #nosec G104. Always returns nil.
k.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(archivedIndexRootBucket)
lastArchivedIndex := bucket.Get(lastArchivedIndexKey)
if lastArchivedIndex == nil {
return nil
}
var err error
s, err = createState(enc)
return err
blockRoot = bucket.Get(lastArchivedIndex)
return nil
})
if err != nil {
return nil, err
}
if s == nil {
return nil, nil
}
return state.InitializeFromProtoUnsafe(s)
return bytesutil.ToBytes32(blockRoot)
}
// ArchivedPointRoot returns the block root of an archived point from the DB.
@@ -92,9 +90,7 @@ func (k *Store) HasArchivedPoint(ctx context.Context, index uint64) bool {
// #nosec G104. Always returns nil.
k.db.View(func(tx *bolt.Tx) error {
iBucket := tx.Bucket(archivedIndexRootBucket)
sBucket := tx.Bucket(archivedIndexStateBucket)
exists = iBucket.Get(uint64ToBytes(index)) != nil &&
sBucket.Get(uint64ToBytes(index)) != nil
exists = iBucket.Get(uint64ToBytes(index)) != nil
return nil
})
return exists
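
As a side note on the `slot / slots_per_archive_point` convention mentioned in the SaveArchivedPointState comment above, a minimal standalone sketch; the constant value here is illustrative, not the canonical BeaconConfig value:

package main

import "fmt"

// slotsPerArchivedPoint mirrors params.BeaconConfig().SlotsPerArchivedPoint;
// 2048 is an assumed illustrative value.
const slotsPerArchivedPoint uint64 = 2048

// archivedIndex applies the convention from the comment above:
// an archive point index is slot / slots_per_archive_point.
func archivedIndex(slot uint64) uint64 {
	return slot / slotsPerArchivedPoint
}

func main() {
	for _, slot := range []uint64{0, 2047, 2048, 10000} {
		fmt.Printf("slot %d -> archived index %d\n", slot, archivedIndex(slot))
	}
}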

View File

@@ -2,11 +2,7 @@ package kv
import (
"context"
"reflect"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
func TestArchivedPointIndexRoot_CanSaveRetrieve(t *testing.T) {
@@ -30,63 +26,29 @@ func TestArchivedPointIndexRoot_CanSaveRetrieve(t *testing.T) {
}
}
func TestArchivedPointIndexState_CanSaveRetrieve(t *testing.T) {
func TestLastArchivedPoint_CanRetrieve(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
i1 := uint64(100)
s := &pb.BeaconState{Slot: 100}
st, err := state.InitializeFromProto(s)
if err != nil {
if err := db.SaveArchivedPointRoot(ctx, [32]byte{'A'}, 1); err != nil {
t.Fatal(err)
}
received, err := db.ArchivedPointState(ctx, i1)
if err != nil {
t.Fatal(err)
}
if received != nil {
t.Fatal("Should not have been saved")
}
if err := db.SaveArchivedPointState(ctx, st, i1); err != nil {
if err := db.SaveArchivedPointRoot(ctx, [32]byte{'B'}, 3); err != nil {
t.Fatal(err)
}
received, err = db.ArchivedPointState(ctx, i1)
if err != nil {
if err := db.SaveLastArchivedIndex(ctx, 1); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(received, st) {
t.Error("Should have been saved")
}
}
func TestArchivedPointIndexHas_CanRetrieve(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
i1 := uint64(100)
s := &pb.BeaconState{Slot: 100}
st, err := state.InitializeFromProto(s)
if err != nil {
t.Fatal(err)
}
r1 := [32]byte{'A'}
if db.HasArchivedPoint(ctx, i1) {
t.Fatal("Should have have an archived point")
}
if err := db.SaveArchivedPointState(ctx, st, i1); err != nil {
t.Fatal(err)
}
if db.HasArchivedPoint(ctx, i1) {
t.Fatal("Should have have an archived point")
}
if err := db.SaveArchivedPointRoot(ctx, r1, i1); err != nil {
t.Fatal(err)
}
if !db.HasArchivedPoint(ctx, i1) {
t.Fatal("Should have an archived point")
if db.LastArchivedIndexRoot(ctx) != [32]byte{'A'} {
t.Error("Did not get wanted root")
}
if err := db.SaveLastArchivedIndex(ctx, 3); err != nil {
t.Fatal(err)
}
if db.LastArchivedIndexRoot(ctx) != [32]byte{'B'} {
t.Error("Did not get wanted root")
}
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
@@ -12,6 +11,7 @@ import (
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"github.com/prysmaticlabs/prysm/shared/traceutil"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

View File

@@ -6,9 +6,9 @@ import (
"os"
"path"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

View File

@@ -4,18 +4,21 @@ import (
"bytes"
"context"
"fmt"
"math"
"strconv"
"github.com/boltdb/bolt"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
log "github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
@@ -69,50 +72,11 @@ func (k *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]*ethpb.Si
err := k.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
// If no filter criteria are specified, return an error.
if f == nil {
return errors.New("must specify a filter criteria for retrieving blocks")
}
// Creates a list of indices from the passed in filter values, such as:
// []byte("0x2093923") in the parent root indices bucket to be used for looking up
// block roots that were stored under each of those indices for O(1) lookup.
indicesByBucket, err := createBlockIndicesFromFilters(f)
keys, err := getBlockRootsByFilter(ctx, tx, f)
if err != nil {
return errors.Wrap(err, "could not determine lookup indices")
return err
}
// We retrieve block roots that match a filter criteria of slot ranges, if specified.
filtersMap := f.Filters()
rootsBySlotRange := fetchBlockRootsBySlotRange(
tx.Bucket(blockSlotIndicesBucket),
filtersMap[filters.StartSlot],
filtersMap[filters.EndSlot],
filtersMap[filters.StartEpoch],
filtersMap[filters.EndEpoch],
filtersMap[filters.SlotStep],
)
// Once we have a list of block roots that correspond to each
// lookup index, we find the intersection across all of them and use
// that list of roots to look up the blocks. These blocks will
// meet the filter criteria.
indices := lookupValuesForIndices(indicesByBucket, tx)
keys := rootsBySlotRange
if len(indices) > 0 {
// If we have found indices that meet the filter criteria, and there are also
// block roots that meet the slot range filter criteria, we find the intersection
// between these two sets of roots.
if len(rootsBySlotRange) > 0 {
joined := append([][][]byte{keys}, indices...)
keys = sliceutil.IntersectionByteSlices(joined...)
} else {
// If we have found indices that meet the filter criteria, but there are no block roots
// that meet the slot range filter criteria, we find the intersection
// of the regular filter indices.
keys = sliceutil.IntersectionByteSlices(indices...)
}
}
for i := 0; i < len(keys); i++ {
encoded := bkt.Get(keys[i])
block := &ethpb.SignedBeaconBlock{}
@@ -132,48 +96,11 @@ func (k *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]b
defer span.End()
blockRoots := make([][32]byte, 0)
err := k.db.View(func(tx *bolt.Tx) error {
// If no filter criteria are specified, return an error.
if f == nil {
return errors.New("must specify a filter criteria for retrieving block roots")
}
// Creates a list of indices from the passed in filter values, such as:
// []byte("0x2093923") in the parent root indices bucket to be used for looking up
// block roots that were stored under each of those indices for O(1) lookup.
indicesByBucket, err := createBlockIndicesFromFilters(f)
keys, err := getBlockRootsByFilter(ctx, tx, f)
if err != nil {
return errors.Wrap(err, "could not determine lookup indices")
return err
}
// We retrieve block roots that match a filter criteria of slot ranges, if specified.
filtersMap := f.Filters()
rootsBySlotRange := fetchBlockRootsBySlotRange(
tx.Bucket(blockSlotIndicesBucket),
filtersMap[filters.StartSlot],
filtersMap[filters.EndSlot],
filtersMap[filters.StartEpoch],
filtersMap[filters.EndEpoch],
filtersMap[filters.SlotStep],
)
// Once we have a list of block roots that correspond to each
// lookup index, we find the intersection across all of them.
indices := lookupValuesForIndices(indicesByBucket, tx)
keys := rootsBySlotRange
if len(indices) > 0 {
// If we have found indices that meet the filter criteria, and there are also
// block roots that meet the slot range filter criteria, we find the intersection
// between these two sets of roots.
if len(rootsBySlotRange) > 0 {
joined := append([][][]byte{keys}, indices...)
keys = sliceutil.IntersectionByteSlices(joined...)
} else {
// If we have found indices that meet the filter criteria, but there are no block roots
// that meet the slot range filter criteria, we find the intersection
// of the regular filter indices.
keys = sliceutil.IntersectionByteSlices(indices...)
}
}
for i := 0; i < len(keys); i++ {
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
}
@@ -221,6 +148,9 @@ func (k *Store) DeleteBlock(ctx context.Context, blockRoot [32]byte) error {
return errors.Wrap(err, "could not delete root for DB indices")
}
k.blockCache.Del(string(blockRoot[:]))
if err := k.clearBlockSlotBitField(ctx, tx, block.Block.Slot); err != nil {
return err
}
return bkt.Delete(blockRoot[:])
})
}
@@ -246,6 +176,9 @@ func (k *Store) DeleteBlocks(ctx context.Context, blockRoots [][32]byte) error {
return errors.Wrap(err, "could not delete root for DB indices")
}
k.blockCache.Del(string(blockRoot[:]))
if err := k.clearBlockSlotBitField(ctx, tx, block.Block.Slot); err != nil {
return err
}
if err := bkt.Delete(blockRoot[:]); err != nil {
return err
}
@@ -258,7 +191,7 @@ func (k *Store) DeleteBlocks(ctx context.Context, blockRoots [][32]byte) error {
func (k *Store) SaveBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlock")
defer span.End()
blockRoot, err := ssz.HashTreeRoot(signed.Block)
blockRoot, err := stateutil.BlockRoot(signed.Block)
if err != nil {
return err
}
@@ -266,6 +199,10 @@ func (k *Store) SaveBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
return nil
}
return k.db.Update(func(tx *bolt.Tx) error {
if err := k.setBlockSlotBitField(ctx, tx, signed.Block.Slot); err != nil {
return err
}
bkt := tx.Bucket(blocksBucket)
if existingBlock := bkt.Get(blockRoot[:]); existingBlock != nil {
return nil
@@ -289,14 +226,18 @@ func (k *Store) SaveBlocks(ctx context.Context, blocks []*ethpb.SignedBeaconBloc
defer span.End()
return k.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
for _, block := range blocks {
blockRoot, err := ssz.HashTreeRoot(block.Block)
if err := k.setBlockSlotBitField(ctx, tx, block.Block.Slot); err != nil {
return err
}
blockRoot, err := stateutil.BlockRoot(block.Block)
if err != nil {
return err
}
bkt := tx.Bucket(blocksBucket)
if existingBlock := bkt.Get(blockRoot[:]); existingBlock != nil {
return nil
continue
}
enc, err := encode(block)
if err != nil {
@@ -307,6 +248,7 @@ func (k *Store) SaveBlocks(ctx context.Context, blocks []*ethpb.SignedBeaconBloc
return errors.Wrap(err, "could not update DB indices")
}
k.blockCache.Set(string(blockRoot[:]), block, int64(len(enc)))
if err := bkt.Put(blockRoot[:], enc); err != nil {
return err
}
@@ -320,9 +262,16 @@ func (k *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
return k.db.Update(func(tx *bolt.Tx) error {
if tx.Bucket(stateBucket).Get(blockRoot[:]) == nil {
return errors.New("no state found with head block root")
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil && !k.stateSummaryCache.Has(blockRoot) {
return errors.New("no state summary found with head block root")
}
} else {
if tx.Bucket(stateBucket).Get(blockRoot[:]) == nil {
return errors.New("no state found with head block root")
}
}
bucket := tx.Bucket(blocksBucket)
return bucket.Put(headBlockRootKey, blockRoot[:])
})
@@ -356,6 +305,189 @@ func (k *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
})
}
// HighestSlotBlocks returns the blocks with the highest slot from the db.
func (k *Store) HighestSlotBlocks(ctx context.Context) ([]*ethpb.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotBlocks")
defer span.End()
blocks := make([]*ethpb.SignedBeaconBlock, 0)
err := k.db.View(func(tx *bolt.Tx) error {
sBkt := tx.Bucket(slotsHasObjectBucket)
savedSlots := sBkt.Get(savedBlockSlotsKey)
highestIndex, err := bytesutil.HighestBitIndex(savedSlots)
if err != nil {
return err
}
blocks, err = k.blocksAtSlotBitfieldIndex(ctx, tx, highestIndex)
if err != nil {
return err
}
return nil
})
return blocks, err
}
// HighestSlotBlocksBelow returns the blocks with the highest slot below the input slot from the db.
func (k *Store) HighestSlotBlocksBelow(ctx context.Context, slot uint64) ([]*ethpb.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotBlocksBelow")
defer span.End()
blocks := make([]*ethpb.SignedBeaconBlock, 0)
err := k.db.View(func(tx *bolt.Tx) error {
sBkt := tx.Bucket(slotsHasObjectBucket)
savedSlots := sBkt.Get(savedBlockSlotsKey)
if len(savedSlots) == 0 {
savedSlots = bytesutil.MakeEmptyBitlists(int(slot))
}
highestIndex, err := bytesutil.HighestBitIndexAt(savedSlots, int(slot))
if err != nil {
return err
}
blocks, err = k.blocksAtSlotBitfieldIndex(ctx, tx, highestIndex)
if err != nil {
return err
}
return nil
})
return blocks, err
}
// blocksAtSlotBitfieldIndex retrieves the blocks in DB given the input index. The index represents
// the position in the slot bitfield that the saved block maps to.
func (k *Store) blocksAtSlotBitfieldIndex(ctx context.Context, tx *bolt.Tx, index int) ([]*ethpb.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.blocksAtSlotBitfieldIndex")
defer span.End()
highestSlot := index - 1
highestSlot = int(math.Max(0, float64(highestSlot)))
if highestSlot == 0 {
gBlock, err := k.GenesisBlock(ctx)
if err != nil {
return nil, err
}
return []*ethpb.SignedBeaconBlock{gBlock}, nil
}
f := filters.NewFilter().SetStartSlot(uint64(highestSlot)).SetEndSlot(uint64(highestSlot))
keys, err := getBlockRootsByFilter(ctx, tx, f)
if err != nil {
return nil, err
}
blocks := make([]*ethpb.SignedBeaconBlock, 0, len(keys))
bBkt := tx.Bucket(blocksBucket)
for i := 0; i < len(keys); i++ {
encoded := bBkt.Get(keys[i])
block := &ethpb.SignedBeaconBlock{}
if err := decode(encoded, block); err != nil {
return nil, err
}
blocks = append(blocks, block)
}
return blocks, err
}
// setBlockSlotBitField sets the block slot bit in DB.
// This helps to track which slot has a saved block in db.
func (k *Store) setBlockSlotBitField(ctx context.Context, tx *bolt.Tx, slot uint64) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.setBlockSlotBitField")
defer span.End()
k.blockSlotBitLock.Lock()
defer k.blockSlotBitLock.Unlock()
bucket := tx.Bucket(slotsHasObjectBucket)
slotBitfields := bucket.Get(savedBlockSlotsKey)
// Copy is needed to avoid unsafe pointer conversions.
// See: https://github.com/etcd-io/bbolt/pull/201
tmp := make([]byte, len(slotBitfields))
copy(tmp, slotBitfields)
slotBitfields = bytesutil.SetBit(tmp, int(slot))
return bucket.Put(savedBlockSlotsKey, slotBitfields)
}
// clearBlockSlotBitField clears the block slot bit in DB.
// This helps to track which slot has a saved block in db.
func (k *Store) clearBlockSlotBitField(ctx context.Context, tx *bolt.Tx, slot uint64) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.clearBlockSlotBitField")
defer span.End()
k.blockSlotBitLock.Lock()
defer k.blockSlotBitLock.Unlock()
bucket := tx.Bucket(slotsHasObjectBucket)
slotBitfields := bucket.Get(savedBlockSlotsKey)
// Copy is needed to avoid unsafe pointer conversions.
// See: https://github.com/etcd-io/bbolt/pull/201
tmp := make([]byte, len(slotBitfields))
copy(tmp, slotBitfields)
slotBitfields = bytesutil.ClearBit(tmp, int(slot))
return bucket.Put(savedBlockSlotsKey, slotBitfields)
}
// getBlockRootsByFilter retrieves the block roots given the filter criteria.
func getBlockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.getBlockRootsByFilter")
defer span.End()
// If no filter criteria are specified, return an error.
if f == nil {
return nil, errors.New("must specify a filter criteria for retrieving blocks")
}
// Creates a list of indices from the passed in filter values, such as:
// []byte("0x2093923") in the parent root indices bucket to be used for looking up
// block roots that were stored under each of those indices for O(1) lookup.
indicesByBucket, err := createBlockIndicesFromFilters(f)
if err != nil {
return nil, errors.Wrap(err, "could not determine lookup indices")
}
// We retrieve block roots that match a filter criteria of slot ranges, if specified.
filtersMap := f.Filters()
rootsBySlotRange := fetchBlockRootsBySlotRange(
tx.Bucket(blockSlotIndicesBucket),
filtersMap[filters.StartSlot],
filtersMap[filters.EndSlot],
filtersMap[filters.StartEpoch],
filtersMap[filters.EndEpoch],
filtersMap[filters.SlotStep],
)
// Once we have a list of block roots that correspond to each
// lookup index, we find the intersection across all of them and use
// that list of roots to look up the blocks. These blocks will
// meet the filter criteria.
indices := lookupValuesForIndices(indicesByBucket, tx)
keys := rootsBySlotRange
if len(indices) > 0 {
// If we have found indices that meet the filter criteria, and there are also
// block roots that meet the slot range filter criteria, we find the intersection
// between these two sets of roots.
if len(rootsBySlotRange) > 0 {
joined := append([][][]byte{keys}, indices...)
keys = sliceutil.IntersectionByteSlices(joined...)
} else {
// If we have found indices that meet the filter criteria, but there are no block roots
// that meet the slot range filter criteria, we find the intersection
// of the regular filter indices.
keys = sliceutil.IntersectionByteSlices(indices...)
}
}
return keys, nil
}
// fetchBlockRootsBySlotRange looks into a boltDB bucket and performs a binary search
// range scan using sorted left-padded byte keys using a start slot and an end slot.
// If both the start and end slot are the same, and are 0, the function returns nil.
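
The slot bitfield bookkeeping above (setBlockSlotBitField, HighestBitIndex, blocksAtSlotBitfieldIndex) boils down to marking one bit per slot and scanning for the highest set bit. A minimal sketch of that idea with simplified stand-in helpers; the real bytesutil helpers may differ in growth strategy, bit ordering, and in the offset implied by the `index - 1` adjustment above:

package main

import (
	"errors"
	"fmt"
)

// setBit and highestBitIndex are simplified stand-ins for the
// bytesutil.SetBit and bytesutil.HighestBitIndex helpers used above.
func setBit(b []byte, i int) []byte {
	for len(b)*8 <= i {
		b = append(b, 0)
	}
	b[i/8] |= 1 << uint(i%8)
	return b
}

func highestBitIndex(b []byte) (int, error) {
	for i := len(b)*8 - 1; i >= 0; i-- {
		if b[i/8]&(1<<uint(i%8)) != 0 {
			return i, nil
		}
	}
	return 0, errors.New("no bits set")
}

func main() {
	var saved []byte
	// Mark slots 1, 50 and 999 as having a saved block, as SaveBlock would.
	for _, slot := range []int{1, 50, 999} {
		saved = setBit(saved, slot)
	}
	idx, err := highestBitIndex(saved)
	if err != nil {
		panic(err)
	}
	fmt.Println("highest saved slot:", idx) // 999
}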

View File

@@ -419,3 +419,261 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) {
}
}
}
func TestStore_SaveBlock_CanGetHighest(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
if err := db.SaveBlock(ctx, block); err != nil {
t.Fatal(err)
}
highestSavedBlock, err := db.HighestSlotBlocks(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(block, highestSavedBlock[0]) {
t.Errorf("Wanted %v, received %v", block, highestSavedBlock)
}
block = &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 999}}
if err := db.SaveBlock(ctx, block); err != nil {
t.Fatal(err)
}
highestSavedBlock, err = db.HighestSlotBlocks(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(block, highestSavedBlock[0]) {
t.Errorf("Wanted %v, received %v", block, highestSavedBlock)
}
block = &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 300000000}} // 100 years.
if err := db.SaveBlock(ctx, block); err != nil {
t.Fatal(err)
}
highestSavedBlock, err = db.HighestSlotBlocks(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(block, highestSavedBlock[0]) {
t.Errorf("Wanted %v, received %v", block, highestSavedBlock)
}
}
func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
block1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
db.SaveBlock(ctx, block1)
block2 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 10}}
db.SaveBlock(ctx, block2)
block3 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 100}}
db.SaveBlock(ctx, block3)
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(block1, highestAt[0]) {
t.Errorf("Wanted %v, received %v", block1, highestAt)
}
highestAt, err = db.HighestSlotBlocksBelow(ctx, 11)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(block2, highestAt[0]) {
t.Errorf("Wanted %v, received %v", block2, highestAt)
}
highestAt, err = db.HighestSlotBlocksBelow(ctx, 101)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(block3, highestAt[0]) {
t.Errorf("Wanted %v, received %v", block3, highestAt)
}
r3, _ := ssz.HashTreeRoot(block3.Block)
db.DeleteBlock(ctx, r3)
highestAt, err = db.HighestSlotBlocksBelow(ctx, 101)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(block2, highestAt[0]) {
t.Errorf("Wanted %v, received %v", block2, highestAt)
}
}
func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
genesisBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
genesisRoot, _ := ssz.HashTreeRoot(genesisBlock.Block)
db.SaveGenesisBlockRoot(ctx, genesisRoot)
db.SaveBlock(ctx, genesisBlock)
block1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
db.SaveBlock(ctx, block1)
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(block1, highestAt[0]) {
t.Errorf("Wanted %v, received %v", block1, highestAt)
}
highestAt, err = db.HighestSlotBlocksBelow(ctx, 1)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(genesisBlock, highestAt[0]) {
t.Errorf("Wanted %v, received %v", genesisBlock, highestAt)
}
highestAt, err = db.HighestSlotBlocksBelow(ctx, 0)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(genesisBlock, highestAt[0]) {
t.Errorf("Wanted %v, received %v", genesisBlock, highestAt)
}
}
func TestStore_SaveBlocks_CanGetHighest(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
b := make([]*ethpb.SignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
b[i] = &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ParentRoot: []byte("parent"),
Slot: uint64(i),
},
}
}
if err := db.SaveBlocks(ctx, b); err != nil {
t.Fatal(err)
}
highestSavedBlock, err := db.HighestSlotBlocks(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(b[len(b)-1], highestSavedBlock[0]) {
t.Errorf("Wanted %v, received %v", b[len(b)-1], highestSavedBlock)
}
}
func TestStore_SaveBlocks_HasCachedBlocks(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
b := make([]*ethpb.SignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
b[i] = &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ParentRoot: []byte("parent"),
Slot: uint64(i),
},
}
}
if err := db.SaveBlock(ctx, b[0]); err != nil {
t.Fatal(err)
}
if err := db.SaveBlocks(ctx, b); err != nil {
t.Fatal(err)
}
f := filters.NewFilter().SetStartSlot(0).SetEndSlot(500)
blks, err := db.Blocks(ctx, f)
if err != nil {
t.Fatal(err)
}
if len(blks) != 500 {
t.Log(len(blks))
t.Error("Did not get wanted blocks")
}
}
func TestStore_DeleteBlock_CanGetHighest(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
b50 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 50}}
if err := db.SaveBlock(ctx, b50); err != nil {
t.Fatal(err)
}
highestSavedBlock, err := db.HighestSlotBlocks(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(b50, highestSavedBlock[0]) {
t.Errorf("Wanted %v, received %v", b50, highestSavedBlock)
}
b51 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 51}}
r51, _ := ssz.HashTreeRoot(b51.Block)
if err := db.SaveBlock(ctx, b51); err != nil {
t.Fatal(err)
}
highestSavedBlock, err = db.HighestSlotBlocks(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(b51, highestSavedBlock[0]) {
t.Errorf("Wanted %v, received %v", b51, highestSavedBlock)
}
if err := db.DeleteBlock(ctx, r51); err != nil {
t.Fatal(err)
}
highestSavedBlock, err = db.HighestSlotBlocks(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(b50, highestSavedBlock[0]) {
t.Errorf("Wanted %v, received %v", b50, highestSavedBlock)
}
}
func TestStore_DeleteBlocks_CanGetHighest(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
b := make([]*ethpb.SignedBeaconBlock, 100)
r := make([][32]byte, 100)
for i := 0; i < 100; i++ {
b[i] = &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ParentRoot: []byte("parent"),
Slot: uint64(i),
},
}
r[i], _ = ssz.HashTreeRoot(b[i].Block)
}
if err := db.SaveBlocks(ctx, b); err != nil {
t.Fatal(err)
}
if err := db.DeleteBlocks(ctx, [][32]byte{r[99], r[98], r[97]}); err != nil {
t.Fatal(err)
}
highestSavedBlock, err := db.HighestSlotBlocks(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(b[96], highestSavedBlock[0]) {
t.Errorf("Wanted %v, received %v", b[len(b)-1], highestSavedBlock)
}
}

View File

@@ -0,0 +1,55 @@
package kv
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
bolt "go.etcd.io/bbolt"
)
var historicalStateDeletedKey = []byte("historical-states-deleted")
func (kv *Store) ensureNewStateServiceCompatible(ctx context.Context) error {
if !featureconfig.Get().NewStateMgmt {
return kv.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(newStateServiceCompatibleBucket)
return bkt.Put(historicalStateDeletedKey, []byte{0x01})
})
}
var historicalStateDeleted bool
kv.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(newStateServiceCompatibleBucket)
v := bkt.Get(historicalStateDeletedKey)
historicalStateDeleted = len(v) == 1 && v[0] == 0x01
return nil
})
regenHistoricalStatesConfirmed := false
var err error
if historicalStateDeleted {
actionText := "Looks like you stopped using --new-state-mgmt. To reuse it, the node will need " +
"to generate and save historical states. The process may take a while, - do you want to proceed? (Y/N)"
deniedText := "Historical states will not be generated. Please remove usage --new-state-mgmt"
regenHistoricalStatesConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
if !regenHistoricalStatesConfirmed {
return errors.New("exiting... please do not run with flag --new-state-mgmt")
}
if err := kv.regenHistoricalStates(ctx); err != nil {
return errors.Wrap(err, "could not regenerate historical states, please retry")
}
}
return kv.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(newStateServiceCompatibleBucket)
return bkt.Put(historicalStateDeletedKey, []byte{0x00})
})
}
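
For orientation, a small sketch of the decision flow in ensureNewStateServiceCompatible above, with the feature flag, the stored marker byte, and the user confirmation reduced to plain booleans; names and return strings are illustrative only:

package main

import (
	"errors"
	"fmt"
)

// compatAction sketches the flow above: whether the node must regenerate
// historical states before honoring --new-state-mgmt.
func compatAction(newStateMgmt, historicalStateDeleted, userConfirmed bool) (string, error) {
	if !newStateMgmt {
		// Flag off: record that historical states are no longer maintained.
		return "mark historical states deleted (0x01)", nil
	}
	if historicalStateDeleted {
		if !userConfirmed {
			return "", errors.New("exiting... please do not run with flag --new-state-mgmt")
		}
		return "regenerate historical states, then clear marker (0x00)", nil
	}
	return "clear marker (0x00)", nil
}

func main() {
	action, err := compatAction(true, true, true)
	fmt.Println(action, err)
}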

View File

@@ -4,9 +4,11 @@ import (
"context"
"errors"
"github.com/boltdb/bolt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/traceutil"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
@@ -63,12 +65,18 @@ func (k *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.C
}
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the justified beaconState is missing. This may be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) == nil && !k.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root)) {
return errors.New("missing state summary for finalized root")
}
} else {
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the justified beaconState is missing. This may be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
}
}
return bucket.Put(justifiedCheckpointKey, enc)
})
@@ -85,17 +93,24 @@ func (k *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
}
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the finalized beaconState is missing. This would be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) == nil && !k.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root)) {
return errors.New("missing state summary for finalized root")
}
} else {
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the finalized beaconState is missing. This would be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
}
}
if err := bucket.Put(finalizedCheckpointKey, enc); err != nil {
return err
}
return k.updateFinalizedBlockRoots(ctx, tx, checkpoint)
})
}
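
A compact sketch of the guard introduced above for both justified and finalized checkpoints: under --new-state-mgmt a state summary (in DB or cache) must exist for the checkpoint root, otherwise the full state must. Inputs are reduced to illustrative booleans:

package main

import (
	"errors"
	"fmt"
)

// canSaveCheckpoint mirrors the feature-flag guarded validation above.
func canSaveCheckpoint(newStateMgmt, hasSummaryInDB, hasSummaryInCache, hasFullState bool) error {
	if newStateMgmt {
		if !hasSummaryInDB && !hasSummaryInCache {
			return errors.New("missing state summary for checkpoint root")
		}
		return nil
	}
	if !hasFullState {
		return errors.New("missing state for checkpoint root")
	}
	return nil
}

func main() {
	fmt.Println(canSaveCheckpoint(true, false, true, false))   // <nil>
	fmt.Println(canSaveCheckpoint(false, false, false, false)) // error
}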

View File

@@ -4,8 +4,8 @@ import (
"context"
"fmt"
"github.com/boltdb/bolt"
"github.com/ethereum/go-ethereum/common"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

View File

@@ -5,12 +5,12 @@ import (
"context"
"fmt"
"github.com/boltdb/bolt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/traceutil"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
@@ -55,6 +55,7 @@ func (k *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
return err
}
}
blockRoots, err := k.BlockRoots(ctx, filters.NewFilter().
SetStartEpoch(previousFinalizedCheckpoint.Epoch).
SetEndEpoch(checkpoint.Epoch+1),

View File

@@ -1,16 +1,19 @@
package kv
import (
"context"
"os"
"path"
"sync"
"time"
"github.com/boltdb/bolt"
"github.com/dgraph-io/ristretto"
"github.com/mdlayher/prombolt"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
prombolt "github.com/prysmaticlabs/prombbolt"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
bolt "go.etcd.io/bbolt"
)
var _ = iface.Database(&Store{})
@@ -35,12 +38,15 @@ type Store struct {
databasePath string
blockCache *ristretto.Cache
validatorIndexCache *ristretto.Cache
stateSlotBitLock sync.Mutex
blockSlotBitLock sync.Mutex
stateSummaryCache *cache.StateSummaryCache
}
// NewKVStore initializes a new boltDB key-value store at the directory
// path specified, creates the kv-buckets based on the schema, and stores
// an open connection db object as a property of the Store struct.
func NewKVStore(dirPath string) (*Store, error) {
func NewKVStore(dirPath string, stateSummaryCache *cache.StateSummaryCache) (*Store, error) {
if err := os.MkdirAll(dirPath, 0700); err != nil {
return nil, err
}
@@ -76,6 +82,7 @@ func NewKVStore(dirPath string) (*Store, error) {
databasePath: dirPath,
blockCache: blockCache,
validatorIndexCache: validatorCache,
stateSummaryCache: stateSummaryCache,
}
if err := kv.db.Update(func(tx *bolt.Tx) error {
@@ -97,7 +104,7 @@ func NewKVStore(dirPath string) (*Store, error) {
powchainBucket,
stateSummaryBucket,
archivedIndexRootBucket,
archivedIndexStateBucket,
slotsHasObjectBucket,
// Indices buckets.
attestationHeadBlockRootBucket,
attestationSourceRootIndicesBucket,
@@ -107,13 +114,17 @@ func NewKVStore(dirPath string) (*Store, error) {
blockSlotIndicesBucket,
blockParentRootIndicesBucket,
finalizedBlockRootsIndexBucket,
// Migration bucket.
migrationBucket,
// New State Management service bucket.
newStateServiceCompatibleBucket,
)
}); err != nil {
return nil, err
}
if err := kv.ensureNewStateServiceCompatible(context.Background()); err != nil {
return nil, err
}
err = prometheus.Register(createBoltCollector(kv.db))
return kv, err

View File

@@ -8,6 +8,7 @@ import (
"path"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
@@ -21,7 +22,7 @@ func setupDB(t testing.TB) *Store {
if err := os.RemoveAll(path); err != nil {
t.Fatalf("Failed to remove directory: %v", err)
}
db, err := NewKVStore(path)
db, err := NewKVStore(path, cache.NewStateSummaryCache())
if err != nil {
t.Fatalf("Failed to instantiate DB: %v", err)
}

View File

@@ -3,9 +3,9 @@ package kv
import (
"context"
"github.com/boltdb/bolt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

View File

@@ -3,9 +3,9 @@ package kv
import (
"context"
"github.com/boltdb/bolt"
"github.com/gogo/protobuf/proto"
"github.com/prysmaticlabs/prysm/proto/beacon/db"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

View File

@@ -0,0 +1,194 @@
package kv
import (
"context"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
transition "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
func (kv *Store) regenHistoricalStates(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "db.regenHistoricalStates")
defer span.End()
genesisState, err := kv.GenesisState(ctx)
if err != nil {
return err
}
currentState := genesisState.Copy()
startSlot := genesisState.Slot()
// Restore from last archived point if this process was previously interrupted.
slotsPerArchivedPoint := params.BeaconConfig().SlotsPerArchivedPoint
lastArchivedIndex, err := kv.LastArchivedIndex(ctx)
if err != nil {
return err
}
if lastArchivedIndex > 0 {
archivedIndexStart := lastArchivedIndex - 1
wantedSlotBelow := archivedIndexStart*slotsPerArchivedPoint + 1
states, err := kv.HighestSlotStatesBelow(ctx, wantedSlotBelow)
if err != nil {
return err
}
if len(states) == 0 {
return errors.New("states can't be empty")
}
if states[0] == nil {
return errors.New("nil last state")
}
currentState = states[0]
startSlot = currentState.Slot()
}
lastSavedBlockArchivedIndex, err := kv.lastSavedBlockArchivedIndex(ctx)
if err != nil {
return err
}
for i := lastArchivedIndex; i <= lastSavedBlockArchivedIndex; i++ {
targetSlot := startSlot + slotsPerArchivedPoint
filter := filters.NewFilter().SetStartSlot(startSlot + 1).SetEndSlot(targetSlot)
blocks, err := kv.Blocks(ctx, filter)
if err != nil {
return err
}
// Replay blocks and replay slots if necessary.
if len(blocks) > 0 {
for i := 0; i < len(blocks); i++ {
if blocks[i].Block.Slot == 0 {
continue
}
currentState, err = regenHistoricalStateTransition(ctx, currentState, blocks[i])
if err != nil {
return err
}
}
}
if targetSlot > currentState.Slot() {
currentState, err = regenHistoricalStateProcessSlots(ctx, currentState, targetSlot)
if err != nil {
return err
}
}
if len(blocks) > 0 {
// Save the historical root, state and highest index to the DB.
if helpers.IsEpochStart(currentState.Slot()) && currentState.Slot()%slotsPerArchivedPoint == 0 && blocks[len(blocks)-1].Block.Slot%slotsPerArchivedPoint == 0 {
if err := kv.saveArchivedInfo(ctx, currentState, blocks, i); err != nil {
return err
}
log.WithFields(log.Fields{
"currentArchivedIndex/totalArchivedIndices": fmt.Sprintf("%d/%d", i, lastSavedBlockArchivedIndex),
"archivedStateSlot": currentState.Slot()}).Info("Saved historical state")
}
}
startSlot += slotsPerArchivedPoint
}
return nil
}
// This runs state transition to recompute historical state.
func regenHistoricalStateTransition(
ctx context.Context,
state *stateTrie.BeaconState,
signed *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
if signed == nil || signed.Block == nil {
return nil, errors.New("block can't be nil")
}
ctx, span := trace.StartSpan(ctx, "db.regenHistoricalStateTransition")
defer span.End()
var err error
state, err = regenHistoricalStateProcessSlots(ctx, state, signed.Block.Slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slot")
}
state, err = transition.ProcessBlockForStateRoot(ctx, state, signed)
if err != nil {
return nil, errors.Wrap(err, "could not process block")
}
return state, nil
}
// This runs slot transition to recompute historical state.
func regenHistoricalStateProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "db.regenHistoricalStateProcessSlots")
defer span.End()
if state == nil {
return nil, errors.New("state can't be nil")
}
if state.Slot() > slot {
err := fmt.Errorf("expected state.slot %d < slot %d", state.Slot(), slot)
return nil, err
}
if state.Slot() == slot {
return state, nil
}
for state.Slot() < slot {
state, err := transition.ProcessSlot(ctx, state)
if err != nil {
return nil, errors.Wrap(err, "could not process slot")
}
if transition.CanProcessEpoch(state) {
state, err = transition.ProcessEpochPrecompute(ctx, state)
if err != nil {
return nil, errors.Wrap(err, "could not process epoch with optimizations")
}
}
state.SetSlot(state.Slot() + 1)
}
return state, nil
}
// This retrieves the last saved block's archived index.
func (kv *Store) lastSavedBlockArchivedIndex(ctx context.Context) (uint64, error) {
b, err := kv.HighestSlotBlocks(ctx)
if err != nil {
return 0, err
}
if len(b) == 0 {
return 0, errors.New("blocks can't be empty")
}
if b[0] == nil {
return 0, errors.New("nil last block")
}
lastSavedBlockSlot := b[0].Block.Slot
slotsPerArchivedPoint := params.BeaconConfig().SlotsPerArchivedPoint
lastSavedBlockArchivedIndex := lastSavedBlockSlot/slotsPerArchivedPoint - 1
return lastSavedBlockArchivedIndex, nil
}
// This saves archived info (state, root, index) to the db.
func (kv *Store) saveArchivedInfo(ctx context.Context,
currentState *stateTrie.BeaconState,
blocks []*ethpb.SignedBeaconBlock,
archivedIndex uint64) error {
lastBlocksRoot, err := ssz.HashTreeRoot(blocks[len(blocks)-1].Block)
if err != nil {
return err
}
if err := kv.SaveState(ctx, currentState, lastBlocksRoot); err != nil {
return err
}
if err := kv.SaveArchivedPointRoot(ctx, lastBlocksRoot, archivedIndex); err != nil {
return err
}
if err := kv.SaveLastArchivedIndex(ctx, archivedIndex); err != nil {
return err
}
return nil
}
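
A rough sketch of how the loop in regenHistoricalStates above carves the chain into replay windows, one per archived index. The slots-per-archived-point value and the start index are illustrative, and the real code also restores startSlot from the last archived state when resuming:

package main

import "fmt"

// replayWindows returns the (startSlot+1, startSlot+N] block ranges the
// regeneration loop replays, where N is slots-per-archived-point.
func replayWindows(startSlot, slotsPerArchivedPoint, fromIndex, toIndex uint64) [][2]uint64 {
	windows := make([][2]uint64, 0, toIndex-fromIndex+1)
	for i := fromIndex; i <= toIndex; i++ {
		targetSlot := startSlot + slotsPerArchivedPoint
		windows = append(windows, [2]uint64{startSlot + 1, targetSlot})
		startSlot += slotsPerArchivedPoint
	}
	return windows
}

func main() {
	for _, w := range replayWindows(0, 2048, 0, 3) {
		fmt.Printf("replay blocks in slots [%d, %d]\n", w[0], w[1])
	}
}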

View File

@@ -23,7 +23,7 @@ var (
archivedValidatorParticipationBucket = []byte("archived-validator-participation")
powchainBucket = []byte("powchain")
archivedIndexRootBucket = []byte("archived-index-root")
archivedIndexStateBucket = []byte("archived-index-state")
slotsHasObjectBucket = []byte("slots-has-objects")
// Key indices buckets.
blockParentRootIndicesBucket = []byte("block-parent-root-indices")
@@ -42,7 +42,10 @@ var (
justifiedCheckpointKey = []byte("justified-checkpoint")
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastArchivedIndexKey = []byte("last-archived")
savedBlockSlotsKey = []byte("saved-block-slots")
savedStateSlotsKey = []byte("saved-state-slots")
// Migration bucket.
migrationBucket = []byte("migrations")
// New state management service compatibility bucket.
newStateServiceCompatibleBucket = []byte("new-state-compatible")
)

View File

@@ -3,9 +3,9 @@ package kv
import (
"context"
"github.com/boltdb/bolt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

View File

@@ -3,13 +3,16 @@ package kv
import (
"bytes"
"context"
"math"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
@@ -117,7 +120,10 @@ func (k *Store) SaveState(ctx context.Context, state *state.BeaconState, blockRo
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateBucket)
return bucket.Put(blockRoot[:], enc)
if err := bucket.Put(blockRoot[:], enc); err != nil {
return err
}
return k.setStateSlotBitField(ctx, tx, state.Slot())
})
}
@@ -140,6 +146,9 @@ func (k *Store) SaveStates(ctx context.Context, states []*state.BeaconState, blo
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateBucket)
for i, rt := range blockRoots {
if err := k.setStateSlotBitField(ctx, tx, states[i].Slot()); err != nil {
return err
}
err = bucket.Put(rt[:], multipleEncs[i])
if err != nil {
return err
@@ -184,9 +193,23 @@ func (k *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(blocksBucket)
headBlkRoot := bkt.Get(headBlockRootKey)
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil {
return errors.New("cannot delete state without state summary")
}
} else {
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
}
}
slot, err := slotByBlockRoot(ctx, tx, blockRoot[:])
if err != nil {
return err
}
if err := k.clearStateSlotBitField(ctx, tx, slot); err != nil {
return err
}
bkt = tx.Bucket(stateBucket)
@@ -222,17 +245,32 @@ func (k *Store) DeleteStates(ctx context.Context, blockRoots [][32]byte) error {
return err
}
bkt = tx.Bucket(blocksBucket)
headBlkRoot := bkt.Get(headBlockRootKey)
blockBkt := tx.Bucket(blocksBucket)
headBlkRoot := blockBkt.Get(headBlockRootKey)
bkt = tx.Bucket(stateBucket)
c := bkt.Cursor()
for blockRoot, _ := c.First(); blockRoot != nil; blockRoot, _ = c.Next() {
if rootMap[bytesutil.ToBytes32(blockRoot)] {
// Safe guard against deleting genesis, finalized, or head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("could not delete genesis, finalized, or head state")
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil {
return errors.New("cannot delete state without state summary")
}
} else {
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
}
}
slot, err := slotByBlockRoot(ctx, tx, blockRoot)
if err != nil {
return err
}
if err := k.clearStateSlotBitField(ctx, tx, slot); err != nil {
return err
}
if err := c.Delete(); err != nil {
return err
}
@@ -251,3 +289,204 @@ func createState(enc []byte) (*pb.BeaconState, error) {
}
return protoState, nil
}
// slotByBlockRoot retrieves the corresponding slot of the input block root.
func slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []byte) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.slotByBlockRoot")
defer span.End()
if featureconfig.Get().NewStateMgmt {
bkt := tx.Bucket(stateSummaryBucket)
enc := bkt.Get(blockRoot)
if enc == nil {
return 0, errors.New("state summary enc can't be nil")
}
stateSummary := &pb.StateSummary{}
if err := decode(enc, stateSummary); err != nil {
return 0, err
}
return stateSummary.Slot, nil
}
bkt := tx.Bucket(blocksBucket)
enc := bkt.Get(blockRoot)
if enc == nil {
// fallback and check the state.
bkt = tx.Bucket(stateBucket)
enc = bkt.Get(blockRoot)
if enc == nil {
return 0, errors.New("state enc can't be nil")
}
s, err := createState(enc)
if err != nil {
return 0, err
}
if s == nil {
return 0, errors.New("state can't be nil")
}
return s.Slot, nil
}
b := &ethpb.SignedBeaconBlock{}
err := decode(enc, b)
if err != nil {
return 0, err
}
if b.Block == nil {
return 0, errors.New("block can't be nil")
}
return b.Block.Slot, nil
}
// HighestSlotStates returns the states with the highest slot from the db.
// Ideally there should just be one state per slot, but given a validator
// can double propose, a single slot could have multiple block roots and
// resulting states. This returns a list of states.
func (k *Store) HighestSlotStates(ctx context.Context) ([]*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotState")
defer span.End()
var states []*state.BeaconState
err := k.db.View(func(tx *bolt.Tx) error {
slotBkt := tx.Bucket(slotsHasObjectBucket)
savedSlots := slotBkt.Get(savedStateSlotsKey)
highestIndex, err := bytesutil.HighestBitIndex(savedSlots)
if err != nil {
return err
}
states, err = k.statesAtSlotBitfieldIndex(ctx, tx, highestIndex)
return err
})
if err != nil {
return nil, err
}
if len(states) == 0 {
return nil, errors.New("could not get one state")
}
return states, nil
}
// HighestSlotStatesBelow returns the states with the highest slot below the input slot
// from the db. Ideally there should just be one state per slot, but given a validator
// can double propose, a single slot could have multiple block roots and
// resulting states. This returns a list of states.
func (k *Store) HighestSlotStatesBelow(ctx context.Context, slot uint64) ([]*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotStatesBelow")
defer span.End()
var states []*state.BeaconState
err := k.db.View(func(tx *bolt.Tx) error {
slotBkt := tx.Bucket(slotsHasObjectBucket)
savedSlots := slotBkt.Get(savedStateSlotsKey)
if len(savedSlots) == 0 {
savedSlots = bytesutil.MakeEmptyBitlists(int(slot))
}
highestIndex, err := bytesutil.HighestBitIndexAt(savedSlots, int(slot))
if err != nil {
return err
}
states, err = k.statesAtSlotBitfieldIndex(ctx, tx, highestIndex)
return err
})
if err != nil {
return nil, err
}
if len(states) == 0 {
return nil, errors.New("could not get one state")
}
return states, nil
}
// statesAtSlotBitfieldIndex retrieves the states in DB given the input index. The index represents
// the position in the slot bitfield that the saved state maps to.
func (k *Store) statesAtSlotBitfieldIndex(ctx context.Context, tx *bolt.Tx, index int) ([]*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.statesAtSlotBitfieldIndex")
defer span.End()
highestSlot := index - 1
highestSlot = int(math.Max(0, float64(highestSlot)))
if highestSlot == 0 {
gState, err := k.GenesisState(ctx)
if err != nil {
return nil, err
}
return []*state.BeaconState{gState}, nil
}
f := filters.NewFilter().SetStartSlot(uint64(highestSlot)).SetEndSlot(uint64(highestSlot))
keys, err := getBlockRootsByFilter(ctx, tx, f)
if err != nil {
return nil, err
}
if len(keys) == 0 {
return nil, errors.New("could not get one block root to get state")
}
stateBkt := tx.Bucket(stateBucket)
states := make([]*state.BeaconState, 0, len(keys))
for i := range keys {
enc := stateBkt.Get(keys[i][:])
if enc == nil {
continue
}
pbState, err := createState(enc)
if err != nil {
return nil, err
}
s, err := state.InitializeFromProtoUnsafe(pbState)
if err != nil {
return nil, err
}
states = append(states, s)
}
return states, err
}
// setStateSlotBitField sets the state slot bit in DB.
// This helps to track which slot has a saved state in db.
func (k *Store) setStateSlotBitField(ctx context.Context, tx *bolt.Tx, slot uint64) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.setStateSlotBitField")
defer span.End()
k.stateSlotBitLock.Lock()
defer k.stateSlotBitLock.Unlock()
bucket := tx.Bucket(slotsHasObjectBucket)
slotBitfields := bucket.Get(savedStateSlotsKey)
// Copy is needed to avoid unsafe pointer conversions.
// See: https://github.com/etcd-io/bbolt/pull/201
tmp := make([]byte, len(slotBitfields))
copy(tmp, slotBitfields)
slotBitfields = bytesutil.SetBit(tmp, int(slot))
return bucket.Put(savedStateSlotsKey, slotBitfields)
}
// clearStateSlotBitField clears the state slot bit in DB.
// This helps to track which slot has a saved state in db.
func (k *Store) clearStateSlotBitField(ctx context.Context, tx *bolt.Tx, slot uint64) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.clearStateSlotBitField")
defer span.End()
k.stateSlotBitLock.Lock()
defer k.stateSlotBitLock.Unlock()
bucket := tx.Bucket(slotsHasObjectBucket)
slotBitfields := bucket.Get(savedStateSlotsKey)
// Copy is needed to avoid unsafe pointer conversions.
// See: https://github.com/etcd-io/bbolt/pull/201
tmp := make([]byte, len(slotBitfields))
copy(tmp, slotBitfields)
slotBitfields = bytesutil.ClearBit(tmp, int(slot))
return bucket.Put(savedStateSlotsKey, slotBitfields)
}
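
HighestSlotStatesBelow above resolves "the highest slot strictly below the input slot" via bytesutil.HighestBitIndexAt over the saved-state bitfield. A simplified standalone stand-in for that lookup; the real helper's bounds handling and the offset implied by the `index - 1` adjustment in statesAtSlotBitfieldIndex may differ:

package main

import (
	"errors"
	"fmt"
)

// highestBitIndexAt returns the highest set bit strictly below the given
// position, mirroring how HighestSlotStatesBelow picks a slot.
func highestBitIndexAt(b []byte, at int) (int, error) {
	for i := at - 1; i >= 0; i-- {
		if i/8 < len(b) && b[i/8]&(1<<uint(i%8)) != 0 {
			return i, nil
		}
	}
	return 0, errors.New("no bits set below the given position")
}

func main() {
	// Slots 1 and 100 have saved states.
	saved := make([]byte, 16)
	for _, slot := range []int{1, 100} {
		saved[slot/8] |= 1 << uint(slot%8)
	}
	for _, below := range []int{2, 101} {
		idx, err := highestBitIndexAt(saved, below)
		fmt.Printf("below slot %d -> slot %d (err=%v)\n", below, idx, err)
	}
}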

View File

@@ -3,8 +3,8 @@ package kv
import (
"context"
"github.com/boltdb/bolt"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
@@ -23,6 +23,26 @@ func (k *Store) SaveStateSummary(ctx context.Context, summary *pb.StateSummary)
})
}
// SaveStateSummaries saves state summary objects to the DB.
func (k *Store) SaveStateSummaries(ctx context.Context, summaries []*pb.StateSummary) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveStateSummaries")
defer span.End()
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSummaryBucket)
for _, summary := range summaries {
enc, err := encode(summary)
if err != nil {
return err
}
if err := bucket.Put(summary.Root, enc); err != nil {
return err
}
}
return nil
})
}
// StateSummary returns the state summary object from the db using input block root.
func (k *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*pb.StateSummary, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.StateSummary")

View File

@@ -15,7 +15,7 @@ func TestStateSummary_CanSaveRretrieve(t *testing.T) {
ctx := context.Background()
r1 := bytesutil.ToBytes32([]byte{'A'})
r2 := bytesutil.ToBytes32([]byte{'B'})
s1 := &pb.StateSummary{Slot: 1, Root: r1[:], BoundaryRoot: r2[:]}
s1 := &pb.StateSummary{Slot: 1, Root: r1[:]}
// State summary should not exist yet.
if db.HasStateSummary(ctx, r1) {
@@ -38,7 +38,7 @@ func TestStateSummary_CanSaveRretrieve(t *testing.T) {
}
// Save a new state summary.
s2 := &pb.StateSummary{Slot: 2, Root: r2[:], BoundaryRoot: r1[:]}
s2 := &pb.StateSummary{Slot: 2, Root: r2[:]}
// State summary should not exist yet.
if db.HasStateSummary(ctx, r2) {

View File

@@ -5,6 +5,7 @@ import (
"reflect"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -286,3 +287,214 @@ func TestStore_DeleteHeadState(t *testing.T) {
t.Error("Did not receive wanted error")
}
}
func TestStore_SaveDeleteState_CanGetHighest(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
s0 := &pb.BeaconState{Slot: 1}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
r, _ := ssz.HashTreeRoot(b.Block)
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
st, err := state.InitializeFromProto(s0)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(context.Background(), st, r); err != nil {
t.Fatal(err)
}
s1 := &pb.BeaconState{Slot: 999}
b = &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 999}}
r1, _ := ssz.HashTreeRoot(b.Block)
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
st, err = state.InitializeFromProto(s1)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(context.Background(), st, r1); err != nil {
t.Fatal(err)
}
highest, err := db.HighestSlotStates(context.Background())
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), s1) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s1)
}
s2 := &pb.BeaconState{Slot: 1000}
b = &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1000}}
r2, _ := ssz.HashTreeRoot(b.Block)
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
st, err = state.InitializeFromProto(s2)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(context.Background(), st, r2); err != nil {
t.Fatal(err)
}
highest, err = db.HighestSlotStates(context.Background())
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), s2) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s2)
}
db.DeleteState(context.Background(), r2)
highest, err = db.HighestSlotStates(context.Background())
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), s1) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s1)
}
db.DeleteState(context.Background(), r1)
highest, err = db.HighestSlotStates(context.Background())
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), s0) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s1)
}
}
func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
s0 := &pb.BeaconState{Slot: 1}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
r, _ := ssz.HashTreeRoot(b.Block)
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
st, err := state.InitializeFromProto(s0)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(context.Background(), st, r); err != nil {
t.Fatal(err)
}
s1 := &pb.BeaconState{Slot: 100}
b = &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 100}}
r1, _ := ssz.HashTreeRoot(b.Block)
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
st, err = state.InitializeFromProto(s1)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(context.Background(), st, r1); err != nil {
t.Fatal(err)
}
highest, err := db.HighestSlotStates(context.Background())
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), s1) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s1)
}
s2 := &pb.BeaconState{Slot: 1000}
b = &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1000}}
r2, _ := ssz.HashTreeRoot(b.Block)
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
st, err = state.InitializeFromProto(s2)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(context.Background(), st, r2); err != nil {
t.Fatal(err)
}
highest, err = db.HighestSlotStatesBelow(context.Background(), 2)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), s0) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s0)
}
highest, err = db.HighestSlotStatesBelow(context.Background(), 101)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), s1) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s1)
}
highest, err = db.HighestSlotStatesBelow(context.Background(), 1001)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), s2) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s2)
}
}
func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
s := &pb.BeaconState{}
genesisState, err := state.InitializeFromProto(s)
if err != nil {
t.Fatal(err)
}
genesisRoot := [32]byte{'a'}
db.SaveGenesisBlockRoot(context.Background(), genesisRoot)
db.SaveState(context.Background(), genesisState, genesisRoot)
s0 := &pb.BeaconState{Slot: 1}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
r, _ := ssz.HashTreeRoot(b.Block)
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
st, err := state.InitializeFromProto(s0)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(context.Background(), st, r); err != nil {
t.Fatal(err)
}
highest, err := db.HighestSlotStatesBelow(context.Background(), 2)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), s0) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s0)
}
highest, err = db.HighestSlotStatesBelow(context.Background(), 1)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), genesisState.InnerStateUnsafe()) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s0)
}
highest, err = db.HighestSlotStatesBelow(context.Background(), 0)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(highest[0].InnerStateUnsafe(), genesisState.InnerStateUnsafe()) {
t.Errorf("Did not retrieve saved state: %v != %v", highest, s0)
}
}
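Taken together, the assertions above suggest HighestSlotStatesBelow treats its slot argument as an exclusive upper bound and falls back to the genesis state when no saved state sits below it. A hedged recap using the same test database handle; the Slot() getter on the returned state is an assumption for illustration.

// Sketch only: exclusive-upper-bound lookup as exercised by the tests above.
states, err := db.HighestSlotStatesBelow(ctx, 101)
if err == nil && len(states) > 0 {
	fmt.Println("highest slot below 101:", states[0].Slot()) // 100 in the setup above
}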

View File

@@ -3,7 +3,7 @@ package kv
import (
"bytes"
"github.com/boltdb/bolt"
bolt "go.etcd.io/bbolt"
)
// lookupValuesForIndices takes in a list of indices and looks up

View File

@@ -5,9 +5,9 @@ import (
"encoding/binary"
"fmt"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/shared/params"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

View File

@@ -7,6 +7,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/testing",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//shared/testutil:go_default_library",

View File

@@ -8,6 +8,7 @@ import (
"path"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -23,7 +24,7 @@ func SetupDB(t testing.TB) db.Database {
if err := os.RemoveAll(p); err != nil {
t.Fatalf("failed to remove directory: %v", err)
}
s, err := kv.NewKVStore(p)
s, err := kv.NewKVStore(p, cache.NewStateSummaryCache())
if err != nil {
t.Fatal(err)
}
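NewKVStore now takes a state summary cache alongside the data directory. A hedged sketch of opening a store outside the test helper; the path is illustrative and callers are assumed to Close() the store when done.

// Sketch only: opening the key-value store with the new constructor signature.
func openStoreExample() (*kv.Store, error) {
	return kv.NewKVStore("/tmp/beacondata", cache.NewStateSummaryCache())
}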

View File

@@ -13,6 +13,6 @@ go_library(
deps = [
"//shared/cmd:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
],
)

View File

@@ -1,31 +1,31 @@
package flags
import (
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
var (
// ArchiveEnableFlag defines whether or not the beacon chain should archive
// historical blocks, attestations, and validator set changes.
ArchiveEnableFlag = cli.BoolFlag{
ArchiveEnableFlag = &cli.BoolFlag{
Name: "archive",
Usage: "Whether or not beacon chain should archive historical data including blocks, attestations, and validator set changes",
}
// ArchiveValidatorSetChangesFlag defines whether or not the beacon chain should archive
// historical validator set changes in persistent storage.
ArchiveValidatorSetChangesFlag = cli.BoolFlag{
ArchiveValidatorSetChangesFlag = &cli.BoolFlag{
Name: "archive-validator-set-changes",
Usage: "Whether or not beacon chain should archive historical validator set changes",
}
// ArchiveBlocksFlag defines whether or not the beacon chain should archive
// historical block data in persistent storage.
ArchiveBlocksFlag = cli.BoolFlag{
ArchiveBlocksFlag = &cli.BoolFlag{
Name: "archive-blocks",
Usage: "Whether or not beacon chain should archive historical blocks",
}
// ArchiveAttestationsFlag defines whether or not the beacon chain should archive
// historical attestation data in persistent storage.
ArchiveAttestationsFlag = cli.BoolFlag{
ArchiveAttestationsFlag = &cli.BoolFlag{
Name: "archive-attestations",
Usage: "Whether or not beacon chain should archive historical blocks",
}

View File

@@ -1,100 +1,113 @@
package flags
import (
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
var (
// NoCustomConfigFlag determines whether to launch a beacon chain using real parameters or demo parameters.
NoCustomConfigFlag = cli.BoolFlag{
Name: "no-custom-config",
Usage: "Run the beacon chain with the real parameters from phase 0.",
}
// HTTPWeb3ProviderFlag provides an HTTP access endpoint to an ETH 1.0 RPC.
HTTPWeb3ProviderFlag = cli.StringFlag{
HTTPWeb3ProviderFlag = &cli.StringFlag{
Name: "http-web3provider",
Usage: "A mainchain web3 provider string http endpoint",
Value: "https://goerli.prylabs.net",
}
// Web3ProviderFlag defines a flag for a mainchain RPC endpoint.
Web3ProviderFlag = cli.StringFlag{
Web3ProviderFlag = &cli.StringFlag{
Name: "web3provider",
Usage: "A mainchain web3 provider string endpoint. Can either be an IPC file string or a WebSocket endpoint. Cannot be an HTTP endpoint.",
Value: "wss://goerli.prylabs.net/websocket",
}
// DepositContractFlag defines a flag for the deposit contract address.
DepositContractFlag = cli.StringFlag{
DepositContractFlag = &cli.StringFlag{
Name: "deposit-contract",
Usage: "Deposit contract address. Beacon chain node will listen logs coming from the deposit contract to determine when validator is eligible to participate.",
Value: "0x4689a3C63CE249355C8a573B5974db21D2d1b8Ef",
}
// RPCHost defines the host on which the RPC server should listen.
RPCHost = cli.StringFlag{
RPCHost = &cli.StringFlag{
Name: "rpc-host",
Usage: "Host on which the RPC server should listen",
Value: "0.0.0.0",
}
// RPCPort defines a beacon node RPC port to open.
RPCPort = cli.IntFlag{
RPCPort = &cli.IntFlag{
Name: "rpc-port",
Usage: "RPC port exposed by a beacon node",
Value: 4000,
}
// RPCMaxPageSize defines the maximum numbers per page returned in RPC responses from this
// beacon node (default: 500).
RPCMaxPageSize = cli.IntFlag{
RPCMaxPageSize = &cli.IntFlag{
Name: "rpc-max-page-size",
Usage: "Max number of items returned per page in RPC responses for paginated endpoints (default: 500)",
Usage: "Max number of items returned per page in RPC responses for paginated endpoints.",
Value: 500,
}
// CertFlag defines a flag for the node's TLS certificate.
CertFlag = cli.StringFlag{
CertFlag = &cli.StringFlag{
Name: "tls-cert",
Usage: "Certificate for secure gRPC. Pass this and the tls-key flag in order to use gRPC securely.",
}
// KeyFlag defines a flag for the node's TLS key.
KeyFlag = cli.StringFlag{
KeyFlag = &cli.StringFlag{
Name: "tls-key",
Usage: "Key for secure gRPC. Pass this and the tls-cert flag in order to use gRPC securely.",
}
// GRPCGatewayPort enables a gRPC gateway to be exposed for Prysm.
GRPCGatewayPort = cli.IntFlag{
GRPCGatewayPort = &cli.IntFlag{
Name: "grpc-gateway-port",
Usage: "Enable gRPC gateway for JSON requests",
}
// GPRCGatewayCorsDomain serves preflight requests when serving gRPC JSON gateway.
GPRCGatewayCorsDomain = &cli.StringFlag{
Name: "grpc-gateway-corsdomain",
Usage: "Comma separated list of domains from which to accept cross origin requests " +
"(browser enforced). This flag has no effect if not used with --grpc-gateway-port.",
}
// MinSyncPeers specifies the required number of successful peer handshakes in order
// to start syncing with external peers.
MinSyncPeers = cli.IntFlag{
MinSyncPeers = &cli.IntFlag{
Name: "min-sync-peers",
Usage: "The required number of valid peers to connect with before syncing.",
Value: 3,
}
// ContractDeploymentBlock is the block in which the eth1 deposit contract was deployed.
ContractDeploymentBlock = cli.IntFlag{
ContractDeploymentBlock = &cli.IntFlag{
Name: "contract-deployment-block",
Usage: "The eth1 block in which the deposit contract was deployed.",
Value: 1960177,
}
// SetGCPercent is the percentage of current live allocations at which the garbage collector is to run.
SetGCPercent = cli.IntFlag{
SetGCPercent = &cli.IntFlag{
Name: "gc-percent",
Usage: "The percentage of freshly allocated data to live data on which the gc will be run again.",
Value: 100,
}
// UnsafeSync starts the beacon node from the previously saved head state and syncs from there.
UnsafeSync = cli.BoolFlag{
UnsafeSync = &cli.BoolFlag{
Name: "unsafe-sync",
Usage: "Starts the beacon node with the previously saved head state instead of finalized state.",
}
// SlasherCertFlag defines a flag for the slasher TLS certificate.
SlasherCertFlag = cli.StringFlag{
SlasherCertFlag = &cli.StringFlag{
Name: "slasher-tls-cert",
Usage: "Certificate for secure slasher gRPC connection. Pass this in order to use slasher gRPC securely.",
}
// SlasherProviderFlag defines a flag for a slasher RPC provider.
SlasherProviderFlag = cli.StringFlag{
SlasherProviderFlag = &cli.StringFlag{
Name: "slasher-provider",
Usage: "A slasher provider string endpoint. Can either be an grpc server endpoint.",
Value: "127.0.0.1:5000",
}
// SlotsPerArchivedPoint specifies the number of slots between the archived points, to save beacon state in the cold
// section of DB.
SlotsPerArchivedPoint = &cli.IntFlag{
Name: "slots-per-archive-point",
Usage: "The slot durations of when an archived state gets saved in the DB.",
Value: 128,
}
// EnableDiscv5 enables running discv5.
EnableDiscv5 = &cli.BoolFlag{
Name: "enable-discv5",
Usage: "Starts dv5 dht.",
}
)

View File

@@ -3,7 +3,7 @@ package flags
import (
"github.com/prysmaticlabs/prysm/shared/cmd"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
// GlobalFlags specifies all the global flags for the
@@ -13,10 +13,11 @@ type GlobalFlags struct {
EnableArchivedValidatorSetChanges bool
EnableArchivedBlocks bool
EnableArchivedAttestations bool
UnsafeSync bool
EnableDiscv5 bool
MinimumSyncPeers int
MaxPageSize int
DeploymentBlock int
UnsafeSync bool
}
var globalConfig *GlobalFlags
@@ -38,31 +39,34 @@ func Init(c *GlobalFlags) {
// based on the provided cli context.
func ConfigureGlobalFlags(ctx *cli.Context) {
cfg := &GlobalFlags{}
if ctx.GlobalBool(ArchiveEnableFlag.Name) {
if ctx.Bool(ArchiveEnableFlag.Name) {
cfg.EnableArchive = true
}
if ctx.GlobalBool(ArchiveValidatorSetChangesFlag.Name) {
if ctx.Bool(ArchiveValidatorSetChangesFlag.Name) {
cfg.EnableArchivedValidatorSetChanges = true
}
if ctx.GlobalBool(ArchiveBlocksFlag.Name) {
if ctx.Bool(ArchiveBlocksFlag.Name) {
cfg.EnableArchivedBlocks = true
}
if ctx.GlobalBool(ArchiveAttestationsFlag.Name) {
if ctx.Bool(ArchiveAttestationsFlag.Name) {
cfg.EnableArchivedAttestations = true
}
if ctx.GlobalBool(UnsafeSync.Name) {
if ctx.Bool(UnsafeSync.Name) {
cfg.UnsafeSync = true
}
cfg.MaxPageSize = ctx.GlobalInt(RPCMaxPageSize.Name)
cfg.DeploymentBlock = ctx.GlobalInt(ContractDeploymentBlock.Name)
if ctx.Bool(EnableDiscv5.Name) {
cfg.EnableDiscv5 = true
}
cfg.MaxPageSize = ctx.Int(RPCMaxPageSize.Name)
cfg.DeploymentBlock = ctx.Int(ContractDeploymentBlock.Name)
configureMinimumPeers(ctx, cfg)
Init(cfg)
}
func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
cfg.MinimumSyncPeers = ctx.GlobalInt(MinSyncPeers.Name)
maxPeers := int(ctx.GlobalInt64(cmd.P2PMaxPeers.Name))
cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
maxPeers := int(ctx.Int64(cmd.P2PMaxPeers.Name))
if cfg.MinimumSyncPeers > maxPeers {
log.Warnf("Changing Minimum Sync Peers to %d", maxPeers)
cfg.MinimumSyncPeers = maxPeers
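These hunks are part of the migration from github.com/urfave/cli (v1) to gopkg.in/urfave/cli.v2: flag values become pointers, and the Global* accessors collapse into plain ctx.Bool/ctx.Int calls. A minimal, self-contained sketch of that v2 pattern follows; the flag name and app wiring are illustrative, not Prysm's.

package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v2"
)

// v2 flags are declared and registered as pointers.
var archiveFlag = &cli.BoolFlag{
	Name:  "archive",
	Usage: "illustrative flag only",
}

func main() {
	app := &cli.App{
		Name:  "example",
		Flags: []cli.Flag{archiveFlag},
		Action: func(ctx *cli.Context) error {
			// v2 drops GlobalBool/GlobalInt; ctx.Bool reads the flag at any level.
			fmt.Println("archive enabled:", ctx.Bool(archiveFlag.Name))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}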

View File

@@ -1,29 +1,29 @@
package flags
import (
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v2"
)
var (
// InteropGenesisStateFlag defines a flag for the beacon node to load genesis state via file.
InteropGenesisStateFlag = cli.StringFlag{
InteropGenesisStateFlag = &cli.StringFlag{
Name: "interop-genesis-state",
Usage: "The genesis state file (.SSZ) to load from",
}
// InteropMockEth1DataVotesFlag enables mocking the eth1 proof-of-work chain data put into blocks by proposers.
InteropMockEth1DataVotesFlag = cli.BoolFlag{
InteropMockEth1DataVotesFlag = &cli.BoolFlag{
Name: "interop-eth1data-votes",
Usage: "Enable mocking of eth1 data votes for proposers to package into blocks",
}
// InteropGenesisTimeFlag specifies genesis time for state generation.
InteropGenesisTimeFlag = cli.Uint64Flag{
InteropGenesisTimeFlag = &cli.Uint64Flag{
Name: "interop-genesis-time",
Usage: "Specify the genesis time for interop genesis state generation. Must be used with " +
"--interop-num-validators",
}
// InteropNumValidatorsFlag specifies number of genesis validators for state generation.
InteropNumValidatorsFlag = cli.Uint64Flag{
InteropNumValidatorsFlag = &cli.Uint64Flag{
Name: "interop-num-validators",
Usage: "Specify number of genesis validators to generate for interop. Must be used with --interop-genesis-time",
}

View File

@@ -4,6 +4,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"cors.go",
"gateway.go",
"handlers.go",
"log.go",
@@ -16,6 +17,7 @@ go_library(
deps = [
"//shared:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_grpc_gateway_library",
"@com_github_rs_cors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@grpc_ecosystem_grpc_gateway//runtime:go_default_library",
"@org_golang_google_grpc//:go_default_library",

View File

@@ -0,0 +1,20 @@
package gateway
import (
"net/http"
"github.com/rs/cors"
)
func newCorsHandler(srv http.Handler, allowedOrigins []string) http.Handler {
if len(allowedOrigins) == 0 {
return srv
}
c := cors.New(cors.Options{
AllowedOrigins: allowedOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet},
MaxAge: 600,
AllowedHeaders: []string{"*"},
})
return c.Handler(srv)
}
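newCorsHandler is a thin wrapper: with no configured origins it returns the handler untouched, otherwise it lets github.com/rs/cors enforce the origin list for GET and POST. A hedged usage sketch, assumed to sit in the same gateway package; the address and origin are illustrative, and the real wiring happens in Start() below.

// Sketch only: wrapping a mux with the CORS handler defined above.
func runCorsExample() error {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	srv := &http.Server{
		Addr:    "127.0.0.1:8000",
		Handler: newCorsHandler(mux, []string{"http://localhost:4242"}),
	}
	return srv.ListenAndServe()
}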

View File

@@ -19,13 +19,14 @@ var _ = shared.Service(&Gateway{})
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward
// it to the beacon-chain gRPC server.
type Gateway struct {
conn *grpc.ClientConn
ctx context.Context
cancel context.CancelFunc
gatewayAddr string
remoteAddr string
server *http.Server
mux *http.ServeMux
conn *grpc.ClientConn
ctx context.Context
cancel context.CancelFunc
gatewayAddr string
remoteAddr string
server *http.Server
mux *http.ServeMux
allowedOrigins []string
startFailure error
}
@@ -64,7 +65,7 @@ func (g *Gateway) Start() {
g.server = &http.Server{
Addr: g.gatewayAddr,
Handler: g.mux,
Handler: newCorsHandler(g.mux, g.allowedOrigins),
}
go func() {
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
@@ -105,16 +106,17 @@ func (g *Gateway) Stop() error {
// New returns a new gateway server which translates HTTP into gRPC.
// Accepts a context and optional http.ServeMux.
func New(ctx context.Context, remoteAddress, gatewayAddress string, mux *http.ServeMux) *Gateway {
func New(ctx context.Context, remoteAddress, gatewayAddress string, mux *http.ServeMux, allowedOrigins []string) *Gateway {
if mux == nil {
mux = http.NewServeMux()
}
return &Gateway{
remoteAddr: remoteAddress,
gatewayAddr: gatewayAddress,
ctx: ctx,
mux: mux,
remoteAddr: remoteAddress,
gatewayAddr: gatewayAddress,
ctx: ctx,
mux: mux,
allowedOrigins: allowedOrigins,
}
}

View File

@@ -5,6 +5,7 @@ import (
"flag"
"fmt"
"net/http"
"strings"
joonix "github.com/joonix/log"
"github.com/prysmaticlabs/prysm/beacon-chain/gateway"
@@ -13,9 +14,10 @@ import (
)
var (
beaconRPC = flag.String("beacon-rpc", "localhost:4000", "Beacon chain gRPC endpoint")
port = flag.Int("port", 8000, "Port to serve on")
debug = flag.Bool("debug", false, "Enable debug logging")
beaconRPC = flag.String("beacon-rpc", "localhost:4000", "Beacon chain gRPC endpoint")
port = flag.Int("port", 8000, "Port to serve on")
debug = flag.Bool("debug", false, "Enable debug logging")
allowedOrigins = flag.String("corsdomain", "", "A comma separated list of CORS domains to allow.")
)
func init() {
@@ -31,7 +33,7 @@ func main() {
}
mux := http.NewServeMux()
gw := gateway.New(context.Background(), *beaconRPC, fmt.Sprintf("0.0.0.0:%d", *port), mux)
gw := gateway.New(context.Background(), *beaconRPC, fmt.Sprintf("0.0.0.0:%d", *port), mux, strings.Split(*allowedOrigins, ","))
mux.HandleFunc("/swagger/", gateway.SwaggerServer())
mux.HandleFunc("/healthz", healthzServer(gw))
gw.Start()

View File

@@ -153,7 +153,7 @@ func (s *Service) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight
func (s *Service) saveGenesisState(ctx context.Context, genesisState *stateTrie.BeaconState) error {
s.chainStartDeposits = make([]*ethpb.Deposit, genesisState.NumValidators())
stateRoot, err := genesisState.HashTreeRoot()
stateRoot, err := genesisState.HashTreeRoot(ctx)
if err != nil {
return err
}
@@ -183,16 +183,20 @@ func (s *Service) saveGenesisState(ctx context.Context, genesisState *stateTrie.
return errors.Wrap(err, "could save finalized checkpoint")
}
pubKeys := make([][48]byte, 0, genesisState.NumValidators())
indices := make([]uint64, 0, genesisState.NumValidators())
for i := uint64(0); i < uint64(genesisState.NumValidators()); i++ {
pk := genesisState.PubkeyAtIndex(i)
if err := s.beaconDB.SaveValidatorIndex(ctx, pk[:], i); err != nil {
return errors.Wrapf(err, "could not save validator index: %d", i)
}
pubKeys = append(pubKeys, pk)
indices = append(indices, i)
s.chainStartDeposits[i] = &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: pk[:],
},
}
}
if err := s.beaconDB.SaveValidatorIndices(ctx, pubKeys, indices); err != nil {
return errors.Wrap(err, "could not save validator indices")
}
return nil
}
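The change above batches the genesis validator indices: the loop now only accumulates pubKeys and indices, and a single SaveValidatorIndices call persists them, replacing one DB write per validator. The win comes from doing all of the puts inside one bolt transaction; the hypothetical helper below (not Prysm's implementation) shows the shape of that pattern, assuming bolt is the go.etcd.io/bbolt package used elsewhere in this diff.

// Sketch only: one Update (and therefore one commit) for an entire batch of puts.
func savePairsBatched(db *bolt.DB, bucketName []byte, keys, vals [][]byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists(bucketName)
		if err != nil {
			return err
		}
		for i := range keys {
			if err := bkt.Put(keys[i], vals[i]); err != nil {
				return err
			}
		}
		return nil // the whole batch commits together when Update returns
	})
}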

View File

@@ -17,14 +17,14 @@ import (
"github.com/prysmaticlabs/prysm/shared/logutil"
"github.com/prysmaticlabs/prysm/shared/version"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
gologging "github.com/whyrusleeping/go-logging"
prefixed "github.com/x-cray/logrus-prefixed-formatter"
_ "go.uber.org/automaxprocs"
"gopkg.in/urfave/cli.v2"
"gopkg.in/urfave/cli.v2/altsrc"
)
var appFlags = []cli.Flag{
flags.NoCustomConfigFlag,
flags.DepositContractFlag,
flags.Web3ProviderFlag,
flags.HTTPWeb3ProviderFlag,
@@ -38,6 +38,7 @@ var appFlags = []cli.Flag{
flags.ContractDeploymentBlock,
flags.SetGCPercent,
flags.UnsafeSync,
flags.EnableDiscv5,
flags.InteropMockEth1DataVotesFlag,
flags.InteropGenesisStateFlag,
flags.InteropNumValidatorsFlag,
@@ -46,6 +47,7 @@ var appFlags = []cli.Flag{
flags.ArchiveValidatorSetChangesFlag,
flags.ArchiveBlocksFlag,
flags.ArchiveAttestationsFlag,
flags.SlotsPerArchivedPoint,
cmd.BootstrapNode,
cmd.NoDiscovery,
cmd.StaticPeers,
@@ -79,15 +81,16 @@ var appFlags = []cli.Flag{
debug.TraceFlag,
cmd.LogFileName,
cmd.EnableUPnPFlag,
cmd.ConfigFileFlag,
}
func init() {
appFlags = append(appFlags, featureconfig.BeaconChainFlags...)
appFlags = cmd.WrapFlags(append(appFlags, featureconfig.BeaconChainFlags...))
}
func main() {
log := logrus.WithField("prefix", "main")
app := cli.NewApp()
app := cli.App{}
app.Name = "beacon-chain"
app.Usage = "this is a beacon chain implementation for Ethereum 2.0"
app.Action = startNode
@@ -96,7 +99,14 @@ func main() {
app.Flags = appFlags
app.Before = func(ctx *cli.Context) error {
format := ctx.GlobalString(cmd.LogFormat.Name)
// Load any flags from file, if specified.
if ctx.IsSet(cmd.ConfigFileFlag.Name) {
if err := altsrc.InitInputSourceWithContext(appFlags, altsrc.NewYamlSourceFromFlagFunc(cmd.ConfigFileFlag.Name))(ctx); err != nil {
return err
}
}
format := ctx.String(cmd.LogFormat.Name)
switch format {
case "text":
formatter := new(prefixed.TextFormatter)
@@ -104,7 +114,7 @@ func main() {
formatter.FullTimestamp = true
// If persistent log files are written - we disable the log messages coloring because
// the colors are ANSI codes and seen as gibberish in the log files.
formatter.DisableColors = ctx.GlobalString(cmd.LogFileName.Name) != ""
formatter.DisableColors = ctx.String(cmd.LogFileName.Name) != ""
logrus.SetFormatter(formatter)
break
case "fluentd":
@@ -121,7 +131,7 @@ func main() {
return fmt.Errorf("unknown log format %s", format)
}
logFileName := ctx.GlobalString(cmd.LogFileName.Name)
logFileName := ctx.String(cmd.LogFileName.Name)
if logFileName != "" {
if err := logutil.ConfigurePersistentLogging(logFileName); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
@@ -129,7 +139,7 @@ func main() {
}
if ctx.IsSet(flags.SetGCPercent.Name) {
runtimeDebug.SetGCPercent(ctx.GlobalInt(flags.SetGCPercent.Name))
runtimeDebug.SetGCPercent(ctx.Int(flags.SetGCPercent.Name))
}
runtime.GOMAXPROCS(runtime.NumCPU())
return debug.Setup(ctx)
@@ -149,7 +159,7 @@ func main() {
}
func startNode(ctx *cli.Context) error {
verbosity := ctx.GlobalString(cmd.VerbosityFlag.Name)
verbosity := ctx.String(cmd.VerbosityFlag.Name)
level, err := logrus.ParseLevel(verbosity)
if err != nil {
return err
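With cmd.ConfigFileFlag wired through altsrc, flags can now be supplied from a YAML file as well as the command line. A hedged example of what such a file might look like, assuming altsrc's usual convention of top-level keys named after the flags; the values are illustrative only.

# config.yaml (illustrative) passed via --config-file
archive: true
rpc-port: 4000
min-sync-peers: 3
slots-per-archive-point: 128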

View File

@@ -8,6 +8,7 @@ go_library(
deps = [
"//beacon-chain/archiver:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/flags:go_default_library",
@@ -21,8 +22,10 @@ go_library(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/rpc:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",
"//beacon-chain/sync/initial-sync-old:go_default_library",
"//shared:go_default_library",
"//shared/cmd:go_default_library",
"//shared/debug:go_default_library",
@@ -36,7 +39,7 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
],
)
@@ -49,6 +52,6 @@ go_test(
"//beacon-chain/core/feed/state:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_urfave_cli//:go_default_library",
"@in_gopkg_urfave_cli_v2//:go_default_library",
],
)

Some files were not shown because too many files have changed in this diff Show More