Compare commits

...

289 Commits

Author SHA1 Message Date
Preston Van Loon
0e4cb68249 Add regression test found by fuzzer (#6628)
* Add regression test found by fuzzer
2020-07-18 03:47:03 +00:00
Nishant Das
cc3c3a0c54 QSP 29: Add Rate Limiter For All Topics (#6606)
* initial commit
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into rateLimiter
* finally
* add mutex
* rate limiter for all rpc reqs
* remove recursive readlocks
* spelling
* Merge refs/heads/master into rateLimiter
* fix all tests
* Merge refs/heads/master into rateLimiter
2020-07-17 08:58:51 +00:00
Raul Jordan
9ad6277852 Accounts V2: Create Wallet Non-Interactively + Add Test Coverage (#6611)
* create wallet noninteractively
* tests for wallet create
* kapol feedback
* Merge refs/heads/master into create-wallet-tests
2020-07-17 08:21:16 +00:00
terence tsao
ebd05fba01 Improve test coverage info.go (#6618)
* Update kv aggregated_test.go
* Update block_test.go
* Update forkchoice_test.go
* Update unaggregated_test.go
* Update prepare_forkchoice_test.go
* Update prune_expired_test.go
* Update atts service_test.go
* Update service_attester_test.go
* Update service_proposer_test.go
* Update exit service_test.go
* Gaz
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Move averageBalance from log.go to info.go
* Move avg balance from log.go to info.go
* Add info test
* Remove unused logEpochData in log.go
* Gaz
* Merge branch 'master' into info-cov
* gaz
* Merge refs/heads/master into info-cov
2020-07-16 22:44:43 +00:00
rkapka
eb0d70814a Fixed nil pointer error in DepositCache (#6596) 2020-07-16 14:48:36 -07:00
rkapka
77607c6fdb Applies assertion funcs to cache tests (#6617)
* testutils for cache
* Merge branch 'master' into cache-test-refactor
* removed some empty lines
* Merge branch 'origin-master' into cache-test-refactor
* Merge remote-tracking branch 'rkapka/cache-test-refactor' into cache-test-refactor
* revert package names
2020-07-16 19:34:08 +00:00
Nishant Das
cf2bbec6a8 Use Faster Hash Function (#6616)
* change to proto hash

* gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-07-16 10:19:14 -05:00
terence tsao
69a2dc2716 Improve test coverage receive_block.go (#6613)
* Update kv aggregated_test.go
* Update block_test.go
* Update forkchoice_test.go
* Update unaggregated_test.go
* Update prepare_forkchoice_test.go
* Update prune_expired_test.go
* Update atts service_test.go
* Update service_attester_test.go
* Update service_proposer_test.go
* Update exit service_test.go
* Gaz
* TestService_ReceiveBlockInitialSync
* TestService_ReceiveBlockBatch
* TestService_HasInitSyncBlock
* Merge branch 'master' of github.com:prysmaticlabs/prysm into rcv-blk-cov
* Merge branch 'master' of github.com:prysmaticlabs/prysm into rcv-blk-cov
* Merge branch 'rcv-blk-cov' of github.com:prysmaticlabs/prysm into rcv-blk-cov
* Gaz
* Merge refs/heads/master into rcv-blk-cov
2020-07-16 13:34:34 +00:00
Raul Jordan
d897640625 Add YAML Struct Tag to Eth1FollowDistance Parameter (#6612)
* add yaml struct tag to eth1 follow distance
* more config values missing yaml tags
* Merge refs/heads/master into follow-distance-struct-tag
2020-07-16 13:04:59 +00:00
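A minimal sketch of what the missing yaml tags fix: without a tag, a YAML config file cannot override the field. The struct below is an illustrative subset, not Prysm's actual config type; the ETH1_FOLLOW_DISTANCE key follows spec-style naming and is an assumption here.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type chainConfig struct {
	// Hypothetical subset of a beacon chain config; the yaml tag is what
	// lets a config file key map onto the Go field.
	Eth1FollowDistance uint64 `yaml:"ETH1_FOLLOW_DISTANCE"`
}

func main() {
	cfg := chainConfig{Eth1FollowDistance: 1024} // compiled-in default
	raw := []byte("ETH1_FOLLOW_DISTANCE: 16")    // e.g. a minimal testnet config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Eth1FollowDistance) // 16 — the override is applied
}
```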
Victor Farazdagi
deb025f57c Applies assertion funcs to blockchain tests (#6605)
* applies assertion funcs to blockchain tests
* Merge branch 'master' into blockchain-apply-testutils-assertions
* gofmt
* Merge refs/heads/master into blockchain-apply-testutils-assertions
2020-07-16 12:11:39 +00:00
Nishant Das
f6756bb591 QSP 23: Prune Peers From Peer Handler (#6614)
* checkpoint
* Merge refs/heads/master into prunePeers
2020-07-16 10:09:51 +00:00
Raul Jordan
df73851749 Accounts Revamp: Wallet Edit-Config (#6607)
* edit remote config
* gets messed up when writing to file again
* proper editing
* noninteractive mode
* test for edit wallet
* Merge branch 'master' into edit-wallet
* wallet edit test done
* imports
* Merge refs/heads/master into edit-wallet
2020-07-16 05:08:16 +00:00
Ivan Martinez
fbeba94a92 Change accounts-v2 import and export to be non-interactive (#6609)
* Change import and export to be non-interactive
* Fix
* fix lint
* Merge branch 'master' into make-export-import-nonint
* Comments
* Merge branch 'make-export-import-nonint' of github.com:prysmaticlabs/prysm into make-export-import-nonint
* Merge refs/heads/master into make-export-import-nonint
2020-07-15 23:00:00 +00:00
Victor Farazdagi
a0e5754464 Resolves possible import cycle in testutils assertions (#6610)
* resolves possible import cycle in testutils assertions
* gazelle
* linter
* linter
2020-07-15 20:10:54 +00:00
terence tsao
c309ba6a10 Applies assertion funcs to operation tests (#6608)
* Update kv aggregated_test.go
* Update block_test.go
* Update forkchoice_test.go
* Update unaggregated_test.go
* Update prepare_forkchoice_test.go
* Update prune_expired_test.go
* Update atts service_test.go
* Update service_attester_test.go
* Update service_proposer_test.go
* Update exit service_test.go
* Gaz
2020-07-15 18:37:51 +00:00
Victor Farazdagi
c2615168d9 Applies assertion funcs to sync tests (#6603)
* applies assertion funcs to sync/initial-sync tests
* gazelle
* Merge branch 'master' into sync-apply-testutils-assertions
* gazelle
* applies assertion funcs to sync/initial-sync tests
* Merge branch 'master' into sync-apply-testutils-assertions
2020-07-15 04:41:11 +00:00
Raul Jordan
f12f75224e Accounts V2: Create Wallet via CLI (#6543)
* begin on the proto definitions

* define remote signer service protos

* basic implementation

* remote keymanager docs

* finalize remote client doc

* amend response

* fix proto defs

* test new and begin test sign

* test sign done

* remote oneof

* rename

* fix build

* viz

* Update validator/keymanager/v2/remote/remote_test.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>

* Update validator/accounts/v2/wallet.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>

* fmt

* move the input check to accounts new

* begin on requesting cli input for remote keymanager config

* move the input check to accounts new

* begin on requesting cli input for remote keymanager config

* define wallet v2

* create wallet most logic done

* init remote wallet

* create wallet moving nicely

* ensure wallet create works

* reenable import export list

* further refactor

* improve handling of input wallet dir

* add all validation to cert path checks

* lint

* list test

* new wallet lint

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-07-14 23:05:21 -05:00
terence tsao
fe14c5086a Fix duplicated selectV1Keymanager (#6604)
* Fix duplicated selectV1Keymanager
2020-07-15 01:05:33 +00:00
Ivan Martinez
d7bcea7906 Allow creating new accounts to be non-interactive (#6602)
* Allow accounts to be made non-interactively
* Merge branch 'master' of github.com:prysmaticlabs/prysm into make-accounts-noninteractive
* Update validator/node/node.go
* Update validator/accounts/v2/list.go
2020-07-14 23:00:58 +00:00
terence tsao
eef9a760ec Fix inconsistent pubkey formatting in logs (#6600)
* Use Trunc for pub key
* Gaz
* Merge refs/heads/master into fix-pub-key
2020-07-14 20:37:01 +00:00
Ivan Martinez
5278b75c02 E2E Improvements (#6587)
* Change long-running e2e to run minimally
* Merge branch 'master' into e2e-config-change
* Remove deposits flag
* Merge branch 'e2e-config-change' of github.com:prysmaticlabs/prysm into e2e-config-change
* Merge branch 'master' of github.com:prysmaticlabs/prysm into e2e-config-change
* Reduce secondsperslot
* Add todo
* Merge branch 'master' of github.com:prysmaticlabs/prysm into e2e-config-change
* Merge refs/heads/master into e2e-config-change
2020-07-14 18:08:17 +00:00
Victor Farazdagi
d9fd2521af Applies assertion funcs to p2p tests (#6597)
* applies assertion funcs to p2p/encoder tests
* applies assertion funcs to p2p/peers tests
* addr_factory_test + broadcaster_test updated
* connection_gater_test updated
* applies assertion funcs to p2p/service tests
* Merge branch 'master' into p2p-apply-testutils-assertions
* minor fixes
* Merge branch 'master' into p2p-apply-testutils-assertions
* Merge refs/heads/master into p2p-apply-testutils-assertions
2020-07-14 16:51:39 +00:00
terence tsao
ea32af7bf7 Update justified point for batch sync (#6594)
* Update justified points
* Add the same check to on block init sync
* Add comments
* Add test
* Gaz
* Merge branch 'master' into fix-batch-sync
* Merge refs/heads/master into fix-batch-sync
2020-07-14 16:20:25 +00:00
terence tsao
8fda48409c Remove batch blocks save at getAttPreState (#6584)
* Check if block exists in cache
* Consider init sync blocks
* Fixed test
* Merge refs/heads/master into get-block
2020-07-14 15:46:00 +00:00
Shay Zluf
e0c803abfc Fast list validators (#6580)
* Fast list validators
* fix tests
* Merge branch 'master' of github.com:prysmaticlabs/prysm into fast_list_validators
* add small test
* Merge branch 'master' into fast_list_validators
* remove unneeded fetcher
* Merge branch 'fast_list_validators' of github.com:prysmaticlabs/prysm into fast_list_validators
* nishant feedback
* package name
2020-07-14 12:21:55 +00:00
Preston Van Loon
b099cab9b1 automaxprocs: Log error to DEBUG instead of panic (#6592)
* Log error to DEBUG instead of panic. Fixes #6591
* Merge branch 'master' into fix-6591
2020-07-14 01:51:07 +00:00
Ivan Martinez
52e9155df3 Change validator accounts-v2 to validator wallet-v2 accounts (#6589)
* Change `validator accounts-v2` to `validator wallet-v2 accounts-v2`

* Change to accounts

* Fix cmd

* Rename cmd.go to cmd_accounts.go
2020-07-13 19:58:06 -05:00
Preston Van Loon
774b4b7eef SubmitAggregateAndProof now prefers its own validator attestations (#6566)
* SubmitAggregateAndProof now prefers its own validator attestations
* only do aggregate bits count comparison when best also contains the validator index
* gofmt
* better test
* Merge refs/heads/master into prefer-own-attestation
* @terencechain feedback
* Merge refs/heads/master into prefer-own-attestation
* update comment
* Merge branch 'prefer-own-attestation' of github.com:prysmaticlabs/prysm into prefer-own-attestation
* Merge refs/heads/master into prefer-own-attestation
2020-07-13 22:02:01 +00:00
Ivan Martinez
cd2ea868ff Direct Keymanager: Add Export and Import commands (#6528)
* add in configs
* ask for enable accounts v2
* begin integration of v2 keymanager
* refactor wallet opening
* include significant refactoring of how opening a wallet works, making it easy to include at runtime
* ensure build with keymanager v2
* further improving runtime integration
* default pass paths
* finally running v2 at runtime
* import spacing
* Merge branch 'master' into v2-accounts-feature
* Merge refs/heads/master into v2-accounts-feature
* Get started on export and import commands
* Work more on import
* Complete functionality
* Merge branch 'master' of github.com:prysmaticlabs/prysm into accounts-import
* Undo
* Merge branch 'master' of github.com:prysmaticlabs/prysm into accounts-import
* cleanup
* Extract code to functions
* Merge branch 'master' of github.com:prysmaticlabs/prysm into accounts-import
* Add comments
* Fix comments
* Improvements
* fix
* Merge branch 'master' of github.com:prysmaticlabs/prysm into accounts-import
* Remove GetSigningKeyForAccount
* Progress
* Fix build
* Fix name
* Merge branch 'master' into accounts-import
* Add logging
* Merge branch 'accounts-import' of github.com:prysmaticlabs/prysm into accounts-import
* Merge refs/heads/master into accounts-import
2020-07-13 21:37:18 +00:00
terence tsao
2bbae15194 Handle roughtime response error (#6586)
* Handle roughtime response error
* Update shared/roughtime/roughtime.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Merge branch 'master' into fix-roughtime-logs
* Update shared/roughtime/roughtime.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Merge refs/heads/master into fix-roughtime-logs
2020-07-13 21:00:43 +00:00
terence tsao
18f28e2840 VerifyBlkDescendant - fixed a nil return bug and added a test (#6553)
* Refactor verifyBlkDescendant to be public and omit the slot argument, which was only used for error messages
* impl in mock
* gofmt
* Merge refs/heads/master into refactor-verifyBlkDescendant-signature
* Better errors
* Tests. Yay!
* Comments
* Merge refs/heads/master into test-blk-des
* Fixed receive block test
* Merge branch 'test-blk-des' of github.com:prysmaticlabs/prysm into test-blk-des
* Merge refs/heads/master into test-blk-des
* Ensure finalized root not zeros
* Merge branch 'test-blk-des' of github.com:prysmaticlabs/prysm into test-blk-des
* Merge refs/heads/master into test-blk-des
* Ensure non-zero hashes when comparing
* Merge branch 'test-blk-des' of github.com:prysmaticlabs/prysm into test-blk-des
* Merge refs/heads/master into test-blk-des
2020-07-13 20:14:36 +00:00
Nishant Das
27577bc324 QSP 32: Add Appropriate Stream Deadlines for RPC Requests (#6583)
* add no deadlines
* Merge branch 'master' into fixDeadlines
* nogo
* Merge refs/heads/master into fixDeadlines
2020-07-13 19:40:12 +00:00
Nishant Das
79fbaaea0b QSP18: Add Varint Header Validation (#6577)
* add header validation

* Update beacon-chain/p2p/encoder/varint_test.go

* Update beacon-chain/p2p/encoder/varint_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-07-13 14:02:03 -05:00
Victor Farazdagi
d1e754f011 Fixes display issue in testutils/assert (#6585)
* fixes minor display issue in testutils/assert
2020-07-13 17:20:18 +00:00
Victor Farazdagi
77d1a6c698 Adds testutils/assert and testutils/require (#6563)
* testutils/assert

* adds assertion tb mock

* testutil/require

* gazelle

* comment public types

* set TestOnly attribute

* fixes package name

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-07-13 10:19:52 -05:00
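For readers unfamiliar with the assert/require split: assert reports a failure and lets the test continue, require aborts it. A minimal sketch of such helpers, with illustrative names rather than the exact testutils API:

```go
// Package assert is a minimal sketch in the spirit of testutils/assert;
// names and signatures here are illustrative, not Prysm's exact API.
package assert

import (
	"reflect"
	"testing"
)

// Equal reports a failure (but lets the test continue) when expected != actual.
func Equal(t *testing.T, expected, actual interface{}) {
	t.Helper()
	if !reflect.DeepEqual(expected, actual) {
		t.Errorf("values are not equal, want: %v, got: %v", expected, actual)
	}
}

// NoError reports a failure when err is non-nil.
func NoError(t *testing.T, err error) {
	t.Helper()
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
```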
Nishant Das
f2b4f91419 save blocks (#6578) 2020-07-13 06:33:45 -07:00
Justin
ec800bac7c Remove redundant check in exit.go (#6559)
* Remove redundant check in exit.go

The check `exit.Exit.ValidatorIndex >= uint64(beaconState.NumValidators())` is redundant because `beaconState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)` already performs the same bounds check.
* Merge branch 'master' into patch-1
2020-07-13 09:36:40 +00:00
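A toy illustration of the redundancy this commit removes, assuming the accessor validates its own index as the message states; the types below are stand-ins, not Prysm's beacon state:

```go
package main

import (
	"errors"
	"fmt"
)

// toyState mimics the pattern described above: the accessor itself validates
// the index, so callers don't need their own bounds check.
type toyState struct{ validators []string }

func (s *toyState) NumValidators() int { return len(s.validators) }

func (s *toyState) ValidatorAtIndexReadOnly(idx uint64) (string, error) {
	if idx >= uint64(len(s.validators)) {
		return "", errors.New("index out of range")
	}
	return s.validators[idx], nil
}

func main() {
	s := &toyState{validators: []string{"v0", "v1"}}
	// Redundant caller-side guard (what the commit removes):
	//   if idx >= uint64(s.NumValidators()) { ... }
	// The accessor's own error path already covers it:
	if _, err := s.ValidatorAtIndexReadOnly(5); err != nil {
		fmt.Println("rejected:", err)
	}
}
```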
Nishant Das
63cb99b359 add logs (#6575) 2020-07-12 22:36:03 -07:00
Nishant Das
b5bd1260d0 Clean Up P2P Service (#6574)
* clean up
* gaz
* preston's review
* Merge refs/heads/master into CleanUpP2P
2020-07-13 04:16:24 +00:00
terence tsao
62df4995e6 Clean up block chain pkg initial sync (#6562) 2020-07-12 20:44:06 -07:00
Victor Farazdagi
c35bdf2649 Refactors calls to deprecated libp2p methods (#6554)
* refactors calls to deprecated IDB58Decode()
* Merge branch 'master' into p2p-refactor-deprecated
* updated packages
* gazelle
* mod tidy
* refactors publish()/subscribe() deprecated methods
* gofmt
* test update join/leave topic methods
* re-arrange imports
* Merge branch 'master' into p2p-refactor-deprecated
* Merge refs/heads/master into p2p-refactor-deprecated
2020-07-13 02:28:40 +00:00
Nishant Das
d4c3546434 Validate RPC Topics Before Sending Requests (#6558)
* clean up

* fix panic

* add test mapping

* add schema version change

* fix test

* fix another test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-07-13 09:20:53 +08:00
Ivan Martinez
10c0d5b569 Remove unused functions in validator DB (#6557)
* Remove unused functions in validator DB
* Iface
* Merge branch 'master' into reomve-unused-val-db
* Merge refs/heads/master into reomve-unused-val-db
2020-07-11 17:23:30 +00:00
Ivan Martinez
18c00ab25d Add BoltTimeout to IO config (#6555)
* Make bolt timeout a config value
* Merge refs/heads/master into best-practices0timeout
2020-07-11 16:43:26 +00:00
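A sketch of the change, assuming an illustrative config struct; bolt.Open's Options.Timeout is the real bbolt API and bounds how long opening waits on the database file lock instead of blocking forever:

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

// ioConfig is a hypothetical stand-in for the IO config the commit extends;
// BoltTimeout as a field name follows the commit title.
type ioConfig struct {
	BoltTimeout time.Duration
}

func main() {
	cfg := ioConfig{BoltTimeout: 1 * time.Second}
	// Timeout bounds how long Open waits to obtain the file lock when
	// another process already holds the DB.
	db, err := bolt.Open("validator.db", 0600, &bolt.Options{Timeout: cfg.BoltTimeout})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```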
Nishant Das
b82defddb2 add timeouts (#6560) 2020-07-11 08:18:25 -07:00
Preston Van Loon
fa85d93a19 Verify roughtime results before accepting time offset (#6556)
* Verify roughtime results before accepting time offset
2020-07-11 04:42:26 +00:00
Preston Van Loon
1f35384578 Reject a block whose parent is not part of the finalized chain (#6549)
* Reject a block whose parent is not part of the finalized chain
* Refactor verifyBlkDescendant to be public and omit the slot argument, which was only used for error messages
* Merge branch 'refactor-verifyBlkDescendant-signature' into reject-bad-block
* fix
* impl in mock
* gofmt
* Merge refs/heads/master into refactor-verifyBlkDescendant-signature
* Merge branch 'refactor-verifyBlkDescendant-signature' into reject-bad-block
* fix test
* add test
* Merge branch 'refactor-verifyBlkDescendant-signature' into reject-bad-block
* gofmt
* Merge branch 'master' of github.com:prysmaticlabs/prysm into reject-bad-block
* move up in the validation pipeline, @terencechain offline feedback
* Merge refs/heads/master into reject-bad-block
2020-07-11 03:57:42 +00:00
Preston Van Loon
c9ca5857f8 Avoid automaxprocs default logger to printf (#6524)
* Avoid automaxprocs default logger to printf
* Merge branch 'master' into maxprocs
* gofmt, goimports
* Merge refs/heads/master into maxprocs
* gazelle for docker images
* Merge refs/heads/master into maxprocs
* fix docker
* Merge refs/heads/master into maxprocs
2020-07-11 00:57:43 +00:00
Preston Van Loon
6c7965e82a Refactor verifyBlkDescendant to be public in chainservice API (#6552)
* Refactor verifyBlkDescendant to be public and omit the slot argument, which was only used for error messages
* impl in mock
* gofmt
* Merge refs/heads/master into refactor-verifyBlkDescendant-signature
* fix test
2020-07-11 00:16:52 +00:00
Preston Van Loon
29317ca8da Attestation cache: check bitlist length before checking contains (#6551)
* Attestation cache: check bitlist length before checking contains
2020-07-10 21:05:03 +00:00
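A toy version of the guard this commit adds: compare lengths before a contains-style bitfield comparison, since comparing bitlists of different lengths is meaningless (and, depending on the library, can panic). The bitlist type below is a stand-in, not prysmaticlabs/go-bitfield:

```go
package main

import "fmt"

type bitlist []bool

// contains reports whether every set bit in c is also set in b,
// rejecting mismatched lengths up front (the check the commit adds).
func (b bitlist) contains(c bitlist) (bool, error) {
	if len(b) != len(c) {
		return false, fmt.Errorf("mismatched lengths: %d != %d", len(b), len(c))
	}
	for i := range c {
		if c[i] && !b[i] {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	a := bitlist{true, true, false, true}
	fmt.Println(a.contains(bitlist{true, false, false, true})) // true <nil>
	fmt.Println(a.contains(bitlist{true}))                     // false, mismatched lengths
}
```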
Preston Van Loon
dbc9686d15 Add roughtime offset histogram metric, log on large offsets (#6546)
* Add roughtime_offset_nsec metric. Log a warning if offset is greater than 2 seconds
* gofmt
* use math.abs, add help text
* gofmt
2020-07-10 17:07:49 +00:00
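A hedged sketch of the pattern: the metric name roughtime_offset_nsec and the 2-second warning threshold come from the commit message; the registration and logging details are illustrative:

```go
package main

import (
	"log"
	"math"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var offsetHistogram = promauto.NewHistogram(prometheus.HistogramOpts{
	Name: "roughtime_offset_nsec",
	Help: "Clock offset in nanoseconds as reported by roughtime servers.",
})

func recordOffset(offset time.Duration) {
	offsetHistogram.Observe(float64(offset.Nanoseconds()))
	// Warn when the absolute offset exceeds 2 seconds, per the commit message.
	if math.Abs(float64(offset)) > float64(2*time.Second) {
		log.Printf("WARN: large clock offset from roughtime: %v", offset)
	}
}

func main() { recordOffset(-3 * time.Second) }
```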
Nishant Das
cc8b3e349d Save Boundary States (#6542) 2020-07-10 09:00:41 -07:00
Raul Jordan
2c9474ab7f Remote Keymanager: Proto Definitions and Basic Client Implementation (#6526)
* begin on the proto definitions
* define remote signer service protos
* basic implementation
* remote keymanager docs
* finalize remote client doc
* amend response
* fix proto defs
* test new and begin test sign
* test sign done
* Merge branch 'master' into remote-keymanager-v2
* remote oneof
* rename
* Merge branch 'remote-keymanager-v2' of github.com:prysmaticlabs/prysm into remote-keymanager-v2
* fix build
* Merge refs/heads/master into remote-keymanager-v2
* viz
* Merge branch 'remote-keymanager-v2' of github.com:prysmaticlabs/prysm into remote-keymanager-v2
* Merge refs/heads/master into remote-keymanager-v2
* Update validator/keymanager/v2/remote/remote_test.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update validator/accounts/v2/wallet.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
* fmt
2020-07-10 05:49:56 +00:00
terence tsao
ac79819077 Fix genesis validator count (#6540)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-07-09 19:54:24 -05:00
terence tsao
d54cefbe42 Part 1 of blockchain pkg clean up (#6537)
* Fix span names

* Clean up on block func

* Add updateFinalized helper

* Deprecate ReceiveBlockNoPubsub for ReceiveBlock

* Proposer to broadcast block

* Update migrate interface

* Replace ReceiveBlock for all

* Go fmt
2020-07-09 18:50:48 -05:00
rkapka
f7088e037c Finalized deposits cache (#6391)
* Renamed beforeBlk to untilBlk
* finalized deposits cache logic
* Merge branch 'master' into deposit-trie-cache

# Conflicts:
#	beacon-chain/blockchain/process_block.go
* simplified returning of finalized deposits
* added comment to FinalizedDeposits struct
* fixes after code review
* Merge branch 'master' into deposit-trie-cache
* fixed variable declaration
* renamed deposit cache test file
* fixed type casting
* compilation error fix
* deposit cache tests
* proposer test
* Merge branch 'master' into deposit-trie-cache
* added missing methods to interop beacon-chain service
* gofmt
* Merge branch 'master' into deposit-trie-cache
* gazelle
* cache deposit with eth1DepositIndex
* Merge branch 'master' into deposit-trie-cache
* fixed MerkleTrieIndex name
* Merge branch 'master' into deposit-trie-cache
* code review adjustments
* feature flag
* Merge branch 'master' into deposit-trie-cache

# Conflicts:
#	beacon-chain/cache/depositcache/pending_deposits.go
#	shared/featureconfig/config.go
#	shared/featureconfig/flags.go
* merge fix
* gazelle
* Merge branch 'master' into deposit-trie-cache
* return a copy of deposits trie from the cache
* move more logic under feature flag
* Merge branch 'master' into deposit-trie-cache
* added missing beacon chain flag
* Merge branch 'master' into deposit-trie-cache
* use helper function for trie copying
* Merge branch 'master' into deposit-trie-cache
2020-07-09 20:24:40 +00:00
Ivan Martinez
b052ab7087 Make E2E less flaky (#6536) 2020-07-09 14:08:21 -05:00
Ivan Martinez
322998f7f1 Move subnet topics to global vars (#6525)
* Deduplicate subnet topic definitions
* Move topics to global file
* Gaz
* Merge branch 'master' into move-topics
* Fix
* Merge refs/heads/master into move-topics
* Bazel
* Merge branch 'move-topics' of github.com:prysmaticlabs/prysm into move-topics
* Fix tests
* Fix
* Undo e2e changes
* Revert "Undo e2e changes"

This reverts commit 3037bb3590.
* Fix
* Merge refs/heads/master into move-topics
* Comments
* Merge refs/heads/master into move-topics
2020-07-09 17:38:15 +00:00
terence tsao
47cbfbf437 Remove participation link when list accounts (#6535)
* Remove url
* Merge refs/heads/master into rm-link
2020-07-09 16:50:19 +00:00
Raul Jordan
15d660d8eb Direct Keymanager: Proper Keystore.json Formatting (#6527)
* ensure keystore file has all fields
* fix tests
* fix build
* Merge branch 'master' into full-keystore-json
* define wallet.CanUnlockAccounts() to determine secret keys cache initialization
* Merge branch 'full-keystore-json' of github.com:prysmaticlabs/prysm into full-keystore-json
* add can unlock accounts items
* Merge refs/heads/master into full-keystore-json
2020-07-09 16:24:06 +00:00
Raul Jordan
e3de674c77 Direct Keymanager: Proper Keystore.json Formatting (#6527)
* ensure keystore file has all fields
* fix tests
* fix build
* Merge branch 'master' into full-keystore-json
* define wallet.CanUnlockAccounts() to determine secret keys cache initialization
* Merge branch 'full-keystore-json' of github.com:prysmaticlabs/prysm into full-keystore-json
* add can unlock accounts items
* Merge refs/heads/master into full-keystore-json
2020-07-09 16:23:53 +00:00
Preston Van Loon
fd80f73286 Improve make() capacity allocations (#6517)
* attestationutil.AttestingIndices: Minor improvement on indices array allocation
* progress
* more progress on makes
* progress
* more progress
* Merge branch 'master' of github.com:prysmaticlabs/prysm into memory1
* gaz
* fmt and imports
* Merge branch 'master' into memory2
* Min()
* remove spaces
* Merge branch 'master' of github.com:prysmaticlabs/prysm into memory2
* revert beacon-chain/operations/attestations/kv/block.go
* partially revert beacon-chain/operations/attestations/kv/aggregated.go
* Merge branch 'master' into memory2
2020-07-09 15:50:58 +00:00
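The underlying technique: when the result's maximum size is known, pass a capacity to make() so append() never reallocates. An illustrative example (the function name echoes the AttestingIndices bullet above; the body is not Prysm's code):

```go
package main

import "fmt"

// attestingIndices collects committee indices whose bit is set.
func attestingIndices(bits []bool, committee []uint64) []uint64 {
	// Before: indices := make([]uint64, 0) — append may reallocate repeatedly.
	// After: capacity bounded by the committee size, so one allocation suffices.
	indices := make([]uint64, 0, len(committee))
	for i, set := range bits {
		if set {
			indices = append(indices, committee[i])
		}
	}
	return indices
}

func main() {
	fmt.Println(attestingIndices([]bool{true, false, true}, []uint64{11, 22, 33}))
}
```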
Nishant Das
c804347fc4 add fix and test (#6533)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-07-09 09:46:35 -05:00
Jim McDonald
b00c235586 Add separate permission for directories. (#6532) 2020-07-09 22:05:43 +08:00
Preston Van Loon
c2deab8948 State: use a constant for field count (#6513)
* State: use a constant for field count
* one more field count for alloc
* one more field count for alloc
* Merge branch 'master' into field-const-21
2020-07-09 03:37:08 +00:00
terence tsao
8da0246702 Close buf writer in event of an error (#6523)
* Close buf writer in event of an error
* Merge refs/heads/master into close-bufwriter
2020-07-08 20:03:03 +00:00
Raul Jordan
132ad5beb8 Implement Accounts-v2 List: Direct Keymanager (#6510)
* begin list accounts impl
* colorize
* show deposit data
* separate responsibility
* comprehensive test for list complete
* gaz viz
* print account creation timestamp
* handle errs
* ask for wallet and pass dir
* Merge refs/heads/master into implement-v2-list
2020-07-08 19:21:54 +00:00
Ivan Martinez
ddf494f7b7 Use same permissions for all files in Prysm (#6522)
* Unify all perm prysm wide

* Gaz

* Imports
2020-07-08 12:30:22 -05:00
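A sketch of the unification, combined with the directory/file split from #6532 above; the constant names and modes are illustrative assumptions, not Prysm's actual values:

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

// Hypothetical shared permission constants: one place to change, used everywhere.
const (
	dirPermissions  os.FileMode = 0700 // owner-only directories
	filePermissions os.FileMode = 0600 // owner-only files
)

func main() {
	dir := filepath.Join(os.TempDir(), "prysm-example")
	if err := os.MkdirAll(dir, dirPermissions); err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(dir, "wallet.json"), []byte("{}"), filePermissions); err != nil {
		log.Fatal(err)
	}
}
```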
Victor Farazdagi
af4dfd4c36 Tiny optimization in state refs lock/unlock (#6520)
* tiny optimization in state refs count
2020-07-08 12:53:36 +00:00
Preston Van Loon
f2f2677070 attestationutil.AttestingIndices: Minor improvement on indices array allocation (#6508)
* attestationutil.AttestingIndices: Minor improvement on indices array allocation
* Merge branch 'master' of github.com:prysmaticlabs/prysm into HEAD
* Merge refs/heads/master into memory1
2020-07-08 09:57:50 +00:00
Preston Van Loon
a279f18461 Change from int64 to int for all flags so they load properly from config file. (#6498)
* Change from int64 to int for monitoring port so that the monitoring port is correctly read from config file.
* Merge branch 'master' into fix-monitoring-port
* replace all other usages of int64 flag. @nisdas feedback
* Merge branch 'master' of github.com:prysmaticlabs/prysm into fix-monitoring-port
* Merge branch 'master' into fix-monitoring-port
* Merge branch 'master' into fix-monitoring-port
* Merge branch 'master' of github.com:prysmaticlabs/prysm into fix-monitoring-port
* revert tools/sendDepositTx
* fix build
* Merge branch 'master' into fix-monitoring-port
2020-07-08 08:21:06 +00:00
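A hedged sketch of the flag change with urfave/cli v2, which Prysm uses: per the commit description, Int flags resolve correctly through config-file loading where Int64 flags did not. The flag name is taken from the first bullet; the app wiring is illustrative:

```go
package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			// Before: &cli.Int64Flag{Name: "monitoring-port", Value: 8080}
			// After: plain Int, which the config-file loader handles.
			&cli.IntFlag{Name: "monitoring-port", Value: 8080},
		},
		Action: func(c *cli.Context) error {
			log.Println("monitoring port:", c.Int("monitoring-port"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```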
Preston Van Loon
c1ccadae55 Delete unused helpers.AttestingIndices (#6509)
* Delete unused helpers.AttestingIndices
* Merge branch 'master' into rm-AttestingIndices
2020-07-08 07:30:29 +00:00
Preston Van Loon
a02553815f Allocate the appropriate memory for retrieveIndicesFromBitfield (#6507)
* Allocate the appropriate memory for retrieveIndicesFromBitfield
* Merge refs/heads/master into memory0
2020-07-08 05:47:26 +00:00
Raul Jordan
fd9003f822 Integrate Accounts v2 Keymanager Into Validator Client (#6489)
* add in configs
* ask for enable accounts v2
* begin integration of v2 keymanager
* refactor wallet opening
* include significant refactoring of how opening a wallet works, making it easy to include at runtime
* ensure build with keymanager v2
* further improving runtime integration
* default pass paths
* finally running v2 at runtime
* import spacing
* Merge branch 'master' into v2-accounts-feature
* Merge refs/heads/master into v2-accounts-feature
* confs
* rem e2e val flag
* Merge branch 'master' into v2-accounts-feature
* Merge refs/heads/master into v2-accounts-feature
2020-07-08 05:01:09 +00:00
terence tsao
fe13f1f856 Add init-sync-verbose feature flag (#6515)
* Add flag
* Use flag
* Merge refs/heads/master into init-sync-verbose
2020-07-08 04:19:58 +00:00
Nishant Das
074e3c9aa7 Use Custom Block Hashing Method (#6501)
* blockroot
* gaz
* Merge refs/heads/master into useCustomMethod
2020-07-08 03:53:16 +00:00
terence tsao
d42d685f78 Update verifyBlkPreState to verify only (#6506)
* Verify should not return the state
* Update tests
* Merge branch 'master' of github.com:prysmaticlabs/prysm into refactor
* Sync with master
* Merge refs/heads/master into refactor
* Fixed error msgs. Thanks @rauljordan
* Merge branch 'master' of github.com:prysmaticlabs/prysm into refactor
* Merge branch 'refactor' of github.com:prysmaticlabs/prysm into refactor
* Merge refs/heads/master into refactor
2020-07-08 02:54:55 +00:00
Preston Van Loon
1067800430 p2p.AddConnectionHandler: Remove unused arg for goodbyeFunc (#6511)
* p2p.AddConnectionHandler: Remove unused arg for goodbyeFunc
* accidental rename reqFunc
* update test
* Merge branch 'master' into rm-unused-param
2020-07-08 02:04:15 +00:00
terence tsao
d53ab16004 Move skip-regen-historical-states to deprecated (#6512)
* Remove unused test
* Move flag to be deprecated
* Remove usages
* Update BUILD.bazel
* Merge refs/heads/master into rm-skip-stategen-usages
2020-07-08 00:15:45 +00:00
Preston Van Loon
90b8b76ae8 Remove unused EpochAttestationsRoot method (#6514)
* Remove unused EpochAttestationsRoot method
2020-07-07 23:39:11 +00:00
dv8silencer
c77296ff82 Show overall validator results in addition to epoch specific results (#6492)
* Add aggregate stats struct to validator struct. Log aggregate voting summary from the beginning of node startup.
* Refactoring out to function UpdateLogAggregateStats which updates and logs aggregate statistics
* Changes to spelling.  More refactoring so that the rest of aggregate stats are moved to UpdateLogAggregateStats
* Bugfix - moved startBalance initialization out of if.
* Remove a print debug.  Spelling adjustment.
* Minor refactoring/grammar changes
* Renaming variables for consistency.
* Address linter feedback:  omit 2nd value from range over map as values were not being used.
* Address feedback from PR re comments.

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* Addressing feedback from terencechain in PR. (name changes)
* Addressing feedback from terencechain in PR.
* Create test for UpdateLogAggregateStats()
* Changed comment regarding "numberOfEpochs" log
* go fmt metrics_test.go
* bazel run //:gazelle -- fix
* fix typos
* Improved the code to detect the last iteration of the range responses loop
* Merge branch 'master' into tempIssue4987
* For both previous Epoch and total runtime statistics, do not count non-included attestations for "correctly" statistics.
* Update wording in since-launch logging

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* Store first logged epoch in startEpoch and appropriately adjust calculations.  Initialize startEpoch to ^uint64(0).
* Merge branch 'master' into tempIssue4987
* Update validator/client/metrics.go
* Update validator/client/metrics.go
* Update validator/client/validator.go
* Update validator/client/metrics_test.go
* Merge branch 'master' into tempIssue4987
2020-07-07 19:23:56 +00:00
Shay Zluf
87c18d4e0d Slasher readiness status response (#6504)
* Slasher readiness status response
* add comments
* Merge refs/heads/master into slasher_status
2020-07-07 17:11:07 +00:00
Ivan Martinez
d9fea7cb8c [QSP] Deduplicate functions for audit best practices (#6476)
* Deduplicate DB functions and getters

* Rename function

* Reduce to prestons suggestions

* Fix bug

* Fix

* Start changing state db funcs

* Fix tests

* Change return

* Add checks

* Fix test

* Fix

* Nishant comment

* Fix

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-07-07 11:29:25 -05:00
Nishant Das
b868dfa481 Save Archived State (#6503) 2020-07-07 08:04:55 -07:00
Preston Van Loon
46b82eb15d Sync: Verify all block and attestations signatures on finalized blocks by default (#6499)
* Make verifying all signatures the default operation
* Merge branch 'master' into flip-verify-all
2020-07-07 05:13:45 +00:00
Nishant Das
64fa474434 Batch Verify Blocks (#6469)
* add everything so far
* checkpoint progress
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fastBLS
* fix
* checkpoint
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fastBLS
* checkpoint again
* checkpoint again
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fastBLS
* commenting
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fastBLS
* bls cleanup
* revert this back
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fastBLS
* revert core changes
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fastBLS
* add flag
* add test
* add one more test
* clean up
* comment
* lint
* terence's review
* Merge refs/heads/master into fastBLS
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fastBLS
* Merge refs/heads/master into fastBLS
* remove additional method
* Merge branch 'fastBLS' of https://github.com/prysmaticlabs/geth-sharding into fastBLS
* fix
* Merge refs/heads/master into fastBLS
* copy
* Merge branch 'fastBLS' of https://github.com/prysmaticlabs/geth-sharding into fastBLS
2020-07-07 04:16:12 +00:00
Shay Zluf
8ddfde41e3 Historical detection fix (#6455)
* Historical detection before realtime detection

* comment fixes

* remove logs

* gaz

* handle underflow

* add regressiion test

* update test name

* gaz

* gofmt

* fix comment

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-07-07 05:57:40 +03:00
terence tsao
7f741e48e0 Cleanup process block/operation names (#6500) 2020-07-06 18:24:30 -07:00
Preston Van Loon
e96e1f0569 Add a placeholder constant for ignoring cancelled subscription errors from libp2p pubsub. (#6496)
* Add a placeholder constant for ignoring cancelled subscription errors from libp2p pubsub.

This change temporarily resolves https://github.com/prysmaticlabs/prysm/issues/6449.

This change should be revisited after https://github.com/libp2p/go-libp2p-pubsub/pull/356 merges and the prysm libp2p pubsub dependency is updated.
* Merge branch 'master' into issue-6449
2020-07-06 23:55:55 +00:00
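A sketch of the placeholder-constant workaround: match the expected cancellation error by message until the library exposes a typed error. The exact string below is an assumption based on go-libp2p-pubsub at the time and may have changed since:

```go
package main

import (
	"errors"
	"fmt"
)

// Placeholder constant mirroring the commit's approach; the string is an
// assumption about go-libp2p-pubsub's error text, not a verified value.
const subscriptionCancelled = "subscription cancelled by calling sub.Cancel()"

// shouldLogSubscriptionError ignores the expected error emitted when we
// cancel our own subscription, and surfaces everything else.
func shouldLogSubscriptionError(err error) bool {
	return err != nil && err.Error() != subscriptionCancelled
}

func main() {
	fmt.Println(shouldLogSubscriptionError(errors.New(subscriptionCancelled))) // false
	fmt.Println(shouldLogSubscriptionError(errors.New("stream reset")))        // true
}
```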
terence tsao
1a6c55c637 Split block_operations.go into smaller files (#6495) 2020-07-06 15:27:42 -07:00
Jim McDonald
7bb0ee78af Update beacon state locks (#6326)
* Rationalise state locking
* Merge branch 'state-locks' of github.com:mcdee/prysm into state-locks
* Merge branch 'master' into state-locks
* Add feature flag
* Merge
* Merge branch 'master' into state-locks
* Update locks
* Merge branch 'master' into state-locks
* Gazelle
* Tidy-ups
* Merge branch 'master' into state-locks
* Remove commentary to a docs.go file for better presentation on godocs
* Add newBeaconStateLocks as a --dev flag
* gofmt
* Merge branch 'master' into state-locks
2020-07-06 20:52:53 +00:00
Preston Van Loon
005a9dde48 CI: Remove GCP remote cache (#6497)
* Remove GCP remote cache as it has been sunset on GCP
2020-07-06 19:25:35 +00:00
terence tsao
def7b602e3 Hot states use no DB (#6488)
* Add cache to service struct
* Update hot getters/setters to use cache
* Update migration
* Update other services to adapt
* Fix initial sync get state
* Update getter related tests
* Update hot related tests
* Update migrate related tests
* New awesome tests for migration
* Clean up rest of the tests
* Merge refs/heads/master into hot-state-no-db
* Fix block chain head tests
* Fix block chain processor tests
* Fixed RPC tests
* Update cold getter and test
* Merge branch 'hot-state-no-db' of github.com:prysmaticlabs/prysm into hot-state-no-db
* Fix sync tests
* Short cut if state is already in DB
* Remove uneeded saves
* Update beacon-chain/state/stategen/hot_test.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Update beacon-chain/state/stategen/getter_test.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Update beacon-chain/state/stategen/getter_test.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Update beacon-chain/state/stategen/service.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Update beacon-chain/state/stategen/setter_test.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Preston's feedback
* Merge branch 'hot-state-no-db' of github.com:prysmaticlabs/prysm into hot-state-no-db
* Return a copy of cache states
* Remove hot state caches check in StateByRoot
* Merge branch 'hot-state-no-db' of github.com:prysmaticlabs/prysm into hot-state-no-db
* Merge refs/heads/master into hot-state-no-db
* Raul's feedback
* Merge branch 'hot-state-no-db' of github.com:prysmaticlabs/prysm into hot-state-no-db
2020-07-06 17:22:12 +00:00
Nishant Das
fa3da9a6a2 Add In New Core Methods for Batch Verify (#6482)
* add in new core methods

* clean up

* add verification method

* add tests

* add 1 more test

* lint

* Update beacon-chain/core/blocks/block_operations.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* Update beacon-chain/core/blocks/block_operations.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* Update beacon-chain/core/state/transition.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* terences review

* Update beacon-chain/core/state/transition.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* fmt

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-07-06 11:09:29 +08:00
Ivan Martinez
359b9bef49 Merge separated local slashing protection flags (#6487)
* Merge split-up local protection flags

* Fix deprecated flags

* Fix text
2020-07-03 21:01:54 -05:00
Raul Jordan
d447bb6458 Direct Keymanager: Implement Sign (#6477)
* implement keymanager proto

* utilize proto message and properly comment

* add in godoc

* doc

* add in doc

* sign implementation done

* add documentation for direct keymanager and add lock to signing

* sign method tests

* update godoc

* concrete sign

* fix build

* proper iface
2020-07-03 18:00:02 -05:00
Raul Jordan
8c258278d0 Direct Keymanager: Implement FetchValidatingPublicKeys() (#6475)
* begin on fetch direct
* fetch validating public keys impl
* test for fetch validating keys
* fetch validating public keys done
* helper function and benchmark
* rename package
* viz
* Update validator/accounts/v2/testing/BUILD.bazel

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* gaz
* add lock
* comment
* Merge refs/heads/master into fetch-direct-keys
2020-07-03 20:26:00 +00:00
Ivan Martinez
f0fcebccc4 Move slashing protection code to separate files for proposing and attesting (#6406)
* Get started on cleaning up slashing protection
* Merge branch 'master' of github.com:prysmaticlabs/prysm into cleanup-protection
* Move protection functions to own files
* Lint
* Merge branch 'master' of github.com:prysmaticlabs/prysm into cleanup-protection
* Begin adding test for proposal protection
* Merge branch 'master' of github.com:prysmaticlabs/prysm into cleanup-protection
* Fix build
* Fix tests
* Fix test
* Fix tests
* Fix proposal tests
* Merge branch 'master' into cleanup-protection
* Merge branch 'master' of github.com:prysmaticlabs/prysm into cleanup-protection
* Reorder protections
* Change lock
* Fix test
* Merge branch 'master' into cleanup-protection
* Change log
* Merge branch 'cleanup-protection' of github.com:prysmaticlabs/prysm into cleanup-protection
* Merge branch 'master' into cleanup-protection
2020-07-03 19:54:42 +00:00
Raul Jordan
9d979de4ed Direct Keymanager: Implement Account Creation (#6466)
* implementation using petname and keystore
* writing new account to disk along with password
* more logic for properly writing accounts
* print out mnemonic
* save deposit data rlp
* write deposit tx and ssz deposit data to account path
* wrap up account creation
* fix prompt
* generate deposit tx
* direct account creation test
* fix up formatting
* lint
* match formatting
* more sustainable approach towards unmarshaling config file
* resolve feedback
* fix broken import
* comprehensive tests for create account
* tests pass
* Merge branch 'master' into direct-keys
* tidy
* Merge branch 'direct-keys' of github.com:prysmaticlabs/prysm into direct-keys
* Merge refs/heads/master into direct-keys
* gaz
* Merge branch 'direct-keys' of github.com:prysmaticlabs/prysm into direct-keys
* nondeterministic names
* comment
* gaz
* better error wrap
* Merge refs/heads/master into direct-keys
* docker deps
* Merge branch 'direct-keys' of github.com:prysmaticlabs/prysm into direct-keys
* Merge refs/heads/master into direct-keys
* ivan feedback
* Merge refs/heads/master into direct-keys
* Update validator/accounts/v2/wallet.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* fixed tests and comments
* Merge refs/heads/master into direct-keys
2020-07-03 18:49:16 +00:00
terence tsao
8bb5532377 Epoch boundary cache (#6486)
* Add epoch boundary state cache
* Clean up epoch boundary state cache
* Starting tests
* More tests
* Merge branch 'master' of github.com:prysmaticlabs/prysm into epoch-boundary-cache
* Merge refs/heads/master into epoch-boundary-cache
2020-07-03 17:29:30 +00:00
Victor Farazdagi
688f0d7114 Refactors attestation pool pruning (#6485) 2020-07-03 09:43:56 -07:00
Ivan Martinez
c69b3f568e Move hard-coded constants to global consts (#6467)
* Move hard-coded constants to global consts
* Fixes
* Merge branch 'master' into remove-hard-constants
* Merge refs/heads/master into remove-hard-constants
* Comments
* Merge refs/heads/master into remove-hard-constants
* Merge branch 'master' of github.com:prysmaticlabs/prysm into remove-hard-constants
2020-07-03 15:25:32 +00:00
Victor Farazdagi
d1f1628478 Restores att-aggregation benchmarks (#6484)
* restores att-aggregation benchmarks
2020-07-03 14:37:54 +00:00
Nishant Das
7896e087e5 Add in Better Logging for Requests (#6483)
* add in better logging
* gaz
* last one
* mess about
* check strings
* fix
2020-07-03 10:08:45 +00:00
Nishant Das
ac9bc5d98b Reset Streams at the End of the Request (#6481)
* close all incoming streams

* use proper helper

* shift to debug

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-07-03 16:21:18 +08:00
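A sketch of the stream-hygiene rule in this commit: Reset() on failure to signal an aborted exchange to the peer, Close() on success. handleRequest and its signature are hypothetical, not Prysm's actual handler; Reset and Close are the real libp2p stream API:

```go
// Package p2putil is an illustrative holder for this sketch.
package p2putil

import (
	"log"

	"github.com/libp2p/go-libp2p-core/network"
)

// handleRequest runs a processing function over an inbound stream and
// disposes of the stream appropriately in both outcomes.
func handleRequest(stream network.Stream, process func(network.Stream) error) {
	if err := process(stream); err != nil {
		log.Printf("request failed: %v", err)
		if err := stream.Reset(); err != nil { // abort both directions
			log.Printf("could not reset stream: %v", err)
		}
		return
	}
	if err := stream.Close(); err != nil { // graceful close after success
		log.Printf("could not close stream: %v", err)
	}
}
```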
Nishant Das
353229d50a Add New Secure Batch Signature Verification Method (#6479)
* add more tests
* gaz
* Merge refs/heads/master into addBLSMethods
2020-07-03 07:38:13 +00:00
terence tsao
d62e989a67 Minor clean up on state gen package (#6480)
* Remove unused fields and function
* Rename splitInfo to finalizedInfo
* Merge branch 'master' into stategen-cleanup
* Fmt
* Merge branch 'stategen-cleanup' of github.com:prysmaticlabs/prysm into stategen-cleanup
* Merge refs/heads/master into stategen-cleanup
2020-07-03 06:33:58 +00:00
Preston Van Loon
6b9d9e5d3a Add a few test cases to ReceiveBlockNoPubsub with race detection (#6298)
* Add a few test cases to ReceiveBlockNoPubsub with race detection, fix data race in state
* Expose voluntary exits included API. More race condition fixes in state. More tests
* add tests to norace as well for coverage
* Merge branch 'master' of github.com:prysmaticlabs/prysm into add-test-coverage-on-receive-block
* PR feedback
* Fixed can't save head in DB by:
1.) Remove new state mgmt condition in SaveHeadBlockRoot
2.) Reuse state summary cache between db and service
* Merge branch 'master' of github.com:prysmaticlabs/prysm into add-test-coverage-on-receive-block
* passing tests
* gofmt and goimports
* Merge branch 'master' of github.com:prysmaticlabs/prysm into add-test-coverage-on-receive-block
* lint
* move import and goimport again
* Merge refs/heads/master into add-test-coverage-on-receive-block
* Add bool to selectively record events
* Merge refs/heads/master into add-test-coverage-on-receive-block
2020-07-03 05:46:53 +00:00
dv8silencer
0d2bce8200 Improve consistency in peer connect/disconnect logging (#6409)
* Improve consistency in peer connect/disconnect logging
* Merge branch 'master' into dv8s-iss6328
* Corrected capitalization.  Refactored out duplicated logic
* Merge branch 'master' into dv8s-iss6328
2020-07-03 04:44:57 +00:00
Raul Jordan
5b708b54d9 QSP-13 Only Allow Snappy P2P Encoding (#6415)
* enforce only snappy

* fix up tests

* replace with ssz snappy in tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-07-03 11:24:30 +08:00
Preston Van Loon
aa52e693ce Sync: do not remove attestations from pool on failed block processing (#6474)
* Add test and return error on failed block processing
* use atts in body
* Merge branch 'master' into p2p-potential-error-block
2020-07-02 20:24:30 +00:00
Raul Jordan
091525deb8 Revert Stream Duties (#6473)
* ensure gazelle
* fix up propose test
* build
* comment
2020-07-02 17:50:05 +00:00
Victor Farazdagi
f279cf09b8 Removes excessive concurrency from init-sync queue (#6471)
* Removes excessive concurrency from init-sync queue
* update buffer size
2020-07-02 13:06:12 +00:00
Victor Farazdagi
92c5c651f7 Refactors init-sync: queue streams blocks in batches (#6470)
* init-sync: queue streams blocks in batches
2020-07-02 11:19:07 +00:00
Raul Jordan
2d6f4ebf18 Part 1: Implement Accounts-V2 New, Wallet Creation (#6451)
* begin accounts-v2 new

* password validation

* validator accounts new with eip-2335 keystore

* select different wallet type based on enum

* clean up code significantly

* more robust code structure

* check if wallet exists

* define read and create wallet methods

* fmt

* go mod and comment

* comment

* redundant name

* satisfy gofmt

* add instructions with keymanager opts

* wrap up create and read wallet functionality

* prep for readiness

* doc improvements

* tests for create and read wallet

* update deps

* tidy

* visibility

* gaz

* fix up

* refactor for proper usage, with wallet and keymanager ifaces

* Update validator/flags/flags.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>

* import

* improve structure

* wrap up all comments

* simplify

* lint

* Update validator/accounts/v2/cmd.go

* viz check

* add interface methods as needed

* fix build

* lint

* nishant feedback

* simplify structure

* add tests for strong password check

* all feedback done

* ivan feedback

* ivan feedback

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-07-01 16:30:01 -05:00
terence tsao
8c8cc144f1 Archival RPC usages removal (#6453)
* Starting to delete archived rpc usages
* Fixed rest of the tests
* Gaz
* Merge branch 'master' into deprecate-archival-rpc
* Merge branch 'master' into deprecate-archival-rpc
* Merge refs/heads/master into deprecate-archival-rpc
* Merge refs/heads/master into deprecate-archival-rpc
* Merge refs/heads/master into deprecate-archival-rpc
2020-07-01 20:04:02 +00:00
Martin Linkhorst
aeb0b4bf75 Allow to start slasher RPC server with TLS (#6465)
* add warning if slasher server isn't using tls
* pass on tls key and cert to the slasher rpc server
* Merge branch 'master' into slasher-tls
2020-07-01 19:31:29 +00:00
Victor Farazdagi
270a64f059 QSP-BestPractice: Refactors singular/plural KV-store functions (#6462)
* updates SaveBlock
* updates DeleteAttestation
* updates DeleteState
* updates SaveAttestation
* updates SaveState
* updates SaveStateSummary
* Merge refs/heads/master into kv-refactor-singular-pl-methods
* Merge refs/heads/master into kv-refactor-singular-pl-methods
* Merge refs/heads/master into kv-refactor-singular-pl-methods
2020-07-01 18:33:05 +00:00
Preston Van Loon
165cb0d7b4 E2E: Add a check for cold state storage (#6457)
* Add a check for cold state storage
* amend comment
* gaz
* Merge branch 'master' into coldstate-e2e
* Merge refs/heads/master into coldstate-e2e
* Merge refs/heads/master into coldstate-e2e
* PR feedback
* Merge branch 'coldstate-e2e' of github.com:prysmaticlabs/prysm into coldstate-e2e
* Update endtoend/evaluators/data.go
* Merge refs/heads/master into coldstate-e2e
* Merge refs/heads/master into coldstate-e2e
* Merge refs/heads/master into coldstate-e2e
2020-07-01 17:35:05 +00:00
terence tsao
1f3d9873cd Archival service removal (#6448) 2020-07-01 10:04:06 -07:00
Victor Farazdagi
1b0cfb408e QSP-BestPractice: Refactors inline ttfb/resp time constants (#6463)
* refactors inline ttfb/resp time constants
2020-07-01 09:47:59 +00:00
terence tsao
4e180e452d Add spans to db kv's encode and decode (#6458) 2020-06-30 21:00:49 -07:00
terence tsao
8d8da76aa0 API: Fix finalization/justification slot representations (#6443)
* Fix finalized/justified slots representations
* Update tests
* Merge refs/heads/master into use-epochs
* Merge refs/heads/master into use-epochs
* Merge refs/heads/master into use-epochs
* Merge refs/heads/master into use-epochs
* Go fmt
* Merge branch 'use-epochs' of github.com:prysmaticlabs/prysm into use-epochs
* Merge refs/heads/master into use-epochs
* Merge refs/heads/master into use-epochs
* Fixed TestServer_StreamChainHead_OnHeadUpdated
* Merge branch 'use-epochs' of github.com:prysmaticlabs/prysm into use-epochs
* Merge refs/heads/master into use-epochs
2020-06-30 15:17:59 +00:00
Raul Jordan
a0c38c8d0a QSP-33 Check Max Response For All Topics (#6424)
* remove max len funcs

* fix up tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-30 09:50:28 -05:00
Nishant Das
21ead0a025 Record Peer Latency (#6456)
* add latency
* Merge branch 'master' into addLatency
* Merge refs/heads/master into addLatency
2020-06-30 03:53:14 +00:00
Preston Van Loon
98e26d1dcc Bulk verify block deposits with BLS (#6454)
* First pass at adding bulk signature verification for block deposits

* checkpoint

* some refactoring to allow for bulk deposit signature verification

* nil check

* fix goimports

* fmt

* rename ProcessPreGenesisDeposit -> ProcessPreGenesisDeposits
2020-06-29 20:52:19 -05:00
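For illustration, a minimal sketch of the bulk verification idea in #6454: gather every deposit's pubkey, signing root, and signature, then verify them in one batch call. The deposit struct and batchVerifier interface below are stand-ins, not Prysm's real herumi-backed BLS API.

```go
package deposits

// deposit is a stand-in for the real deposit proto message.
type deposit struct {
	pubkey    []byte
	message   []byte // signing root of the deposit data
	signature []byte
}

// batchVerifier abstracts a BLS backend that checks many
// (pubkey, message, signature) triples in a single pass.
type batchVerifier interface {
	AggregateVerify(pubkeys, messages, signatures [][]byte) bool
}

// verifyDepositsBulk gathers every deposit's inputs and verifies them
// together, avoiding per-deposit verification overhead.
func verifyDepositsBulk(v batchVerifier, deposits []deposit) bool {
	pubkeys := make([][]byte, 0, len(deposits))
	messages := make([][]byte, 0, len(deposits))
	sigs := make([][]byte, 0, len(deposits))
	for _, d := range deposits {
		pubkeys = append(pubkeys, d.pubkey)
		messages = append(messages, d.message)
		sigs = append(sigs, d.signature)
	}
	return v.AggregateVerify(pubkeys, messages, sigs)
}
```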
terence tsao
1f93a1f4c3 Remove unused prune old states (#6450) 2020-06-29 15:02:31 -07:00
Raul Jordan
6c7131cb54 Fix Concurrent Map Write in gRPC Client Connected Logger (#6446)
* fix up concurrent map write
* fix import
* Merge refs/heads/master into concurrent-write-fix
2020-06-29 15:52:10 +00:00
Victor Farazdagi
196f4c6222 QSP-BestPractice Refactors failover peer selection in init-sync (#6442)
* refactors failover peer selection in init-sync
* Merge refs/heads/master into init-sync-more-effective-failover
2020-06-29 15:23:04 +00:00
Ivan Martinez
e7b24323e6 Move MaxRPCPageSize flag from beacon node to shared/cmd (#6297)
* Allow other clients to set MaxRPCPageSize

* Fixes

* Remove default

* Add init

* Fix default

* Set default

* Fix

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-29 09:17:24 -05:00
Preston Van Loon
047880caf2 Change the aggregatedSlotCommitteeIDCacheLock to release the lock immediately after r/w (#6437)
* Change the aggregatedSlotCommitteeIDCacheLock to release the lock immediately after r/w
* Merge branch 'master' into better-validator-lock
* Merge branch 'master' into better-validator-lock
2020-06-29 06:14:21 +00:00
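A minimal sketch of the locking change in #6437: hold the mutex only across the cache read/write itself rather than deferring the unlock for the whole function. The cache and names here are illustrative, not Prysm's.

```go
package cache

import "sync"

var (
	mu   sync.Mutex
	seen = map[uint64]bool{}
)

// markSeen records an ID and reports whether it was already present.
func markSeen(id uint64) bool {
	mu.Lock()
	already := seen[id]
	seen[id] = true
	mu.Unlock() // release immediately after the r/w
	// ...any slower validation work below runs without holding the lock...
	return already
}
```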
Nishant Das
2bce9c884a Add More Altona Bootnodes (#6441)
* enable noise
* add secio
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into makeNoiseDefault
* add new bootnodes
2020-06-29 04:02:20 +00:00
Nishant Das
adf9bf7094 QSP 50, QSP 51: Make Noise Default (#6440)
* enable noise

* add secio
2020-06-29 11:19:25 +08:00
Preston Van Loon
bfb6e5e4a0 Use spec attestation time verification in gRPC requests (#6429)
* Re-use attestation time verification

* lint

* fix imports

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-28 14:37:17 -07:00
Ivan Martinez
d973c00c6c Slasher detection improvements (#6432)
* Slashing fixes

* Fixess

* Change fatals to error

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-28 02:34:21 -04:00
Raul Jordan
417bb45398 Begin Scaffold of Validator Accounts V2 (#6310)
* v2 refactor
* fix up imports
* builds
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge branch 'master' into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* imports keymanager v1
* Update validator/keymanager/v1/remote.go
* Update validator/client/polling/propose.go
* Update validator/keymanager/v1/wallet.go
* fmt
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* Merge refs/heads/master into accounts-revamp-p1
* fix conf
* fix build
* gaz
2020-06-27 15:48:36 +00:00
terence tsao
2a997828a3 Audit best practice feedback - validator DB test folder (#6426)
* Refactor validators db

* Lint

* Lint

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-26 21:37:43 -05:00
Preston Van Loon
a9bbbae19a Refactor params/config to separate files for clarity (#6427)
* split mainnet/onyx/altona/e2e configs into different files
* set onyx as the default network
* lint and fix comment
* more commentary
* gofmt
* Merge branch 'master' of github.com:prysmaticlabs/prysm into onyx-config
* gofmt
2020-06-27 00:20:24 +00:00
terence tsao
43b1bcb84f Use CopyValidator for ValidatorAtIndex (#6430)
* Use CopyValidator
* Merge refs/heads/master into validator-at-index
2020-06-26 23:21:11 +00:00
Preston Van Loon
c6179307b3 Add spans for head state access (#6418)
* Add a span for HeadState access
* more spans
* Merge refs/heads/master into add-span
* Merge refs/heads/master into add-span
* Merge refs/heads/master into add-span
* Merge refs/heads/master into add-span
* Merge refs/heads/master into add-span
* Merge refs/heads/master into add-span
* Merge refs/heads/master into add-span
* Merge refs/heads/master into add-span
2020-06-26 22:52:11 +00:00
Nishant Das
5d53aa3cac Update Config for Altona (#6414) 2020-06-26 15:11:40 -07:00
Preston Van Loon
67b8a88b65 Add span for updateRecentCanonicalBlocks (#6421)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-26 15:08:53 -05:00
Preston Van Loon
3ba3133657 Cleanup validateAggregateAttTime with a better error message, more tests, and clearer inline comments (#6423)
* Add span for updateRecentCanonicalBlocks
* Cleanup validateAggregateAttTime with a better error message, more tests, and clearer inline comments
* Merge branch 'master' into cleanup-validateAggregateAttTime
* revert beacon-chain/blockchain/head.go
* Merge branch 'cleanup-validateAggregateAttTime' of github.com:prysmaticlabs/prysm into cleanup-validateAggregateAttTime
* Merge refs/heads/master into cleanup-validateAggregateAttTime
* Merge refs/heads/master into cleanup-validateAggregateAttTime
2020-06-26 19:36:44 +00:00
terence tsao
01b97ffd5e Fix ListValidators retrieval for new state (#6417)
* Retrieve the correct state for new state
* Test
* Gaz
* Merge branch 'master' into fix-list-validators
* Merge refs/heads/master into fix-list-validators
* Merge refs/heads/master into fix-list-validators
* Merge refs/heads/master into fix-list-validators
* Merge refs/heads/master into fix-list-validators
2020-06-26 19:01:46 +00:00
Preston Van Loon
886afb054c Remove duplicated randao mix copy (#6420)
* remove duplicated randao mix copy
* Merge refs/heads/master into rm-dup-copy
* Merge refs/heads/master into rm-dup-copy
* Merge refs/heads/master into rm-dup-copy
2020-06-26 18:27:48 +00:00
Nishant Das
98757e759c Update Geth (#6422)
* update geth
* Merge refs/heads/master into removeSlowdown
2020-06-26 17:59:02 +00:00
Raul Jordan
001547f215 QSP-55 Remove Support for Other Pubsub Protocols (#6419)
* remove nongossip protocols
* Merge refs/heads/master into remove-other-protocols
2020-06-26 17:33:18 +00:00
Nishant Das
f1c0b2e0da Fix Invalid SSZ in Block Roots RPC Request (#6405)
* fix block roots error
* remove comment
* fix
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fixBlockRootsError
* change back
* add stream deadlines
* Merge branch 'master' into fixBlockRootsError
* add todos
* Merge branch 'fixBlockRootsError' of https://github.com/prysmaticlabs/geth-sharding into fixBlockRootsError
* Update beacon-chain/sync/rpc_beacon_blocks_by_root.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* gofmt
* fix test
* fix failing test
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fixBlockRootsError
* fix all
* Merge branch 'master' into fixBlockRootsError
* Merge branch 'master' into fixBlockRootsError
* Merge refs/heads/master into fixBlockRootsError
2020-06-26 17:05:04 +00:00
Raul Jordan
ab89053597 QSP-45 Add Check for No Connected Peers BestFinalized (#6402)
* no error returned
* no target root
* fix build
* Merge refs/heads/master into best-finalized
2020-06-26 16:35:12 +00:00
Raul Jordan
252f758baa QSP-9 Prevent Casting to Int if Possible (#6349)
* no cast to int

* fix up significant casting issues

* more casting

* even more casting fixes

* more casts

* fix subnets

* back to ints

* final touches

* broken test fix

* add in blocks test fix

* unskip

* revert bytes fixes

* casting fixes

* Update beacon-chain/db/kv/state.go

* Update beacon-chain/db/kv/blocks.go

* fmt

* slash:

* fix val tests

* fix up conf

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-06-26 11:07:00 -05:00
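For illustration, a sketch of the casting guidance in #6349: keep slot/index arithmetic and comparisons in uint64 instead of casting to int, which truncates on 32-bit platforms and can go negative. Helper names are hypothetical, not Prysm's exact code.

```go
package safecast

// validatorExists compares in the unsigned domain rather than using
// int(idx) < size, which silently truncates where int is 32 bits.
func validatorExists(idx uint64, registrySize uint64) bool {
	return idx < registrySize
}

// sumBalances iterates with a uint64 counter instead of converting
// the bound to int.
func sumBalances(balances []uint64, count uint64) uint64 {
	var total uint64
	for i := uint64(0); i < count && i < uint64(len(balances)); i++ {
		total += balances[i]
	}
	return total
}
```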
Victor Farazdagi
78465e2549 QSP-6: Enforces crypto-secure PRNGs (#6401)
* adds cryptorand analyzer

* better naming

* rely on suffix

* sync/pending_* use crypto/rand

* define shared/rand

* updates fetcher

* fixes rand issue in sync package

* gofmt

* shared/rand: more docs + add exclusion nogo_config.json

* updates validator/assignments

* updates comment

* fixes remaining cases

* re-arranges comments

* fixes tests

* renames in shared/rand API

* adds simple no-panic test

* gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-26 09:58:47 -05:00
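A minimal sketch of the crypto-secure PRNG approach behind #6401, assuming the shared/rand idea is a math/rand Source64 backed by crypto/rand so callers keep the convenient math/rand API while every draw comes from the OS CSPRNG; the exact Prysm implementation may differ.

```go
package rand

import (
	crand "crypto/rand"
	"encoding/binary"
	mrand "math/rand"
)

// cryptoSource implements math/rand's Source64 on top of crypto/rand.
type cryptoSource struct{}

func (cryptoSource) Seed(int64) {} // no-op: the OS entropy pool needs no seed

func (s cryptoSource) Int63() int64 { return int64(s.Uint64() >> 1) }

func (cryptoSource) Uint64() uint64 {
	var b [8]byte
	if _, err := crand.Read(b[:]); err != nil {
		panic(err) // failure to read entropy is unrecoverable here
	}
	return binary.LittleEndian.Uint64(b[:])
}

// NewGenerator returns a *math/rand.Rand whose output is crypto-secure.
func NewGenerator() *mrand.Rand {
	return mrand.New(cryptoSource{})
}
```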
Nishant Das
73f3a653bf Clean Up Dead Peers (#6413)
* clean up dead peers
* Merge refs/heads/master into clearDeadPeers
* comment
* Merge branch 'clearDeadPeers' of https://github.com/prysmaticlabs/geth-sharding into clearDeadPeers
2020-06-26 12:24:55 +00:00
dv8silencer
812a663671 Add flag to allow reading bootstrap nodes from a file (#6351)
* Create a bootstrap-node-file flag which reads nodes from a YAML file and make the flag mutually exclusive to the bootstrap-node flag
* Merge branch 'master' into dv8s-iss6316
* Refactor so that boot node reading is in its own function.  Added test
* Added period

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* Merge branch 'master' into dv8s-iss6316
* Edit Bazel to include go-yaml dep
* Help Bazel build successfully
* Merge branch 'master' into dv8s-iss6316
* bazel run //:gazelle -- fix
* Handle error from YAML parser to address build checks
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Updated flags.go to refine the Usage for the BootStrapNodeFile flag

Co-authored-by: Nishant Das <nish1993@hotmail.com>
* Merge branch 'master' into dv8s-iss6316
* Utilize stdlib TempFile to avoid polluting
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* merge master into this branch
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Merge branch 'master' into dv8s-iss6316
* Changed bootstrap-node to a StringSlice flag to allow multiple nodes to be passed. Each value can be a .enr file which will be YAML-parsed to extract nodes
* Refactored to create separate readbootNodes function and added a test for it.
* More cleaning up
* Changed wording in the cli help for --bootstrap-node
* Merge branch 'master' into dv8s-iss6316
* Since we now accept YAML files, removed the .enr extension check; instead we check whether the string has an ENR record prefix to decide if the value is a file.
* Merge with dv8silencer/tempissue6316
* Correct spacing

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Changed how we check for YAML file vs CLI-direct ENR
* Minor: changed to 1 string from concatenating 2
* Merge branch 'master' into dv8s-iss6316
2020-06-26 11:23:38 +00:00
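For illustration, a sketch of the flag handling described in #6351: each --bootstrap-node value is either a literal ENR (recognized by its "enr:" prefix) or a path to a YAML file holding a list of ENRs. It uses gopkg.in/yaml.v2; the readBootNodes name follows the commit message, but the body is a sketch.

```go
package node

import (
	"io/ioutil"
	"strings"

	"gopkg.in/yaml.v2"
)

// readBootNodes expands flag values into a flat list of ENR strings.
func readBootNodes(values []string) ([]string, error) {
	var nodes []string
	for _, v := range values {
		if strings.HasPrefix(v, "enr:") {
			nodes = append(nodes, v) // passed directly on the CLI
			continue
		}
		data, err := ioutil.ReadFile(v) // otherwise treat it as a YAML file
		if err != nil {
			return nil, err
		}
		var fromFile []string
		if err := yaml.Unmarshal(data, &fromFile); err != nil {
			return nil, err
		}
		nodes = append(nodes, fromFile...)
	}
	return nodes, nil
}
```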
terence tsao
461bc4694f QST40 QST42 QST44 - Move deletes to test scope only (#6410)
* Rm delete from interface
* Rm passthroughs
* Unexport
* Lint
* Merge branch 'master' into unexport-delete
* Merge refs/heads/master into unexport-delete
* Merge refs/heads/master into unexport-delete
* Merge refs/heads/master into unexport-delete
2020-06-26 04:29:13 +00:00
Preston Van Loon
806a465117 Remove unused p2p encoding method (#6411)
* Remove unused p2p encoding method
2020-06-26 04:02:09 +00:00
Raul Jordan
1b430e0c17 QSP-62 Corrupted Clock In Stream Deadlines (#6404)
* deadline warn
* add proper godoc information
* clarify
* Merge refs/heads/master into corrupted-clock
* fix broken build
* Merge refs/heads/master into corrupted-clock
* Merge refs/heads/master into corrupted-clock
2020-06-25 22:36:18 +00:00
Raul Jordan
6b27452947 QSP-39 AreEth1DataEqual Should Return True If Both Nil (#6372)
* resolve small core issue

* add tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-25 18:01:58 -04:00
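A minimal sketch of the nil handling fixed in #6372: two nil Eth1Data values compare equal, a nil/non-nil pair compares unequal, and only then are the fields compared. The struct below is a stand-in for the real proto type.

```go
package blocks

import "bytes"

type eth1Data struct {
	DepositRoot  []byte
	DepositCount uint64
	BlockHash    []byte
}

func areEth1DataEqual(a, b *eth1Data) bool {
	if a == nil && b == nil {
		return true // both absent: equal, not a false mismatch
	}
	if a == nil || b == nil {
		return false
	}
	return a.DepositCount == b.DepositCount &&
		bytes.Equal(a.DepositRoot, b.DepositRoot) &&
		bytes.Equal(a.BlockHash, b.BlockHash)
}
```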
terence tsao
ac69dbc2f8 Deprecates --disable-init-sync-batch-save-blocks flag (#6403)
* Deprecate flags in feature config

* Deprecate usages

* Update test

* Go fmt

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-25 15:55:43 -05:00
Raul Jordan
45e1c0bb8c QSP-54 Handle Default Datadir Empty String (#6394)
* default datadir
* Merge refs/heads/master into default-data-dir
* Merge refs/heads/master into default-data-dir
* Merge refs/heads/master into default-data-dir
* Merge refs/heads/master into default-data-dir
* Update node.go
* Merge refs/heads/master into default-data-dir
* Merge refs/heads/master into default-data-dir
* Merge refs/heads/master into default-data-dir
* Merge refs/heads/master into default-data-dir
* Merge refs/heads/master into default-data-dir
* Merge refs/heads/master into default-data-dir
2020-06-25 18:02:07 +00:00
Shay Zluf
9103ec98cb QSP-14 consistent file permissions (#6378)
* QSP-14 consistent file permissions
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* default io file permissions
* Merge branch 'consistent_permissions' of github.com:prysmaticlabs/prysm into consistent_permissions
* fix comments
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* gaz
* Merge branch 'consistent_permissions' of github.com:prysmaticlabs/prysm into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* Rename IoConfig.go to io_config.go
* Update shared/params/BUILD.bazel
* Merge refs/heads/master into consistent_permissions
* raul feedback
* Merge branch 'consistent_permissions' of github.com:prysmaticlabs/prysm into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* gaz
* Merge branch 'consistent_permissions' of github.com:prysmaticlabs/prysm into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
* Merge refs/heads/master into consistent_permissions
2020-06-25 16:12:59 +00:00
Victor Farazdagi
00f24f5729 Deprecates --disable-init-sync-wrr flag (#6400)
* deprecates wrr-related flag

* gofmt + gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-25 10:26:30 -05:00
Raul Jordan
90bfc9a395 QSP-49 Recalibrate Roughtime (#6393)
* roughtime recalibrate

* recalibrate roughtime

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-25 09:02:54 -05:00
Nishant Das
5fdf6310f7 add back handler (#6398)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-25 13:07:31 +03:00
Raul Jordan
662c5cd302 QSP-47 Remove Meaningless Relay Addr Check (#6388)
* add relay factory fix
* Merge refs/heads/master into qsp-47
* Merge refs/heads/master into qsp-47
* Merge refs/heads/master into qsp-47
* Merge refs/heads/master into qsp-47
* Merge refs/heads/master into qsp-47
* Merge refs/heads/master into qsp-47
* Merge refs/heads/master into qsp-47
* Merge refs/heads/master into qsp-47
* Merge refs/heads/master into qsp-47
2020-06-25 09:16:30 +00:00
Raul Jordan
26e27bf052 QSP-41 Clarify Block Filters Passthrough In DB (#6397)
* clarify filters
* Merge refs/heads/master into blocks-filter-criteria
* Merge refs/heads/master into blocks-filter-criteria
2020-06-25 08:31:07 +00:00
terence tsao
9a156bd73a Best practice follow up - remove outdated todos (#6392)
* Addressed 30.

* Remove outdated todos

* Go fmt

* Go fmt

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-24 23:14:11 -05:00
Preston Van Loon
412ea2dca1 Update CONTRIBUTING.md to include link to docs portal (#6396)
* Update CONTRIBUTING.md
* Merge branch 'master' into prestonvanloon-patch-1
2020-06-25 02:59:49 +00:00
Raul Jordan
0b178b446a QSP-64 Conditional for Rebuilding BlockRoots Trie in State Setters (#6390)
* rebuild trie for block roots
* Merge refs/heads/master into rebuild-trie-fix
* Merge refs/heads/master into rebuild-trie-fix
2020-06-25 02:03:18 +00:00
Preston Van Loon
fb8be4d555 Refactor BLS (#6395)
* refactor BLS to expose an interface rather than a single implementation
* Split up tests, gofmt, goimports, docs
* godoc for signature.Copy()
* more godocs
* revert gomod / deps changes
* rm newline
* revert proto changes
* rename bls12 to herumi for clarity
2020-06-25 00:47:51 +00:00
terence tsao
d0e2e0e979 Best practice feedback - part 1 (#6385)
* log error for metric
* reuse epoch e
* Better node comments
* Remove not needed breaks
* Use j over i
* Merge branch 'master' of github.com:prysmaticlabs/prysm into best-practice-pt1
* Descendent/Descendant
* Merge refs/heads/master into best-practice-pt1
* Update beacon-chain/forkchoice/protoarray/nodes.go
* Merge refs/heads/master into best-practice-pt1
* Merge refs/heads/master into best-practice-pt1
2020-06-24 21:09:47 +00:00
Raul Jordan
ac77a5c054 QSP-25 Only Enable Relay if Flag Provided (#6386)
* only add relay if flag is on
* Merge branch 'master' into qsp-25
* Merge refs/heads/master into qsp-25
* Merge refs/heads/master into qsp-25
2020-06-24 19:30:40 +00:00
terence tsao
a9c1d25a35 Best practice feedback - part 2 (#6389)
* Feedback 10. Rename to highest epoch
* Feedback 11. Rename to pidepoch
* Feedback 39. Swap length
* Typo
* Merge refs/heads/master into best-practice-pt2
2020-06-24 18:06:19 +00:00
Ivan Martinez
e27ed8174b Remove validator filename prefix from validator/client (#6387)
* Remove validator prefix from files
* Gaz
* Merge branch 'master' of github.com:prysmaticlabs/prysm into val-rename-files
2020-06-24 17:43:24 +00:00
Raul Jordan
bfce4c7f63 QSP-65 Clarify Why UnshuffleList is Used (#6381)
* clarify why unshuffle list
* Update beacon-chain/core/helpers/committee.go
* Merge branch 'master' into qsp-65
2020-06-24 16:57:09 +00:00
Nishant Das
045a983032 Add Altona Config (#6380) 2020-06-24 07:03:16 -07:00
Shay Zluf
b8e1bf0445 Fix proposer slashing context timeout test (#6379)
* Fix proposer slashing context timeout test
* goimports
* revert slashing change
* Merge branch 'master' into fix_proposer_slashing_test
2020-06-24 12:10:40 +00:00
Shay Zluf
edb08bd209 QSP-20 implement removeDisconnectedPeerStatus (#6370)
* QSP-20 implement removeDisconnectedPeerStatus
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* remove function
* Merge branch 'remove_disconnected_peer_status' of github.com:prysmaticlabs/prysm into remove_disconnected_peer_status
* Merge refs/heads/master into remove_disconnected_peer_status
* Merge branch 'remove_disconnected_peer_status' of github.com:prysmaticlabs/prysm into remove_disconnected_peer_status
2020-06-24 10:40:33 +00:00
Nishant Das
e45a06cbf1 Revert "QSP-42 Remove Double Unsubscribe in Initial Sync" (#6376)
* Revert "QSP-42 Remove Double Unsubscribe in Initial Sync (#6368)"

This reverts commit 637354f037.

* comment

* add go fmt
2020-06-24 17:01:48 +08:00
Ivan Martinez
e9a597b43c [QSP-6] Change usages of math/rand to crypto/rand (#6362)
* Change math/rand to crypto/rand
* Change length
* Merge branch 'master' of github.com:prysmaticlabs/prysm into change-to-crypto-rand
* Change assignments to true random
* Merge branch 'master' into change-to-crypto-rand
* Change randomness
* Merge branch 'change-to-crypto-rand' of github.com:prysmaticlabs/prysm into change-to-crypto-rand
* Remove comments
* Test
* Fix test
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
* Merge refs/heads/master into change-to-crypto-rand
2020-06-24 05:47:51 +00:00
terence tsao
a4cfd147ff Address spec adherence feedbacks (#6365)
* Addressed 1
* Address 2.
* Addressed 3. and 4.
* Addressed 6.
* Addressed 7
* Addressed 8
* Addressed 9.
* Addressed 10.
* Addressed 11.
* Addressed 12.
* Addressed 13.
* Delete old test
* Merge refs/heads/master into spec-feedbacks
* Change comment "pure" to "stateless"
* Fmt
* Typo
* Merge refs/heads/master into spec-feedbacks
* Merge refs/heads/master into spec-feedbacks
* Merge refs/heads/master into spec-feedbacks
* Merge refs/heads/master into spec-feedbacks
* Merge refs/heads/master into spec-feedbacks
* Merge refs/heads/master into spec-feedbacks
* Merge refs/heads/master into spec-feedbacks
* Merge refs/heads/master into spec-feedbacks
* Merge refs/heads/master into spec-feedbacks
* Merge refs/heads/master into spec-feedbacks
2020-06-24 03:14:44 +00:00
Preston Van Loon
7662045cc6 Disable --enable-stream-duties for e2e tests (#6374)
* Disable --enable-stream-duties for e2e tests
* Merge branch 'master' into fix-e2e
* Merge branch 'master' into fix-e2e
* Merge branch 'master' into fix-e2e
2020-06-24 02:42:38 +00:00
Ivan Martinez
9fadb4f334 Address best practices in validator client (#6375)
* Best practices validator

* Add ctx

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-24 09:53:01 +08:00
Nishant Das
e9057185a5 Fix Peering Issues in Discovery (#6346)
* add current changes
* comment
* Merge branch 'master' into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* clean up
* add comment
* Merge refs/heads/master into fixPeeringIssues
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
* Merge refs/heads/master into fixPeeringIssues
2020-06-24 01:09:09 +00:00
Nishant Das
24d09f083f Release State Reference Copy (#6341)
* release flag
* add comment
* improve
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge branch 'master' into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into releaseStateRef
* remove old flags
* gaz
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
* Merge refs/heads/master into releaseStateRef
2020-06-24 00:24:59 +00:00
Raul Jordan
637354f037 QSP-42 Remove Double Unsubscribe in Initial Sync (#6368)
* rem double unsub
* Update beacon-chain/sync/initial-sync/service.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Merge refs/heads/master into qsp-unsub
* Merge refs/heads/master into qsp-unsub
* Merge refs/heads/master into qsp-unsub
* Merge refs/heads/master into qsp-unsub
* Merge refs/heads/master into qsp-unsub
2020-06-23 23:50:52 +00:00
terence tsao
c3adde3b32 QSP10, QSP11 - Add proper locking (#6350)
* Proper locking based on audit feedbacks
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Preston's feedback
* Merge branch 'qsp10-11' of github.com:prysmaticlabs/prysm into qsp10-11
* Unlock in invalid index branch
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge branch 'master' into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
* Merge refs/heads/master into qsp10-11
2020-06-23 23:08:08 +00:00
Raul Jordan
2bdb825c04 QSP-61 Clarify Why Process Pending Attestations May Exit Early (#6371)
* clarify with comment
* Merge refs/heads/master into qsp-61
* Merge refs/heads/master into qsp-61
* Merge refs/heads/master into qsp-61
2020-06-23 22:34:31 +00:00
Raul Jordan
7ba26aa433 Add Mutex to Hot State Cache (#6366)
* mutex to hot state cache
* Merge branch 'master' into add-hotstate-lock
* Merge refs/heads/master into add-hotstate-lock
* Merge refs/heads/master into add-hotstate-lock
2020-06-23 22:06:47 +00:00
terence tsao
374a84d577 Remove new state mgmt within db package (#6373)
* Remove new state mgmt within db package
* Merge refs/heads/master into rm-new-state-db
2020-06-23 21:36:21 +00:00
Raul Jordan
e1310adb4c QSP-Docs All Documentation Improvements from Quantstamp Audit Report (#6367)
* docs improvements so far
* all misc docs items
* finalized
* Merge branch 'master' into qsp-docs
* fix broken test
* Merge branch 'master' into qsp-docs
2020-06-23 21:11:20 +00:00
Preston Van Loon
f9038674d1 Refactor setupDB to return the state summary cache used by the database (#6369) 2020-06-23 13:40:55 -07:00
Victor Farazdagi
33b2636bf1 Enables new aggregation implementation (#6344)
* adds feature flag
* aggregations/attestation package
* better tests
* bazel visibility issues
* removes redundant code
* Merge branch 'master' into aggregation-attestations-package
* updates previous implementation
* Merge branch 'master' into aggregation-enable-new-aggregation
* resolve merge conflict
* resolve merge conflict (more)
* Merge branch 'master' into aggregation-enable-new-aggregation
* force max_cover aggregation on --dev
* Merge branch 'aggregation-enable-new-aggregation' of github.com:prysmaticlabs/prysm into aggregation-enable-new-aggregation
* Merge branch 'master' into aggregation-enable-new-aggregation
* Merge refs/heads/master into aggregation-enable-new-aggregation
* Merge refs/heads/master into aggregation-enable-new-aggregation
* Merge refs/heads/master into aggregation-enable-new-aggregation
* Merge refs/heads/master into aggregation-enable-new-aggregation
* Merge refs/heads/master into aggregation-enable-new-aggregation
2020-06-23 19:13:55 +00:00
Shay Zluf
96a110a193 External slashing protection not requiring signature (#6252)
* validation without signature
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* validation and update funcs
* Merge branch 'slashing_protection_no_sign' of github.com:prysmaticlabs/prysm into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge branch 'master' of github.com:prysmaticlabs/prysm into slashing_protection_no_sign
* Merge branch 'slashing_protection_no_sign' of github.com:prysmaticlabs/prysm into slashing_protection_no_sign
* change order; error handling
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* ivan feedback
* Merge branch 'slashing_protection_no_sign' of github.com:prysmaticlabs/prysm into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* add tests to blocks utils
* terence feedback
* reduce visibility
* Merge branch 'master' of github.com:prysmaticlabs/prysm into slashing_protection_no_sign

# Conflicts:
#	validator/client/polling/validator_attest.go
#	validator/client/polling/validator_propose.go
* fix metrics
* fix error
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* copy behaviour to streaming
* Merge branch 'slashing_protection_no_sign' of github.com:prysmaticlabs/prysm into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
* Merge refs/heads/master into slashing_protection_no_sign
2020-06-23 16:46:48 +00:00
terence tsao
c417b00675 Fix cold state replay to start at correct slot (#6361)
* Fix off by 1
* Regression tests
* Merge branch 'master' of github.com:prysmaticlabs/prysm into fix-offset
* Go fmt
* Merge refs/heads/master into fix-offset
* Merge refs/heads/master into fix-offset
2020-06-23 16:04:42 +00:00
Ivan Martinez
81786159e9 Move permanent globally needed flags to shared/cmd and makes them accessible (#6312)
* Move permanent flags to shared/cmd and make them accessible
* Merge branch 'master' of github.com:prysmaticlabs/prysm into move-flags-to-cmd
* Add test
* Add comment
* Fix gaz
* Replace with new config
* Gaz
* replace
* gaz
* Merge branch 'master' into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Prestons comments
* Nishant comments
* Merge branch 'move-flags-to-cmd' of github.com:prysmaticlabs/prysm into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* Merge branch 'master' into move-flags-to-cmd
* Merge refs/heads/master into move-flags-to-cmd
* fmt
2020-06-23 15:41:20 +00:00
Anton
fe6cf9f33b SubscribeCommitteeSubnets fields length check (#6359)
* SubscribeCommitteeSubnets fields length check

A case where `len(req.Slots) != len(req.CommitteeIds)` but `len(req.CommitteeIds) == len(req.IsAggregator)`
should also throw an error. Without this fix, only two of the three pairwise length mismatches are caught.

Greetings from PwC Switzerland
* Added diff length SubscribeCommitteeSubnets test
* Merge branch 'master' into patch-1
2020-06-23 14:50:46 +00:00
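For illustration, a sketch of the length check described in #6359: all three request slices must have equal lengths, so any pairwise mismatch is rejected. The request struct is a stand-in for the real RPC message.

```go
package rpc

import "errors"

type subnetRequest struct {
	Slots        []uint64
	CommitteeIds []uint64
	IsAggregator []bool
}

// validateLengths rejects every pairwise mismatch, not just two of them.
func validateLengths(req *subnetRequest) error {
	if len(req.Slots) != len(req.CommitteeIds) ||
		len(req.CommitteeIds) != len(req.IsAggregator) {
		return errors.New("slots, committee IDs and aggregator flags must have equal lengths")
	}
	return nil
}
```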
Victor Farazdagi
078c157f56 Resolves issues from the best practices in beacon-chain/db (#6358)
* fixes 51
* fixes 52
* fixes 53
* fixes 54
* fixes 56
* Merge branch 'master' into db-best-practices
* fixes 62
* fixes 64
* fixes 65
* fixes 68
* fixes 69
2020-06-23 13:51:00 +00:00
Victor Farazdagi
22b3123ebe Resolves issues from the best practices in sync package (#6357)
* removes redundant locks
* fixes 37
* fixes 38
* reverts lock removal
* Merge branch 'master' into sync-best-practices-updates
* Merge refs/heads/master into sync-best-practices-updates
2020-06-23 12:00:29 +00:00
Victor Farazdagi
64b0f775d3 Fixes receiver name in db/kv (#6356)
* Fixes receiver name in db/kv
* Merge refs/heads/master into fix-receiver-name
* Merge refs/heads/master into fix-receiver-name
2020-06-23 10:15:01 +00:00
Victor Farazdagi
89ef3cfdc6 Fixes error message wording (#6355)
* fixes error message wording
* Merge refs/heads/master into fix-error-msg-wording
2020-06-23 09:44:39 +00:00
Nishant Das
36c82b26e4 Minor Improvements (#6353)
* minor improvements
* Update beacon-chain/sync/deadlines.go
* go fmt
2020-06-23 09:00:09 +00:00
Victor Farazdagi
5c90038007 Adds aggregation/attestations package (#6343)
* adds feature flag

* aggregations/attestation package

* better tests

* bazel visibility issues

* removes redundant code

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-22 23:00:38 -05:00
Raul Jordan
71a6c73e93 Remove All Instances of Kademlia from Prysm (#6323)
* fully remove kad
* Merge branch 'master' into rem-kad
* Update beacon-chain/p2p/service.go
* no workspace kad
* Merge branch 'rem-kad' of github.com:prysmaticlabs/prysm into rem-kad
* mod tidy
* Merge refs/heads/master into rem-kad
* Merge refs/heads/master into rem-kad
* Merge refs/heads/master into rem-kad
* add back bootnode query tool
* Merge branch 'rem-kad' of github.com:prysmaticlabs/prysm into rem-kad
* fix up multiaddr tests
* fix build
* fix up go build
* fix go build
* update dep
* Merge refs/heads/master into rem-kad
* Merge refs/heads/master into rem-kad
* Merge refs/heads/master into rem-kad
* Merge refs/heads/master into rem-kad
2020-06-23 02:53:11 +00:00
terence tsao
5c8da7a1c4 QSP58, QSP59 - Fix DB spans (#6352)
* Fix spans
* Merge refs/heads/master into qsp58-59
2020-06-23 02:22:22 +00:00
terence tsao
368af7e53f Clean up old block processing paths (#6311)
* Remove old block processing paths

* Go fmt

* Go fmt again

* Fix tests

* Using the right receiver

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-06-22 17:19:33 -07:00
Victor Farazdagi
41c8ff4c1b Renames receiver in sync service (#6348)
* updates coinciding vars
* renames receiver in sync service
* Merge branch 'master' into sync-rename-receiver
2020-06-22 20:37:48 +00:00
terence tsao
c837dfb2f2 ActivationEligibilityEpoch nil case (#6347) 2020-06-22 14:41:21 -05:00
Nishant Das
3b2dc8346d Release State Field Trie (#6340)
* remove flag and references
* fix flag
* Merge refs/heads/master into releaseStateFieldTrie
* Merge refs/heads/master into releaseStateFieldTrie
2020-06-22 15:25:55 +00:00
Ivan Martinez
97315c8837 Add flat spans to slasher runtime (#6287)
* Add flat spans to runtime

* Fix tests

* Remove normal span cache from runtime

* Uncomment

* beyond lookback to db

* Remove heavy span

* better cache handling for disable lookback

* Fix lint

* Fix lint again

* Add cache back for now

* Update slasher/detection/attestations/spanner_test.go

* Fix imports

Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-06-22 09:55:52 -05:00
Victor Farazdagi
6f8349cdb4 Reclaims leakybucket resources in sync service (#6339)
* Reclaims leakybucket resources
* move calls to defer
* do not reuse queue, after stopping
2020-06-22 11:23:23 +00:00
Preston Van Loon
d4545233cd Fix race condition issues in beacon state (#6322)
* Fix race condition issues in beacon state
* Merge branch 'master' of github.com:prysmaticlabs/prysm into race-condition-fixes
* Checkout beacon-chain/state/stateutil/BUILD.bazel from master
* @nisdas PR feedback. defer unlock
2020-06-20 00:27:23 +00:00
rkapka
4f4846794a renamed variable (#6319) 2020-06-19 13:46:18 -07:00
Nishant Das
7e1a61f8eb Add Debug Endpoints For Peers (#6304)
* add debug endpoints

* gaz

* add more stuff

* add tests

* clean up

* Update beacon-chain/rpc/debug/server.go

* fmt

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-19 13:25:47 -05:00
Patrick
624209eba7 Fix Pagination Bug When Total Size % Page Size == 0 (#6266)
* Add failing test for pagination

* Fix failing test for pagination

* Fix other tests for pagination

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-06-19 09:39:12 -05:00
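A sketch of the boundary case fixed in #6266: when the total size is an exact multiple of the page size, the last page must still be served rather than treated as out of range. The helper below is hypothetical, not the exact Prysm code.

```go
package pagination

import "fmt"

// pageBounds returns the [start, end) indices for a 0-based page.
func pageBounds(page, pageSize, total int) (start, end int, err error) {
	start = page * pageSize
	if start >= total {
		return 0, 0, fmt.Errorf("page %d out of range", page)
	}
	end = start + pageSize
	if end > total {
		end = total // final partial page; an exact multiple needs no clamp
	}
	return start, end, nil
}
```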
terence tsao
394f3ab53a Revert "Fetch deposit data only up to canonical Eth1 height when proposing (#6309)" (#6313)
This reverts commit fb26b0b65c.
2020-06-18 16:56:38 -07:00
rkapka
fb26b0b65c Fetch deposit data only up to canonical Eth1 height when proposing (#6309)
* get data up to canonical block height

* gofmt

* simplified calculation of canonical height
2020-06-18 17:31:26 -05:00
Victor Farazdagi
90542c21dc Implements max k-coverage greedy algorithm (#6305)
* implements max k-coverage greedy algorithm
* updates go-bitfield dependency
* gazelle
* update base aggregate
* re-arrange to shared
* clean references to atts in max cover
* max_cover: updates visibility
* fixes tests
* attestations related methods
* Merge branch 'master' into attaggregation-max-cover
* better op order
* fix comments
* removes debug stringer methods
* Merge refs/heads/master into attaggregation-max-cover
* log random seed
* Merge branch 'attaggregation-max-cover' of github.com:prysmaticlabs/prysm into attaggregation-max-cover
* Merge refs/heads/master into attaggregation-max-cover
* adds more comments
* Merge branch 'attaggregation-max-cover' of github.com:prysmaticlabs/prysm into attaggregation-max-cover
* fixes typo
2020-06-18 20:56:23 +00:00
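For illustration, a minimal sketch of the greedy max-k-coverage algorithm from #6305: in each of k rounds, pick the candidate set that covers the most still-uncovered elements. Plain bool slices stand in for the go-bitfield bitlists used in the real implementation.

```go
package aggregation

// maxCover returns the indices of up to k candidate sets chosen greedily
// by marginal coverage gain. All candidates are assumed equal length.
func maxCover(candidates [][]bool, k int) []int {
	if len(candidates) == 0 {
		return nil
	}
	covered := make([]bool, len(candidates[0]))
	used := make(map[int]bool)
	var picked []int
	for round := 0; round < k; round++ {
		best, bestGain := -1, 0
		for i, set := range candidates {
			if used[i] {
				continue
			}
			gain := 0
			for j, bit := range set {
				if bit && !covered[j] {
					gain++
				}
			}
			if gain > bestGain {
				best, bestGain = i, gain
			}
		}
		if best == -1 {
			break // nothing adds new coverage
		}
		used[best] = true
		picked = append(picked, best)
		for j, bit := range candidates[best] {
			if bit {
				covered[j] = true
			}
		}
	}
	return picked
}
```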
Preston Van Loon
6926592065 Add a debug log statement when enabling dev flags (#6306)
* Add a debug log statement when enabling dev flags
* Merge refs/heads/master into add-dbg-log-for-dev-flags
2020-06-18 20:25:34 +00:00
Preston Van Loon
addc3897c4 Use STABLE_ prefix for git tag to force a relink of version (#6307)
* Use STABLE_ prefix for git tag to force a relink of version
* Add comment about STABLE_
2020-06-18 19:59:47 +00:00
Raul Jordan
7067c84c69 Stream Duties Client Implementation (#5867)
* include validator client stream

* Update validator/client/validator_attest.go

* gazelle

* rem extraneous logs

* fixing tests

* resolve most tests

* gaz

* add lock

* ivan feedback

* pass tests for update protect

* gaz

* duties gaz

* no need for canonical head slot

* fix ctx leak

* fmt

* add in feature flag

* add streaming subpackage

* add polling/streaming separation

* able to build

* fix duplicate package names

* fix polling

* imports

* confirm it works

* fixed up comment

* go lint comments

* gaz

* build

* Update validator/client/streaming/service_test.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* tidy

* fmt

* add stream duties to e2e

* add stream duties to e2e flags

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-06-18 13:30:05 -05:00
Jim McDonald
10af753f59 Unlock mutex on return (#6303)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-06-18 11:04:08 -05:00
Nishant Das
5201403750 update herumi (#6301)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-18 09:54:55 -05:00
Jim McDonald
86e9b0ae51 Ping handler timeout (#6302)
* Fix goroutine using cancelled context.

* Only close stream when finished

* Simplify function structure
2020-06-18 19:59:18 +08:00
Nishant Das
340ddf20cb Update Node RPC Endpoints (#6289)
* checkpoint progress
* some more stuff
* fix tests
* fix tests
* lint
* fix test and initialization
* gaz
* Merge branch 'master' into updateP2PRPC
* Merge refs/heads/master into updateP2PRPC
* Merge refs/heads/master into updateP2PRPC
* Merge refs/heads/master into updateP2PRPC
* Merge refs/heads/master into updateP2PRPC
* Merge refs/heads/master into updateP2PRPC
* Merge refs/heads/master into updateP2PRPC
* Merge refs/heads/master into updateP2PRPC
* Merge refs/heads/master into updateP2PRPC
* Merge refs/heads/master into updateP2PRPC
2020-06-18 03:53:46 +00:00
Preston Van Loon
da5ab499da Add @prysmaticlabs/core-team as codeowner for deps.bzl (#6296)
* Add @prysmaticlabs/core-team as codeowner for deps.bzl
* Update CODEOWNERS
2020-06-18 03:38:02 +00:00
Paul Nelson
c1c3b75867 Move generic HashTreeRoot funcs to //shared (#6167)
* Functions to be moved to //shared/htrutils are captured in a separate file.

* Move files into the new //shared/htrutils package

hash_function.go renamed to hashers.go

* Refactor to reference moved methods in new package

- Added import for htrutils to state and stateutil packages
- Update references to imported funcs (append "htrutils.")
- Updated funcs in htrutils to be exported where necessary

* Add tests
2020-06-18 10:15:13 +08:00
terence tsao
640bba8a6c Avoid active validator count cache for genesis (#6292)
* Add helper to prevent zero hashes

* Test

* Don't use cache if it's genesis

* Regression test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-17 19:48:10 -05:00
terence tsao
6781bd643b Fix getting att pre state in sync (#6295) 2020-06-17 16:44:43 -07:00
terence tsao
802bdf5561 Fix GetValidatorPerformance before/after balances (#6293)
* Add helper to prevent zero hashes

* Test

* Fix precompute validators to have before/after balances

* Update tests to use correct values

* Even better test setups

* Gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-17 13:46:42 -05:00
terence tsao
523fc621d9 First part of block chain service cleanup (#6286)
* Add helper to prevent zero hashes
* Test
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Remove ReceiveBlockNoPubsubForkchoice and rename ReceiveBlockInitialSync
* Apply feature flag to process_block.go
* Update mock and tests
* Update beacon-chain/blockchain/process_block.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Merge refs/heads/master into bc-pkg-cleanup
* Merge refs/heads/master into bc-pkg-cleanup
* Fix mock ReceiveBlockInitialSync. Thanks Victor!
* Merge branch 'bc-pkg-cleanup' of github.com:prysmaticlabs/prysm into bc-pkg-cleanup
* Merge refs/heads/master into bc-pkg-cleanup
* Merge refs/heads/master into bc-pkg-cleanup
2020-06-17 17:11:21 +00:00
Shay Zluf
472fbd3949 Less restrictive block stream for slashing detection (#6269)
* verify sig in stream

* state cache used

* fix import

* fix import

* fix tests

* add log for proposer slashing

* fix e2e

* use chain head to verify sig

* revert fix

* remove extra line

* add line

* revert extra log

* fix tests

* terence feedback

* fix

* fix test

* remove extra setup

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-17 12:38:07 -04:00
Ivan Martinez
cd439adfc7 Implement flat spans cache into DB (#6248)
* Implement flat spans cache DB

* Fix interface

* Fix db func

* Gaz

* Update slasher/detection/attestations/types/epoch_store_test.go

* Fix tests

* Fix

* Fix for comments

* Fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-06-17 10:26:08 -05:00
Raul Jordan
0bfa1ecd03 Clarify Insecure gRPC Connection Logs (#6276)
* clarify insecure conn
* fix up broken test
* Merge refs/heads/master into amend-grpc-instructions
* Merge refs/heads/master into amend-grpc-instructions
* Merge refs/heads/master into amend-grpc-instructions
* Merge refs/heads/master into amend-grpc-instructions
* Merge refs/heads/master into amend-grpc-instructions
* Merge refs/heads/master into amend-grpc-instructions
* Merge refs/heads/master into amend-grpc-instructions
2020-06-17 14:32:13 +00:00
terence tsao
af3122a9e8 Fork choice avoids redundant call to get_ancestor (#6282)
* Add helper to prevent zero hashes

* Test

* Add cacheJustifiedStateBalances

* Cache justifed balances

* Use cached justified balances

* Remove unncessary justification checks during initial syncing

* Test

* Proper locking

* Use getter

* Add CheckPointIsEqual helper

* Test CheckPointIsEqual helper

* Update finalizedImpliesNewJustified

* Better tests for finalizedImpliesNewJustified

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-17 08:50:10 -05:00
Preston Van Loon
4740f7ed57 pre-allocate memory for AggregateVerify (#6285)
* pre-allocate memory for AggregateVerify
2020-06-16 23:21:31 +00:00
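A small sketch of the pre-allocation in #6285: size the input slices once with the known capacity so appends never reallocate while collecting signatures and messages. Names are illustrative.

```go
package bls

// collect builds the message and signature slices for a batch verify,
// pre-allocating with the known count to avoid repeated slice growth.
func collect(n int, next func(i int) (msg, sig []byte)) (msgs, sigs [][]byte) {
	msgs = make([][]byte, 0, n) // capacity known up front
	sigs = make([][]byte, 0, n)
	for i := 0; i < n; i++ {
		m, s := next(i)
		msgs = append(msgs, m)
		sigs = append(sigs, s)
	}
	return msgs, sigs
}
```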
Preston Van Loon
213be4cfbe Verify many block attestations at once (#6277) 2020-06-16 14:19:10 -07:00
Preston Van Loon
5fa3bbb991 Add --initial-sync-verify-all-signat as a dev flag. Issue #5176. (#6283) 2020-06-16 11:06:32 -07:00
terence tsao
fe2f8a1c04 Use justified balances for fork choice (#6272) 2020-06-16 08:10:34 -07:00
Nishant Das
ed883c9fff More Fixes For Powchain (#6278)
* fix issues

* add check

* Update beacon-chain/powchain/service.go

* Update beacon-chain/powchain/service.go

* Update beacon-chain/powchain/service.go

* Update beacon-chain/powchain/service.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-06-15 21:07:20 -05:00
terence tsao
4c143f4ed4 Fix underflow in resyncIfBehind (#6274) 2020-06-15 16:46:36 -07:00
Preston Van Loon
8097eea607 Fix underflow in SlotsSinceGenesis (#6275)
* Fix an underflow in genesis time calculation
* goimports
2020-06-15 21:56:11 +00:00
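For illustration, a sketch of the underflow guard in #6275: if genesis lies in the future, an unsigned subtraction of timestamps would wrap to a huge slot count, so return 0 before genesis. This is illustrative, not the exact fix; secondsPerSlot is assumed non-zero.

```go
package helpers

import "time"

// slotsSinceGenesis returns 0 before genesis instead of letting the
// unsigned duration math underflow.
func slotsSinceGenesis(genesis time.Time, secondsPerSlot uint64) uint64 {
	now := time.Now()
	if now.Before(genesis) {
		return 0 // pre-genesis: no slots have elapsed
	}
	return uint64(now.Sub(genesis).Seconds()) / secondsPerSlot
}
```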
Ivan Martinez
4b4a7459d9 Fix Windows DB path error and make errors more concise (#6270)
* Make DB errors more verbose, fix Windows path error
* Merge branch 'master' into windows-err
* Merge refs/heads/master into windows-err
* Merge refs/heads/master into windows-err
2020-06-15 20:00:35 +00:00
Ivan Martinez
bf548d1670 Fix validator creation path (#6271)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-15 12:27:45 -07:00
terence tsao
e59ee2f203 Fix validator summary reporting (#6267)
* Add helper to prevent zero hashes
* Test
* Merge branch 'master' of github.com:prysmaticlabs/prysm
* Update validator summary
* Update existing tests to work with new indices
* Remove extra log
* Gazelle
* Merge branch 'master' into fix-validatory-summary
* Merge refs/heads/master into fix-validatory-summary
* Merge refs/heads/master into fix-validatory-summary
2020-06-15 17:25:58 +00:00
Raul Jordan
83d1aa8fa2 Default BeaconRPCProvider for Validator Should Match Beacon Node's Host (#6268)
* default bind to localhost for validator
* Merge branch 'master' into validator-localhost
2020-06-15 16:52:15 +00:00
Victor Farazdagi
9d42148b93 Go mod tidy (#6264)
* updates go.sum
* go mod tidy checker
* reverts go.sum
* updates apk command
* fixes path
* show git status
* more debug info
* more debug info
* minor test
* get rid of git
* multiple checks
* test reset
* change order of checkers
* mod verify
* broken sum
* Merge refs/heads/master into go-mod-tidy
* better info
* Merge branch 'go-mod-tidy' of github.com:prysmaticlabs/prysm into go-mod-tidy
* better info
* reset
* run go mod tidy to fix
2020-06-15 16:15:54 +00:00
Nishant Das
87084ec9f1 fix issues (#6265) 2020-06-15 10:43:45 -05:00
Victor Farazdagi
ebd45ba336 updates go.sum (#6262) 2020-06-15 06:37:07 -07:00
Nishant Das
9d8420b20c Fix Discovery Panic (#6261)
* fix dv5 panic
* add back
2020-06-15 04:22:29 +00:00
terence tsao
a9070ad725 ActiveValidatorCount to use cache (#6254)
* Add helper to prevent zero hashes

* Test

* Add ActiveIndicesCount getter for committee cache

* ActiveIndicesCount test

* ActiveValidatorCount to use cache

* Update cache on miss

* Update tests
2020-06-14 15:24:49 -07:00
terence tsao
f4e9e2f49c Minor clean up to 6249 (#6253)
* Add helper to prevent zero hashes

* Test

* Comments

* Comments
2020-06-14 10:43:25 -07:00
Nishant Das
1dfeb645b6 Filter Peers Properly With Connection Gater (#6251)
* add new filtering
* add new tests
* gaz
* shay's review
* Merge branch 'master' into fixFiltering
2020-06-14 14:47:58 +00:00
Nishant Das
933ab6808b mark peer as bad (#6250) 2020-06-14 07:10:18 -07:00
Nishant Das
f9d80e4a28 Fix Genesis Justified Checkpoints (#6249)
* handle zero hash
2020-06-14 09:02:56 +00:00
Ivan Martinez
f6ecf66d1a Fix pubkey used in validator metrics (#6246)
* Fix validator metrics pubkey
* Merge branch 'master' into val-metrics
2020-06-14 08:28:26 +00:00
Nishant Das
caf61bd824 Use Connection Gater to Manage Peer Connections (#6243)
* use gater
* Merge branch 'master' into connGater
* Update beacon-chain/p2p/testing/p2p.go
* fmt
* gaz
2020-06-14 07:35:05 +00:00
terence tsao
87ba5a5993 Fix ancestor to return most recent root in the case of skip slot (#6242)
* Revert "remove excess copies (#6142)"

This reverts commit c956905cf0.
* Update ancestor function to handle skip slot
* Test
* Revert "Revert "remove excess copies (#6142)""

This reverts commit f1222b5e77.
* Merge refs/heads/master into fix-ancestor
2020-06-14 06:58:36 +00:00
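The skip-slot case is easy to picture with a toy sketch (the types and chain map are illustrative, not Prysm's fork-choice store): when no block exists exactly at the requested slot, the ancestor walk should stop at the most recent block at or below it.

package main

import "fmt"

// A toy block index: root -> (parent root, slot).
type blk struct {
	parent string
	slot   uint64
}

// ancestor walks back from root until it finds the block whose slot is at or
// below the requested slot. With skip slots there may be no block exactly at
// that slot, so the most recent earlier block is the correct answer.
func ancestor(chain map[string]blk, root string, slot uint64) string {
	for {
		b, ok := chain[root]
		if !ok || b.slot <= slot {
			return root
		}
		root = b.parent
	}
}

func main() {
	chain := map[string]blk{
		"c": {parent: "a", slot: 5}, // slots 3 and 4 were skipped
		"a": {parent: "g", slot: 2},
		"g": {parent: "", slot: 0},
	}
	fmt.Println(ancestor(chain, "c", 4)) // "a", the most recent root at/below slot 4
}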
Nishant Das
0067e52b1e Revert "Remove Excess State Copies" (#6240)
* Revert "remove excess copies (#6142)"

This reverts commit c956905cf0.
2020-06-14 06:34:33 +00:00
Shay Zluf
4c66edf2c2 Add cache for flat spans (#6199)
* flat span cache

* Plug cache into kv

* Add comments

* Fix

* Add doc.go

* Gaz

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-13 22:51:54 -04:00
terence tsao
c29cccf78e Don't regen parent state for genesis state (#6237) 2020-06-13 11:04:04 -07:00
Raul Jordan
298955c92b Add a Log for Newly Connected gRPC Clients in the Beacon Node (#6233)
* interceptor disable logging via feature flag
* Merge branch 'master' into validator-connection-logs
2020-06-12 20:41:05 +00:00
Raul Jordan
de45a54991 Do Not Ignore Max Gateway Size gRPC (#6232)
* do not ignore max message size in gateway
* fix build
2020-06-12 20:02:20 +00:00
Raul Jordan
87ca73d605 Countdown Until Genesis (#6231)
* added in logic
* latest countdown
* fixed up formatting and add tests
* ready for review
* Merge branch 'master' into countdown-genesis
* no log if after genesis
* Merge branch 'countdown-genesis' of github.com:prysmaticlabs/prysm into countdown-genesis
* countdown
* smarter logging
* added buffer period for countdown
* Merge refs/heads/master into countdown-genesis
2020-06-12 18:47:18 +00:00
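A minimal sketch of a genesis countdown loop like the one this commit adds; the interval, message, and helper are illustrative (Prysm's version adds smarter logging and a buffer period, per the bullets above):

package main

import (
	"fmt"
	"time"
)

// countdownToGenesis logs the remaining time once per interval and stops
// once genesis has passed.
func countdownToGenesis(genesis time.Time, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for now := range ticker.C {
		if !now.Before(genesis) {
			fmt.Println("Genesis reached")
			return
		}
		fmt.Printf("%s until chain genesis\n", genesis.Sub(now).Truncate(time.Second))
	}
}

func main() {
	countdownToGenesis(time.Now().Add(3*time.Second), time.Second)
}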
terence tsao
bc76c95d62 Skip proposer duty calculation for slot 0 (#6228)
* Skip proposer assignment for slot 0

* Test

* Clear cache before test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-12 11:20:50 -07:00
Preston Van Loon
c461c72777 Add dockerhub image endpoints (#6230)
* Add dockerhub image endpoints
2020-06-12 17:06:12 +00:00
Nishant Das
c725a9c8f2 Sort Duplicate Blocks In Response (#6222)
* sort out new blocks
* add unit test
* Merge refs/heads/master into sortDuplicates
* victor's review
* Merge branch 'sortDuplicates' of https://github.com/prysmaticlabs/geth-sharding into sortDuplicates
* Merge refs/heads/master into sortDuplicates
2020-06-12 13:50:07 +00:00
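A minimal sketch of the sort-then-dedupe shape this fix implies; it orders by slot and drops exact slot duplicates, while Prysm's actual handler works on signed blocks and their roots:

package main

import (
	"fmt"
	"sort"
)

type block struct{ slot uint64 }

// sortAndDedup orders a batch of blocks by slot and drops duplicates in
// place, so a sync response is processed in canonical order exactly once.
func sortAndDedup(blocks []block) []block {
	sort.Slice(blocks, func(i, j int) bool { return blocks[i].slot < blocks[j].slot })
	out := blocks[:0]
	for i, b := range blocks {
		if i == 0 || b.slot != out[len(out)-1].slot {
			out = append(out, b)
		}
	}
	return out
}

func main() {
	fmt.Println(sortAndDedup([]block{{5}, {2}, {5}, {1}})) // [{1} {2} {5}]
}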
Nishant Das
412da8e7cc Follow Up to #6225 (#6226)
* don't wait
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into dontWait
* revert some changes
* bring back
2020-06-12 13:23:29 +00:00
Nishant Das
82f4d6a14a Do Not Wait Till Genesis (#6225)
* don't wait
2020-06-12 11:44:04 +00:00
Preston Van Loon
89762492c8 Reduce JSON-RPC queries (#6221)
* Delete unused client property
* Only update the logs every 5 minutes
* Merge branch 'master' into reduce-json-rpc-calls
* Merge refs/heads/master into reduce-json-rpc-calls
* Merge refs/heads/master into reduce-json-rpc-calls
* PR feedback from @nisdas
* gofmt
* Merge branch 'reduce-json-rpc-calls' of github.com:prysmaticlabs/prysm into reduce-json-rpc-calls
2020-06-12 04:05:40 +00:00
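A sketch of the "only update the logs every 5 minutes" idea; the struct and names are illustrative, not Prysm's powchain logger:

package main

import (
	"fmt"
	"time"
)

// throttledLogger only emits a line if enough time has passed since the
// last one, so the query that backs the log message is run less often.
type throttledLogger struct {
	last time.Time
	gap  time.Duration
}

func (l *throttledLogger) log(msg string) {
	if time.Since(l.last) < l.gap {
		return // too soon; skip the log and the work behind it
	}
	l.last = time.Now()
	fmt.Println(msg)
}

func main() {
	l := &throttledLogger{gap: 5 * time.Minute}
	l.log("eth1 sync progress...") // printed
	l.log("eth1 sync progress...") // suppressed
}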
Nishant Das
1a6edbbaa2 Fix Proposer Bug in Prysm (#6213)
* advance slots instead of setting slots
* Merge branch 'master' into fixProposerBug
* comment
* Merge branch 'fixProposerBug' of https://github.com/prysmaticlabs/geth-sharding into fixProposerBug
* Refactor `packAttestations` to utilize same advanced state
* Merge branch 'master' into fixProposerBug
* Merge branch 'master' into fixProposerBug
* Merge branch 'master' into fixProposerBug
* Merge refs/heads/master into fixProposerBug
* Merge refs/heads/master into fixProposerBug
* Update beacon-chain/rpc/validator/proposer.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Merge refs/heads/master into fixProposerBug
2020-06-11 23:05:10 +00:00
SjonHortensius
409b167899 Provide some feedback to user when creating an account (#6212)
* Provide some feedback to user when creating an account

on my 1-year-old server the first step took 2 minutes and the second took 4
minutes. Provide at least _some_ feedback that we are doing something
intensive.
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge branch 'master' into master
* Merge branch 'master' into master
2020-06-11 22:36:08 +00:00
Raul Jordan
abec538fda Prevent Whistleblower Index Bug (#6219)
* resolve proposer panic

* proposer is whistleblower

* import

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-11 15:08:26 -05:00
Shay Zluf
898cd8b42b No sig slasher rpc (#6174)
* detect slashable attestations and blocks without db update
* lint fixes
* fix mock
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* preston feedback
* Merge branch 'no_sig_slasher_rpc' of github.com:prysmaticlabs/prysm into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* nishant and ivan feedback
* Merge branch 'no_sig_slasher_rpc' of github.com:prysmaticlabs/prysm into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Merge refs/heads/master into no_sig_slasher_rpc
* Apply suggestions from code review
2020-06-11 18:50:12 +00:00
Ivan Martinez
ec6309a928 Cleanup flat spans implementation (#6150)
* Flat spanner improvements

* Fix tests, add more to ES type

* Move types to detection/types

* Fix

* Fix comments

* Fixes

* Use SlotTickerWithOffset for StreamIndexedAttestations (#5999)

* Change streamindexed to slot ticker with offset
* Make 2/3rds
* Add test
* Merge branch 'master' of github.com:prysmaticlabs/prysm into ticker-offset
* Add check for offset
* Merge branch 'master' of github.com:prysmaticlabs/prysm into ticker-offset
* Fix test
* Merge refs/heads/master into ticker-offset

* Fix long running E2E after v0.12 changes (#6008)

* Fix deposits in long run e2e

* Fix participation

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Small fixes for slasher  (#6021)

* Small fixes for slasher

* Remove useless log

* Fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Add voluntary exit processing to E2E (#6016)

* Add voluntary exit to E2E

* Fix long running e2e

* Fix for comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Change slashing pool to use spec code  (#6030)

* Change slashing pool to use more spec code

* Fix test

* Undo unneeded changes

* Make sure to catch regression

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Fix long-running E2E (#6047)

* Fixes for long running E2E
* Merge branch 'master' of github.com:prysmaticlabs/prysm into fix-e2e
* Move metrics check up
* Merge refs/heads/master into fix-e2e

* E2E Improvements (#6091)

* Some fixes
* Merge branch 'master' into e2e-fixes
* Add another small delay
* Merge branch 'e2e-fixes' of github.com:prysmaticlabs/prysm into e2e-fixes
* Remove genesis test, make normal e2e run longer
* Gaz
* more fixes
* Merge branch 'master' into e2e-fixes
* Merge refs/heads/master into e2e-fixes
* Fix comment
* Merge refs/heads/master into e2e-fixes

* Begin changing tests

* Start work on tests

* Redo tests for EpochStore

* Add test for highest index

* Fixes

* Gaz

* Fix

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-11 14:17:44 -04:00
Raul Jordan
2fb485598b Resolve Nil Validator Panic (#6218)
* resolve proposer panic
* Merge refs/heads/master into panic-proposer-slashed
2020-06-11 17:38:03 +00:00
terence tsao
cf3260b948 Revert "Send blocks that fail validation to slasher" (#6217)
* Revert "Send blocks that fail p2p validation to slasher (#6164)"

This reverts commit f40a7575de.
* Merge branch 'master' into revert-6164-slasherP2P
2020-06-11 17:11:28 +00:00
terence tsao
83242466f4 Fix proposer calculation in sync (#6214) 2020-06-11 09:31:01 -07:00
Nishant Das
7f3dc5a2dd Adds a User Agent for Libp2p (#6211)
* Add the latest git tag as part of the version string.

Example:

version=Prysm/v1.0.0-alpha.10/28e61fa40c7d16774b3b1c18d8382c64537bfa84. Built at: 2020-06-11 04:29:18+00:00

* add user agent

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-11 10:34:34 -05:00
Ivan Martinez
260847e92f Fix long running E2E deposits testing (#6208)
* Fix long running e2e deposit processing
* Merge branch 'master' of github.com:prysmaticlabs/prysm into fix-long-e2e
* Remove logs
* Uncomment
* typing
* Reorder
* Add const for node count
* Merge refs/heads/master into fix-long-e2e
* Add trace logging
* Merge branch 'fix-long-e2e' of github.com:prysmaticlabs/prysm into fix-long-e2e
2020-06-11 06:38:15 +00:00
Preston Van Loon
8815f6ced8 Add the latest git tag as part of the version string. (#6210)
Example:

version=Prysm/v1.0.0-alpha.10/28e61fa40c7d16774b3b1c18d8382c64537bfa84. Built at: 2020-06-11 04:29:18+00:00
2020-06-11 13:45:31 +08:00
Fabrice Cheng
28e61fa40c Verify External IP is reachable or log a warning if not (#6155)
* Only build with cgosymbolizer when explicitly desired. This fixes go builds for mac and others

* Try to dial the external IP set and display a warning message if it is not reachable

* fix dialTimeout adding the port

* add connectivity check for UDP as well

* add connectivity verification for both UDP and TCP and move the function into p2p/utils.go

* return if Resolving UDP fails

* move constant in utils

* add missing time dependencies

* leverage dial for both TCP/UDP (even though UDP is not supported) & add a unit test for `verifyConnectivity`

* handle err from conn.Close()

* add utils_test go p2p gazelle

* Update beacon-chain/p2p/utils.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>

* Update beacon-chain/p2p/utils.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/p2p/utils.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* do not defer closing the connection

* leverage util helper testutil.AssertLogsContain and testutil.AssertLogsDoNotContain

* remove old code

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-06-10 18:41:11 -05:00
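A minimal sketch of the reachability probe this commit describes, using only net.DialTimeout; the host, port, and messages are illustrative (the real helper lives in beacon-chain/p2p/utils.go per the bullets above, and as noted there a UDP dial cannot truly confirm reachability):

package main

import (
	"fmt"
	"net"
	"time"
)

// verifyConnectivity tries to dial our own advertised host:port and warns
// if the dial fails, hinting that the external IP may not be reachable.
func verifyConnectivity(host string, port int, network string) {
	addr := fmt.Sprintf("%s:%d", host, port)
	conn, err := net.DialTimeout(network, addr, 3*time.Second)
	if err != nil {
		fmt.Printf("WARNING: node may not be reachable at %s (%s): %v\n", addr, network, err)
		return
	}
	if err := conn.Close(); err != nil {
		fmt.Println("could not close connection:", err)
	}
}

func main() {
	verifyConnectivity("203.0.113.1", 13000, "tcp") // documentation-range IP
}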
terence tsao
edeb359ded Use SSZ HTR as baseline (#6207)
* Use ssz htr as baseline to compare
* Merge branch 'master' into fix-test
* One more. Fix for TestBlockBodyRoot_NilIsSameAsEmpty
* Merge branch 'fix-test' of github.com:prysmaticlabs/prysm into fix-test
2020-06-10 17:51:06 +00:00
Shay Zluf
f40a7575de Send blocks that fail p2p validation to slasher (#6164)
* send blocks that fail validation to the block feed for slasher, in order to enable slashing of proposers

* add sig validation

* fix imports

* Update shared/featureconfig/config.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>

* Update shared/featureconfig/config.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>

* Update shared/featureconfig/flags.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>

* remove extra validation path for slasher

* ivan feedback

* notify all blocks that pass minimal validation to block stream

* notify all blocks that pass minimal validation to block stream

* add to e2e flags

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-06-10 13:24:05 -04:00
Nishant Das
a9e3ea3ccc Add Host Flags for All Servers running in Prysm (#6202)
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into removeHardLimit

# Conflicts:
add flags for all hosts
* lint
* Merge refs/heads/master into addFlagsForHosts
* Merge refs/heads/master into addFlagsForHosts
* Merge refs/heads/master into addFlagsForHosts
* use single flag
2020-06-10 16:04:32 +00:00
Jim McDonald
61855bc596 Add block number to deposit log (#6200)
* Add block number to deposit log
* Update beacon-chain/powchain/log_processing.go

Co-authored-by: Nishant Das <nish1993@hotmail.com>
* Merge branch 'master' into log-deposit-block
* gofmt
* Merge branch 'master' into log-deposit-block
2020-06-10 14:31:58 +00:00
Nishant Das
d66570c72b change gateway port (#6203) 2020-06-10 06:56:39 -07:00
Victor Farazdagi
1ffd13c4f5 Disallows empty string as validator password (#6201)
* allows empty string as validator password
* disallow empty pass on validator creation
* reset
2020-06-10 13:12:19 +00:00
Preston Van Loon
89e1b0f6bb Add --random flag for unencrypted keygen (#6194)
* Add --random flag for unencrypted keygen
* gofmt
2020-06-10 07:13:47 +00:00
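A --random keygen path presumably boils down to sourcing key material from crypto/rand instead of a password-derived seed. A hedged sketch only; the 32-byte size and helper name are assumptions, not Prysm's actual keygen:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// randomKeyBytes fills key material from the OS CSPRNG. 32 bytes matches a
// BLS12-381 secret-key seed, but treat the size as illustrative.
func randomKeyBytes() ([]byte, error) {
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		return nil, err
	}
	return b, nil
}

func main() {
	b, err := randomKeyBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(b))
}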
Preston Van Loon
ae4a8b3aeb Update RBE toolchain and image to have libgmp-dev with bazel 3.2.0 (#6193)
* Update RBE toolchain and image to have libgmp-dev with bazel 3.2.0
* Merge branch 'master' into update-cross-compile
* some fixes for herumi cross compile
* gaz
2020-06-10 04:08:02 +00:00
751 changed files with 38371 additions and 20310 deletions

View File

@@ -1 +1 @@
3.0.0
3.2.0

View File

@@ -2,24 +2,14 @@
# across machines, developers, and workspaces.
#
# This config is loaded from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/latest.bazelrc
build:remote-cache --remote_cache=grpcs://remotebuildexecution.googleapis.com
build:remote-cache --remote_timeout=3600
build:remote-cache --auth_enabled=true
build:remote-cache --spawn_strategy=standalone
build:remote-cache --strategy=Javac=standalone
build:remote-cache --strategy=Closure=standalone
build:remote-cache --strategy=Genrule=standalone
# Build results backend.
#build:remote-cache --bes_results_url="https://source.cloud.google.com/results/invocations/"
#build:remote-cache --bes_backend=buildeventservice.googleapis.com
#build:remote-cache --bes_timeout=60s
#build:remote-cache --project_id=prysmaticlabs
# Prysm specific remote-cache properties.
build:remote-cache --disk_cache=
build:remote-cache --host_platform_remote_properties_override='properties:{name:\"cache-silo-key\" value:\"prysm\"}'
build:remote-cache --remote_instance_name=projects/prysmaticlabs/instances/default_instance
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_minimal
# Import workspace options.

.github/CODEOWNERS vendored
View File

@@ -3,3 +3,6 @@
# Starlark code owners
*.bzl @prestonvanloon
# Anyone on prylabs team can approve dependency updates.
deps.bzl @prysmaticlabs/core-team

.github/actions/gomodtidy/Dockerfile vendored Normal file
View File

@@ -0,0 +1,5 @@
FROM golang:alpine
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

.github/actions/gomodtidy/action.yml vendored Normal file
View File

@@ -0,0 +1,5 @@
name: 'Go mod tidy checker'
description: 'Checks that `go mod tidy` has been applied.'
runs:
using: 'docker'
image: 'Dockerfile'

.github/actions/gomodtidy/entrypoint.sh vendored Executable file
View File

@@ -0,0 +1,34 @@
#!/bin/sh -l
set -e
export PATH=$PATH:/usr/local/go/bin
cd $GITHUB_WORKSPACE
cp go.mod go.mod.orig
cp go.sum go.sum.orig
go mod tidy
echo "Checking go.mod and go.sum:"
checks=0
if [ "$(diff -s go.mod.orig go.mod | grep -c 'Files go.mod.orig and go.mod are identical')" = 1 ]; then
echo "- go.mod is up to date."
checks=$((checks + 1))
else
echo "- go.mod is NOT up to date."
fi
if [ "$(diff -s go.sum.orig go.sum | grep -c 'Files go.sum.orig and go.sum are identical')" = 1 ]; then
echo "- go.sum is up to date."
checks=$((checks + 1))
else
echo "- go.sum is NOT up to date."
fi
if [ $checks -eq 2 ]; then
exit 0
fi
# Notify of any issues.
echo "Run 'go mod tidy' to update."
exit 1

View File

@@ -15,6 +15,10 @@ jobs:
- name: Checkout
uses: actions/checkout@v1
- name: Go mod tidy checker
id: gomodtidy
uses: ./.github/actions/gomodtidy
- name: Gofmt checker
id: gofmt
uses: ./.github/actions/gofmt

View File

@@ -105,6 +105,7 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
"//tools/analyzers/maligned:go_tool_library",
"//tools/analyzers/roughtime:go_tool_library",
"//tools/analyzers/cryptorand:go_tool_library",
"//tools/analyzers/errcheck:go_tool_library",
"//tools/analyzers/featureconfig:go_tool_library",
] + select({

View File

@@ -1,6 +1,8 @@
# Contribution Guidelines
Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer? Our [READINGS.md](https://github.com/prysmaticlabs/prysm/blob/master/docs/READINGS.md) doc includes comprehensive information on Ethereum and sharding for both part-time and core contributors to the project.
Note: The latest and most up-to-date documentation can be found on our [docs portal](https://docs.prylabs.network/docs/contribute/contribution-guidelines).
Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer?
You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in the works for our different releases. Feel free to fork our repo and start creating PRs after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/che9auJ) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding); drop us a line there if you want to get more involved or have any questions on our implementation!

View File

@@ -157,7 +157,7 @@ go_rules_dependencies()
go_register_toolchains(nogo = "@//:nogo")
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
gazelle_dependencies()
@@ -354,3 +354,24 @@ load("@com_github_ethereum_go_ethereum//:deps.bzl", "geth_dependencies")
geth_dependencies()
# Do NOT add new go dependencies here! Refer to DEPENDENCIES.md!
go_repository(
name = "com_github_nbutton23_zxcvbn_go",
importpath = "github.com/nbutton23/zxcvbn-go",
sum = "h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=",
version = "v0.0.0-20180912185939-ae427f1e4c1d",
)
go_repository(
name = "com_github_brianium_mnemonic",
importpath = "github.com/brianium/mnemonic",
sum = "h1:futFTqrUAf1IanFLU+jK4D1NpgE/+gCbnCG7Fl0rHs0=",
version = "v0.0.0-20180124190051-72af92c51f88",
)
go_repository(
name = "com_github_logrusorgru_aurora",
importpath = "github.com/logrusorgru/aurora",
sum = "h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=",
version = "v2.0.3+incompatible",
)

View File

@@ -20,6 +20,7 @@ go_library(
"//shared/debug:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/logutil:go_default_library",
"//shared/maxprocs:go_default_library",
"//shared/version:go_default_library",
"@com_github_ethereum_go_ethereum//log:go_default_library",
"@com_github_ipfs_go_log_v2//:go_default_library",
@@ -28,7 +29,6 @@ go_library(
"@com_github_urfave_cli_v2//:go_default_library",
"@com_github_urfave_cli_v2//altsrc:go_default_library",
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
"@org_uber_go_automaxprocs//:go_default_library",
],
)
@@ -65,7 +65,7 @@ go_image(
"@com_github_urfave_cli_v2//:go_default_library",
"@com_github_urfave_cli_v2//altsrc:go_default_library",
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
"@org_uber_go_automaxprocs//:go_default_library",
"//shared/maxprocs:go_default_library",
],
)
@@ -74,6 +74,8 @@ container_bundle(
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image",
},
tags = ["manual"],
)
@@ -89,6 +91,8 @@ container_bundle(
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-debug": ":image_debug",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-debug": ":image_debug",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
},
tags = ["manual"],
)
@@ -104,6 +108,8 @@ container_bundle(
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-alpine": ":image_alpine",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-alpine": ":image_alpine",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
},
tags = ["manual"],
)

View File

@@ -1,46 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_library(
name = "go_default_library",
srcs = ["service.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/archiver",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["service_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -1,200 +0,0 @@
package archiver
import (
"context"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
)
var log = logrus.WithField("prefix", "archiver")
// Service defining archiver functionality for persisting checkpointed
// beacon chain information to a database backend for historical purposes.
type Service struct {
ctx context.Context
cancel context.CancelFunc
beaconDB db.NoHeadAccessDatabase
headFetcher blockchain.HeadFetcher
participationFetcher blockchain.ParticipationFetcher
stateNotifier statefeed.Notifier
lastArchivedEpoch uint64
}
// Config options for the archiver service.
type Config struct {
BeaconDB db.NoHeadAccessDatabase
HeadFetcher blockchain.HeadFetcher
ParticipationFetcher blockchain.ParticipationFetcher
StateNotifier statefeed.Notifier
}
// NewArchiverService initializes the service from configuration options.
func NewArchiverService(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
return &Service{
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
headFetcher: cfg.HeadFetcher,
participationFetcher: cfg.ParticipationFetcher,
stateNotifier: cfg.StateNotifier,
}
}
// Start the archiver service event loop.
func (s *Service) Start() {
go s.run(s.ctx)
}
// Stop the archiver service event loop.
func (s *Service) Stop() error {
defer s.cancel()
return nil
}
// Status reports the healthy status of the archiver. Returning nil means service
// is correctly running without error.
func (s *Service) Status() error {
return nil
}
// We archive committee information pertaining to the head state's epoch.
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
proposerSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return errors.Wrap(err, "could not generate seed")
}
attesterSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return errors.Wrap(err, "could not generate seed")
}
info := &pb.ArchivedCommitteeInfo{
ProposerSeed: proposerSeed[:],
AttesterSeed: attesterSeed[:],
}
if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, epoch, info); err != nil {
return errors.Wrap(err, "could not archive committee info")
}
return nil
}
// We archive active validator set changes that happened during the previous epoch.
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
prevEpoch := epoch - 1
vals := headState.Validators()
activations := validators.ActivatedValidatorIndices(prevEpoch, vals)
slashings := validators.SlashedValidatorIndices(prevEpoch, vals)
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, prevEpoch)
if err != nil {
return errors.Wrap(err, "could not get active validator count")
}
exited, err := validators.ExitedValidatorIndices(prevEpoch, vals, activeValidatorCount)
if err != nil {
return errors.Wrap(err, "could not determine exited validator indices")
}
activeSetChanges := &pb.ArchivedActiveSetChanges{
Activated: activations,
Exited: exited,
Slashed: slashings,
}
if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, prevEpoch, activeSetChanges); err != nil {
return errors.Wrap(err, "could not archive active validator set changes")
}
return nil
}
// We compute participation metrics by first retrieving the head state and
// matching validator attestations during the epoch.
func (s *Service) archiveParticipation(ctx context.Context, epoch uint64) error {
pBal := s.participationFetcher.Participation(epoch)
participation := &ethpb.ValidatorParticipation{}
if pBal != nil {
participation = &ethpb.ValidatorParticipation{
EligibleEther: pBal.ActivePrevEpoch,
VotedEther: pBal.PrevEpochTargetAttested,
GlobalParticipationRate: float32(pBal.PrevEpochTargetAttested) / float32(pBal.ActivePrevEpoch),
}
}
return s.beaconDB.SaveArchivedValidatorParticipation(ctx, epoch, participation)
}
// We archive validator balances and active indices.
func (s *Service) archiveBalances(ctx context.Context, balances []uint64, epoch uint64) error {
if err := s.beaconDB.SaveArchivedBalances(ctx, epoch, balances); err != nil {
return errors.Wrap(err, "could not archive balances")
}
return nil
}
func (s *Service) run(ctx context.Context) {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.BlockProcessed {
data, ok := event.Data.(*statefeed.BlockProcessedData)
if !ok {
log.Error("Event feed data is not type *statefeed.BlockProcessedData")
continue
}
log.WithField("headRoot", fmt.Sprintf("%#x", data.BlockRoot)).Debug("Received block processed event")
headState, err := s.headFetcher.HeadState(ctx)
if err != nil {
log.WithError(err).Error("Head state is not available")
continue
}
slot := headState.Slot()
currentEpoch := helpers.SlotToEpoch(slot)
if !helpers.IsEpochEnd(slot) && currentEpoch <= s.lastArchivedEpoch {
continue
}
epochToArchive := currentEpoch
if !helpers.IsEpochEnd(slot) {
epochToArchive--
}
if err := s.archiveCommitteeInfo(ctx, headState, epochToArchive); err != nil {
log.WithError(err).Error("Could not archive committee info")
continue
}
if err := s.archiveActiveSetChanges(ctx, headState, epochToArchive); err != nil {
log.WithError(err).Error("Could not archive active validator set changes")
continue
}
if err := s.archiveParticipation(ctx, epochToArchive); err != nil {
log.WithError(err).Error("Could not archive validator participation")
continue
}
if err := s.archiveBalances(ctx, headState.Balances(), epochToArchive); err != nil {
log.WithError(err).Error("Could not archive validator balances and active indices")
continue
}
log.WithField(
"epoch",
epochToArchive,
).Debug("Successfully archived beacon chain data during epoch")
s.lastArchivedEpoch = epochToArchive
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state feed notifier failed")
return
}
}
}

View File

@@ -1,474 +0,0 @@
package archiver
import (
"context"
"fmt"
"io/ioutil"
"reflect"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func init() {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
}
func TestArchiverService_ReceivesBlockProcessedEvent(t *testing.T) {
hook := logTest.NewGlobal()
svc, _ := setupService(t)
st := testutil.NewBeaconState()
if err := st.SetSlot(1); err != nil {
t.Fatal(err)
}
svc.headFetcher = &mock.ChainService{
State: st,
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", event.Data.(*statefeed.BlockProcessedData).BlockRoot))
testutil.AssertLogsContain(t, hook, "Received block processed event")
}
func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
hook := logTest.NewGlobal()
svc, _ := setupService(t)
// The head state is NOT an epoch end.
st := testutil.NewBeaconState()
if err := st.SetSlot(params.BeaconConfig().SlotsPerEpoch - 2); err != nil {
t.Fatal(err)
}
svc.headFetcher = &mock.ChainService{
State: st,
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
// The context should have been canceled.
if svc.ctx.Err() != context.Canceled {
t.Error("context was not canceled")
}
testutil.AssertLogsContain(t, hook, "Received block processed event")
// The service should ONLY log any archival logs if we receive a
// head slot that is an epoch end.
testutil.AssertLogsDoNotContain(t, hook, "Successfully archived")
}
func TestArchiverService_ArchivesEvenThroughSkipSlot(t *testing.T) {
hook := logTest.NewGlobal()
svc, _ := setupService(t)
validatorCount := uint64(100)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
exitRoutine := make(chan bool)
go func() {
svc.run(svc.ctx)
<-exitRoutine
}()
// Send out an event every slot, skipping the end slot of the epoch.
for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch+1; i++ {
if err := headState.SetSlot(i); err != nil {
t.Fatal(err)
}
svc.headFetcher = &mock.ChainService{
State: headState,
}
if helpers.IsEpochEnd(i) {
continue
}
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = svc.stateNotifier.StateFeed().Send(event)
}
}
if err := svc.Stop(); err != nil {
t.Fatal(err)
}
exitRoutine <- true
// The context should have been canceled.
if svc.ctx.Err() != context.Canceled {
t.Error("context was not canceled")
}
testutil.AssertLogsContain(t, hook, "Received block processed event")
// Even though there was a skip slot, we should still be able to archive
// upon the next block event afterwards.
testutil.AssertLogsContain(t, hook, "Successfully archived")
}
func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, _ := setupService(t)
svc.headFetcher = &mock.ChainService{
State: headState,
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
attestedBalance := uint64(1)
currentEpoch := helpers.CurrentEpoch(headState)
wanted := &ethpb.ValidatorParticipation{
VotedEther: attestedBalance,
EligibleEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
}
retrieved, err := svc.beaconDB.ArchivedValidatorParticipation(svc.ctx, currentEpoch)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(wanted, retrieved) {
t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch-1, wanted, retrieved)
}
testutil.AssertLogsContain(t, hook, "Successfully archived")
}
func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, _ := setupService(t)
svc.headFetcher = &mock.ChainService{
State: headState,
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
retrieved, err := svc.beaconDB.ArchivedBalances(svc.ctx, helpers.CurrentEpoch(headState))
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(headState.Balances(), retrieved) {
t.Errorf(
"Wanted balances for epoch %d %v, retrieved %v",
helpers.CurrentEpoch(headState),
headState.Balances(),
retrieved,
)
}
testutil.AssertLogsContain(t, hook, "Successfully archived")
}
func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, _ := setupService(t)
svc.headFetcher = &mock.ChainService{
State: headState,
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
currentEpoch := helpers.CurrentEpoch(headState)
proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
t.Fatal(err)
}
attesterSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
t.Fatal(err)
}
wanted := &pb.ArchivedCommitteeInfo{
ProposerSeed: proposerSeed[:],
AttesterSeed: attesterSeed[:],
}
retrieved, err := svc.beaconDB.ArchivedCommitteeInfo(svc.ctx, helpers.CurrentEpoch(headState))
if err != nil {
t.Fatal(err)
}
if !proto.Equal(wanted, retrieved) {
t.Errorf(
"Wanted committee info for epoch %d %v, retrieved %v",
helpers.CurrentEpoch(headState),
wanted,
retrieved,
)
}
testutil.AssertLogsContain(t, hook, "Successfully archived")
}
func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, beaconDB := setupService(t)
svc.headFetcher = &mock.ChainService{
State: headState,
}
prevEpoch := helpers.PrevEpoch(headState)
delayedActEpoch := helpers.ActivationExitEpoch(prevEpoch)
val1, err := headState.ValidatorAtIndex(4)
if err != nil {
t.Fatal(err)
}
val1.ActivationEpoch = delayedActEpoch
val2, err := headState.ValidatorAtIndex(5)
if err != nil {
t.Fatal(err)
}
val2.ActivationEpoch = delayedActEpoch
if err := headState.UpdateValidatorAtIndex(4, val1); err != nil {
t.Fatal(err)
}
if err := headState.UpdateValidatorAtIndex(5, val2); err != nil {
t.Fatal(err)
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
if err != nil {
t.Fatal(err)
}
if retrieved == nil {
t.Fatal("Retrieved indices are nil")
}
if len(retrieved.Activated) != 98 {
t.Error("Did not get wanted active length")
}
testutil.AssertLogsContain(t, hook, "Successfully archived")
}
func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, beaconDB := setupService(t)
svc.headFetcher = &mock.ChainService{
State: headState,
}
prevEpoch := helpers.PrevEpoch(headState)
val1, err := headState.ValidatorAtIndex(95)
if err != nil {
t.Fatal(err)
}
val1.Slashed = true
val2, err := headState.ValidatorAtIndex(96)
if err != nil {
t.Fatal(err)
}
val2.Slashed = true
if err := headState.UpdateValidatorAtIndex(95, val1); err != nil {
t.Fatal(err)
}
if err := headState.UpdateValidatorAtIndex(96, val2); err != nil {
t.Fatal(err)
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
if err != nil {
t.Fatal(err)
}
if retrieved == nil {
t.Fatal("Retrieved indices are nil")
}
if !reflect.DeepEqual(retrieved.Slashed, []uint64{95, 96}) {
t.Errorf("Wanted indices 95, 96 slashed, received %v", retrieved.Slashed)
}
testutil.AssertLogsContain(t, hook, "Successfully archived")
}
func TestArchiverService_SavesExitedValidatorChanges(t *testing.T) {
hook := logTest.NewGlobal()
validatorCount := uint64(100)
headState, err := setupState(validatorCount)
if err != nil {
t.Fatal(err)
}
svc, beaconDB := setupService(t)
svc.headFetcher = &mock.ChainService{
State: headState,
}
prevEpoch := helpers.PrevEpoch(headState)
val, err := headState.ValidatorAtIndex(95)
if err != nil {
t.Fatal(err)
}
val.ExitEpoch = prevEpoch
val.WithdrawableEpoch = prevEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
if err := headState.UpdateValidatorAtIndex(95, val); err != nil {
t.Fatal(err)
}
event := &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: [32]byte{1, 2, 3},
Verified: true,
},
}
triggerStateEvent(t, svc, event)
testutil.AssertLogsContain(t, hook, "Successfully archived")
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
if err != nil {
t.Fatal(err)
}
if retrieved == nil {
t.Fatal("Retrieved indices are nil")
}
if !reflect.DeepEqual(retrieved.Exited, []uint64{95}) {
t.Errorf("Wanted indices 95 exited, received %v", retrieved.Exited)
}
}
func setupState(validatorCount uint64) (*stateTrie.BeaconState, error) {
validators := make([]*ethpb.Validator, validatorCount)
balances := make([]uint64, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
atts := []*pb.PendingAttestation{{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{}}}}
// We initialize a head state that has attestations from participated
// validators in a simulated fashion.
st := testutil.NewBeaconState()
if err := st.SetSlot((2 * params.BeaconConfig().SlotsPerEpoch) - 1); err != nil {
return nil, err
}
if err := st.SetValidators(validators); err != nil {
return nil, err
}
if err := st.SetBalances(balances); err != nil {
return nil, err
}
if err := st.SetCurrentEpochAttestations(atts); err != nil {
return nil, err
}
return st, nil
}
func setupService(t *testing.T) (*Service, db.Database) {
beaconDB := dbutil.SetupDB(t)
ctx, cancel := context.WithCancel(context.Background())
validatorCount := uint64(100)
totalBalance := validatorCount * params.BeaconConfig().MaxEffectiveBalance
mockChainService := &mock.ChainService{}
return &Service{
beaconDB: beaconDB,
ctx: ctx,
cancel: cancel,
stateNotifier: mockChainService.StateNotifier(),
participationFetcher: &mock.ChainService{
Balance: &precompute.Balance{ActivePrevEpoch: totalBalance, PrevEpochTargetAttested: 1}},
}, beaconDB
}
func triggerStateEvent(t *testing.T, svc *Service, event *feed.Event) {
exitRoutine := make(chan bool)
go func() {
svc.run(svc.ctx)
<-exitRoutine
}()
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = svc.stateNotifier.StateFeed().Send(event)
}
if err := svc.Stop(); err != nil {
t.Fatal(err)
}
exitRoutine <- true
// The context should have been canceled.
if svc.ctx.Err() != context.Canceled {
t.Error("context was not canceled")
}
}

View File

@@ -30,7 +30,6 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/flags:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
@@ -44,6 +43,7 @@ go_library(
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
@@ -74,14 +74,16 @@ go_test(
srcs = [
"chain_info_test.go",
"head_test.go",
"init_sync_process_block_test.go",
"info_test.go",
"process_attestation_test.go",
"process_block_test.go",
"receive_attestation_test.go",
"receive_block_test.go",
"service_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -97,6 +99,8 @@ go_test(
"//shared/event:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
@@ -114,6 +118,7 @@ go_test(
name = "go_raceon_test",
srcs = [
"chain_info_norace_test.go",
"receive_block_test.go",
"service_norace_test.go",
],
embed = [":go_default_library"],
@@ -127,6 +132,7 @@ go_test(
race = "on",
tags = ["race_on"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -140,6 +146,8 @@ go_test(
"//shared/event:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",

View File

@@ -5,7 +5,6 @@ import (
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -13,6 +12,7 @@ import (
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
// ChainInfoFetcher defines a common interface for methods in blockchain service which
@@ -42,8 +42,8 @@ type HeadFetcher interface {
HeadRoot(ctx context.Context) ([]byte, error)
HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error)
HeadState(ctx context.Context) (*state.BeaconState, error)
HeadValidatorsIndices(epoch uint64) ([]uint64, error)
HeadSeed(epoch uint64) ([32]byte, error)
HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error)
HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error)
HeadGenesisValidatorRoot() [32]byte
HeadETH1Data() *ethpb.Eth1Data
ProtoArrayStore() *protoarray.Store
@@ -57,6 +57,7 @@ type ForkFetcher interface {
// CanonicalFetcher retrieves the current chain's canonical information.
type CanonicalFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
VerifyBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}
// FinalizationFetcher defines a common interface for methods in blockchain service which
@@ -67,12 +68,6 @@ type FinalizationFetcher interface {
PreviousJustifiedCheckpt() *ethpb.Checkpoint
}
// ParticipationFetcher defines a common interface for methods in blockchain service which
// directly retrieves validator participation related data.
type ParticipationFetcher interface {
Participation(epoch uint64) *precompute.Balance
}
// FinalizedCheckpt returns the latest finalized checkpoint from head state.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
if s.finalizedCheckpt == nil {
@@ -133,7 +128,7 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
}
// HeadBlock returns the head block of the chain.
// If the head state is nil from service struct,
// If the head is nil from service struct,
// it will attempt to get the head block from DB.
func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
if s.hasHeadState() {
@@ -144,31 +139,37 @@ func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, erro
}
// HeadState returns the head state of the chain.
// If the head state is nil from service struct,
// If the head is nil from service struct,
// it will attempt to get the head state from DB.
func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
if s.hasHeadState() {
return s.headState(), nil
ctx, span := trace.StartSpan(ctx, "blockChain.HeadState")
defer span.End()
ok := s.hasHeadState()
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
if ok {
return s.headState(ctx), nil
}
return s.beaconDB.HeadState(ctx)
}
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
func (s *Service) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
if !s.hasHeadState() {
return []uint64{}, nil
}
return helpers.ActiveValidatorIndices(s.headState(), epoch)
return helpers.ActiveValidatorIndices(s.headState(ctx), epoch)
}
// HeadSeed returns the seed from the head view of a given epoch.
func (s *Service) HeadSeed(epoch uint64) ([32]byte, error) {
func (s *Service) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
if !s.hasHeadState() {
return [32]byte{}, nil
}
return helpers.Seed(s.headState(), epoch, params.BeaconConfig().DomainBeaconAttester)
return helpers.Seed(s.headState(ctx), epoch, params.BeaconConfig().DomainBeaconAttester)
}
// HeadGenesisValidatorRoot returns genesis validator root of the head state.
@@ -218,14 +219,6 @@ func (s *Service) CurrentFork() *pb.Fork {
return s.head.state.Fork()
}
// Participation returns the participation stats of a given epoch.
func (s *Service) Participation(epoch uint64) *precompute.Balance {
s.epochParticipationLock.RLock()
defer s.epochParticipationLock.RUnlock()
return s.epochParticipation[epoch]
}
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
// If the block has been finalized, the block will always be part of the canonical chain.
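The HeadState change above wraps the cache check in an opencensus span and records a cache_hit attribute. A minimal, self-contained sketch of the same pattern using the same trace calls; the lookup function and map cache are illustrative:

package main

import (
	"context"
	"fmt"

	"go.opencensus.io/trace"
)

// lookup mirrors the HeadState instrumentation: open a span, record whether
// the cache answered, and fall back to the slow path on a miss.
func lookup(ctx context.Context, cache map[string]string, key string) string {
	ctx, span := trace.StartSpan(ctx, "example.lookup")
	defer span.End()
	v, ok := cache[key]
	span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
	if ok {
		return v
	}
	return "loaded-from-db" // stand-in for the DB fallback
}

func main() {
	fmt.Println(lookup(context.Background(), map[string]string{}, "head"))
}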

View File

@@ -5,70 +5,59 @@ import (
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestHeadSlot_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
}
go func() {
if err := s.saveHead(context.Background(), [32]byte{}); err != nil {
t.Fatal(err)
}
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
s.HeadSlot()
}
func TestHeadRoot_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
head: &head{root: [32]byte{'A'}},
stateGen: stategen.New(db, cache.NewStateSummaryCache()),
stateGen: stategen.New(db, sc),
}
go func() {
if err := s.saveHead(context.Background(), [32]byte{}); err != nil {
t.Fatal(err)
}
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
if _, err := s.HeadRoot(context.Background()); err != nil {
t.Fatal(err)
}
_, err := s.HeadRoot(context.Background())
require.NoError(t, err)
}
func TestHeadBlock_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
head: &head{block: &ethpb.SignedBeaconBlock{}},
stateGen: stategen.New(db, cache.NewStateSummaryCache()),
stateGen: stategen.New(db, sc),
}
go func() {
if err := s.saveHead(context.Background(), [32]byte{}); err != nil {
t.Fatal(err)
}
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
if _, err := s.HeadBlock(context.Background()); err != nil {
t.Fatal(err)
}
_, err := s.HeadBlock(context.Background())
require.NoError(t, err)
}
func TestHeadState_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
stateGen: stategen.New(db, cache.NewStateSummaryCache()),
stateGen: stategen.New(db, sc),
}
go func() {
if err := s.saveHead(context.Background(), [32]byte{}); err != nil {
t.Fatal(err)
}
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
if _, err := s.HeadState(context.Background()); err != nil {
t.Fatal(err)
}
_, err := s.HeadState(context.Background())
require.NoError(t, err)
}

View File

@@ -3,7 +3,6 @@ package blockchain
import (
"bytes"
"context"
"reflect"
"testing"
"time"
@@ -13,6 +12,8 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
// Ensure Service implements chain info interface.
@@ -21,43 +22,39 @@ var _ = TimeFetcher(&Service{})
var _ = ForkFetcher(&Service{})
func TestFinalizedCheckpt_Nil(t *testing.T) {
db := testDB.SetupDB(t)
c := setupBeaconChain(t, db)
db, sc := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
if !bytes.Equal(c.FinalizedCheckpt().Root, params.BeaconConfig().ZeroHash[:]) {
t.Error("Incorrect pre chain start value")
}
}
func TestHeadRoot_Nil(t *testing.T) {
db := testDB.SetupDB(t)
c := setupBeaconChain(t, db)
db, sc := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
headRoot, err := c.HeadRoot(context.Background())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if !bytes.Equal(headRoot, params.BeaconConfig().ZeroHash[:]) {
t.Error("Incorrect pre chain start value")
}
}
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
cp := &ethpb.Checkpoint{Epoch: 5, Root: []byte("foo")}
c := setupBeaconChain(t, db)
c := setupBeaconChain(t, db, sc)
c.finalizedCheckpt = cp
if c.FinalizedCheckpt().Epoch != cp.Epoch {
t.Errorf("Finalized epoch at genesis should be %d, got: %d", cp.Epoch, c.FinalizedCheckpt().Epoch)
}
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
}
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, db)
c := setupBeaconChain(t, db, sc)
c.finalizedCheckpt = cp
c.genesisRoot = genesisRoot
@@ -67,23 +64,21 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
}
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
cp := &ethpb.Checkpoint{Epoch: 6, Root: []byte("foo")}
c := setupBeaconChain(t, db)
c := setupBeaconChain(t, db, sc)
c.justifiedCheckpt = cp
if c.CurrentJustifiedCheckpt().Epoch != cp.Epoch {
t.Errorf("Current Justified epoch at genesis should be %d, got: %d", cp.Epoch, c.CurrentJustifiedCheckpt().Epoch)
}
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
}
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, db)
c := setupBeaconChain(t, db, sc)
c.justifiedCheckpt = cp
c.genesisRoot = genesisRoot
@@ -93,23 +88,21 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
}
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
cp := &ethpb.Checkpoint{Epoch: 7, Root: []byte("foo")}
c := setupBeaconChain(t, db)
c := setupBeaconChain(t, db, sc)
c.prevJustifiedCheckpt = cp
if c.PreviousJustifiedCheckpt().Epoch != cp.Epoch {
t.Errorf("Previous Justified epoch at genesis should be %d, got: %d", cp.Epoch, c.PreviousJustifiedCheckpt().Epoch)
}
assert.Equal(t, cp.Epoch, c.PreviousJustifiedCheckpt().Epoch, "Unexpected previous justified epoch")
}
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
genesisRoot := [32]byte{'C'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, db)
c := setupBeaconChain(t, db, sc)
c.prevJustifiedCheckpt = cp
c.genesisRoot = genesisRoot
@@ -125,49 +118,34 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
t.Fatal(err)
}
c.head = &head{slot: 100, state: s}
if c.HeadSlot() != 100 {
t.Errorf("Wanted head slot: %d, got: %d", 100, c.HeadSlot())
}
assert.Equal(t, uint64(100), c.headSlot())
}
func TestHeadRoot_CanRetrieve(t *testing.T) {
c := &Service{}
c.head = &head{root: [32]byte{'A'}}
if [32]byte{'A'} != c.headRoot() {
t.Errorf("Wanted head root: %v, got: %d", []byte{'A'}, c.headRoot())
}
assert.Equal(t, [32]byte{'A'}, c.headRoot())
}
func TestHeadBlock_CanRetrieve(t *testing.T) {
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
s, err := state.InitializeFromProto(&pb.BeaconState{})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
c := &Service{}
c.head = &head{block: b, state: s}
received, err := c.HeadBlock(context.Background())
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(b, received) {
t.Error("incorrect head block received")
}
require.NoError(t, err)
assert.DeepEqual(t, b, received, "Incorrect head block received")
}
func TestHeadState_CanRetrieve(t *testing.T) {
s, err := state.InitializeFromProto(&pb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
headState, err := c.HeadState(context.Background())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if !proto.Equal(s.InnerStateUnsafe(), headState.InnerStateUnsafe()) {
t.Error("incorrect head state received")
}
@@ -176,17 +154,13 @@ func TestHeadState_CanRetrieve(t *testing.T) {
func TestGenesisTime_CanRetrieve(t *testing.T) {
c := &Service{genesisTime: time.Unix(999, 0)}
wanted := time.Unix(999, 0)
if c.GenesisTime() != wanted {
t.Error("Did not get wanted genesis time")
}
assert.Equal(t, wanted, c.GenesisTime(), "Did not get wanted genesis time")
}
func TestCurrentFork_CanRetrieve(t *testing.T) {
f := &pb.Fork{Epoch: 999}
s, err := state.InitializeFromProto(&pb.BeaconState{Fork: f})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
if !proto.Equal(c.CurrentFork(), f) {
@@ -197,34 +171,24 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
// Should not panic if head state is nil.
c := &Service{}
if c.GenesisValidatorRoot() != [32]byte{} {
t.Error("Did not get correct genesis validator root")
}
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
s, err := state.InitializeFromProto(&pb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
c.head = &head{state: s}
if c.GenesisValidatorRoot() != [32]byte{'a'} {
t.Error("Did not get correct genesis validator root")
}
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
}
func TestHeadETH1Data_Nil(t *testing.T) {
db := testDB.SetupDB(t)
c := setupBeaconChain(t, db)
if !reflect.DeepEqual(c.HeadETH1Data(), &ethpb.Eth1Data{}) {
t.Error("Incorrect pre chain start value")
}
db, sc := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
assert.DeepEqual(t, &ethpb.Eth1Data{}, c.HeadETH1Data(), "Incorrect pre chain start value")
}
func TestHeadETH1Data_CanRetrieve(t *testing.T) {
d := &ethpb.Eth1Data{DepositCount: 999}
s, err := state.InitializeFromProto(&pb.BeaconState{Eth1Data: d})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
if !proto.Equal(c.HeadETH1Data(), d) {

View File

@@ -1,6 +1,7 @@
package blockchain
import (
"bytes"
"context"
"fmt"
@@ -8,11 +9,11 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -26,10 +27,10 @@ type head struct {
state *state.BeaconState // current head state.
}
// This gets head from the fork choice service and saves head related items
// (ie root, block, state) to the local service cache.
// Determines the head from the fork choice service and saves its new data
// (head root, head block, and head state) to the local service cache.
func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
ctx, span := trace.StartSpan(ctx, "blockchain.updateHead")
ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
defer span.End()
// To get the proper head update, a node first checks its best justified
@@ -37,6 +38,9 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
// ensure head gets its best justified info.
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = s.bestJustifiedCheckpt
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
// Get head from the fork choice service.
@@ -64,7 +68,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
// This saves head info to the local service cache; it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockchain.saveHead")
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
// Do nothing if head hasn't changed.
@@ -74,15 +78,8 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// If the head state is not available, just return nil.
// There's nothing to cache.
if featureconfig.Get().NewStateMgmt {
if !s.stateGen.StateSummaryExists(ctx, headRoot) {
return nil
}
} else {
_, cached := s.initSyncState[headRoot]
if !cached && !s.beaconDB.HasState(ctx, headRoot) {
return nil
}
if !s.stateGen.StateSummaryExists(ctx, headRoot) {
return nil
}
// Get the new head block from DB.
@@ -95,21 +92,9 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
}
// Get the new head state from cached state or DB.
var newHeadState *state.BeaconState
if featureconfig.Get().NewStateMgmt {
newHeadState, err = s.stateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
} else {
var exists bool
newHeadState, exists = s.initSyncState[headRoot]
if !exists {
newHeadState, err = s.beaconDB.State(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
}
newHeadState, err := s.stateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil {
return errors.New("cannot save nil head state")
@@ -144,33 +129,24 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
}
// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of inital-sync-cache-state flag, it uses finalized
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
// checkpoint as an anchor to resume sync, so the head no longer needs to be saved on a per-slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte) error {
cachedHeadRoot, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get head root from cache")
}
if bytes.Equal(r[:], cachedHeadRoot) {
return nil
}
if b == nil || b.Block == nil {
return errors.New("cannot save nil head block")
}
var headState *state.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
headState, err = s.stateGen.StateByRootInitialSync(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
} else {
headState, err = s.beaconDB.State(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if headState == nil {
s.initSyncStateLock.RLock()
cachedHeadState, ok := s.initSyncState[r]
if ok {
headState = cachedHeadState
}
s.initSyncStateLock.RUnlock()
}
headState, err := s.stateGen.StateByRootInitialSync(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if headState == nil {
return errors.New("nil head state")
@@ -243,7 +219,10 @@ func (s *Service) headBlock() *ethpb.SignedBeaconBlock {
// This returns the head state.
// It does a full copy on head state for immutability.
func (s *Service) headState() *state.BeaconState {
func (s *Service) headState(ctx context.Context) *stateTrie.BeaconState {
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
defer span.End()
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -269,6 +248,9 @@ func (s *Service) hasHeadState() bool {
// This updates recent canonical block mapping. It uses input head root and retrieves
// all the canonical block roots that are ancestors of the input head block root.
func (s *Service) updateRecentCanonicalBlocks(ctx context.Context, headRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateRecentCanonicalBlocks")
defer span.End()
s.recentCanonicalBlocksLock.Lock()
defer s.recentCanonicalBlocksLock.Unlock()
@@ -290,3 +272,54 @@ func (s *Service) updateRecentCanonicalBlocks(ctx context.Context, headRoot [32]
return nil
}
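The traversal body is elided by the hunk above; conceptually, the update walks the fork choice node list from the head back toward the genesis root, marking every ancestor canonical. A toy sketch of that walk, assuming a protoarray-style layout in which Parent is an index into the same node slice and ^uint64(0) marks a missing parent (the convention also visible in the TreeHandler diff below); this is a reading aid, not the actual implementation:

// Toy ancestor walk over an assumed protoarray-style node slice.
// nodes[i].Parent indexes into nodes; ^uint64(0) means "no parent".
canonical := make(map[[32]byte]bool)
i := headIndex // index of the head node, assumed known
for i != ^uint64(0) {
	canonical[nodes[i].Root] = true
	i = nodes[i].Parent
}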
// This caches justified state balances to be used for fork choice.
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
var justifiedState *stateTrie.BeaconState
var err error
if justifiedRoot == s.genesisRoot {
justifiedState, err = s.beaconDB.GenesisState(ctx)
if err != nil {
return err
}
} else {
justifiedState, err = s.stateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return err
}
}
if justifiedState == nil {
return errors.New("justified state can't be nil")
}
epoch := helpers.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
if err := justifiedState.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}); err != nil {
return err
}
s.justifiedBalancesLock.Lock()
defer s.justifiedBalancesLock.Unlock()
s.justifiedBalances = justifiedBalances
return nil
}
func (s *Service) getJustifiedBalances() []uint64 {
s.justifiedBalancesLock.RLock()
defer s.justifiedBalancesLock.RUnlock()
return s.justifiedBalances
}
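For context on why these balances are cached at justified-checkpoint boundaries: fork choice weighs each block by the justified balances of the validators voting for it, and inactive validators contribute zero, which is why the loop above records 0 for them. A minimal, self-contained sketch of that weighting, using a hypothetical vote type rather than the real protoarray store:

// Hypothetical weight accounting over cached justified balances.
type vote struct {
	validatorIndex uint64
	blockIndex     uint64
}

// blockWeight sums the justified balance of every validator voting
// for the given block.
func blockWeight(blockIndex uint64, votes []vote, justifiedBalances []uint64) uint64 {
	total := uint64(0)
	for _, v := range votes {
		if v.blockIndex == blockIndex {
			total += justifiedBalances[v.validatorIndex]
		}
	}
	return total
}

With balances of []uint64{32e9, 0, 32e9} (validator 1 inactive), a block supported by validators 0 and 1 weighs 32e9: the inactive vote adds nothing.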

View File

@@ -3,7 +3,6 @@ package blockchain
import (
"bytes"
"context"
"reflect"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -11,32 +10,27 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestSaveHead_Same(t *testing.T) {
db := testDB.SetupDB(t)
service := setupBeaconChain(t, db)
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
r := [32]byte{'A'}
service.head = &head{slot: 0, root: r}
if err := service.saveHead(context.Background(), r); err != nil {
t.Fatal(err)
}
if service.headSlot() != 0 {
t.Error("Head did not stay the same")
}
if service.headRoot() != r {
t.Error("Head did not stay the same")
}
require.NoError(t, service.saveHead(context.Background(), r))
assert.Equal(t, uint64(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}
func TestSaveHead_Different(t *testing.T) {
db := testDB.SetupDB(t)
service := setupBeaconChain(t, db)
ctx := context.Background()
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
oldRoot := [32]byte{'A'}
service.head = &head{slot: 0, root: oldRoot}
@@ -44,50 +38,31 @@ func TestSaveHead_Different(t *testing.T) {
newHeadBlock := &ethpb.BeaconBlock{Slot: 1}
newHeadSignedBlock := &ethpb.SignedBeaconBlock{Block: newHeadBlock}
if err := service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
newRoot, err := stateutil.BlockRoot(newHeadBlock)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
headState := testutil.NewBeaconState()
if err := headState.SetSlot(1); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveState(context.Background(), headState, newRoot); err != nil {
t.Fatal(err)
}
if err := service.saveHead(context.Background(), newRoot); err != nil {
t.Fatal(err)
}
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
if service.HeadSlot() != 1 {
t.Error("Head did not change")
}
assert.Equal(t, uint64(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if !bytes.Equal(cachedRoot, newRoot[:]) {
t.Error("Head did not change")
}
if !reflect.DeepEqual(service.headBlock(), newHeadSignedBlock) {
t.Error("Head did not change")
}
if !reflect.DeepEqual(service.headState().CloneInnerState(), headState.CloneInnerState()) {
t.Error("Head did not change")
}
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock(), "Head did not change")
assert.DeepEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
}
func TestSaveHead_Different_Reorg(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
service := setupBeaconChain(t, db)
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
oldRoot := [32]byte{'A'}
service.head = &head{slot: 0, root: oldRoot}
@@ -99,102 +74,68 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
}
newHeadSignedBlock := &ethpb.SignedBeaconBlock{Block: newHeadBlock}
if err := service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
newRoot, err := stateutil.BlockRoot(newHeadBlock)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
headState := testutil.NewBeaconState()
if err := headState.SetSlot(1); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveState(context.Background(), headState, newRoot); err != nil {
t.Fatal(err)
}
if err := service.saveHead(context.Background(), newRoot); err != nil {
t.Fatal(err)
}
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
if service.HeadSlot() != 1 {
t.Error("Head did not change")
}
assert.Equal(t, uint64(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if !bytes.Equal(cachedRoot, newRoot[:]) {
t.Error("Head did not change")
}
if !reflect.DeepEqual(service.headBlock(), newHeadSignedBlock) {
t.Error("Head did not change")
}
if !reflect.DeepEqual(service.headState().CloneInnerState(), headState.CloneInnerState()) {
t.Error("Head did not change")
}
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock(), "Head did not change")
assert.DeepEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
testutil.AssertLogsContain(t, hook, "Chain reorg occurred")
}
func TestUpdateRecentCanonicalBlocks_CanUpdateWithoutParent(t *testing.T) {
db := testDB.SetupDB(t)
service := setupBeaconChain(t, db)
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
r := [32]byte{'a'}
if err := service.updateRecentCanonicalBlocks(context.Background(), r); err != nil {
t.Fatal(err)
}
require.NoError(t, service.updateRecentCanonicalBlocks(context.Background(), r))
canonical, err := service.IsCanonical(context.Background(), r)
if err != nil {
t.Fatal(err)
}
if !canonical {
t.Error("Block should be canonical")
}
require.NoError(t, err)
assert.Equal(t, true, canonical, "Block should be canonical")
}
func TestUpdateRecentCanonicalBlocks_CanUpdateWithParent(t *testing.T) {
db := testDB.SetupDB(t)
service := setupBeaconChain(t, db)
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
oldHead := [32]byte{'a'}
if err := service.forkChoiceStore.ProcessBlock(context.Background(), 1, oldHead, [32]byte{'g'}, [32]byte{}, 0, 0); err != nil {
t.Fatal(err)
}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 1, oldHead, [32]byte{'g'}, [32]byte{}, 0, 0))
currentHead := [32]byte{'b'}
if err := service.forkChoiceStore.ProcessBlock(context.Background(), 3, currentHead, oldHead, [32]byte{}, 0, 0); err != nil {
t.Fatal(err)
}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 3, currentHead, oldHead, [32]byte{}, 0, 0))
forkedRoot := [32]byte{'c'}
if err := service.forkChoiceStore.ProcessBlock(context.Background(), 2, forkedRoot, oldHead, [32]byte{}, 0, 0); err != nil {
t.Fatal(err)
}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 2, forkedRoot, oldHead, [32]byte{}, 0, 0))
if err := service.updateRecentCanonicalBlocks(context.Background(), currentHead); err != nil {
t.Fatal(err)
}
require.NoError(t, service.updateRecentCanonicalBlocks(context.Background(), currentHead))
canonical, err := service.IsCanonical(context.Background(), currentHead)
if err != nil {
t.Fatal(err)
}
if !canonical {
t.Error("Block should be canonical")
}
require.NoError(t, err)
assert.Equal(t, true, canonical, "Block should be canonical")
canonical, err = service.IsCanonical(context.Background(), oldHead)
if err != nil {
t.Fatal(err)
}
if !canonical {
t.Error("Block should be canonical")
}
require.NoError(t, err)
assert.Equal(t, true, canonical, "Block should be canonical")
canonical, err = service.IsCanonical(context.Background(), forkedRoot)
if err != nil {
t.Fatal(err)
}
if canonical {
t.Error("Block should not be canonical")
}
require.NoError(t, err)
assert.Equal(t, false, canonical, "Block should not be canonical")
}
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
state, _ := testutil.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: r[:]}))
require.NoError(t, service.beaconDB.SaveState(context.Background(), state, r))
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
}

View File

@@ -4,9 +4,9 @@ import (
"encoding/hex"
"fmt"
"net/http"
"strconv"
"github.com/emicklei/dot"
"github.com/prysmaticlabs/prysm/shared/params"
)
const template = `<html>
@@ -33,8 +33,8 @@ const template = `<html>
</html>`
// TreeHandler is a handler to serve /tree page in metrics.
func (s *Service) TreeHandler(w http.ResponseWriter, _ *http.Request) {
if s.headState() == nil {
func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
if s.headState(r.Context()) == nil {
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
log.WithError(err).Error("Failed to render p2p info page")
}
@@ -47,14 +47,14 @@ func (s *Service) TreeHandler(w http.ResponseWriter, _ *http.Request) {
graph.Attr("labeljust", "l")
dotNodes := make([]*dot.Node, len(nodes))
avgBalance := uint64(averageBalance(s.headState().Balances()))
avgBalance := uint64(averageBalance(s.headState(r.Context()).Balances()))
for i := len(nodes) - 1; i >= 0; i-- {
// Construct label for each node.
slot := strconv.Itoa(int(nodes[i].Slot))
weight := strconv.Itoa(int(nodes[i].Weight / 1e9)) // Convert unit Gwei to unit ETH.
votes := strconv.Itoa(int(nodes[i].Weight / 1e9 / avgBalance))
index := strconv.Itoa(i)
slot := fmt.Sprintf("%d", nodes[i].Slot)
weight := fmt.Sprintf("%d", nodes[i].Weight/1e9) // Convert unit Gwei to unit ETH.
votes := fmt.Sprintf("%d", nodes[i].Weight/1e9/avgBalance)
index := fmt.Sprintf("%d", i)
g := nodes[i].Graffiti[:]
graffiti := hex.EncodeToString(g[:8])
label := "slot: " + slot + "\n votes: " + votes + "\n weight: " + weight + "\n graffiti: " + graffiti
@@ -64,7 +64,8 @@ func (s *Service) TreeHandler(w http.ResponseWriter, _ *http.Request) {
}
if nodes[i].Slot == s.headSlot() &&
nodes[i].BestDescendent == ^uint64(0) {
nodes[i].BestDescendant == ^uint64(0) &&
nodes[i].Parent != ^uint64(0) {
dotN = dotN.Attr("color", "green")
}
@@ -83,3 +84,11 @@ func (s *Service) TreeHandler(w http.ResponseWriter, _ *http.Request) {
log.WithError(err).Error("Failed to render p2p info page")
}
}
func averageBalance(balances []uint64) float64 {
total := uint64(0)
for i := 0; i < len(balances); i++ {
total += balances[i]
}
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
}
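A quick sanity check on the conversions above, with hypothetical values chosen only to make the units concrete:

// Two validators at 32 ETH each: total = 64e9 Gwei, so averageBalance
// returns 64e9 / 2 / 1e9 = 32.0 ETH.
balances := []uint64{32000000000, 32000000000}
avgBalance := uint64(averageBalance(balances)) // 32
// A node carrying 64e9 Gwei of fork choice weight renders in the tree
// labels as weight "64" (ETH) and votes "2" (64 / 32).
nodeWeight := uint64(64000000000)
weight := fmt.Sprintf("%d", nodeWeight/1e9)           // "64"
votes := fmt.Sprintf("%d", nodeWeight/1e9/avgBalance) // "2"
_, _ = weight, votes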

View File

@@ -0,0 +1,46 @@
package blockchain
import (
"context"
"net/http"
"net/http/httptest"
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestService_TreeHandler(t *testing.T) {
req, err := http.NewRequest("GET", "/tree", nil)
require.NoError(t, err)
ctx := context.Background()
db, sCache := testDB.SetupDB(t)
headState := testutil.NewBeaconState()
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
cfg := &Config{
BeaconDB: db,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
),
StateGen: stategen.New(db, sCache),
}
s, err := NewService(ctx, cfg)
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, testutil.NewBeaconBlock(), headState)
rr := httptest.NewRecorder()
handler := http.HandlerFunc(s.TreeHandler)
handler.ServeHTTP(rr, req)
assert.Equal(t, http.StatusOK, rr.Code)
}

View File

@@ -1,220 +1,9 @@
package blockchain
import (
"context"
"sort"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
)
const maxCacheSize = 70
const initialSyncCacheSize = 45
const minimumCacheSize = initialSyncCacheSize / 3
func (s *Service) persistCachedStates(ctx context.Context, numOfStates int) error {
oldStates := make([]*stateTrie.BeaconState, 0, numOfStates)
// Add slots to the map and add epoch boundary states to the slice.
for _, rt := range s.boundaryRoots[:numOfStates-minimumCacheSize] {
oldStates = append(oldStates, s.initSyncState[rt])
}
err := s.beaconDB.SaveStates(ctx, oldStates, s.boundaryRoots[:numOfStates-minimumCacheSize])
if err != nil {
return err
}
for _, rt := range s.boundaryRoots[:numOfStates-minimumCacheSize] {
delete(s.initSyncState, rt)
}
s.boundaryRoots = s.boundaryRoots[numOfStates-minimumCacheSize:]
return nil
}
// Filter boundary candidates from the currently processed batch of states.
func (s *Service) filterBoundaryCandidates(ctx context.Context, root [32]byte, postState *stateTrie.BeaconState) {
// Only trigger on epoch start.
if !helpers.IsEpochStart(postState.Slot()) {
return
}
stateSlice := make([][32]byte, 0, len(s.initSyncState))
// Add epoch boundary roots to slice.
for rt := range s.initSyncState {
stateSlice = append(stateSlice, rt)
}
sort.Slice(stateSlice, func(i int, j int) bool {
return s.initSyncState[stateSlice[i]].Slot() < s.initSyncState[stateSlice[j]].Slot()
})
epochLength := params.BeaconConfig().SlotsPerEpoch
if len(s.boundaryRoots) > 0 {
// Retrieve previous boundary root.
previousBoundaryRoot := s.boundaryRoots[len(s.boundaryRoots)-1]
previousState, ok := s.initSyncState[previousBoundaryRoot]
if !ok {
// Remove the non-existent root and exit filtering.
s.boundaryRoots = s.boundaryRoots[:len(s.boundaryRoots)-1]
return
}
previousSlot := previousState.Slot()
// Round up slot number to account for skipped slots.
previousSlot = helpers.RoundUpToNearestEpoch(previousSlot)
if postState.Slot()-previousSlot >= epochLength {
targetSlot := postState.Slot()
tempRoots := s.loopThroughCandidates(stateSlice, previousBoundaryRoot, previousSlot, targetSlot)
s.boundaryRoots = append(s.boundaryRoots, tempRoots...)
}
}
s.boundaryRoots = append(s.boundaryRoots, root)
s.pruneOldStates()
s.pruneNonBoundaryStates()
}
// Loop through the provided candidate roots to select those that qualify as boundary roots.
func (s *Service) loopThroughCandidates(stateSlice [][32]byte, previousBoundaryRoot [32]byte,
previousSlot uint64, targetSlot uint64) [][32]byte {
tempRoots := [][32]byte{}
epochLength := params.BeaconConfig().SlotsPerEpoch
// Loop through current states to filter for valid boundary states.
for i := len(stateSlice) - 1; stateSlice[i] != previousBoundaryRoot && i >= 0; i-- {
currentSlot := s.initSyncState[stateSlice[i]].Slot()
// Skip if the current slot is larger than the previous epoch
// boundary.
if currentSlot > targetSlot-epochLength {
continue
}
tempRoots = append(tempRoots, stateSlice[i])
// Switch target slot if the current slot is greater than
// 1 epoch boundary from the previously saved boundary slot.
if currentSlot > previousSlot+epochLength {
currentSlot = helpers.RoundUpToNearestEpoch(currentSlot)
targetSlot = currentSlot
continue
}
break
}
// Reverse to append the roots in ascending order corresponding
// to the respective slots.
tempRoots = bytesutil.ReverseBytes32Slice(tempRoots)
return tempRoots
}
// Prune states older than the current finalized checkpoint.
func (s *Service) pruneOldStates() {
prunedBoundaryRoots := [][32]byte{}
for _, rt := range s.boundaryRoots {
st, ok := s.initSyncState[rt]
// Skip non-existent roots.
if !ok {
continue
}
if st.Slot() < helpers.StartSlot(s.FinalizedCheckpt().Epoch) {
delete(s.initSyncState, rt)
continue
}
prunedBoundaryRoots = append(prunedBoundaryRoots, rt)
}
s.boundaryRoots = prunedBoundaryRoots
}
// Prune non-boundary states from the cache.
func (s *Service) pruneNonBoundaryStates() {
boundaryMap := make(map[[32]byte]bool)
for i := range s.boundaryRoots {
boundaryMap[s.boundaryRoots[i]] = true
}
for rt := range s.initSyncState {
if !boundaryMap[rt] {
delete(s.initSyncState, rt)
}
}
}
func (s *Service) pruneOldNonFinalizedStates() {
stateSlice := make([][32]byte, 0, len(s.initSyncState))
// Add epoch boundary roots to slice.
for rt := range s.initSyncState {
stateSlice = append(stateSlice, rt)
}
// Sort by slots.
sort.Slice(stateSlice, func(i int, j int) bool {
return s.initSyncState[stateSlice[i]].Slot() < s.initSyncState[stateSlice[j]].Slot()
})
boundaryMap := make(map[[32]byte]bool)
for i := range s.boundaryRoots {
boundaryMap[s.boundaryRoots[i]] = true
}
for _, rt := range stateSlice[:initialSyncCacheSize] {
if boundaryMap[rt] {
continue
}
delete(s.initSyncState, rt)
}
}
func (s *Service) generateState(ctx context.Context, startRoot [32]byte, endRoot [32]byte) (*stateTrie.BeaconState, error) {
preState, err := s.beaconDB.State(ctx, startRoot)
if err != nil {
return nil, err
}
if preState == nil {
if !s.stateGen.HasState(ctx, startRoot) {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return nil, errors.Wrap(err, "could not save initial sync blocks")
}
s.clearInitSyncBlocks()
}
preState, err = s.stateGen.StateByRoot(ctx, startRoot)
if err != nil {
return nil, err
}
if preState == nil {
return nil, errors.New("finalized state does not exist in db")
}
}
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return nil, err
}
var endBlock *ethpb.SignedBeaconBlock
if !featureconfig.Get().NoInitSyncBatchSaveBlocks && s.hasInitSyncBlock(endRoot) {
endBlock = s.getInitSyncBlock(endRoot)
s.clearInitSyncBlocks()
} else {
endBlock, err = s.beaconDB.Block(ctx, endRoot)
if err != nil {
return nil, err
}
}
if endBlock == nil {
return nil, errors.New("provided block root does not have block saved in the db")
}
log.Warnf("Generating missing state of slot %d and root %#x", endBlock.Block.Slot, endRoot)
blocks, err := s.stateGen.LoadBlocks(ctx, preState.Slot()+1, endBlock.Block.Slot, endRoot)
if err != nil {
return nil, errors.Wrap(err, "could not load the required blocks")
}
postState, err := s.stateGen.ReplayBlocks(ctx, preState, blocks, endBlock.Block.Slot)
if err != nil {
return nil, errors.Wrap(err, "could not replay the blocks to generate the resultant state")
}
return postState, nil
}
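A hedged sketch of the intended call pattern for generateState; the root names here are illustrative, and the real call sites live elsewhere in this package:

// Hypothetical call site: replay blocks on top of the finalized state
// to rebuild the missing state at headRoot.
finalizedRoot := bytesutil.ToBytes32(s.finalizedCheckpt.Root)
headState, err := s.generateState(ctx, finalizedRoot, headRoot)
if err != nil {
	return errors.Wrap(err, "could not generate head state")
}
_ = headState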
// This saves a beacon block to the initial sync blocks cache.
func (s *Service) saveInitSyncBlock(r [32]byte, b *ethpb.SignedBeaconBlock) {
s.initSyncBlocksLock.Lock()

View File

@@ -1,313 +0,0 @@
package blockchain
import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"gopkg.in/d4l3k/messagediff.v1"
)
func TestFilterBoundaryCandidates_FilterCorrect(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err != nil {
t.Fatal(err)
}
for i := uint64(0); i < 500; i++ {
if err := st.SetSlot(i); err != nil {
t.Fatal(err)
}
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
if helpers.IsEpochStart(i) {
service.boundaryRoots = append(service.boundaryRoots, root)
}
}
lastIndex := len(service.boundaryRoots) - 1
for i := uint64(500); i < 2000; i++ {
if err := st.SetSlot(i); err != nil {
t.Fatal(err)
}
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
}
// Set current state.
latestSlot := helpers.RoundUpToNearestEpoch(2000)
if err := st.SetSlot(latestSlot); err != nil {
t.Fatal(err)
}
lastRoot := [32]byte{}
copy(lastRoot[:], bytesutil.Bytes32(latestSlot))
service.initSyncState[lastRoot] = st.Copy()
service.finalizedCheckpt = &ethpb.Checkpoint{
Epoch: 0,
Root: []byte{},
}
service.filterBoundaryCandidates(context.Background(), lastRoot, st)
if len(service.boundaryRoots[lastIndex+1:]) == 0 {
t.Fatal("Wanted non zero added boundary roots")
}
for _, rt := range service.boundaryRoots[lastIndex+1:] {
st, ok := service.initSyncState[rt]
if !ok {
t.Error("Root doen't exist in cache map")
continue
}
if !(helpers.IsEpochStart(st.Slot()) || helpers.IsEpochStart(st.Slot()-1) || helpers.IsEpochStart(st.Slot()+1)) {
t.Errorf("boundary roots not validly stored. They have slot %d", st.Slot())
}
}
}
func TestFilterBoundaryCandidates_HandleSkippedSlots(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err != nil {
t.Fatal(err)
}
for i := uint64(0); i < 500; i++ {
if err := st.SetSlot(i); err != nil {
t.Fatal(err)
}
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
if helpers.IsEpochStart(i) {
service.boundaryRoots = append(service.boundaryRoots, root)
}
}
lastIndex := len(service.boundaryRoots) - 1
for i := uint64(500); i < 2000; i++ {
if err := st.SetSlot(i); err != nil {
t.Fatal(err)
}
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
// Save only for offset slots.
if helpers.IsEpochStart(i + 10) {
service.initSyncState[root] = st.Copy()
}
}
// Set current state.
latestSlot := helpers.RoundUpToNearestEpoch(2000)
if err := st.SetSlot(latestSlot); err != nil {
t.Fatal(err)
}
lastRoot := [32]byte{}
copy(lastRoot[:], bytesutil.Bytes32(latestSlot))
service.initSyncState[lastRoot] = st.Copy()
service.finalizedCheckpt = &ethpb.Checkpoint{
Epoch: 0,
Root: []byte{},
}
service.filterBoundaryCandidates(context.Background(), lastRoot, st)
if len(service.boundaryRoots[lastIndex+1:]) == 0 {
t.Fatal("Wanted non zero added boundary roots")
}
for _, rt := range service.boundaryRoots[lastIndex+1:] {
st, ok := service.initSyncState[rt]
if !ok {
t.Error("Root doen't exist in cache map")
continue
}
if st.Slot() >= 500 {
// Ignore head boundary root.
if st.Slot() == 2016 {
continue
}
if !helpers.IsEpochStart(st.Slot() + 10) {
t.Errorf("boundary roots not validly stored. They have slot %d "+
"instead of the offset from epoch start", st.Slot())
}
}
}
}
func TestPruneOldStates_AlreadyFinalized(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err != nil {
t.Fatal(err)
}
for i := uint64(100); i < 200; i++ {
if err := st.SetSlot(i); err != nil {
t.Fatal(err)
}
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
service.boundaryRoots = append(service.boundaryRoots, root)
}
finalizedEpoch := uint64(5)
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: finalizedEpoch}
service.pruneOldStates()
for _, rt := range service.boundaryRoots {
st, ok := service.initSyncState[rt]
if !ok {
t.Error("Root doen't exist in cache map")
continue
}
if st.Slot() < helpers.StartSlot(finalizedEpoch) {
t.Errorf("State with slot %d still exists and not pruned", st.Slot())
}
}
}
func TestPruneNonBoundary_CanPrune(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
if err != nil {
t.Fatal(err)
}
for i := uint64(0); i < 2000; i++ {
if err := st.SetSlot(i); err != nil {
t.Fatal(err)
}
root := [32]byte{}
copy(root[:], bytesutil.Bytes32(i))
service.initSyncState[root] = st.Copy()
if helpers.IsEpochStart(i) {
service.boundaryRoots = append(service.boundaryRoots, root)
}
}
service.pruneNonBoundaryStates()
for _, rt := range service.boundaryRoots {
st, ok := service.initSyncState[rt]
if !ok {
t.Error("Root doesn't exist in cache map")
continue
}
if !helpers.IsEpochStart(st.Slot()) {
t.Errorf("Non boundary state with slot %d still exists and not pruned", st.Slot())
}
}
}
func TestGenerateState_CorrectlyGenerated(t *testing.T) {
db := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, StateGen: stategen.New(db, cache.NewStateSummaryCache())}
service, err := NewService(context.Background(), cfg)
if err != nil {
t.Fatal(err)
}
beaconState, privs := testutil.DeterministicGenesisState(t, 32)
genesisBlock := testutil.NewBeaconBlock()
bodyRoot, err := stateutil.BlockRoot(genesisBlock.Block)
if err != nil {
t.Fatal(err)
}
err = beaconState.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: genesisBlock.Block.Slot,
ParentRoot: genesisBlock.Block.ParentRoot,
StateRoot: params.BeaconConfig().ZeroHash[:],
BodyRoot: bodyRoot[:],
})
if err != nil {
t.Fatal(err)
}
if err := beaconState.SetSlashings(make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)); err != nil {
t.Fatal(err)
}
cp := beaconState.CurrentJustifiedCheckpoint()
mockRoot := [32]byte{}
copy(mockRoot[:], "hello-world")
cp.Root = mockRoot[:]
if err := beaconState.SetCurrentJustifiedCheckpoint(cp); err != nil {
t.Fatal(err)
}
if err := beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}); err != nil {
t.Fatal(err)
}
err = db.SaveBlock(context.Background(), genesisBlock)
if err != nil {
t.Fatal(err)
}
genRoot, err := stateutil.BlockRoot(genesisBlock.Block)
if err != nil {
t.Fatal(err)
}
err = db.SaveState(context.Background(), beaconState, genRoot)
if err != nil {
t.Fatal(err)
}
lastBlock := &ethpb.SignedBeaconBlock{}
for i := uint64(1); i < 10; i++ {
block, err := testutil.GenerateFullBlock(beaconState, privs, testutil.DefaultBlockGenConfig(), i)
if err != nil {
t.Fatal(err)
}
beaconState, err = state.ExecuteStateTransition(context.Background(), beaconState, block)
if err != nil {
t.Fatal(err)
}
err = db.SaveBlock(context.Background(), block)
if err != nil {
t.Fatal(err)
}
lastBlock = block
}
root, err := stateutil.BlockRoot(lastBlock.Block)
if err != nil {
t.Fatal(err)
}
newState, err := service.generateState(context.Background(), genRoot, root)
if err != nil {
t.Fatal(err)
}
if !ssz.DeepEqual(newState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
diff, _ := messagediff.PrettyDiff(newState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
t.Errorf("Generated state is different from what is expected: %s", diff)
}
}

View File

@@ -6,8 +6,6 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
)
@@ -33,30 +31,3 @@ func logBlockSyncStatus(block *ethpb.BeaconBlock, blockRoot [32]byte, finalized
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root[:])[:8]),
}).Info("Synced new block")
}
func logEpochData(beaconState *stateTrie.BeaconState) {
log.WithFields(logrus.Fields{
"epoch": helpers.CurrentEpoch(beaconState),
"finalizedEpoch": beaconState.FinalizedCheckpointEpoch(),
"justifiedEpoch": beaconState.CurrentJustifiedCheckpoint().Epoch,
"previousJustifiedEpoch": beaconState.PreviousJustifiedCheckpoint().Epoch,
}).Info("Starting next epoch")
activeVals, err := helpers.ActiveValidatorIndices(beaconState, helpers.CurrentEpoch(beaconState))
if err != nil {
log.WithError(err).Error("Could not get active validator indices")
return
}
log.WithFields(logrus.Fields{
"totalValidators": len(beaconState.Validators()),
"activeValidators": len(activeVals),
"averageBalance": fmt.Sprintf("%.5f ETH", averageBalance(beaconState.Balances())),
}).Info("Validator registry information")
}
func averageBalance(balances []uint64) float64 {
total := uint64(0)
for i := 0; i < len(balances); i++ {
total += balances[i]
}
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
}

View File

@@ -78,11 +78,11 @@ var (
})
totalEligibleBalances = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_eligible_balances",
Help: "The total amount of ether, in gwei, that has been used in voting attestation target of previous epoch",
Help: "The total amount of ether, in gwei, that is eligible for voting of previous epoch",
})
totalVotedTargetBalances = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_voted_target_balances",
Help: "The total amount of ether, in gwei, that is eligible for voting of previous epoch",
Help: "The total amount of ether, in gwei, that has been used in voting attestation target of previous epoch",
})
reorgCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_reorg_total",
@@ -138,6 +138,7 @@ func reportEpochMetrics(state *stateTrie.BeaconState) {
for i, validator := range state.Validators() {
bal, err := state.BalanceAtIndex(uint64(i))
if err != nil {
log.Errorf("Could not load validator balance: %v", err)
continue
}
if validator.Slashed {

View File

@@ -7,7 +7,6 @@ import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -20,7 +19,8 @@ import (
var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
// onAttestation is called whenever an attestation is received, verifies the attestation is valid and saves
/// it to the DB.
// it to the DB. As a stateless function, this does not hold or delay attestations as the spec describes.
// The delay is handled by the caller in `processAttestation`.
//
// Spec pseudocode definition:
// def on_attestation(store: Service, attestation: Attestation) -> None:
@@ -68,7 +68,7 @@ var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
// store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
// TODO(#6072): This code path is highly untested. Requires comprehensive tests and simpler refactoring.
func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]uint64, error) {
ctx, span := trace.StartSpan(ctx, "blockchain.onAttestation")
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
defer span.End()
if a == nil {
@@ -121,19 +121,12 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]ui
return nil, err
}
// Use the target state to to validate attestation and calculate the committees.
// Use the target state to validate attestation and calculate the committees.
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
if err != nil {
return nil, err
}
// Only save attestation in DB for archival node.
if flags.Get().EnableArchive {
if err := s.beaconDB.SaveAttestation(ctx, a); err != nil {
return nil, err
}
}
if indexedAtt.AttestingIndices == nil {
return nil, errors.New("nil attesting indices")
}
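The doc comment above defers the spec-mandated attestation delay to the caller; a rough sketch of that caller-side pacing follows, hypothetical since the actual scheduling in processAttestation may differ:

// Hypothetical pacing: wait until the attestation's slot has fully
// elapsed, then feed it to the stateless onAttestation.
nextSlot := a.Data.Slot + 1
elapsed := time.Duration(nextSlot*params.BeaconConfig().SecondsPerSlot) * time.Second
time.Sleep(time.Until(s.genesisTime.Add(elapsed)))
if _, err := s.onAttestation(ctx, a); err != nil {
	log.WithError(err).Error("Could not process attestation")
}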

View File

@@ -3,7 +3,6 @@ package blockchain
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"github.com/pkg/errors"
@@ -15,7 +14,6 @@ import (
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
)
@@ -31,76 +29,17 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
return cachedState, nil
}
if featureconfig.Get().NewStateMgmt {
if !s.stateGen.HasState(ctx, bytesutil.ToBytes32(c.Root)) {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return nil, errors.Wrap(err, "could not save initial sync blocks")
}
s.clearInitSyncBlocks()
}
baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if helpers.StartSlot(c.Epoch) > baseState.Slot() {
baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(c.Epoch))
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to %d", helpers.StartSlot(c.Epoch))
}
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: baseState,
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return baseState, nil
}
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head root")
}
if bytes.Equal(headRoot, c.Root) {
st, err := s.HeadState(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head state")
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: st.Copy(),
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return st, nil
}
}
baseState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(c.Root))
baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if baseState == nil {
return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
}
if helpers.StartSlot(c.Epoch) > baseState.Slot() {
baseState = baseState.Copy()
baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(c.Epoch))
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to %d", helpers.StartSlot(c.Epoch))
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: baseState,
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return baseState, nil
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
@@ -111,6 +50,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
}
return baseState, nil
}
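Because the hunks above interleave the deleted NewStateMgmt and CheckHeadState branches with the surviving code, the resulting flow reads more easily condensed. Error handling is elided; this is a reading aid, not the implementation:

// Condensed shape of the new getAttPreState: consult the checkpoint
// cache, otherwise load via stategen, advance to the checkpoint
// epoch's start slot if needed, then cache the result.
if cached, _ := s.checkpointState.StateByCheckpoint(c); cached != nil {
	return cached, nil
}
baseState, _ := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if helpers.StartSlot(c.Epoch) > baseState.Slot() {
	baseState, _ = state.ProcessSlots(ctx, baseState, helpers.StartSlot(c.Epoch))
}
_ = s.checkpointState.AddCheckpointState(&cache.CheckpointState{Checkpoint: c, State: baseState})
return baseState, nil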
// verifyAttTargetEpoch validates that the attestation is from the current or previous epoch.
@@ -130,10 +70,16 @@ func (s *Service) verifyAttTargetEpoch(ctx context.Context, genesisTime uint64,
// verifyBeaconBlock verifies that the attestation's beacon block is known and not from the future.
func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
b, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(data.BeaconBlockRoot))
r := bytesutil.ToBytes32(data.BeaconBlockRoot)
b, err := s.beaconDB.Block(ctx, r)
if err != nil {
return err
}
// If the block does not exist in db, check again if block exists in initial sync block cache.
// This could happen as the node first syncs to head.
if b == nil && s.hasInitSyncBlock(r) {
b = s.getInitSyncBlock(r)
}
if b == nil || b.Block == nil {
return fmt.Errorf("beacon block %#x does not exist", bytesutil.Trunc(data.BeaconBlockRoot))
}
@@ -165,48 +111,7 @@ func (s *Service) verifyAttestation(ctx context.Context, baseState *stateTrie.Be
}
indexedAtt := attestationutil.ConvertToIndexed(ctx, a, committee)
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
if err == helpers.ErrSigFailedToVerify {
// When sig fails to verify, check if there's a differences in committees due to
// different seeds.
var aState *stateTrie.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
if !s.stateGen.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return nil, errors.Wrap(err, "could not save initial sync blocks")
}
s.clearInitSyncBlocks()
}
aState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if err != nil {
return nil, err
}
} else {
aState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if err != nil {
return nil, err
}
}
if aState == nil {
return nil, fmt.Errorf("nil state for block root %#x", a.Data.BeaconBlockRoot)
}
epoch := helpers.SlotToEpoch(a.Data.Slot)
origSeed, err := helpers.Seed(baseState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return nil, errors.Wrap(err, "could not get original seed")
}
aSeed, err := helpers.Seed(aState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return nil, errors.Wrap(err, "could not get attester's seed")
}
if origSeed != aSeed {
return nil, fmt.Errorf("could not verify indexed attestation due to differences in seeds: %v != %v",
hex.EncodeToString(bytesutil.Trunc(origSeed[:])), hex.EncodeToString(bytesutil.Trunc(aSeed[:])))
}
}
return nil, errors.Wrap(err, "could not verify indexed attestation")
}
return indexedAtt, nil
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -19,62 +18,44 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestStore_OnAttestation(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(db, cache.NewStateSummaryCache()),
StateGen: stategen.New(db, sc),
}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
_, err = blockTree1(db, []byte{'g'})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
BlkWithOutState := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 0}}
if err := db.SaveBlock(ctx, BlkWithOutState); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(ctx, BlkWithOutState))
BlkWithOutStateRoot, err := stateutil.BlockRoot(BlkWithOutState.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
BlkWithStateBadAtt := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
if err := db.SaveBlock(ctx, BlkWithStateBadAtt); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(ctx, BlkWithStateBadAtt))
BlkWithStateBadAttRoot, err := stateutil.BlockRoot(BlkWithStateBadAtt.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
s := testutil.NewBeaconState()
if err := s.SetSlot(100 * params.BeaconConfig().SlotsPerEpoch); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot); err != nil {
t.Fatal(err)
}
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
BlkWithValidState := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2}}
if err := db.SaveBlock(ctx, BlkWithValidState); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(ctx, BlkWithValidState))
BlkWithValidStateRoot, err := stateutil.BlockRoot(BlkWithValidState.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
s = testutil.NewBeaconState()
if err := s.SetFork(&pb.Fork{
Epoch: 0,
@@ -83,9 +64,7 @@ func TestStore_OnAttestation(t *testing.T) {
}); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
tests := []struct {
name string
@@ -113,7 +92,7 @@ func TestStore_OnAttestation(t *testing.T) {
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
s: &pb.BeaconState{},
wantErr: true,
wantErrString: "pre state of target block 0 does not exist",
wantErrString: "could not get pre state for slot 0",
},
{
name: "process attestation doesn't match current epoch",
@@ -121,7 +100,7 @@ func TestStore_OnAttestation(t *testing.T) {
Root: BlkWithStateBadAttRoot[:]}}},
s: &pb.BeaconState{Slot: 100 * params.BeaconConfig().SlotsPerEpoch},
wantErr: true,
wantErrString: "does not match current epoch",
wantErrString: "target epoch 100 does not match current epoch",
},
{
name: "process nil field (a.Target) in attestation",
@@ -162,16 +141,14 @@ func TestStore_OnAttestation(t *testing.T) {
func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, cache.NewStateSummaryCache()),
StateGen: stategen.New(db, sc),
}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
s, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Fork: &pb.Fork{
@@ -189,13 +166,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
Validators: []*ethpb.Validator{{PublicKey: bytesutil.PadTo([]byte("foo"), 48)}},
Balances: []uint64{0},
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := [32]byte{'g'}
if err := service.beaconDB.SaveState(ctx, s, r); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveState(ctx, s, r))
service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
@@ -204,141 +177,80 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r = bytesutil.ToBytes32([]byte{'A'})
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
if err := service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
s1, err := service.getAttPreState(ctx, cp1)
if err != nil {
t.Fatal(err)
}
if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
}
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
if err := service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
s2, err := service.getAttPreState(ctx, cp2)
if err != nil {
t.Fatal(err)
}
if s2.Slot() != 2*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot())
}
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
s1, err = service.getAttPreState(ctx, cp1)
if err != nil {
t.Fatal(err)
}
if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
}
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
s1, err = service.checkpointState.StateByCheckpoint(cp1)
if err != nil {
t.Fatal(err)
}
if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
}
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
s2, err = service.checkpointState.StateByCheckpoint(cp2)
if err != nil {
t.Fatal(err)
}
if s2.Slot() != 2*params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot())
}
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
if err := s.SetSlot(params.BeaconConfig().SlotsPerEpoch + 1); err != nil {
t.Fatal(err)
}
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, 32)}
if err := service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
s3, err := service.getAttPreState(ctx, cp3)
if err != nil {
t.Fatal(err)
}
if s3.Slot() != s.Slot() {
t.Errorf("Wanted state slot: %d, got: %d", s.Slot(), s3.Slot())
}
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
}
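The conversions in this test replace hand-rolled "if err != nil { t.Fatal(err) }" blocks with the shared require/assert helpers. As a rough sketch of what such a helper does (a hypothetical minimal re-implementation, not Prysm's actual testutil code):

package example

import "testing"

// noError is a minimal stand-in for require.NoError: it marks itself as
// a test helper so failures point at the caller, then fails fast when
// err is non-nil.
func noError(t *testing.T, err error) {
    t.Helper()
    if err != nil {
        t.Fatal(err)
    }
}

The assert variants differ only in reporting with t.Error instead of t.Fatal, so the test keeps running after a failed check.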
func TestStore_UpdateCheckpointState(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, cache.NewStateSummaryCache()),
StateGen: stategen.New(db, sc),
}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
epoch := uint64(1)
baseState, _ := testutil.DeterministicGenesisState(t, 1)
if err := baseState.SetSlot(epoch * params.BeaconConfig().SlotsPerEpoch); err != nil {
t.Fatal(err)
}
require.NoError(t, baseState.SetSlot(epoch*params.BeaconConfig().SlotsPerEpoch))
checkpoint := &ethpb.Checkpoint{Epoch: epoch}
if err := service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
returned, err := service.getAttPreState(ctx, checkpoint)
if err != nil {
t.Fatal(err)
}
if baseState.Slot() != returned.Slot() {
t.Error("Incorrectly returned base state")
}
require.NoError(t, err)
assert.Equal(t, returned.Slot(), baseState.Slot(), "Incorrectly returned base state")
cached, err := service.checkpointState.StateByCheckpoint(checkpoint)
if err != nil {
t.Fatal(err)
}
if cached == nil {
t.Error("State should have been cached")
}
require.NoError(t, err)
assert.NotNil(t, cached, "State should have been cached")
epoch = uint64(2)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch}
if err := service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
returned, err = service.getAttPreState(ctx, newCheckpoint)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(newCheckpoint.Epoch))
if err != nil {
t.Fatal(err)
}
if baseState.Slot() != returned.Slot() {
t.Error("Incorrectly returned base state")
}
require.NoError(t, err)
assert.Equal(t, returned.Slot(), baseState.Slot(), "Incorrectly returned base state")
cached, err = service.checkpointState.StateByCheckpoint(newCheckpoint)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if !proto.Equal(returned.InnerStateUnsafe(), cached.InnerStateUnsafe()) {
t.Error("Incorrectly cached base state")
}
@@ -346,13 +258,11 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if err := service.verifyAttTargetEpoch(
ctx,
@@ -365,13 +275,11 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if err := service.verifyAttTargetEpoch(
ctx,
@@ -384,13 +292,11 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
func TestAttEpoch_NotMatch(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if err := service.verifyAttTargetEpoch(
ctx,
@@ -404,132 +310,88 @@ func TestAttEpoch_NotMatch(t *testing.T) {
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
d := &ethpb.AttestationData{}
if err := service.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "beacon block does not exist") {
t.Error("Did not receive the wanted error")
}
assert.ErrorContains(t, "beacon block does not exist", service.verifyBeaconBlock(ctx, d))
}
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2}}
if err := service.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
r, err := stateutil.BlockRoot(b.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
err = service.verifyBeaconBlock(ctx, d)
if err == nil || !strings.Contains(err.Error(), "could not process attestation for future block") {
t.Error("Did not receive the wanted error")
}
assert.ErrorContains(t, "could not process attestation for future block", service.verifyBeaconBlock(ctx, d))
}
func TestVerifyBeaconBlock_OK(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2}}
if err := service.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
r, err := stateutil.BlockRoot(b.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
if err := service.verifyBeaconBlock(ctx, d); err != nil {
t.Error("Did not receive the wanted error")
}
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Unexpected error when verifying beacon block")
}
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
b32 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 32}}
if err := service.beaconDB.SaveBlock(ctx, b32); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
r32, err := stateutil.BlockRoot(b32.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
b33 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 33, ParentRoot: r32[:]}}
if err := service.beaconDB.SaveBlock(ctx, b33); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
r33, err := stateutil.BlockRoot(b33.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
wanted := "FFG and LMD votes are not consistent"
if err := service.verifyLMDFFGConsistent(context.Background(), 1, []byte{'a'}, r33[:]); err.Error() != wanted {
t.Error("Did not get wanted error")
}
assert.ErrorContains(t, wanted, service.verifyLMDFFGConsistent(context.Background(), 1, []byte{'a'}, r33[:]))
}
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
service, err := NewService(ctx, cfg)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
b32 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 32}}
if err := service.beaconDB.SaveBlock(ctx, b32); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
r32, err := stateutil.BlockRoot(b32.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
b33 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 33, ParentRoot: r32[:]}}
if err := service.beaconDB.SaveBlock(ctx, b33); err != nil {
t.Fatal(err)
}
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
r33, err := stateutil.BlockRoot(b33.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if err := service.verifyLMDFFGConsistent(context.Background(), 1, r32[:], r33[:]); err != nil {
t.Errorf("Could not verify LMD and FFG votes to be consistent: %v", err)
}
err = service.verifyLMDFFGConsistent(context.Background(), 1, r32[:], r33[:])
assert.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
}
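These tests exercise verifyLMDFFGConsistent, which rejects attestations whose FFG target is not an ancestor of their LMD head vote. A minimal sketch of that rule, assuming an illustrative ancestorAt lookup and 32 slots per epoch:

const slotsPerEpoch = 32

// lmdFFGConsistent reports whether the FFG target root is the ancestor
// of the LMD vote (the head root) at the target epoch's start slot.
// ancestorAt is an assumed helper resolving a root's ancestor at a slot.
func lmdFFGConsistent(targetEpoch uint64, targetRoot, headRoot [32]byte,
    ancestorAt func(root [32]byte, slot uint64) [32]byte) bool {
    return ancestorAt(headRoot, targetEpoch*slotsPerEpoch) == targetRoot
}

In the tests above, epoch 1 starts at slot 32, so the ancestor of r33 (slot 33) at slot 32 is r32: the check passes for target root r32 and fails for the arbitrary root 'a'.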


@@ -2,20 +2,18 @@ package blockchain
import (
"context"
"encoding/hex"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -41,7 +39,7 @@ var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
// store.finalized_checkpoint.root
// )
// # Check that block is later than the finalized epoch slot
// assert block.slot > compute_start_slot_of_epoch(store.finalized_checkpoint.epoch)
// assert block.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// # Check the block is valid and compute the post-state
// state = state_transition(pre_state, block)
// # Add new state for this block to the store
@@ -55,145 +53,71 @@ var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock, blockRoot [32]byte) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockchain.onBlock")
func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if signed == nil || signed.Block == nil {
return nil, errors.New("nil block")
return errors.New("nil block")
}
b := signed.Block
// Retrieve incoming block's pre state.
preState, err := s.getBlockPreState(ctx, b)
if err != nil {
return nil, err
return err
}
log.WithFields(logrus.Fields{
"slot": b.Slot,
"root": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
}).Debug("Executing state transition on block")
postState, err := state.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return nil, errors.Wrap(err, "could not execute state transition")
return errors.Wrap(err, "could not execute state transition")
}
if err := s.beaconDB.SaveBlock(ctx, signed); err != nil {
return nil, errors.Wrapf(err, "could not save block from slot %d", b.Slot)
}
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, postState); err != nil {
return nil, errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, blockRoot, postState); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
} else {
if err := s.beaconDB.SaveState(ctx, postState, blockRoot); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}
// Update justified check point.
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
if err := s.updateJustified(ctx, postState); err != nil {
return nil, err
return err
}
}
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
// Update finalized check point.
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
if !featureconfig.Get().NoInitSyncBatchSaveBlocks {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return nil, err
}
s.clearInitSyncBlocks()
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
return err
}
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
return nil, errors.Wrap(err, "could not save finalized checkpoint")
}
if !featureconfig.Get().NewStateMgmt {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return nil, errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
}
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
// Prune proto array fork choice nodes; all nodes before the finalized check point will
// be pruned.
if err := s.forkChoiceStore.Prune(ctx, fRoot); err != nil {
return nil, errors.Wrap(err, "could not prune proto array fork choice nodes")
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return nil, errors.Wrap(err, "could not save new justified")
return errors.Wrap(err, "could not save new justified")
}
if featureconfig.Get().NewStateMgmt {
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
fBlock, err := s.beaconDB.Block(ctx, fRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block to migrate")
}
if err := s.stateGen.MigrateToCold(ctx, fBlock.Block.Slot, fRoot); err != nil {
return nil, errors.Wrap(err, "could not migrate to cold")
}
}
}
// Epoch boundary bookkeeping such as logging epoch summaries.
if postState.Slot() >= s.nextEpochBoundarySlot {
logEpochData(postState)
reportEpochMetrics(postState)
// Update committees cache at epoch boundary slot.
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return nil, err
}
s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))
}
// Delete the processed block attestations from attestation pool.
if err := s.deletePoolAtts(b.Body.Attestations); err != nil {
return nil, err
}
// Delete the processed block attester slashings from slashings pool.
for i := 0; i < len(b.Body.AttesterSlashings); i++ {
s.slashingPool.MarkIncludedAttesterSlashing(b.Body.AttesterSlashings[i])
// Update deposit cache.
s.depositCache.InsertFinalizedDeposits(ctx, int64(postState.Eth1DepositIndex()))
}
defer reportAttestationInclusion(b)
return postState, nil
return s.handleEpochBoundary(postState)
}
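With onBlock now returning only an error instead of the post state, callers update the head from the cached justified balances. A hedged caller-side sketch (a hypothetical wrapper reusing names from this diff, mirroring ReceiveBlock further down):

func (s *Service) processAndUpdateHead(ctx context.Context, blk *ethpb.SignedBeaconBlock, root [32]byte) error {
    if err := s.onBlock(ctx, blk, root); err != nil {
        return errors.Wrap(err, "could not process block")
    }
    // The post state is persisted via stateGen inside onBlock; the head
    // update reads cached justified balances rather than a returned state.
    return s.updateHead(ctx, s.getJustifiedBalances())
}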
// onBlockInitialSyncStateTransition is called when an initial sync block is received.
// It runs state transition on the block and without any BLS verification. The excluded BLS verification
// includes attestation's aggregated signature. It also does not save attestations.
// It runs the state transition on the block without fork choice and post-operation pool processing.
// The block's signing root should be computed before calling this method to avoid redundant
// computation in this method and methods it calls into.
func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockchain.onBlock")
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockInitialSyncStateTransition")
defer span.End()
if signed == nil || signed.Block == nil {
@@ -203,128 +127,164 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
b := signed.Block
// Retrieve incoming block's pre state.
preState, err := s.verifyBlkPreState(ctx, b)
if err := s.verifyBlkPreState(ctx, b); err != nil {
return err
}
preState, err := s.stateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(signed.Block.ParentRoot))
if err != nil {
return err
}
// Invalidate the cache for the parent root because the pre state will get mutated.
s.stateGen.DeleteHotStateInCache(bytesutil.ToBytes32(b.ParentRoot))
if preState == nil {
return fmt.Errorf("nil pre state for slot %d", b.Slot)
}
// Exit early if the pre state slot is higher than incoming block's slot.
if preState.Slot() >= signed.Block.Slot {
return nil
}
postState, err := state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
var postState *stateTrie.BeaconState
if featureconfig.Get().InitSyncNoVerify {
postState, err = state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
} else {
postState, err = state.ExecuteStateTransition(ctx, preState, signed)
}
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}
if !featureconfig.Get().NoInitSyncBatchSaveBlocks {
s.saveInitSyncBlock(blockRoot, signed)
} else {
if err := s.beaconDB.SaveBlock(ctx, signed); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
}
}
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, blockRoot, postState); err != nil {
return errors.Wrap(err, "could not save state")
}
} else {
s.initSyncStateLock.Lock()
defer s.initSyncStateLock.Unlock()
s.initSyncState[blockRoot] = postState.Copy()
s.filterBoundaryCandidates(ctx, blockRoot, postState)
}
if flags.Get().EnableArchive {
atts := signed.Block.Body.Attestations
if err := s.beaconDB.SaveAttestations(ctx, atts); err != nil {
return errors.Wrapf(err, "could not save block attestations from slot %d", b.Slot)
}
}
// Update justified check point.
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
if err := s.updateJustified(ctx, postState); err != nil {
return err
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, true /* init sync */); err != nil {
return err
}
// Rate limit how many blocks (two epochs' worth) a node keeps in memory.
if len(s.getInitSyncBlocks()) > int(initialSyncBlockCacheSize) {
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
if err := s.updateJustifiedInitSync(ctx, postState.CurrentJustifiedCheckpoint()); err != nil {
return err
}
}
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
if !featureconfig.Get().NewStateMgmt {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
if err := s.saveInitState(ctx, postState); err != nil {
return errors.Wrap(err, "could not save init sync finalized state")
}
}
if !featureconfig.Get().NoInitSyncBatchSaveBlocks {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
return errors.Wrap(err, "could not save finalized checkpoint")
}
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
if featureconfig.Get().NewStateMgmt {
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
fBlock, err := s.beaconDB.Block(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block to migrate")
}
if err := s.stateGen.MigrateToCold(ctx, fBlock.Block.Slot, fRoot); err != nil {
return errors.Wrap(err, "could not migrate to cold")
}
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
return err
}
}
if !featureconfig.Get().NewStateMgmt {
numOfStates := len(s.boundaryRoots)
if numOfStates > initialSyncCacheSize {
if err = s.persistCachedStates(ctx, numOfStates); err != nil {
return err
}
return s.handleEpochBoundary(postState)
}
func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock,
blockRoots [][32]byte) (*stateTrie.BeaconState, []*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if len(blks) == 0 || len(blockRoots) == 0 {
return nil, nil, nil, errors.New("no blocks provided")
}
if blks[0] == nil || blks[0].Block == nil {
return nil, nil, nil, errors.New("nil block")
}
b := blks[0].Block
// Retrieve incoming block's pre state.
if err := s.verifyBlkPreState(ctx, b); err != nil {
return nil, nil, nil, err
}
preState, err := s.stateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, nil, nil, err
}
if preState == nil {
return nil, nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot)
}
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
sigSet := &bls.SignatureSet{
Signatures: []bls.Signature{},
PublicKeys: []bls.PublicKey{},
Messages: [][32]byte{},
}
set := new(bls.SignatureSet)
boundaries := make(map[[32]byte]*stateTrie.BeaconState)
for i, b := range blks {
set, preState, err = state.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return nil, nil, nil, err
}
if len(s.initSyncState) > maxCacheSize {
s.pruneOldNonFinalizedStates()
// Save potential boundary states.
if helpers.IsEpochStart(preState.Slot()) {
boundaries[blockRoots[i]] = preState.Copy()
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
fCheckpoints[i] = preState.FinalizedCheckpoint()
sigSet.Join(set)
}
verify, err := bls.VerifyMultipleSignatures(sigSet.Signatures, sigSet.Messages, sigSet.PublicKeys)
if err != nil {
return nil, nil, nil, err
}
if !verify {
return nil, nil, nil, errors.New("batch block signature verification failed")
}
for r, st := range boundaries {
if err := s.stateGen.SaveState(ctx, r, st); err != nil {
return nil, nil, nil, err
}
}
return preState, fCheckpoints, jCheckpoints, nil
}
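onBlockBatch defers all BLS verification: each state transition returns a signature set, the sets are joined across the batch, and a single multi-signature check covers every block. A minimal self-contained sketch of that accumulate-then-verify shape (illustrative types, not the shared/bls API):

// sigSet accumulates messages, signatures and public keys across blocks
// so a single batched check can replace per-block verification.
type sigSet struct {
    msgs [][32]byte
    sigs [][]byte
    keys [][]byte
}

func (s *sigSet) join(o *sigSet) {
    s.msgs = append(s.msgs, o.msgs...)
    s.sigs = append(s.sigs, o.sigs...)
    s.keys = append(s.keys, o.keys...)
}

// verifyAll stands in for bls.VerifyMultipleSignatures; the real call
// performs one aggregated pairing check over all entries, far cheaper
// than verifying each block's signatures individually.
func (s *sigSet) verifyAll(verify func(sig, key []byte, msg [32]byte) bool) bool {
    for i := range s.sigs {
        if !verify(s.sigs[i], s.keys[i], s.msgs[i]) {
            return false
        }
    }
    return true
}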
// handleBlockAfterBatchVerify handles a block after its batch has been verified. At this point we can save blocks,
// their state summaries, and split them off to the relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
b := signed.Block
s.saveInitSyncBlock(blockRoot, signed)
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
return err
}
s.stateGen.SaveStateSummary(ctx, signed, blockRoot)
// Rate limit how many blocks (two epochs' worth) a node keeps in memory.
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}
if jCheckpoint.Epoch > s.justifiedCheckpt.Epoch {
if err := s.updateJustifiedInitSync(ctx, jCheckpoint); err != nil {
return err
}
}
// Epoch boundary bookkeeping such as logging epoch summaries.
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if fCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
if err := s.updateFinalized(ctx, fCheckpoint); err != nil {
return err
}
}
return nil
}
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(postState *stateTrie.BeaconState) error {
if postState.Slot() >= s.nextEpochBoundarySlot {
reportEpochMetrics(postState)
s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))
@@ -336,32 +296,19 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return err
}
if !featureconfig.Get().NewStateMgmt && helpers.IsEpochStart(postState.Slot()) {
if err := s.beaconDB.SaveState(ctx, postState, blockRoot); err != nil {
return errors.Wrap(err, "could not save state")
}
}
}
return nil
}
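handleEpochBoundary runs the per-epoch bookkeeping at most once per epoch by tracking the first slot of the next epoch. A sketch of that arithmetic, assuming 32 slots per epoch as in the mainnet config:

const slotsPerEpochExample = 32

// nextEpochBoundarySlot returns the first slot of the epoch after the
// one containing slot, i.e. helpers.StartSlot(helpers.NextEpoch(state)).
func nextEpochBoundarySlot(slot uint64) uint64 {
    return (slot/slotsPerEpochExample + 1) * slotsPerEpochExample
}

For example, a post state at slot 70 (epoch 2) sets the next boundary to slot 96, so slots 71 through 95 skip the bookkeeping entirely.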
// This feeds the block and the block's attestations into the fork choice store, allowing the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock, root [32]byte, state *stateTrie.BeaconState) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, state); err != nil {
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock, root [32]byte,
state *stateTrie.BeaconState) error {
fCheckpoint := state.FinalizedCheckpoint()
jCheckpoint := state.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block to fork choice store.
if err := s.forkChoiceStore.ProcessBlock(ctx,
blk.Slot, root, bytesutil.ToBytes32(blk.ParentRoot), bytesutil.ToBytes32(blk.Body.Graffiti),
state.CurrentJustifiedCheckpoint().Epoch,
state.FinalizedCheckpointEpoch()); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body.Attestations {
committee, err := helpers.BeaconCommitteeFromState(state, a.Data.Slot, a.Data.CommitteeIndex)
@@ -371,6 +318,41 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.B
indices := attestationutil.AttestingIndices(a.AggregationBits, committee)
s.forkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
}
return nil
}
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock,
root [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block to fork choice store.
if err := s.forkChoiceStore.ProcessBlock(ctx,
blk.Slot, root, bytesutil.ToBytes32(blk.ParentRoot), bytesutil.ToBytes32(blk.Body.Graffiti),
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
return nil
}
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState, initSync bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
s.saveInitSyncBlock(r, b)
} else {
if err := s.beaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block.Slot)
}
}
if err := s.stateGen.SaveState(ctx, r, state); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block, r, state); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block.Slot)
}
return nil
}


@@ -8,11 +8,10 @@ import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/traceutil"
@@ -33,34 +32,29 @@ func (s *Service) CurrentSlot() uint64 {
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "forkchoice.getBlockPreState")
ctx, span := trace.StartSpan(ctx, "forkChoice.getBlockPreState")
defer span.End()
// Verify incoming block has a valid pre state.
preState, err := s.verifyBlkPreState(ctx, b)
if err != nil {
if err := s.verifyBlkPreState(ctx, b); err != nil {
return nil, err
}
// For new state management, this ensures the state does not get mutated since initial syncing
// uses verifyBlkPreState.
if featureconfig.Get().NewStateMgmt {
preState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
if preState == nil {
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot)
}
preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
if preState == nil {
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot)
}
// Verify block slot time is not from the feature.
// Verify block slot time is not from the future.
if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, err
}
// Verify block is a descendant of a finalized block.
if err := s.verifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot), b.Slot); err != nil {
if err := s.VerifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
return nil, err
}
@@ -73,88 +67,52 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
}
// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "chainService.verifyBlkPreState")
defer span.End()
if featureconfig.Get().NewStateMgmt {
parentRoot := bytesutil.ToBytes32(b.ParentRoot)
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// a subset of the block object.
if !s.stateGen.StateSummaryExists(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
return nil, errors.New("could not reconstruct parent state")
}
if !s.stateGen.HasState(ctx, parentRoot) {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return nil, errors.Wrap(err, "could not save initial sync blocks")
}
s.clearInitSyncBlocks()
}
preState, err := s.stateGen.StateByRootInitialSync(ctx, parentRoot)
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
if preState == nil {
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot)
}
return preState, nil // No copy needed from newly hydrated state gen object.
parentRoot := bytesutil.ToBytes32(b.ParentRoot)
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just
// a subset of the block object.
if !s.stateGen.StateSummaryExists(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
return errors.New("could not reconstruct parent state")
}
preState := s.initSyncState[bytesutil.ToBytes32(b.ParentRoot)]
var err error
if preState == nil {
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head root")
}
if bytes.Equal(headRoot, b.ParentRoot) {
return s.HeadState(ctx)
}
if !s.stateGen.HasState(ctx, parentRoot) {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return errors.Wrap(err, "could not save initial sync blocks")
}
preState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
if preState == nil {
if bytes.Equal(s.finalizedCheckpt.Root, b.ParentRoot) {
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
}
preState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, err
}
}
return preState, nil // No copy needed from newly hydrated DB object.
s.clearInitSyncBlocks()
}
return preState.Copy(), nil
return nil
}
// verifyBlkDescendant validates input block root is a descendant of the
// VerifyBlkDescendant validates input block root is a descendant of the
// current finalized block root.
func (s *Service) verifyBlkDescendant(ctx context.Context, root [32]byte, slot uint64) error {
ctx, span := trace.StartSpan(ctx, "forkchoice.verifyBlkDescendant")
func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "forkChoice.VerifyBlkDescendant")
defer span.End()
finalizedBlkSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
if err != nil || finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
return errors.Wrap(err, "could not get finalized block")
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.finalizedCheckpt.Root))
finalizedBlkSigned, err := s.beaconDB.Block(ctx, fRoot)
if err != nil {
return err
}
if finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
return errors.New("nil finalized block")
}
finalizedBlk := finalizedBlkSigned.Block
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
if err != nil {
return errors.Wrap(err, "could not get finalized block root")
}
if bFinalizedRoot == nil {
return fmt.Errorf("no finalized block known for block from slot %d", slot)
return fmt.Errorf("no finalized block known for block %#x", bytesutil.Trunc(root[:]))
}
if !bytes.Equal(bFinalizedRoot, s.finalizedCheckpt.Root) {
err := fmt.Errorf("block from slot %d is not a descendent of the current finalized block slot %d, %#x != %#x",
slot, finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot), bytesutil.Trunc(s.finalizedCheckpt.Root))
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
err := fmt.Errorf("block %#x is not a descendent of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
traceutil.AnnotateError(span, err)
return err
}
@@ -171,64 +129,6 @@ func (s *Service) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
return nil
}
// rmStatesOlderThanLastFinalized deletes the states in db since last finalized check point.
func (s *Service) rmStatesOlderThanLastFinalized(ctx context.Context, startSlot uint64, endSlot uint64) error {
ctx, span := trace.StartSpan(ctx, "forkchoice.rmStatesBySlots")
defer span.End()
// Make sure start slot is not a skipped slot
for i := startSlot; i > 0; i-- {
filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
b, err := s.beaconDB.Blocks(ctx, filter)
if err != nil {
return err
}
if len(b) > 0 {
startSlot = i
break
}
}
// Make sure finalized slot is not a skipped slot.
for i := endSlot; i > 0; i-- {
filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
b, err := s.beaconDB.Blocks(ctx, filter)
if err != nil {
return err
}
if len(b) > 0 {
endSlot = i - 1
break
}
}
// Do not remove genesis state
if startSlot == 0 {
startSlot++
}
// If end slot comes less than start slot
if endSlot < startSlot {
endSlot = startSlot
}
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
roots, err := s.beaconDB.BlockRoots(ctx, filter)
if err != nil {
return err
}
roots, err = s.filterBlockRoots(ctx, roots)
if err != nil {
return err
}
if err := s.beaconDB.DeleteStates(ctx, roots); err != nil {
log.Warnf("Could not delete states: %v", err)
}
return nil
}
// shouldUpdateCurrentJustified prevents the bouncing attack by only updating conflicting justified
// checkpoints in the fork choice if in the early slots of the epoch.
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
@@ -238,9 +138,9 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
return true, nil
}
var newJustifiedBlockSigned *ethpb.SignedBeaconBlock
justifiedRoot := bytesutil.ToBytes32(newJustifiedCheckpt.Root)
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
var err error
if !featureconfig.Get().NoInitSyncBatchSaveBlocks && s.hasInitSyncBlock(justifiedRoot) {
if s.hasInitSyncBlock(justifiedRoot) {
newJustifiedBlockSigned = s.getInitSyncBlock(justifiedRoot)
} else {
newJustifiedBlockSigned, err = s.beaconDB.Block(ctx, justifiedRoot)
@@ -257,8 +157,8 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
return false, nil
}
var justifiedBlockSigned *ethpb.SignedBeaconBlock
cachedJustifiedRoot := bytesutil.ToBytes32(s.justifiedCheckpt.Root)
if !featureconfig.Get().NoInitSyncBatchSaveBlocks && s.hasInitSyncBlock(cachedJustifiedRoot) {
cachedJustifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if s.hasInitSyncBlock(cachedJustifiedRoot) {
justifiedBlockSigned = s.getInitSyncBlock(cachedJustifiedRoot)
} else {
justifiedBlockSigned, err = s.beaconDB.Block(ctx, cachedJustifiedRoot)
@@ -271,7 +171,7 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
return false, errors.New("nil justified block")
}
justifiedBlock := justifiedBlockSigned.Block
b, err := s.ancestor(ctx, newJustifiedCheckpt.Root, justifiedBlock.Slot)
b, err := s.ancestor(ctx, justifiedRoot[:], justifiedBlock.Slot)
if err != nil {
return false, err
}
@@ -294,103 +194,60 @@ func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconSt
if canUpdate {
s.prevJustifiedCheckpt = s.justifiedCheckpt
s.justifiedCheckpt = cpt
}
if !featureconfig.Get().NewStateMgmt {
justifiedRoot := bytesutil.ToBytes32(cpt.Root)
justifiedState := s.initSyncState[justifiedRoot]
// If justified state is nil, resume back to normal syncing process and save
// justified check point.
var err error
if justifiedState == nil {
if s.beaconDB.HasState(ctx, justifiedRoot) {
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
justifiedState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), justifiedRoot)
if err != nil {
log.Error(err)
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
}
if err := s.beaconDB.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
return errors.Wrap(err, "could not save justified state")
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
// This saves every finalized state in DB during initial sync, needed as part of optimization to
// use cache state during initial sync in case of restart.
func (s *Service) saveInitState(ctx context.Context, state *stateTrie.BeaconState) error {
cpt := state.FinalizedCheckpoint()
finalizedRoot := bytesutil.ToBytes32(cpt.Root)
fs := s.initSyncState[finalizedRoot]
if fs == nil {
var err error
fs, err = s.beaconDB.State(ctx, finalizedRoot)
if err != nil {
return err
}
if fs == nil {
fs, err = s.generateState(ctx, bytesutil.ToBytes32(s.prevFinalizedCheckpt.Root), finalizedRoot)
if err != nil {
// This might happen if the client was in sync and is now re-syncing for whatever reason.
log.Warn("Initial sync cache did not have finalized state root cached")
return err
}
}
// This caches input checkpoint as justified for the service struct. It rotates current justified to previous justified,
// caches justified checkpoint balances for fork choice and saves the justified checkpoint in the DB.
// This method has no defense against the fork choice bouncing attack, which is why it's only recommended for use during initial syncing.
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
s.prevJustifiedCheckpt = s.justifiedCheckpt
s.justifiedCheckpt = cp
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
if err := s.beaconDB.SaveState(ctx, fs, finalizedRoot); err != nil {
return errors.Wrap(err, "could not save state")
}
return nil
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cp)
}
// This filters block roots that are not known as head root and finalized root in DB.
// It serves as the last line of defence before we prune states.
func (s *Service) filterBlockRoots(ctx context.Context, roots [][32]byte) ([][32]byte, error) {
f, err := s.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return nil, err
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
// Blocks need to be saved so that we can retrieve finalized block from
// DB when migrating states.
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
fRoot := f.Root
h, err := s.beaconDB.HeadBlock(ctx)
if err != nil {
return nil, err
}
hRoot, err := stateutil.BlockRoot(h.Block)
if err != nil {
return nil, err
s.clearInitSyncBlocks()
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = cp
fRoot := bytesutil.ToBytes32(cp.Root)
if err := s.stateGen.MigrateToCold(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not migrate to cold")
}
filtered := make([][32]byte, 0, len(roots))
for _, root := range roots {
if bytes.Equal(root[:], fRoot[:]) || bytes.Equal(root[:], hRoot[:]) {
continue
}
filtered = append(filtered, root)
}
return filtered, nil
return s.beaconDB.SaveFinalizedCheckpoint(ctx, cp)
}
// ancestor returns the block root of an ancestry block from the input block root.
//
// Spec pseudocode definition:
// def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
// def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
// block = store.blocks[root]
// if block.slot > slot:
// return get_ancestor(store, block.parent_root, slot)
// return get_ancestor(store, block.parent_root, slot)
// elif block.slot == slot:
// return root
// return root
// else:
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot.
// return root
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
// return root
func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "forkchoice.ancestor")
ctx, span := trace.StartSpan(ctx, "forkChoice.ancestor")
defer span.End()
// Stop recursive ancestry lookup if context is cancelled.
@@ -403,7 +260,7 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
return nil, errors.Wrap(err, "could not get ancestor block")
}
if !featureconfig.Get().NoInitSyncBatchSaveBlocks && s.hasInitSyncBlock(bytesutil.ToBytes32(root)) {
if s.hasInitSyncBlock(bytesutil.ToBytes32(root)) {
signed = s.getInitSyncBlock(bytesutil.ToBytes32(root))
}
@@ -411,14 +268,7 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
return nil, errors.New("nil block")
}
b := signed.Block
// If we dont have the ancestor in the DB, simply return nil so rest of fork choice
// operation can proceed. This is not an error condition.
if b == nil || b.Slot < slot {
return nil, nil
}
if b.Slot == slot {
if b.Slot == slot || b.Slot < slot {
return root, nil
}
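Matching the get_ancestor pseudocode above, the updated base case folds the skip-slot branch (b.Slot < slot) into the same early return as an exact match. A minimal sketch over an in-memory block map (illustrative types; the service walks the DB and the init-sync block cache instead):

type blockNode struct {
    slot   uint64
    parent [32]byte
}

// ancestorOf walks parent links until it reaches a block at or before
// the queried slot; a block older than slot sits across a skip slot, so
// its root is the most recent root prior to slot and is returned as-is.
func ancestorOf(blocks map[[32]byte]blockNode, root [32]byte, slot uint64) [32]byte {
    b, ok := blocks[root]
    if !ok || b.slot <= slot {
        return root
    }
    return ancestorOf(blocks, b.parent, slot)
}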
@@ -429,34 +279,49 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
// the store's justified is not in chain with finalized check point.
//
// Spec definition:
// if (
// state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch
// or get_ancestor(store, store.justified_checkpoint.root, finalized_slot) != store.finalized_checkpoint.root
// ):
// store.justified_checkpoint = state.current_justified_checkpoint
// # Potentially update justified if different from store
// if store.justified_checkpoint != state.current_justified_checkpoint:
// # Update justified if new justified is later than store justified
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
// return
// # Update justified if store justified is not in chain with finalized checkpoint
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *stateTrie.BeaconState) error {
finalizedBlkSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
if err != nil || finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
return errors.Wrap(err, "could not get finalized block")
}
finalizedBlk := finalizedBlkSigned.Block
// Update justified if it's different than the one cached in the store.
if !attestationutil.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
return nil
}
anc, err := s.ancestor(ctx, s.justifiedCheckpt.Root, finalizedBlk.Slot)
if err != nil {
return err
// Update justified if store justified is not in chain with finalized check point.
finalizedSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
anc, err := s.ancestor(ctx, justifiedRoot[:], finalizedSlot)
if err != nil {
return err
}
if !bytes.Equal(anc, s.finalizedCheckpt.Root) {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
}
// Either the new justified is later than stored justified or not in chain with finalized check point.
if cpt := state.CurrentJustifiedCheckpoint(); cpt != nil && cpt.Epoch > s.justifiedCheckpt.Epoch || !bytes.Equal(anc, s.finalizedCheckpt.Root) {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
}
return nil
}
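Condensing the spec comment above: the store adopts the state's current justified checkpoint when it is strictly newer, or when the stored justified checkpoint is no longer in chain with the finalized one. As a small predicate (illustrative names, not the service's actual helper):

package example

import "bytes"

// shouldAdoptNewJustified mirrors the two spec conditions: a later
// justified epoch, or a justified root whose ancestor at the finalized
// slot no longer matches the finalized root.
func shouldAdoptNewJustified(stateJustifiedEpoch, storeJustifiedEpoch uint64,
    ancestorAtFinalizedSlot, finalizedRoot []byte) bool {
    return stateJustifiedEpoch > storeJustifiedEpoch ||
        !bytes.Equal(ancestorAtFinalizedSlot, finalizedRoot)
}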
// This retrieves missing blocks from DB (ie. the blocks that couldn't received over sync) and inserts them to fork choice store.
// This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock, state *stateTrie.BeaconState) error {
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock,
fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*ethpb.BeaconBlock, 0)
parentRoot := bytesutil.ToBytes32(blk.ParentRoot)
@@ -487,8 +352,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
if err := s.forkChoiceStore.ProcessBlock(ctx,
b.Slot, r, bytesutil.ToBytes32(b.ParentRoot), bytesutil.ToBytes32(b.Body.Graffiti),
state.CurrentJustifiedCheckpoint().Epoch,
state.FinalizedCheckpointEpoch()); err != nil {
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
}
@@ -512,3 +377,12 @@ func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
return nil
}
// This ensures that the input root defaults to using the genesis root instead of zero hashes. This is needed for handling the
// fork choice justification routine.
func (s *Service) ensureRootNotZeros(root [32]byte) [32]byte {
if root == params.BeaconConfig().ZeroHash {
return s.genesisRoot
}
return root
}

File diff suppressed because it is too large


@@ -26,7 +26,7 @@ type AttestationReceiver interface {
AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error)
}
// ReceiveAttestationNoPubsub is a function that defines the operations that are preformed on
// ReceiveAttestationNoPubsub is a function that defines the operations that are performed on
// attestation that is received from regular sync. The operations consist of:
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
@@ -41,15 +41,10 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
}
if !featureconfig.Get().DisableUpdateHeadPerAttestation {
baseState, err := s.getAttPreState(ctx, att.Data.Target)
if err != nil {
return err
}
// This updates fork choice head, if a new head could not be updated due to
// long range or intermediate forking. It simply logs a warning and returns nil
// as that's more appropriate than returning errors.
if err := s.updateHead(ctx, baseState.Balances()); err != nil {
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
return nil
}
@@ -105,13 +100,7 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
continue
}
var hasState bool
if featureconfig.Get().NewStateMgmt {
hasState = s.stateGen.StateSummaryExists(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
} else {
hasState = s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root))
}
hasState := s.stateGen.StateSummaryExists(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue


@@ -5,20 +5,18 @@ import (
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
)
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
db := testDB.SetupDB(t)
helpers.ClearCache()
db, sc := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db)
chainService := setupBeaconChain(t, db, sc)
chainService.genesisTime = time.Now()
if !chainService.verifyCheckpointEpoch(&ethpb.Checkpoint{}) {
t.Error("Wanted true, got false")
}
if chainService.verifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 1}) {
t.Error("Wanted false, got true")
}
assert.Equal(t, true, chainService.verifyCheckpointEpoch(&ethpb.Checkpoint{}))
assert.Equal(t, false, chainService.verifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 1}))
}
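With genesisTime set to the current time, the clock-derived current epoch is 0, so only a checkpoint at epoch 0 passes. The rule these assertions exercise appears to reduce to rejecting checkpoints from future epochs (a sketch under that assumption):

// checkpointEpochValid rejects a checkpoint whose epoch is ahead of the
// epoch implied by the wall clock and the genesis time.
func checkpointEpochValid(cpEpoch, currentEpoch uint64) bool {
    return cpEpoch <= currentEpoch
}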


@@ -3,16 +3,12 @@ package blockchain
import (
"bytes"
"context"
"encoding/hex"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -21,83 +17,28 @@ import (
// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedBeaconBlock, blkRoots [][32]byte) error
HasInitSyncBlock(root [32]byte) bool
}
// ReceiveBlock is a function that defines the operations that are preformed on
// blocks that is received from rpc service. The operations consists of:
// 1. Gossip block to other peers
// 2. Validate block, apply state transition and update check points
// 3. Apply fork choice to the processed block
// 4. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlock")
defer span.End()
// Broadcast the new block to the network.
if err := s.p2p.Broadcast(ctx, block); err != nil {
return errors.Wrap(err, "could not broadcast block")
}
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(blockRoot[:]),
}).Debug("Broadcasting block")
if err := captureSentTimeMetric(uint64(s.genesisTime.Unix()), block.Block.Slot); err != nil {
// If a node fails to capture metric, this shouldn't cause the block processing to fail.
log.Warnf("Could not capture block sent time metric: %v", err)
}
if err := s.ReceiveBlockNoPubsub(ctx, block, blockRoot); err != nil {
return err
}
return nil
}
// ReceiveBlockNoPubsub is a function that defines the the operations (minus pubsub)
// that are preformed on blocks that is received from regular sync service. The operations consists of:
// ReceiveBlock is a function that defines the operations (minus pubsub)
// that are performed on blocks received from the regular sync service. The operations consist of:
// 1. Validate block, apply state transition and update check points
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoPubsub")
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
blockCopy := stateTrie.CopySignedBeaconBlock(block)
// Apply state transition on the new block.
postState, err := s.onBlock(ctx, blockCopy, blockRoot)
if err != nil {
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
traceutil.AnnotateError(span, err)
return err
}
// Add attestations from the block to the pool for fork choice.
if err := s.attPool.SaveBlockAttestations(blockCopy.Block.Body.Attestations); err != nil {
log.Errorf("Could not save attestation for fork choice: %v", err)
return nil
}
for _, exit := range block.Block.Body.VoluntaryExits {
s.exitPool.MarkIncluded(exit)
}
s.epochParticipationLock.Lock()
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
if featureconfig.Get().DisableForkChoice && block.Block.Slot > s.headSlot() {
if err := s.saveHead(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save head")
}
} else {
if err := s.updateHead(ctx, postState.Balances()); err != nil {
return errors.Wrap(err, "could not save head")
}
}
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
@@ -108,6 +49,16 @@ func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedB
},
})
// Handle post block operations such as attestations and exits.
if err := s.handlePostBlockOperations(blockCopy.Block); err != nil {
return err
}
// Update and save head block after fork choice.
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
return errors.Wrap(err, "could not update head")
}
// Reports on block and fork choice metrics.
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
@@ -120,85 +71,25 @@ func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedB
return nil
}
// ReceiveBlockNoPubsubForkchoice is a function that defines all the operations (minus pubsub and fork choice)
// that are performed on blocks received from the initial sync service. The operations consist of:
// 1. Validate block, apply state transition and update check points
// 2. Save latest head info
func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoForkchoice")
// ReceiveBlockInitialSync processes the input block for the purpose of initial syncing.
// This method should only be used on blocks during the initial syncing phase.
func (s *Service) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockNoVerify")
defer span.End()
blockCopy := stateTrie.CopySignedBeaconBlock(block)
// Apply state transition on the new block.
_, err := s.onBlock(ctx, blockCopy, blockRoot)
if err != nil {
err := errors.Wrap(err, "could not process block")
traceutil.AnnotateError(span, err)
return err
}
cachedHeadRoot, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get head root from cache")
}
if !bytes.Equal(blockRoot[:], cachedHeadRoot) {
if err := s.saveHead(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save head")
}
}
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: blockRoot,
Verified: true,
},
})
// Reports on block and fork choice metrics.
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
// Log block sync status.
logBlockSyncStatus(blockCopy.Block, blockRoot, s.finalizedCheckpt)
// Log state transition data.
logStateTransitionData(blockCopy.Block)
s.epochParticipationLock.Lock()
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
return nil
}
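During initial sync there is only one chain segment to follow, which is why the function above can skip fork choice and simply advance the cached head. A sketch of the driving loop on the sync side; fetchNextBlock is a hypothetical peer-fetching helper, not a Prysm API:

// Illustrative initial-sync loop: apply each fetched block without fork
// choice, since a syncing node follows a single canonical segment.
func syncToHead(ctx context.Context, s *Service) error {
	for {
		blk, root, done, err := fetchNextBlock(ctx) // hypothetical fetcher
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if err := s.ReceiveBlockInitialSync(ctx, blk, root); err != nil {
			return err
		}
	}
}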
// ReceiveBlockNoVerify runs the state transition on an input block without verifying the block's BLS contents.
// Depending on the security model, this is the "minimal" work a node can do to sync the chain.
// It simulates light client behavior and assumes 100% trust with the syncing peer.
func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoVerify")
defer span.End()
blockCopy := stateTrie.CopySignedBeaconBlock(block)
// Apply state transition on the incoming newly received blockCopy without verifying its BLS contents.
if err := s.onBlockInitialSyncStateTransition(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
traceutil.AnnotateError(span, err)
return err
}
cachedHeadRoot, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get head root from cache")
}
if !bytes.Equal(blockRoot[:], cachedHeadRoot) {
if err := s.saveHeadNoDB(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not save head")
traceutil.AnnotateError(span, err)
return err
}
// Save the latest block as head in cache.
if err := s.saveHeadNoDB(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not save head")
traceutil.AnnotateError(span, err)
return err
}
// Send notification of the processed block to the state feed.
@@ -207,7 +98,7 @@ func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedB
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: blockRoot,
Verified: false,
Verified: true,
},
})
@@ -221,14 +112,90 @@ func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedB
"deposits": len(blockCopy.Block.Body.Deposits),
}).Debug("Finished applying state transition")
s.epochParticipationLock.Lock()
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
return nil
}
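The doc comment above is the key caveat: skipping BLS verification trades safety for speed and is only acceptable against a fully trusted peer. A hedged sketch of how a caller might pick the receive path from its trust assumption (the trusted flag is hypothetical):

// Illustrative dispatch: verify signatures unless the peer is fully trusted.
func receiveByTrust(ctx context.Context, s *Service, blk *ethpb.SignedBeaconBlock, root [32]byte, trusted bool) error {
	if trusted {
		// Light-client-style path: no BLS verification at all.
		return s.ReceiveBlockNoVerify(ctx, blk, root)
	}
	return s.ReceiveBlockNoPubsub(ctx, blk, root)
}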
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedBeaconBlock, blkRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
// Apply state transition on the incoming newly received blocks without verifying their BLS contents.
postState, fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block")
traceutil.AnnotateError(span, err)
return err
}
for i, b := range blocks {
blockCopy := stateTrie.CopySignedBeaconBlock(b)
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
traceutil.AnnotateError(span, err)
return err
}
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: blkRoots[i],
Verified: true,
},
})
// Reports on blockCopy and fork choice metrics.
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
}
lastBlk := blocks[len(blocks)-1]
lastRoot := blkRoots[len(blkRoots)-1]
if err := s.stateGen.SaveState(ctx, lastRoot, postState); err != nil {
return errors.Wrap(err, "could not save state")
}
cachedHeadRoot, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get head root from cache")
}
if !bytes.Equal(lastRoot[:], cachedHeadRoot) {
if err := s.saveHeadNoDB(ctx, lastBlk, lastRoot); err != nil {
err := errors.Wrap(err, "could not save head")
traceutil.AnnotateError(span, err)
return err
}
}
return s.handleEpochBoundary(postState)
}
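Batch verification is the point of this method: one aggregate signature check over the whole linear segment instead of one per block. A sketch of building the positionally matched roots slice a caller needs; the wrapper itself is illustrative:

// Illustrative only: build roots[i] for blks[i], then hand the whole
// linear segment to ReceiveBlockBatch in one call.
func receiveSegment(ctx context.Context, s *Service, blks []*ethpb.SignedBeaconBlock) error {
	roots := make([][32]byte, len(blks))
	for i, b := range blks {
		r, err := stateutil.BlockRoot(b.Block)
		if err != nil {
			return errors.Wrap(err, "could not hash block")
		}
		roots[i] = r
	}
	return s.ReceiveBlockBatch(ctx, blks, roots)
}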
// HasInitSyncBlock returns true if a block with the input root exists in the initial sync blocks cache.
func (s *Service) HasInitSyncBlock(root [32]byte) bool {
return s.hasInitSyncBlock(root)
}
func (s *Service) handlePostBlockOperations(b *ethpb.BeaconBlock) error {
// Delete the processed block attestations from attestation pool.
if err := s.deletePoolAtts(b.Body.Attestations); err != nil {
return err
}
// Add block attestations to the fork choice pool to compute head.
if err := s.attPool.SaveBlockAttestations(b.Body.Attestations); err != nil {
log.Errorf("Could not save block attestations for fork choice: %v", err)
return nil
}
// Mark block exits as seen so we don't include same ones in future blocks.
for _, e := range b.Body.VoluntaryExits {
s.exitPool.MarkIncluded(e)
}
// Mark attester slashings as seen so we don't include same ones in future blocks.
for _, as := range b.Body.AttesterSlashings {
s.slashingPool.MarkIncludedAttesterSlashing(as)
}
return nil
}
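Marking operations as included is what keeps future proposals from re-packing the same exits and slashings. A proposer-side sketch of the effect; PendingExits and its signature are assumed here purely for illustration:

// Illustrative only: after MarkIncluded above, exits already on chain no
// longer appear when a proposer drains the pool for the next block.
func pendingExitCount(s *Service, headState *stateTrie.BeaconState, slot uint64) int {
	return len(s.exitPool.PendingExits(headState, slot)) // assumed signature
}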

View File

@@ -0,0 +1,326 @@
package blockchain
import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
blockchainTesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestService_ReceiveBlock(t *testing.T) {
ctx := context.Background()
genesis, keys := testutil.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
assert.NoError(t, err)
return blk
}
bc := params.BeaconConfig()
bc.ShardCommitteePeriod = 0 // Required for voluntary exits test in reasonable time.
params.OverrideBeaconConfig(bc)
type args struct {
block *ethpb.SignedBeaconBlock
}
tests := []struct {
name string
args args
wantErr bool
check func(*testing.T, *Service)
}{
{
name: "applies block with state transition",
args: args{
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if hs := s.head.state.Slot(); hs != 2 {
t.Errorf("Unexpected state slot. Got %d but wanted %d", hs, 2)
}
if bs := s.head.block.Block.Slot; bs != 2 {
t.Errorf("Unexpected head block slot. Got %d but wanted %d", bs, 2)
}
},
},
{
name: "saves attestations to pool",
args: args{
block: genFullBlock(t,
&testutil.BlockGenConfig{
NumProposerSlashings: 0,
NumAttesterSlashings: 0,
NumAttestations: 2,
NumDeposits: 0,
NumVoluntaryExits: 0,
},
1, /*slot*/
),
},
check: func(t *testing.T, s *Service) {
if baCount := len(s.attPool.BlockAttestations()); baCount != 2 {
t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
"Got %d but wanted %d", baCount, 2)
}
},
},
{
name: "updates exit pool",
args: args{
block: genFullBlock(t, &testutil.BlockGenConfig{
NumProposerSlashings: 0,
NumAttesterSlashings: 0,
NumAttestations: 0,
NumDeposits: 0,
NumVoluntaryExits: 3,
},
1, /*slot*/
),
},
check: func(t *testing.T, s *Service) {
var n int
for i := uint64(0); int(i) < genesis.NumValidators(); i++ {
if s.exitPool.HasBeenIncluded(i) {
n++
}
}
if n != 3 {
t.Errorf("Did not mark the correct number of exits. Got %d but wanted %d", n, 3)
}
},
},
{
name: "notifies block processed on state feed",
args: args{
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, db.SaveState(ctx, genesis, genesisBlockRoot))
cfg := &Config{
BeaconDB: db,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
AttPool: attestations.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(db, stateSummaryCache),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.beaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := stateutil.BlockRoot(gBlk.Block)
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
root, err := stateutil.BlockRoot(tt.args.block.Block)
require.NoError(t, err)
if err := s.ReceiveBlock(ctx, tt.args.block, root); (err != nil) != tt.wantErr {
t.Errorf("ReceiveBlock() error = %v, wantErr %v", err, tt.wantErr)
} else {
tt.check(t, s)
}
})
}
}
func TestService_ReceiveBlockInitialSync(t *testing.T) {
ctx := context.Background()
genesis, keys := testutil.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
if err != nil {
t.Error(err)
}
return blk
}
type args struct {
block *ethpb.SignedBeaconBlock
}
tests := []struct {
name string
args args
wantErr bool
check func(*testing.T, *Service)
}{
{
name: "applies block with state transition",
args: args{
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
},
check: func(t *testing.T, s *Service) {
assert.Equal(t, uint64(2), s.head.state.Slot(), "Incorrect head state slot")
assert.Equal(t, uint64(2), s.head.block.Block.Slot, "Incorrect head block slot")
},
},
{
name: "notifies block processed on state feed",
args: args{
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
cfg := &Config{
BeaconDB: db,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(db, stateSummaryCache),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.beaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := stateutil.BlockRoot(gBlk.Block)
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
root, err := stateutil.BlockRoot(tt.args.block.Block)
require.NoError(t, err)
if err := s.ReceiveBlockInitialSync(ctx, tt.args.block, root); (err != nil) != tt.wantErr {
t.Errorf("ReceiveBlockInitialSync() error = %v, wantErr %v", err, tt.wantErr)
} else {
tt.check(t, s)
}
})
}
}
func TestService_ReceiveBlockBatch(t *testing.T) {
ctx := context.Background()
genesis, keys := testutil.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
if err != nil {
t.Error(err)
}
return blk
}
type args struct {
block *ethpb.SignedBeaconBlock
}
tests := []struct {
name string
args args
wantErr bool
check func(*testing.T, *Service)
}{
{
name: "applies block with state transition",
args: args{
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
},
check: func(t *testing.T, s *Service) {
assert.Equal(t, uint64(2), s.head.state.Slot(), "Incorrect head state slot")
assert.Equal(t, uint64(2), s.head.block.Block.Slot, "Incorrect head block slot")
},
},
{
name: "notifies block processed on state feed",
args: args{
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
cfg := &Config{
BeaconDB: db,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(db, stateSummaryCache),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.beaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := stateutil.BlockRoot(gBlk.Block)
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
root, err := stateutil.BlockRoot(tt.args.block.Block)
require.NoError(t, err)
blks := []*ethpb.SignedBeaconBlock{tt.args.block}
roots := [][32]byte{root}
if err := s.ReceiveBlockBatch(ctx, blks, roots); (err != nil) != tt.wantErr {
t.Errorf("ReceiveBlockBatch() error = %v, wantErr %v", err, tt.wantErr)
} else {
tt.check(t, s)
}
})
}
}
func TestService_HasInitSyncBlock(t *testing.T) {
s, err := NewService(context.Background(), &Config{})
require.NoError(t, err)
r := [32]byte{'a'}
if s.HasInitSyncBlock(r) {
t.Error("Should not have block")
}
s.saveInitSyncBlock(r, testutil.NewBeaconBlock())
if !s.HasInitSyncBlock(r) {
t.Error("Should have block")
}
}

View File

@@ -15,13 +15,11 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
@@ -35,8 +33,8 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"go.opencensus.io/trace"
)
@@ -53,13 +51,11 @@ type Service struct {
exitPool *voluntaryexits.Pool
genesisTime time.Time
p2p p2p.Broadcaster
maxRoutines int64
maxRoutines int
head *head
headLock sync.RWMutex
stateNotifier statefeed.Notifier
genesisRoot [32]byte
epochParticipation map[uint64]*precompute.Balance
epochParticipationLock sync.RWMutex
forkChoiceStore f.ForkChoicer
justifiedCheckpt *ethpb.Checkpoint
prevJustifiedCheckpt *ethpb.Checkpoint
@@ -79,6 +75,8 @@ type Service struct {
initSyncBlocksLock sync.RWMutex
recentCanonicalBlocks map[[32]byte]bool
recentCanonicalBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
}
// Config options for the service.
@@ -91,7 +89,7 @@ type Config struct {
ExitPool *voluntaryexits.Pool
SlashingPool *slashings.Pool
P2p p2p.Broadcaster
MaxRoutines int64
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
OpsService *attestations.Service
@@ -114,7 +112,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
p2p: cfg.P2p,
maxRoutines: cfg.MaxRoutines,
stateNotifier: cfg.StateNotifier,
epochParticipation: make(map[uint64]*precompute.Balance),
forkChoiceStore: cfg.ForkChoiceStore,
initSyncState: make(map[[32]byte]*stateTrie.BeaconState),
boundaryRoots: [][32]byte{},
@@ -123,6 +120,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
stateGen: cfg.StateGen,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
recentCanonicalBlocks: make(map[[32]byte]bool),
justifiedBalances: make([]uint64, 0),
}, nil
}
@@ -143,16 +141,9 @@ func (s *Service) Start() {
}
if beaconState == nil {
if featureconfig.Get().NewStateMgmt {
beaconState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state by root: %v", err)
}
} else {
beaconState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
}
beaconState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state by root: %v", err)
}
}
@@ -167,6 +158,14 @@ func (s *Service) Start() {
if err := s.initializeChainInfo(ctx); err != nil {
log.Fatalf("Could not set up chain info: %v", err)
}
// We start a counter to genesis, if needed.
gState, err := s.beaconDB.GenesisState(ctx)
if err != nil {
log.Fatalf("Could not retrieve genesis state: %v", err)
}
go slotutil.CountdownToGenesis(ctx, s.genesisTime, uint64(gState.NumValidators()))
justifiedCheckpoint, err := s.beaconDB.JustifiedCheckpoint(ctx)
if err != nil {
log.Fatalf("Could not get justified checkpoint: %v", err)
@@ -178,20 +177,15 @@ func (s *Service) Start() {
// Resume fork choice.
s.justifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
log.Fatalf("Could not cache justified state balances: %v", err)
}
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)
if !featureconfig.Get().NewStateMgmt {
if finalizedCheckpoint.Epoch > 1 {
if err := s.pruneGarbageState(ctx, helpers.StartSlot(finalizedCheckpoint.Epoch)-params.BeaconConfig().SlotsPerEpoch); err != nil {
log.WithError(err).Warn("Could not prune old states")
}
}
}
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
@@ -245,6 +239,11 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
if err != nil {
log.Fatalf("Could not initialize beacon chain: %v", err)
}
// We start a counter to genesis, if needed.
go slotutil.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()))
// We send out a state initialized event to the rest of the services
// running in the beacon node.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
@@ -303,7 +302,7 @@ func (s *Service) Stop() error {
// Status always returns nil unless there is an error condition that causes
// this service to be unhealthy.
func (s *Service) Status() error {
if runtime.NumGoroutine() > int(s.maxRoutines) {
if runtime.NumGoroutine() > s.maxRoutines {
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
}
return nil
@@ -331,32 +330,32 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.B
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, genesisBlkRoot, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: 0,
Root: genesisBlkRoot[:],
}); err != nil {
return err
}
} else {
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: 0,
Root: genesisBlkRoot[:],
}); err != nil {
return err
}
s.stateGen.SaveFinalizedState(0, genesisBlkRoot, genesisState)
if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
}
if err := s.beaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could save genesis block root")
return errors.Wrap(err, "could not save genesis block root")
}
// Finalized checkpoint at genesis is a zero hash.
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.justifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
return err
}
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
@@ -421,41 +420,20 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
var finalizedState *stateTrie.BeaconState
if featureconfig.Get().NewStateMgmt {
finalizedState, err = s.stateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
if !featureconfig.Get().SkipRegenHistoricalStates {
// Since historical states were skipped, the node should start from last finalized check point.
finalizedRoot = s.beaconDB.LastArchivedIndexRoot(ctx)
if finalizedRoot == params.BeaconConfig().ZeroHash {
finalizedRoot = bytesutil.ToBytes32(finalized.Root)
}
}
} else {
finalizedState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
finalizedState, err = s.stateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
finalizedRoot = s.beaconDB.LastArchivedIndexRoot(ctx)
if finalizedRoot == params.BeaconConfig().ZeroHash {
finalizedRoot = bytesutil.ToBytes32(finalized.Root)
}
finalizedBlock, err := s.beaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
}
if featureconfig.Get().NewStateMgmt && featureconfig.Get().SkipRegenHistoricalStates {
// To skip the regeneration of historical state, the node has to generate the parent of the last finalized state.
parentRoot := bytesutil.ToBytes32(finalizedBlock.Block.ParentRoot)
parentState, err := s.generateState(ctx, finalizedRoot, parentRoot)
if err != nil {
return err
}
if s.beaconDB.SaveState(ctx, parentState, parentRoot) != nil {
return err
}
}
if finalizedState == nil || finalizedBlock == nil {
return errors.New("finalized state and block can't be nil")
}
@@ -464,29 +442,6 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
return nil
}
// This is called when a client starts from a non-genesis slot. It deletes the states in DB
// from slot 1 (avoid genesis state) to `slot`.
func (s *Service) pruneGarbageState(ctx context.Context, slot uint64) error {
if featureconfig.Get().DontPruneStateStartUp {
return nil
}
filter := filters.NewFilter().SetStartSlot(1).SetEndSlot(slot)
roots, err := s.beaconDB.BlockRoots(ctx, filter)
if err != nil {
return err
}
if err := s.beaconDB.DeleteStates(ctx, roots); err != nil {
return err
}
if err := s.beaconDB.SaveLastArchivedIndex(ctx, 0); err != nil {
return err
}
return nil
}
// This is called when a client starts from a non-genesis slot. It passes the last justified and finalized
// information to the fork choice service to initialize the fork choice store.
func (s *Service) resumeForkChoice(justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) {

View File

@@ -6,6 +6,7 @@ import (
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/sirupsen/logrus"
)
@@ -15,16 +16,12 @@ func init() {
}
func TestChainService_SaveHead_DataRace(t *testing.T) {
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
}
go func() {
if err := s.saveHead(context.Background(), [32]byte{}); err != nil {
t.Fatal(err)
}
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
if err := s.saveHead(context.Background(), [32]byte{}); err != nil {
t.Fatal(err)
}
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}

View File

@@ -20,7 +20,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
@@ -35,6 +34,8 @@ import (
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -72,7 +73,7 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
var _ = p2p.Broadcaster(&mockBroadcaster{})
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummaryCache) *Service {
endpoint := "http://127.0.0.1"
ctx := context.Background()
var web3Service *powchain.Service
@@ -93,39 +94,35 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
},
DepositContainers: []*protodb.DepositContainer{},
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
BeaconDB: beaconDB,
HTTPEndPoint: endpoint,
DepositContract: common.Address{},
})
if err != nil {
t.Fatalf("unable to set up web3 service: %v", err)
}
require.NoError(t, err, "Unable to set up web3 service")
opsService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
depositCache, err := depositcache.NewDepositCache()
require.NoError(t, err)
cfg := &Config{
BeaconBlockBuf: 0,
BeaconDB: beaconDB,
DepositCache: depositcache.NewDepositCache(),
DepositCache: depositCache,
ChainStartFetcher: web3Service,
P2p: &mockBroadcaster{},
StateNotifier: &mockBeaconNode{},
AttPool: attestations.NewPool(),
StateGen: stategen.New(beaconDB, cache.NewStateSummaryCache()),
StateGen: stategen.New(beaconDB, sc),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
OpsService: opsService,
}
chainService, err := NewService(ctx, cfg)
if err != nil {
t.Fatalf("unable to setup chain service: %v", err)
}
require.NoError(t, err, "Unable to setup chain service")
chainService.genesisTime = time.Unix(1, 0) // non-zero time
return chainService
@@ -133,8 +130,8 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
func TestChainStartStop_Uninitialized(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db)
db, sc := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db, sc)
// Listen for state events.
stateSubChannel := make(chan *feed.Event, 1)
@@ -168,19 +165,13 @@ func TestChainStartStop_Uninitialized(t *testing.T) {
stateSub.Unsubscribe()
beaconState, err := db.HeadState(context.Background())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if beaconState == nil || beaconState.Slot() != 0 {
t.Error("Expected canonical state feed to send a state with genesis block")
}
if err := chainService.Stop(); err != nil {
t.Fatalf("Unable to stop chain service: %v", err)
}
require.NoError(t, chainService.Stop(), "Unable to stop chain service")
// The context should have been canceled.
if chainService.ctx.Err() != context.Canceled {
t.Error("Context was not canceled")
}
assert.Equal(t, context.Canceled, chainService.ctx.Err(), "Context was not canceled")
testutil.AssertLogsContain(t, hook, "Waiting")
testutil.AssertLogsContain(t, hook, "Initialized beacon chain genesis state")
}
@@ -188,95 +179,62 @@ func TestChainStartStop_Uninitialized(t *testing.T) {
func TestChainStartStop_Initialized(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db)
chainService := setupBeaconChain(t, db, sc)
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := stateutil.BlockRoot(genesisBlk.Block)
if err != nil {
t.Fatal(err)
}
if err := db.SaveBlock(ctx, genesisBlk); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
if err := s.SetSlot(1); err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, s, blkRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveHeadBlockRoot(ctx, blkRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}); err != nil {
t.Fatal(err)
}
require.NoError(t, s.SetSlot(1))
require.NoError(t, db.SaveState(ctx, s, blkRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
// Test the start function.
chainService.Start()
if err := chainService.Stop(); err != nil {
t.Fatalf("unable to stop chain service: %v", err)
}
require.NoError(t, chainService.Stop(), "Unable to stop chain service")
// The context should have been canceled.
if chainService.ctx.Err() != context.Canceled {
t.Error("context was not canceled")
}
assert.Equal(t, context.Canceled, chainService.ctx.Err(), "Context was not canceled")
testutil.AssertLogsContain(t, hook, "data already exists")
}
func TestChainService_InitializeBeaconChain(t *testing.T) {
db := testDB.SetupDB(t)
helpers.ClearCache()
db, sc := testDB.SetupDB(t)
ctx := context.Background()
bc := setupBeaconChain(t, db)
bc := setupBeaconChain(t, db, sc)
var err error
// Set up 10 deposits pre chain start for validators to register
count := uint64(10)
deposits, _, err := testutil.DeterministicDepositsAndKeys(count)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
trie, _, err := testutil.DepositTrieFromDeposits(deposits)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
hashTreeRoot := trie.HashTreeRoot()
genState, err := state.EmptyGenesisState()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
err = genState.SetEth1Data(&ethpb.Eth1Data{
DepositRoot: hashTreeRoot[:],
DepositCount: uint64(len(deposits)),
})
require.NoError(t, err)
for _, deposit := range deposits {
genState, err = b.ProcessPreGenesisDeposit(ctx, genState, deposit)
if err != nil {
t.Fatal(err)
}
}
genState, err = b.ProcessPreGenesisDeposits(ctx, genState, deposits)
require.NoError(t, err)
if _, err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, &ethpb.Eth1Data{
DepositRoot: hashTreeRoot[:],
}); err != nil {
t.Fatal(err)
}
_, err = bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, &ethpb.Eth1Data{DepositRoot: hashTreeRoot[:]})
require.NoError(t, err)
if _, err := bc.HeadState(ctx); err != nil {
t.Error(err)
}
_, err = bc.HeadState(ctx)
assert.NoError(t, err)
headBlk, err := bc.HeadBlock(ctx)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if headBlk == nil {
t.Error("Head state can't be nil after initialize beacon chain")
}
@@ -286,164 +244,74 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
}
func TestChainService_InitializeChainInfo(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
ctx := context.Background()
genesis := testutil.NewBeaconBlock()
genesisRoot, err := stateutil.BlockRoot(genesis.Block)
if err != nil {
t.Fatal(err)
}
if err := db.SaveGenesisBlockRoot(ctx, genesisRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveBlock(ctx, genesis); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, db.SaveBlock(ctx, genesis))
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := testutil.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState := testutil.NewBeaconState()
if err := headState.SetSlot(finalizedSlot); err != nil {
t.Fatal(err)
}
if err := headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]); err != nil {
t.Fatal(err)
}
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := stateutil.BlockRoot(headBlock.Block)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, headState, headRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, headState, genesisRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveBlock(ctx, headBlock); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, headState, headRoot))
require.NoError(t, db.SaveState(ctx, headState, genesisRoot))
require.NoError(t, db.SaveBlock(ctx, headBlock))
if err := db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: helpers.SlotToEpoch(finalizedSlot),
Root: headRoot[:],
}); err != nil {
t.Fatal(err)
}
c := &Service{beaconDB: db, stateGen: stategen.New(db, cache.NewStateSummaryCache())}
if err := c.initializeChainInfo(ctx); err != nil {
t.Fatal(err)
}
c := &Service{beaconDB: db, stateGen: stategen.New(db, sc)}
require.NoError(t, c.initializeChainInfo(ctx))
headBlk, err := c.HeadBlock(ctx)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(headBlk, headBlock) {
t.Error("head block incorrect")
}
require.NoError(t, err)
assert.DeepEqual(t, headBlock, headBlk, "Head block incorrect")
s, err := c.HeadState(ctx)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(s.InnerStateUnsafe(), headState.InnerStateUnsafe()) {
t.Error("head state incorrect")
}
if headBlock.Block.Slot != c.HeadSlot() {
t.Error("head slot incorrect")
}
require.NoError(t, err)
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if !bytes.Equal(headRoot[:], r) {
t.Error("head slot incorrect")
}
if c.genesisRoot != genesisRoot {
t.Error("genesis block root incorrect")
}
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
}
func TestChainService_SaveHeadNoDB(t *testing.T) {
db := testDB.SetupDB(t)
db, sc := testDB.SetupDB(t)
ctx := context.Background()
s := &Service{
beaconDB: db,
stateGen: stategen.New(db, cache.NewStateSummaryCache()),
stateGen: stategen.New(db, sc),
}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
r, err := ssz.HashTreeRoot(b)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
newState := testutil.NewBeaconState()
if err := s.stateGen.SaveState(ctx, r, newState); err != nil {
t.Fatal(err)
}
if err := s.saveHeadNoDB(ctx, b, r); err != nil {
t.Fatal(err)
}
require.NoError(t, s.stateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, b, r))
newB, err := s.beaconDB.HeadBlock(ctx)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if reflect.DeepEqual(newB, b) {
t.Error("head block should not be equal")
}
}
func TestChainService_PruneOldStates(t *testing.T) {
db := testDB.SetupDB(t)
ctx := context.Background()
s := &Service{
beaconDB: db,
}
for i := 0; i < 100; i++ {
block := &ethpb.BeaconBlock{Slot: uint64(i)}
if err := s.beaconDB.SaveBlock(ctx, &ethpb.SignedBeaconBlock{Block: block}); err != nil {
t.Fatal(err)
}
r, err := stateutil.BlockRoot(block)
if err != nil {
t.Fatal(err)
}
newState := testutil.NewBeaconState()
if err := newState.SetSlot(uint64(i)); err != nil {
t.Fatal(err)
}
if err := s.beaconDB.SaveState(ctx, newState, r); err != nil {
t.Fatal(err)
}
}
// Delete half of the states.
if err := s.pruneGarbageState(ctx, 50); err != nil {
t.Fatal(err)
}
filter := filters.NewFilter().SetStartSlot(1).SetEndSlot(100)
roots, err := s.beaconDB.BlockRoots(ctx, filter)
if err != nil {
t.Fatal(err)
}
for i := 1; i < 50; i++ {
s, err := s.beaconDB.State(ctx, roots[i])
if err != nil {
t.Fatal(err)
}
if s != nil {
t.Errorf("wanted nil for slot %d", i)
}
}
}
func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
db, _ := testDB.SetupDB(t)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
finalizedCheckpt: &ethpb.Checkpoint{},
@@ -451,53 +319,36 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := stateutil.BlockRoot(block.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
bs := &pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{}}
state, err := beaconstate.InitializeFromProto(bs)
if err != nil {
t.Fatal(err)
}
if err := s.insertBlockToForkChoiceStore(ctx, block.Block, r, state); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, state))
if s.hasBlock(ctx, [32]byte{}) {
t.Error("Should not have block")
}
if !s.hasBlock(ctx, r) {
t.Error("Should have block")
}
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
}
func BenchmarkHasBlockDB(b *testing.B) {
db := testDB.SetupDB(b)
db, _ := testDB.SetupDB(b)
ctx := context.Background()
s := &Service{
beaconDB: db,
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := s.beaconDB.SaveBlock(ctx, block); err != nil {
b.Fatal(err)
}
require.NoError(b, s.beaconDB.SaveBlock(ctx, block))
r, err := stateutil.BlockRoot(block.Block)
if err != nil {
b.Fatal(err)
}
require.NoError(b, err)
b.ResetTimer()
for i := 0; i < b.N; i++ {
if !s.beaconDB.HasBlock(ctx, r) {
b.Fatal("Block is not in DB")
}
require.Equal(b, true, s.beaconDB.HasBlock(ctx, r), "Block is not in DB")
}
}
func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
ctx := context.Background()
db := testDB.SetupDB(b)
db, _ := testDB.SetupDB(b)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
finalizedCheckpt: &ethpb.Checkpoint{},
@@ -505,22 +356,14 @@ func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := stateutil.BlockRoot(block.Block)
if err != nil {
b.Fatal(err)
}
require.NoError(b, err)
bs := &pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{}}
state, err := beaconstate.InitializeFromProto(bs)
if err != nil {
b.Fatal(err)
}
if err := s.insertBlockToForkChoiceStore(ctx, block.Block, r, state); err != nil {
b.Fatal(err)
}
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, state))
b.ResetTimer()
for i := 0; i < b.N; i++ {
if !s.forkChoiceStore.HasNode(r) {
b.Fatal("Block is not in fork choice store")
}
require.Equal(b, true, s.forkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}

View File

@@ -11,6 +11,7 @@ go_library(
],
deps = [
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",

View File

@@ -5,11 +5,13 @@ package testing
import (
"bytes"
"context"
"sync"
"time"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
@@ -44,6 +46,7 @@ type ChainService struct {
opNotifier opfeed.Notifier
ValidAttestation bool
ForkChoiceStore *protoarray.Store
VerifyBlkDescendantErr error
}
// StateNotifier mocks the same method in the chain service.
@@ -78,12 +81,40 @@ func (msn *MockBlockNotifier) BlockFeed() *event.Feed {
// MockStateNotifier mocks the state notifier.
type MockStateNotifier struct {
feed *event.Feed
recv []*feed.Event
recvLock sync.Mutex
recvCh chan *feed.Event
RecordEvents bool
}
// ReceivedEvents returns the events received by the state feed in this mock.
func (msn *MockStateNotifier) ReceivedEvents() []*feed.Event {
msn.recvLock.Lock()
defer msn.recvLock.Unlock()
return msn.recv
}
// StateFeed returns a state feed.
func (msn *MockStateNotifier) StateFeed() *event.Feed {
if msn.feed == nil {
if msn.feed == nil && msn.recvCh == nil {
msn.feed = new(event.Feed)
if msn.RecordEvents {
msn.recvCh = make(chan *feed.Event)
sub := msn.feed.Subscribe(msn.recvCh)
go func() {
select {
case evt := <-msn.recvCh:
msn.recvLock.Lock()
msn.recv = append(msn.recv, evt)
msn.recvLock.Unlock()
case <-sub.Err():
sub.Unsubscribe()
}
}()
}
}
return msn.feed
}
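The recording notifier gives tests a simple way to assert on feed traffic without wiring their own subscriber. A minimal usage sketch mirroring the tests earlier in this diff (fragment only; ctx, t, and the rest of the Config are assumed from the surrounding test):

// Illustrative test fragment: record events, exercise the service, assert.
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
svc, err := NewService(ctx, &Config{StateNotifier: notifier})
require.NoError(t, err)
// ... exercise svc so it sends events on the state feed ...
if len(notifier.ReceivedEvents()) < 1 {
	t.Error("expected at least one state feed event")
}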
@@ -109,23 +140,64 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
return mon.feed
}
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
if ms.State == nil {
ms.State = &stateTrie.BeaconState{}
}
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
}
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
return err
}
ms.BlocksReceived = append(ms.BlocksReceived, block)
signingRoot, err := stateutil.BlockRoot(block.Block)
if err != nil {
return err
}
if ms.DB != nil {
if err := ms.DB.SaveBlock(ctx, block); err != nil {
return err
}
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
}
ms.Root = signingRoot[:]
ms.Block = block
return nil
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, roots [][32]byte) error {
if ms.State == nil {
ms.State = &stateTrie.BeaconState{}
}
for _, block := range blks {
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
}
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
return err
}
ms.BlocksReceived = append(ms.BlocksReceived, block)
signingRoot, err := stateutil.BlockRoot(block.Block)
if err != nil {
return err
}
if ms.DB != nil {
if err := ms.DB.SaveBlock(ctx, block); err != nil {
return err
}
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
}
ms.Root = signingRoot[:]
ms.Block = block
}
return nil
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
return nil
}
// ReceiveBlockNoVerify mocks ReceiveBlockNoVerify method in chain service.
func (ms *ChainService) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
return nil
}
// ReceiveBlockNoPubsub mocks ReceiveBlockNoPubsub method in chain service.
func (ms *ChainService) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
return nil
}
// ReceiveBlockNoPubsubForkchoice mocks ReceiveBlockNoPubsubForkchoice method in chain service.
func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
if ms.State == nil {
ms.State = &stateTrie.BeaconState{}
}
@@ -211,7 +283,7 @@ func (ms *ChainService) AttestationPreState(ctx context.Context, att *ethpb.Atte
}
// HeadValidatorsIndices mocks the same method in the chain service.
func (ms *ChainService) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
func (ms *ChainService) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
if ms.State == nil {
return []uint64{}, nil
}
@@ -219,7 +291,7 @@ func (ms *ChainService) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
}
// HeadSeed mocks the same method in the chain service.
func (ms *ChainService) HeadSeed(epoch uint64) ([32]byte, error) {
func (ms *ChainService) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
}
@@ -276,3 +348,8 @@ func (ms *ChainService) HasInitSyncBlock(root [32]byte) bool {
func (ms *ChainService) HeadGenesisValidatorRoot() [32]byte {
return [32]byte{}
}
// VerifyBlkDescendant mocks VerifyBlkDescendant and always returns nil.
func (ms *ChainService) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
return ms.VerifyBlkDescendantErr
}

View File

@@ -58,6 +58,8 @@ go_test(
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",

View File

@@ -7,6 +7,7 @@ import (
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
)
func TestAttestationCache_RoundTrip(t *testing.T) {
@@ -19,34 +20,20 @@ func TestAttestationCache_RoundTrip(t *testing.T) {
}
response, err := c.Get(ctx, req)
if err != nil {
t.Error(err)
}
assert.NoError(t, err)
assert.Equal(t, (*ethpb.AttestationData)(nil), response)
if response != nil {
t.Errorf("Empty cache returned an object: %v", response)
}
if err := c.MarkInProgress(req); err != nil {
t.Error(err)
}
assert.NoError(t, c.MarkInProgress(req))
res := &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 5},
}
if err = c.Put(ctx, req, res); err != nil {
t.Error(err)
}
if err := c.MarkNotInProgress(req); err != nil {
t.Error(err)
}
assert.NoError(t, c.Put(ctx, req, res))
assert.NoError(t, c.MarkNotInProgress(req))
response, err = c.Get(ctx, req)
if err != nil {
t.Error(err)
}
assert.NoError(t, err)
if !proto.Equal(response, res) {
t.Error("Expected equal protos to return from cache")

View File

@@ -20,7 +20,7 @@ var (
// maxCheckpointStateSize defines the max number of entries the checkpoint-to-state cache can contain.
// Choosing 10 to account for multiple forks: this allows 5 forks per epoch boundary with a 2-epoch
// window to accept attestations, per the latest spec.
maxCheckpointStateSize = 10
maxCheckpointStateSize = uint64(10)
// Metrics.
checkpointStateMiss = promauto.NewCounter(prometheus.CounterOpts{

View File

@@ -1,7 +1,6 @@
package cache
import (
"reflect"
"testing"
"github.com/gogo/protobuf/proto"
@@ -11,6 +10,8 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestCheckpointStateCacheKeyFn_OK(t *testing.T) {
@@ -18,31 +19,23 @@ func TestCheckpointStateCacheKeyFn_OK(t *testing.T) {
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 64,
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
info := &CheckpointState{
Checkpoint: cp,
State: st,
}
key, err := checkpointState(info)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
wantedKey, err := hashutil.HashProto(cp)
if err != nil {
t.Fatal(err)
}
if key != string(wantedKey[:]) {
t.Errorf("Incorrect hash key: %s, expected %s", key, string(wantedKey[:]))
}
require.NoError(t, err)
assert.Equal(t, string(wantedKey[:]), key)
}
func TestCheckpointStateCacheKeyFn_InvalidObj(t *testing.T) {
_, err := checkpointState("bad")
if err != ErrNotCheckpointState {
t.Errorf("Expected error %v, got %v", ErrNotCheckpointState, err)
}
assert.Equal(t, ErrNotCheckpointState, err)
}
func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
@@ -53,28 +46,21 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:],
Slot: 64,
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
info1 := &CheckpointState{
Checkpoint: cp1,
State: st,
}
state, err := cache.StateByCheckpoint(cp1)
if err != nil {
t.Fatal(err)
}
if state != nil {
t.Error("Expected state not to exist in empty cache")
}
require.NoError(t, err)
assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Expected state not to exist in empty cache")
require.NoError(t, cache.AddCheckpointState(info1))
if err := cache.AddCheckpointState(info1); err != nil {
t.Fatal(err)
}
state, err = cache.StateByCheckpoint(cp1)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if !proto.Equal(state.InnerStateUnsafe(), info1.State.InnerStateUnsafe()) {
t.Error("incorrectly cached state")
}
@@ -83,31 +69,21 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
st2, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 128,
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
info2 := &CheckpointState{
Checkpoint: cp2,
State: st2,
}
if err := cache.AddCheckpointState(info2); err != nil {
t.Fatal(err)
}
require.NoError(t, cache.AddCheckpointState(info2))
state, err = cache.StateByCheckpoint(cp2)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(state.CloneInnerState(), info2.State.CloneInnerState()) {
t.Error("incorrectly cached state")
}
require.NoError(t, err)
assert.DeepEqual(t, info2.State.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
state, err = cache.StateByCheckpoint(cp1)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(state.CloneInnerState(), info1.State.CloneInnerState()) {
t.Error("incorrectly cached state")
}
require.NoError(t, err)
assert.DeepEqual(t, info1.State.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
}
func TestCheckpointStateCache_MaxSize(t *testing.T) {
@@ -115,27 +91,17 @@ func TestCheckpointStateCache_MaxSize(t *testing.T) {
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 0,
})
if err != nil {
t.Fatal(err)
}
for i := 0; i < maxCheckpointStateSize+100; i++ {
if err := st.SetSlot(uint64(i)); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
for i := uint64(0); i < maxCheckpointStateSize+100; i++ {
require.NoError(t, st.SetSlot(i))
info := &CheckpointState{
Checkpoint: &ethpb.Checkpoint{Epoch: uint64(i)},
Checkpoint: &ethpb.Checkpoint{Epoch: i},
State: st,
}
if err := c.AddCheckpointState(info); err != nil {
t.Fatal(err)
}
require.NoError(t, c.AddCheckpointState(info))
}
if len(c.cache.ListKeys()) != maxCheckpointStateSize {
t.Errorf(
"Expected hash cache key size to be %d, got %d",
maxCheckpointStateSize,
len(c.cache.ListKeys()),
)
}
assert.Equal(t, maxCheckpointStateSize, uint64(len(c.cache.ListKeys())))
}

View File

@@ -19,7 +19,7 @@ var (
// maxCommitteesCacheSize defines the max number of shuffled committees the cache can hold on a per-randao basis.
// Due to reorgs, it's good to keep the old cache around for a quick switch-over. 10 is a generous
// cache size as it accounts for 3 concurrent branches over 3 epochs.
maxCommitteesCacheSize = 10
maxCommitteesCacheSize = uint64(10)
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
@@ -96,7 +96,7 @@ func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]
indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
start, end := startEndIndices(item, indexOffSet)
if int(end) > len(item.ShuffledIndices) || end < start {
if end > uint64(len(item.ShuffledIndices)) || end < start {
return nil, errors.New("requested index out of bound")
}
@@ -169,6 +169,30 @@ func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
return item.SortedIndices, nil
}
// ActiveIndicesCount returns the active indices count of a given seed stored in cache.
func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return 0, err
}
if exists {
CommitteeCacheHit.Inc()
} else {
CommitteeCacheMiss.Inc()
return 0, nil
}
item, ok := obj.(*Committees)
if !ok {
return 0, ErrNotCommittee
}
return len(item.SortedIndices), nil
}
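Note the miss behavior: a cache miss returns 0 with a nil error rather than a sentinel, so callers treat zero as "not cached" and recompute. A hedged sketch; activeCountFromState is a hypothetical fallback helper:

// Illustrative only: prefer the O(1) cached count, fall back to the state
// on a miss (ActiveIndicesCount returns 0, nil when nothing is cached).
func activeCount(c *CommitteeCache, seed [32]byte, st *stateTrie.BeaconState) (int, error) {
	count, err := c.ActiveIndicesCount(seed)
	if err != nil || count > 0 {
		return count, err
	}
	return activeCountFromState(st) // hypothetical fallback
}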
// ProposerIndices returns the proposer indices of a given seed.
func (c *CommitteeCache) ProposerIndices(seed [32]byte) ([]uint64, error) {
c.lock.RLock()

View File

@@ -1,10 +1,11 @@
package cache
import (
"reflect"
"testing"
fuzz "github.com/google/gofuzz"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestCommitteeKeyFuzz_OK(t *testing.T) {
@@ -14,12 +15,8 @@ func TestCommitteeKeyFuzz_OK(t *testing.T) {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
k, err := committeeKeyFn(c)
if err != nil {
t.Fatal(err)
}
if k != key(c.Seed) {
t.Errorf("Incorrect hash k: %s, expected %s", k, key(c.Seed))
}
require.NoError(t, err)
assert.Equal(t, key(c.Seed), k)
}
}
@@ -30,17 +27,12 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
if err := cache.AddCommitteeShuffledList(c); err != nil {
t.Fatal(err)
}
if _, err := cache.Committee(0, c.Seed, 0); err != nil {
t.Fatal(err)
}
require.NoError(t, cache.AddCommitteeShuffledList(c))
_, err := cache.Committee(0, c.Seed, 0)
require.NoError(t, err)
}
if len(cache.CommitteeCache.ListKeys()) != maxCommitteesCacheSize {
t.Error("Incorrect key size")
}
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.ListKeys())), "Incorrect key size")
}
func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
@@ -50,19 +42,12 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
if err := cache.AddCommitteeShuffledList(c); err != nil {
t.Fatal(err)
}
require.NoError(t, cache.AddCommitteeShuffledList(c))
indices, err := cache.ActiveIndices(c.Seed)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(indices, c.SortedIndices) {
t.Error("Saved indices not the same")
}
require.NoError(t, err)
assert.DeepEqual(t, c.SortedIndices, indices)
}
if len(cache.CommitteeCache.ListKeys()) != maxCommitteesCacheSize {
t.Error("Incorrect key size")
}
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.ListKeys())), "Incorrect key size")
}

View File

@@ -2,13 +2,14 @@ package cache
import (
"math"
"reflect"
"sort"
"strconv"
"testing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestCommitteeKeyFn_OK(t *testing.T) {
@@ -19,19 +20,13 @@ func TestCommitteeKeyFn_OK(t *testing.T) {
}
k, err := committeeKeyFn(item)
if err != nil {
t.Fatal(err)
}
if k != key(item.Seed) {
t.Errorf("Incorrect hash k: %s, expected %s", k, key(item.Seed))
}
require.NoError(t, err)
assert.Equal(t, key(item.Seed), k)
}
func TestCommitteeKeyFn_InvalidObj(t *testing.T) {
_, err := committeeKeyFn("bad")
if err != ErrNotCommittee {
t.Errorf("Expected error %v, got %v", ErrNotCommittee, err)
}
assert.Equal(t, ErrNotCommittee, err)
}
func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
@@ -46,30 +41,18 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
slot := params.BeaconConfig().SlotsPerEpoch
committeeIndex := uint64(1)
indices, err := cache.Committee(slot, item.Seed, committeeIndex)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee not to exist in empty cache")
}
require.NoError(t, cache.AddCommitteeShuffledList(item))
if err := cache.AddCommitteeShuffledList(item); err != nil {
t.Fatal(err)
}
wantedIndex := uint64(0)
indices, err = cache.Committee(slot, item.Seed, wantedIndex)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
start, end := startEndIndices(item, wantedIndex)
if !reflect.DeepEqual(indices, item.ShuffledIndices[start:end]) {
t.Errorf(
"Expected fetched active indices to be %v, got %v",
indices,
item.ShuffledIndices[start:end],
)
}
assert.DeepEqual(t, item.ShuffledIndices[start:end], indices)
}
func TestCommitteeCache_ActiveIndices(t *testing.T) {
@@ -77,24 +60,31 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
indices, err := cache.ActiveIndices(item.Seed)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee count not to exist in empty cache")
t.Error("Expected committee not to exist in empty cache")
}
if err := cache.AddCommitteeShuffledList(item); err != nil {
t.Fatal(err)
}
require.NoError(t, cache.AddCommitteeShuffledList(item))
indices, err = cache.ActiveIndices(item.Seed)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(indices, item.SortedIndices) {
t.Error("Did not receive correct active indices from cache")
}
require.NoError(t, err)
assert.DeepEqual(t, item.SortedIndices, indices)
}
func TestCommitteeCache_ActiveCount(t *testing.T) {
cache := NewCommitteesCache()
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
count, err := cache.ActiveIndicesCount(item.Seed)
require.NoError(t, err)
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
require.NoError(t, cache.AddCommitteeShuffledList(item))
count, err = cache.ActiveIndicesCount(item.Seed)
require.NoError(t, err)
assert.Equal(t, len(item.SortedIndices), count)
}
func TestCommitteeCache_AddProposerIndicesList(t *testing.T) {
@@ -103,45 +93,29 @@ func TestCommitteeCache_AddProposerIndicesList(t *testing.T) {
seed := [32]byte{'A'}
indices := []uint64{1, 2, 3, 4, 5}
indices, err := cache.ProposerIndices(seed)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee count not to exist in empty cache")
}
if err := cache.AddProposerIndicesList(seed, indices); err != nil {
t.Fatal(err)
}
require.NoError(t, cache.AddProposerIndicesList(seed, indices))
received, err := cache.ProposerIndices(seed)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(indices, received) {
t.Error("Did not receive correct proposer indices from cache")
}
require.NoError(t, err)
assert.DeepEqual(t, received, indices)
item := &Committees{Seed: [32]byte{'B'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
if err := cache.AddCommitteeShuffledList(item); err != nil {
t.Fatal(err)
}
require.NoError(t, cache.AddCommitteeShuffledList(item))
indices, err = cache.ProposerIndices(item.Seed)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee count not to exist in empty cache")
}
if err := cache.AddProposerIndicesList(item.Seed, indices); err != nil {
t.Fatal(err)
}
received, err = cache.ProposerIndices(item.Seed)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(indices, received) {
t.Error("Did not receive correct proposer indices from cache")
}
require.NoError(t, cache.AddProposerIndicesList(item.Seed, indices))
received, err = cache.ProposerIndices(item.Seed)
require.NoError(t, err)
assert.DeepEqual(t, received, indices)
}
func TestCommitteeCache_CanRotate(t *testing.T) {
@@ -151,27 +125,20 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
for i := 100; i < 200; i++ {
s := []byte(strconv.Itoa(i))
item := &Committees{Seed: bytesutil.ToBytes32(s)}
if err := cache.AddCommitteeShuffledList(item); err != nil {
t.Fatal(err)
}
require.NoError(t, cache.AddCommitteeShuffledList(item))
}
k := cache.CommitteeCache.ListKeys()
if len(k) != maxCommitteesCacheSize {
t.Errorf("wanted: %d, got: %d", maxCommitteesCacheSize, len(k))
}
assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))
sort.Slice(k, func(i, j int) bool {
return k[i] < k[j]
})
s := bytesutil.ToBytes32([]byte(strconv.Itoa(190)))
if k[0] != key(s) {
t.Error("incorrect key received for slot 190")
}
assert.Equal(t, key(s), k[0], "incorrect key received for slot 190")
s = bytesutil.ToBytes32([]byte(strconv.Itoa(199)))
if k[len(k)-1] != key(s) {
t.Error("incorrect key received for slot 199")
}
assert.Equal(t, key(s), k[len(k)-1], "incorrect key received for slot 199")
}
func TestCommitteeCacheOutOfRange(t *testing.T) {
@@ -184,11 +151,8 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
SortedIndices: []uint64{},
ProposerIndices: []uint64{},
})
if err != nil {
t.Error(err)
}
require.NoError(t, err)
_, err = cache.Committee(0, seed, math.MaxUint64) // Overflow!
if err == nil {
t.Fatal("Did not fail as expected")
}
require.NotNil(t, err, "Did not fail as expected")
}

View File

@@ -8,12 +8,12 @@ import (
var (
// maxCacheSize is 4x the epoch length, for additional cache padding.
// Requests should only access committees within the defined epoch length.
maxCacheSize = int(4 * params.BeaconConfig().SlotsPerEpoch)
maxCacheSize = 4 * params.BeaconConfig().SlotsPerEpoch
)
// trim the FIFO queue to the maxSize.
func trim(queue *cache.FIFO, maxSize int) {
for s := len(queue.ListKeys()); s > maxSize; s-- {
func trim(queue *cache.FIFO, maxSize uint64) {
for s := uint64(len(queue.ListKeys())); s > maxSize; s-- {
_, err := queue.Pop(popProcessNoopFunc)
if err != nil {
// popProcessNoopFunc never returns an error, but we handle this anyway to make linter

View File

@@ -13,9 +13,12 @@ go_library(
"//proto/beacon/db:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
@@ -24,15 +27,20 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"deposits_test.go",
"deposits_cache_test.go",
"pending_deposits_test.go",
],
embed = [":go_default_library"],
deps = [
"//proto/beacon/db:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -15,8 +15,11 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/trieutil"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -30,9 +33,18 @@ var (
// DepositFetcher defines a struct which can retrieve deposit information from a store.
type DepositFetcher interface {
AllDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit
AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
FinalizedDeposits(ctx context.Context) *FinalizedDeposits
NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
}
// FinalizedDeposits stores the trie of deposits that have been included
// in the beacon state up to the latest finalized checkpoint.
type FinalizedDeposits struct {
Deposits *trieutil.SparseMerkleTrie
MerkleTrieIndex int64
}
// DepositCache stores all in-memory deposit objects. This
@@ -41,19 +53,28 @@ type DepositCache struct {
// Beacon chain deposits in memory.
pendingDeposits []*dbpb.DepositContainer
deposits []*dbpb.DepositContainer
finalizedDeposits *FinalizedDeposits
depositsLock sync.RWMutex
chainStartDeposits []*ethpb.Deposit
chainStartPubkeys map[string]bool
}
// NewDepositCache instantiates a new deposit cache
func NewDepositCache() *DepositCache {
func NewDepositCache() (*DepositCache, error) {
finalizedDepositsTrie, err := trieutil.NewTrie(int(params.BeaconConfig().DepositContractTreeDepth))
if err != nil {
return nil, err
}
// finalizedDeposits.MerkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
// Inserting the first item into the trie will set the value of the index to 0.
return &DepositCache{
pendingDeposits: []*dbpb.DepositContainer{},
deposits: []*dbpb.DepositContainer{},
finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
chainStartPubkeys: make(map[string]bool),
chainStartDeposits: make([]*ethpb.Deposit, 0),
}
}, nil
}
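
Because the constructor now allocates the finalized-deposits trie, it can fail, and every caller in the updated tests switches to the two-value form. A minimal caller sketch inside the depositcache package (the context import is assumed):

func exampleNewCache(ctx context.Context) (*DepositCache, error) {
	dc, err := NewDepositCache() // may now fail while building the deposit trie
	if err != nil {
		return nil, err
	}
	// Before any finalization the trie index is -1, i.e. no finalized items yet.
	_ = dc.FinalizedDeposits(ctx).MerkleTrieIndex
	return dc, nil
}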
// InsertDeposit into the database. If deposit or block number are nil
@@ -91,9 +112,40 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbp
historicalDepositsCount.Add(float64(len(ctrs)))
}
// AllDepositContainers returns a list of deposits all historical deposit containers until the given block number.
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
depositTrie := dc.finalizedDeposits.Deposits
insertIndex := dc.finalizedDeposits.MerkleTrieIndex + 1
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
continue
}
if d.Index > eth1DepositIndex {
break
}
depHash, err := ssz.HashTreeRoot(d.Deposit.Data)
if err != nil {
log.WithError(err).Error("Could not hash deposit data. Finalized deposit cache not updated.")
return
}
depositTrie.Insert(depHash[:], int(insertIndex))
insertIndex++
}
dc.finalizedDeposits = &FinalizedDeposits{
Deposits: depositTrie,
MerkleTrieIndex: eth1DepositIndex,
}
}
// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.DepositContainer {
ctx, span := trace.StartSpan(ctx, "BeaconDB.AllDepositContainers")
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@@ -119,9 +171,9 @@ func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) b
return false
}
// AllDeposits returns a list of deposits all historical deposits until the given block number
// AllDeposits returns a list of historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit {
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
defer span.End()
dc.depositsLock.RLock()
@@ -129,7 +181,7 @@ func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*
var deposits []*ethpb.Deposit
for _, ctnr := range dc.deposits {
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
deposits = append(deposits, ctnr.Deposit)
}
}
@@ -171,3 +223,39 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et
}
return deposit, blockNum
}
// FinalizedDeposits returns the finalized deposits trie.
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposits {
ctx, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
return &FinalizedDeposits{
Deposits: dc.finalizedDeposits.Deposits.Copy(),
MerkleTrieIndex: dc.finalizedDeposits.MerkleTrieIndex,
}
}
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
if dc.finalizedDeposits == nil {
return dc.AllDeposits(ctx, untilBlk)
}
lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
var deposits []*ethpb.Deposit
for _, d := range dc.deposits {
if (d.Index > lastFinalizedDepositIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
deposits = append(deposits, d.Deposit)
}
}
return deposits
}
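
Taken together, these methods split the cache into a finalized trie plus a non-finalized tail. A rough end-to-end sketch of the intended flow, mirroring the tests that follow (the index 1 is illustrative):

func exampleFinalizationFlow(ctx context.Context, dc *DepositCache) {
	// Fold deposits with Index <= 1 into the finalized trie.
	dc.InsertFinalizedDeposits(ctx, 1)
	// The finalized side comes back as a copy of the trie plus its last index.
	fin := dc.FinalizedDeposits(ctx)
	// Everything after index 1, optionally capped by block height, stays in the tail.
	tail := dc.NonFinalizedDeposits(ctx, nil)
	_, _ = fin, tail
}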

View File

@@ -0,0 +1,483 @@
package depositcache
import (
"bytes"
"context"
"fmt"
"math/big"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/prysmaticlabs/prysm/shared/trieutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
const nilDepositErr = "Ignoring nil deposit insertion"
var _ = DepositFetcher(&DepositCache{})
func TestInsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
hook := logTest.NewGlobal()
dc, err := NewDepositCache()
require.NoError(t, err)
dc.InsertDeposit(context.Background(), nil, 1, 0, [32]byte{})
require.Equal(t, 0, len(dc.deposits), "Number of deposits changed")
assert.Equal(t, nilDepositErr, hook.LastEntry().Message)
}
func TestInsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
insertions := []struct {
blkNum uint64
deposit *ethpb.Deposit
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 3,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 4,
},
}
for _, ins := range insertions {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
}
expectedIndices := []int64{0, 1, 3, 4}
for i, ei := range expectedIndices {
assert.Equal(t, ei, dc.deposits[i].Index,
fmt.Sprintf("dc.deposits[%d].Index = %d, wanted %d", i, dc.deposits[i].Index, ei))
}
}
func TestAllDeposits_ReturnsAllDeposits(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
deposits := []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
}
dc.deposits = deposits
d := dc.AllDeposits(context.Background(), nil)
assert.Equal(t, len(deposits), len(d))
}
func TestAllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
deposits := []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
}
dc.deposits = deposits
d := dc.AllDeposits(context.Background(), big.NewInt(11))
assert.Equal(t, 5, len(d))
}
func TestDepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
DepositRoot: []byte("root"),
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(11))
assert.Equal(t, 5, int(n))
assert.Equal(t, bytesutil.ToBytes32([]byte("root")), root)
}
func TestDepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOldestDeposit(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
DepositRoot: []byte("root"),
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
DepositRoot: []byte("root"),
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(2))
assert.Equal(t, 0, int(n))
assert.Equal(t, [32]byte{}, root)
}
func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 9,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk0"),
},
},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk1"),
},
},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk1"),
},
},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk2"),
},
},
},
}
dep, blkNum := dc.DepositByPubkey(context.Background(), []byte("pk1"))
if !bytes.Equal(dep.Data.PublicKey, []byte("pk1")) {
t.Error("Returned wrong deposit")
}
assert.Equal(t, 0, blkNum.Cmp(big.NewInt(10)),
fmt.Sprintf("Returned wrong block number %v", blkNum))
}
func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
finalizedDeposits := []*dbpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{0},
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{1},
},
},
Index: 1,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{2},
},
},
Index: 2,
},
}
dc.deposits = append(finalizedDeposits, &dbpb.DepositContainer{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{3},
},
},
Index: 3,
})
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
var deps [][]byte
for _, d := range finalizedDeposits {
hash, err := ssz.HashTreeRoot(d.Deposit.Data)
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
}
trie, err := trieutil.GenerateTrieFromItems(deps, int(params.BeaconConfig().DepositContractTreeDepth))
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}
func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
oldFinalizedDeposits := []*dbpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{0},
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{1},
},
},
Index: 1,
},
}
newFinalizedDeposit := dbpb.DepositContainer{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{2},
},
},
Index: 2,
}
dc.deposits = oldFinalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 1)
// Artificially exclude old deposits so that they can only be retrieved from previously finalized deposits.
dc.deposits = []*dbpb.DepositContainer{&newFinalizedDeposit}
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
var deps [][]byte
for _, d := range append(oldFinalizedDeposits, &newFinalizedDeposit) {
hash, err := ssz.HashTreeRoot(d.Deposit.Data)
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
}
trie, err := trieutil.GenerateTrieFromItems(deps, int(params.BeaconConfig().DepositContractTreeDepth))
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}
func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
finalizedDeposits := dc.finalizedDeposits
assert.NotNil(t, finalizedDeposits)
assert.NotNil(t, finalizedDeposits.Deposits)
assert.Equal(t, int64(-1), finalizedDeposits.MerkleTrieIndex)
}
func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
finalizedDeposits := []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{0},
},
},
Index: 0,
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{1},
},
},
Index: 1,
},
}
dc.deposits = append(finalizedDeposits,
&dbpb.DepositContainer{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{2},
},
},
Index: 2,
},
&dbpb.DepositContainer{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{3},
},
},
Index: 3,
})
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), nil)
assert.Equal(t, 2, len(deps))
}
func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *testing.T) {
dc, err := NewDepositCache()
require.NoError(t, err)
finalizedDeposits := []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{0},
},
},
Index: 0,
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{1},
},
},
Index: 1,
},
}
dc.deposits = append(finalizedDeposits,
&dbpb.DepositContainer{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{2},
},
},
Index: 2,
},
&dbpb.DepositContainer{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{3},
},
},
Index: 3,
})
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
assert.Equal(t, 1, len(deps))
}

View File

@@ -1,275 +0,0 @@
package depositcache
import (
"bytes"
"context"
"math/big"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
const nilDepositErr = "Ignoring nil deposit insertion"
var _ = DepositFetcher(&DepositCache{})
func TestBeaconDB_InsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
hook := logTest.NewGlobal()
dc := DepositCache{}
dc.InsertDeposit(context.Background(), nil, 1, 0, [32]byte{})
if len(dc.deposits) != 0 {
t.Fatal("Number of deposits changed")
}
if hook.LastEntry().Message != nilDepositErr {
t.Errorf("Did not log correct message, wanted \"Ignoring nil deposit insertion\", got \"%s\"", hook.LastEntry().Message)
}
}
func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
dc := DepositCache{}
insertions := []struct {
blkNum uint64
deposit *ethpb.Deposit
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 3,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 4,
},
}
for _, ins := range insertions {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
}
expectedIndices := []int64{0, 1, 3, 4}
for i, ei := range expectedIndices {
if dc.deposits[i].Index != ei {
t.Errorf("dc.deposits[%d].Index = %d, wanted %d", i, dc.deposits[i].Index, ei)
}
}
}
func TestBeaconDB_AllDeposits_ReturnsAllDeposits(t *testing.T) {
dc := DepositCache{}
deposits := []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
}
dc.deposits = deposits
d := dc.AllDeposits(context.Background(), nil)
if len(d) != len(deposits) {
t.Errorf("Return the wrong number of deposits (%d) wanted %d", len(d), len(deposits))
}
}
func TestBeaconDB_AllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
dc := DepositCache{}
deposits := []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
}
dc.deposits = deposits
d := dc.AllDeposits(context.Background(), big.NewInt(11))
expected := 5
if len(d) != expected {
t.Errorf("Return the wrong number of deposits (%d) wanted %d", len(d), expected)
}
}
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing.T) {
dc := DepositCache{}
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
DepositRoot: []byte("root"),
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{},
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(11))
if int(n) != 5 {
t.Errorf("Returned unexpected deposits number %d wanted %d", n, 5)
}
if root != bytesutil.ToBytes32([]byte("root")) {
t.Errorf("Returned unexpected root: %v", root)
}
}
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOldestDeposit(t *testing.T) {
dc := DepositCache{}
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{},
DepositRoot: []byte("root"),
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{},
DepositRoot: []byte("root"),
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(2))
if int(n) != 0 {
t.Errorf("Returned unexpected deposits number %d wanted %d", n, 0)
}
if root != [32]byte{} {
t.Errorf("Returned unexpected root: %v", root)
}
}
func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
dc := DepositCache{}
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 9,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk0"),
},
},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk1"),
},
},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk1"),
},
},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte("pk2"),
},
},
},
}
dep, blkNum := dc.DepositByPubkey(context.Background(), []byte("pk1"))
if !bytes.Equal(dep.Data.PublicKey, []byte("pk1")) {
t.Error("Returned wrong deposit")
}
if blkNum.Cmp(big.NewInt(10)) != 0 {
t.Errorf("Returned wrong block number %v", blkNum)
}
}

View File

@@ -24,7 +24,7 @@ var (
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer
PendingContainers(ctx context.Context, untilBlk *big.Int) []*dbpb.DepositContainer
}
// InsertPendingDeposit into the database. If deposit or block number are nil
@@ -50,36 +50,23 @@ func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Depos
// PendingDeposits returns a list of deposits until the given block number
// (inclusive). If no block is specified then this method returns all pending
// deposits.
func (dc *DepositCache) PendingDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit {
func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
var depositCntrs []*dbpb.DepositContainer
for _, ctnr := range dc.pendingDeposits {
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
depositCntrs = append(depositCntrs, ctnr)
}
}
// Sort the deposits by Merkle index.
sort.SliceStable(depositCntrs, func(i, j int) bool {
return depositCntrs[i].Index < depositCntrs[j].Index
})
depositCntrs := dc.PendingContainers(ctx, untilBlk)
var deposits []*ethpb.Deposit
for _, dep := range depositCntrs {
deposits = append(deposits, dep.Deposit)
}
span.AddAttributes(trace.Int64Attribute("count", int64(len(deposits))))
return deposits
}
// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer {
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*dbpb.DepositContainer {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
dc.depositsLock.RLock()
@@ -87,7 +74,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.In
var depositCntrs []*dbpb.DepositContainer
for _, ctnr := range dc.pendingDeposits {
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
depositCntrs = append(depositCntrs, ctnr)
}
}
@@ -141,7 +128,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
}
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int) {
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
defer span.End()
@@ -155,7 +142,7 @@ func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeInde
var cleanDeposits []*dbpb.DepositContainer
for _, dp := range dc.pendingDeposits {
if dp.Index >= int64(merkleTreeIndex) {
if dp.Index >= merkleTreeIndex {
cleanDeposits = append(cleanDeposits, dp)
}
}

View File

@@ -3,13 +3,13 @@ package depositcache
import (
"context"
"math/big"
"reflect"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
)
var _ = PendingDepositsFetcher(&DepositCache{})
@@ -18,18 +18,14 @@ func TestInsertPendingDeposit_OK(t *testing.T) {
dc := DepositCache{}
dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, 111, 100, [32]byte{})
if len(dc.pendingDeposits) != 1 {
t.Error("Deposit not inserted")
}
assert.Equal(t, 1, len(dc.pendingDeposits), "Deposit not inserted")
}
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
dc := DepositCache{}
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
if len(dc.pendingDeposits) > 0 {
t.Error("Unexpected deposit insertion")
}
assert.Equal(t, 0, len(dc.pendingDeposits))
}
func TestRemovePendingDeposit_OK(t *testing.T) {
@@ -55,9 +51,7 @@ func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
dc := DepositCache{}
dc.pendingDeposits = []*dbpb.DepositContainer{{Deposit: &ethpb.Deposit{}}}
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
if len(dc.pendingDeposits) != 1 {
t.Errorf("Deposit unexpectedly removed")
}
assert.Equal(t, 1, len(dc.pendingDeposits), "Deposit unexpectedly removed")
}
func TestPendingDeposit_RoundTrip(t *testing.T) {
@@ -67,9 +61,7 @@ func TestPendingDeposit_RoundTrip(t *testing.T) {
dep := &ethpb.Deposit{Proof: proof}
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
dc.RemovePendingDeposit(context.Background(), dep)
if len(dc.pendingDeposits) != 0 {
t.Error("Failed to insert & delete a pending deposit")
}
assert.Equal(t, 0, len(dc.pendingDeposits), "Failed to insert & delete a pending deposit")
}
func TestPendingDeposits_OK(t *testing.T) {
@@ -86,15 +78,10 @@ func TestPendingDeposits_OK(t *testing.T) {
{Proof: [][]byte{[]byte("A")}},
{Proof: [][]byte{[]byte("B")}},
}
if !reflect.DeepEqual(deposits, expected) {
t.Errorf("Unexpected deposits. got=%+v want=%+v", deposits, expected)
}
assert.DeepEqual(t, expected, deposits)
all := dc.PendingDeposits(context.Background(), nil)
if len(all) != len(dc.pendingDeposits) {
t.Error("PendingDeposits(ctx, nil) did not return all deposits")
}
assert.Equal(t, len(dc.pendingDeposits), len(all), "PendingDeposits(ctx, nil) did not return all deposits")
}
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
@@ -118,9 +105,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
{Eth1BlockHeight: 10, Index: 10},
{Eth1BlockHeight: 12, Index: 12},
}
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
}
assert.DeepEqual(t, expected, dc.pendingDeposits)
}
func TestPrunePendingDeposits_OK(t *testing.T) {
@@ -143,9 +128,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
{Eth1BlockHeight: 12, Index: 12},
}
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
}
assert.DeepEqual(t, expected, dc.pendingDeposits)
dc.pendingDeposits = []*dbpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},
@@ -162,8 +145,5 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
{Eth1BlockHeight: 12, Index: 12},
}
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
}
assert.DeepEqual(t, expected, dc.pendingDeposits)
}

View File

@@ -1,6 +1,8 @@
package cache
import (
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -24,6 +26,7 @@ var (
// HotStateCache is used to store the processed beacon state after the finalized checkpoint.
type HotStateCache struct {
cache *lru.Cache
lock sync.RWMutex
}
// NewHotStateCache initializes the map and underlying cache.
@@ -40,6 +43,8 @@ func NewHotStateCache() *HotStateCache {
// Get returns a cached response via input block root, if any.
// The response is copied by default.
func (c *HotStateCache) Get(root [32]byte) *stateTrie.BeaconState {
c.lock.RLock()
defer c.lock.RUnlock()
item, exists := c.cache.Get(root)
if exists && item != nil {
@@ -52,6 +57,8 @@ func (c *HotStateCache) Get(root [32]byte) *stateTrie.BeaconState {
// GetWithoutCopy returns a non-copied cached response via input block root.
func (c *HotStateCache) GetWithoutCopy(root [32]byte) *stateTrie.BeaconState {
c.lock.RLock()
defer c.lock.RUnlock()
item, exists := c.cache.Get(root)
if exists && item != nil {
hotStateCacheHit.Inc()
@@ -63,15 +70,21 @@ func (c *HotStateCache) GetWithoutCopy(root [32]byte) *stateTrie.BeaconState {
// Put the response in the cache.
func (c *HotStateCache) Put(root [32]byte, state *stateTrie.BeaconState) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Add(root, state)
}
// Has returns true if the key exists in the cache.
func (c *HotStateCache) Has(root [32]byte) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.cache.Contains(root)
}
// Delete deletes the key if it exists in the cache.
func (c *HotStateCache) Delete(root [32]byte) bool {
c.lock.Lock()
defer c.lock.Unlock()
return c.cache.Remove(root)
}
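
With the added RWMutex, Get/GetWithoutCopy/Has take the shared read lock while Put/Delete take the exclusive write lock, serializing concurrent access at the cache level. A hedged sketch of the concurrent pattern this guards:

func exampleConcurrentHotState(c *HotStateCache, root [32]byte, st *stateTrie.BeaconState) {
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { // writer path: takes the exclusive lock inside Put
		defer wg.Done()
		c.Put(root, st)
	}()
	go func() { // reader path: takes the shared lock inside Get
		defer wg.Done()
		_ = c.Get(root)
	}()
	wg.Wait()
}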

View File

@@ -1,46 +1,34 @@
package cache_test
import (
"reflect"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestHotStateCache_RoundTrip(t *testing.T) {
c := cache.NewHotStateCache()
root := [32]byte{'A'}
state := c.Get(root)
if state != nil {
t.Errorf("Empty cache returned an object: %v", state)
}
if c.Has(root) {
t.Error("Empty cache has an object")
}
assert.Equal(t, (*stateTrie.BeaconState)(nil), state)
assert.Equal(t, false, c.Has(root), "Empty cache has an object")
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 10,
})
if err != nil {
t.Fatal(err)
}
c.Put(root, state)
require.NoError(t, err)
c.Put(root, state)
assert.Equal(t, true, c.Has(root), "Empty cache does not have an object")
if !c.Has(root) {
t.Error("Empty cache does not have an object")
}
res := c.Get(root)
if state == nil {
t.Errorf("Empty cache returned an object: %v", state)
}
if !reflect.DeepEqual(state.CloneInnerState(), res.CloneInnerState()) {
t.Error("Expected equal protos to return from cache")
}
assert.NotNil(t, state)
assert.DeepEqual(t, res.CloneInnerState(), state.CloneInnerState(), "Expected equal protos to return from cache")
c.Delete(root)
if c.Has(root) {
t.Error("Cache not suppose to have the object")
}
assert.Equal(t, false, c.Has(root), "Cache not supposed to have the object")
}

View File

@@ -2,12 +2,13 @@ package cache_test
import (
"context"
"reflect"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestSkipSlotCache_RoundTrip(t *testing.T) {
@@ -15,39 +16,20 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
c := cache.NewSkipSlotCache()
state, err := c.Get(ctx, 5)
if err != nil {
t.Error(err)
}
require.NoError(t, err)
assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Empty cache returned an object")
if state != nil {
t.Errorf("Empty cache returned an object: %v", state)
}
if err := c.MarkInProgress(5); err != nil {
t.Error(err)
}
require.NoError(t, c.MarkInProgress(5))
state, err = stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 10,
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if err = c.Put(ctx, 5, state); err != nil {
t.Error(err)
}
if err := c.MarkNotInProgress(5); err != nil {
t.Error(err)
}
require.NoError(t, c.Put(ctx, 5, state))
require.NoError(t, c.MarkNotInProgress(5))
res, err := c.Get(ctx, 5)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(state.CloneInnerState(), res.CloneInnerState()) {
t.Error("Expected equal protos to return from cache")
}
require.NoError(t, err)
assert.DeepEqual(t, res.CloneInnerState(), state.CloneInnerState(), "Expected equal protos to return from cache")
}

View File

@@ -25,12 +25,12 @@ var SubnetIDs = newSubnetIDs()
func newSubnetIDs() *subnetIDs {
// A node can calculate committee assignments for both the current epoch and the next epoch.
// Max size is therefore set to 2 epochs' length.
cacheSize := int(params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2)
attesterCache, err := lru.New(cacheSize)
cacheSize := params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2
attesterCache, err := lru.New(int(cacheSize))
if err != nil {
panic(err)
}
aggregatorCache, err := lru.New(cacheSize)
aggregatorCache, err := lru.New(int(cacheSize))
if err != nil {
panic(err)
}
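
For a sense of scale, assuming mainnet parameters (MaxCommitteesPerSlot = 64 and SlotsPerEpoch = 32, values not shown in this diff), the computed size works out as below:

func exampleSubnetCacheSize() (*lru.Cache, error) {
	// Assumed mainnet values, not taken from this diff:
	// MaxCommitteesPerSlot = 64, SlotsPerEpoch = 32.
	cacheSize := uint64(64) * 32 * 2 // 4096 entries: two epochs of assignments
	return lru.New(int(cacheSize))   // lru.New still takes an int
}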

View File

@@ -1,58 +1,44 @@
package cache
import (
"reflect"
"testing"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestSubnetIDsCache_RoundTrip(t *testing.T) {
c := newSubnetIDs()
slot := uint64(100)
committeeIDs := c.GetAggregatorSubnetIDs(slot)
if len(committeeIDs) != 0 {
t.Errorf("Empty cache returned an object: %v", committeeIDs)
}
assert.Equal(t, 0, len(committeeIDs), "Empty cache returned an object")
c.AddAggregatorSubnetID(slot, 1)
res := c.GetAggregatorSubnetIDs(slot)
if !reflect.DeepEqual(res, []uint64{1}) {
t.Error("Expected equal value to return from cache")
}
assert.DeepEqual(t, []uint64{1}, res)
c.AddAggregatorSubnetID(slot, 2)
res = c.GetAggregatorSubnetIDs(slot)
if !reflect.DeepEqual(res, []uint64{1, 2}) {
t.Error("Expected equal value to return from cache")
}
assert.DeepEqual(t, []uint64{1, 2}, res)
c.AddAggregatorSubnetID(slot, 3)
res = c.GetAggregatorSubnetIDs(slot)
if !reflect.DeepEqual(res, []uint64{1, 2, 3}) {
t.Error("Expected equal value to return from cache")
}
assert.DeepEqual(t, []uint64{1, 2, 3}, res)
committeeIDs = c.GetAttesterSubnetIDs(slot)
if len(committeeIDs) != 0 {
t.Errorf("Empty cache returned an object: %v", committeeIDs)
}
assert.Equal(t, 0, len(committeeIDs), "Empty cache returned an object")
c.AddAttesterSubnetID(slot, 11)
res = c.GetAttesterSubnetIDs(slot)
if !reflect.DeepEqual(res, []uint64{11}) {
t.Error("Expected equal value to return from cache")
}
assert.DeepEqual(t, []uint64{11}, res)
c.AddAttesterSubnetID(slot, 22)
res = c.GetAttesterSubnetIDs(slot)
if !reflect.DeepEqual(res, []uint64{11, 22}) {
t.Error("Expected equal value to return from cache")
}
assert.DeepEqual(t, []uint64{11, 22}, res)
c.AddAttesterSubnetID(slot, 33)
res = c.GetAttesterSubnetIDs(slot)
if !reflect.DeepEqual(res, []uint64{11, 22, 33}) {
t.Error("Expected equal value to return from cache")
}
assert.DeepEqual(t, []uint64{11, 22, 33}, res)
}
func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
@@ -65,7 +51,7 @@ func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
c.AddPersistentCommittee(pubkey[:], []uint64{uint64(i)}, 0)
}
for i := 0; i < 20; i++ {
for i := uint64(0); i < 20; i++ {
pubkey := [48]byte{byte(i)}
idxs, ok, _ := c.GetPersistentSubnets(pubkey[:])
@@ -73,12 +59,8 @@ func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
t.Errorf("Couldn't find entry in cache for pubkey %#x", pubkey)
continue
}
if int(idxs[0]) != i {
t.Fatalf("Wanted index of %d but got %d", i, idxs[0])
}
require.Equal(t, i, idxs[0])
}
coms := c.GetAllSubnets()
if len(coms) != 20 {
t.Errorf("Number of committees is not %d but is %d", 20, len(coms))
}
assert.Equal(t, 20, len(coms))
}

View File

@@ -4,8 +4,17 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_library(
name = "go_default_library",
srcs = [
"block.go",
"block_operations.go",
"attestation.go",
"attester_slashing.go",
"deposit.go",
"eth1_data.go",
"exit.go",
"genesis.go",
"header.go",
"log.go",
"proposer_slashing.go",
"randao.go",
"signature.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks",
visibility = [
@@ -40,11 +49,17 @@ go_test(
name = "go_default_test",
size = "medium",
srcs = [
"attestation_test.go",
"attester_slashing_test.go",
"block_operations_fuzz_test.go",
"block_operations_test.go",
"block_regression_test.go",
"block_test.go",
"deposit_test.go",
"eth1_data_test.go",
"exit_test.go",
"genesis_test.go",
"header_test.go",
"proposer_slashing_test.go",
"randao_test.go",
],
embed = [":go_default_library"],
deps = [
@@ -52,6 +67,8 @@ go_test(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/aggregation:go_default_library",
"//shared/aggregation/attestations:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
@@ -64,6 +81,5 @@ go_test(
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
],
)

View File

@@ -0,0 +1,319 @@
package blocks
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
// ProcessAttestations applies processing operations to a block's inner attestation
// records.
func ProcessAttestations(
ctx context.Context,
beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*stateTrie.BeaconState, error) {
var err error
for idx, attestation := range body.Attestations {
beaconState, err = ProcessAttestation(ctx, beaconState, attestation)
if err != nil {
return nil, errors.Wrapf(err, "could not verify attestation at index %d in block", idx)
}
}
return beaconState, nil
}
// ProcessAttestation verifies that an input attestation can pass through processing using the given beacon state.
//
// Spec pseudocode definition:
// def process_attestation(state: BeaconState, attestation: Attestation) -> None:
// data = attestation.data
// assert data.index < get_committee_count_at_slot(state, data.slot)
// assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
// assert data.target.epoch == compute_epoch_at_slot(data.slot)
// assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
//
// committee = get_beacon_committee(state, data.slot, data.index)
// assert len(attestation.aggregation_bits) == len(committee)
//
// pending_attestation = PendingAttestation(
// data=data,
// aggregation_bits=attestation.aggregation_bits,
// inclusion_delay=state.slot - data.slot,
// proposer_index=get_beacon_proposer_index(state),
// )
//
// if data.target.epoch == get_current_epoch(state):
// assert data.source == state.current_justified_checkpoint
// state.current_epoch_attestations.append(pending_attestation)
// else:
// assert data.source == state.previous_justified_checkpoint
// state.previous_epoch_attestations.append(pending_attestation)
//
// # Check signature
// assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
func ProcessAttestation(
ctx context.Context,
beaconState *stateTrie.BeaconState,
att *ethpb.Attestation,
) (*stateTrie.BeaconState, error) {
beaconState, err := ProcessAttestationNoVerify(ctx, beaconState, att)
if err != nil {
return nil, err
}
return beaconState, VerifyAttestation(ctx, beaconState, att)
}
// ProcessAttestationsNoVerify applies processing operations to a block's inner attestation
// records. The only difference is that attestation signatures are not verified.
func ProcessAttestationsNoVerify(
ctx context.Context,
beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*stateTrie.BeaconState, error) {
var err error
for idx, attestation := range body.Attestations {
beaconState, err = ProcessAttestationNoVerify(ctx, beaconState, attestation)
if err != nil {
return nil, errors.Wrapf(err, "could not verify attestation at index %d in block", idx)
}
}
return beaconState, nil
}
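
The two batch entry points differ only in whether each attestation's signature is checked; a caller that has already verified signatures in bulk (for example via VerifyAttestations further down) can take the NoVerify path. A hedged dispatch sketch:

func processBlockAtts(ctx context.Context, st *stateTrie.BeaconState, body *ethpb.BeaconBlockBody, sigsChecked bool) (*stateTrie.BeaconState, error) {
	if sigsChecked {
		// Signatures were already batch-verified elsewhere; skip per-attestation checks.
		return ProcessAttestationsNoVerify(ctx, st, body)
	}
	return ProcessAttestations(ctx, st, body)
}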
// ProcessAttestationNoVerify processes the attestation without verifying the attestation signature. This
// method is used to validate attestations whose signatures have already been verified.
func ProcessAttestationNoVerify(
ctx context.Context,
beaconState *stateTrie.BeaconState,
att *ethpb.Attestation,
) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerify")
defer span.End()
if att == nil || att.Data == nil || att.Data.Target == nil {
return nil, errors.New("nil attestation data target")
}
currEpoch := helpers.SlotToEpoch(beaconState.Slot())
var prevEpoch uint64
if currEpoch == 0 {
prevEpoch = 0
} else {
prevEpoch = currEpoch - 1
}
data := att.Data
if data.Target.Epoch != prevEpoch && data.Target.Epoch != currEpoch {
return nil, fmt.Errorf(
"expected target epoch (%d) to be the previous epoch (%d) or the current epoch (%d)",
data.Target.Epoch,
prevEpoch,
currEpoch,
)
}
if helpers.SlotToEpoch(data.Slot) != data.Target.Epoch {
return nil, fmt.Errorf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(data.Slot), data.Target.Epoch)
}
s := att.Data.Slot
minInclusionCheck := s+params.BeaconConfig().MinAttestationInclusionDelay <= beaconState.Slot()
epochInclusionCheck := beaconState.Slot() <= s+params.BeaconConfig().SlotsPerEpoch
if !minInclusionCheck {
return nil, fmt.Errorf(
"attestation slot %d + inclusion delay %d > state slot %d",
s,
params.BeaconConfig().MinAttestationInclusionDelay,
beaconState.Slot(),
)
}
if !epochInclusionCheck {
return nil, fmt.Errorf(
"state slot %d > attestation slot %d + SLOTS_PER_EPOCH %d",
beaconState.Slot(),
s,
params.BeaconConfig().SlotsPerEpoch,
)
}
if err := helpers.VerifyAttestationBitfieldLengths(beaconState, att); err != nil {
return nil, errors.Wrap(err, "could not verify attestation bitfields")
}
proposerIndex, err := helpers.BeaconProposerIndex(beaconState)
if err != nil {
return nil, err
}
pendingAtt := &pb.PendingAttestation{
Data: data,
AggregationBits: att.AggregationBits,
InclusionDelay: beaconState.Slot() - s,
ProposerIndex: proposerIndex,
}
var ffgSourceEpoch uint64
var ffgSourceRoot []byte
var ffgTargetEpoch uint64
if data.Target.Epoch == currEpoch {
ffgSourceEpoch = beaconState.CurrentJustifiedCheckpoint().Epoch
ffgSourceRoot = beaconState.CurrentJustifiedCheckpoint().Root
ffgTargetEpoch = currEpoch
if err := beaconState.AppendCurrentEpochAttestations(pendingAtt); err != nil {
return nil, err
}
} else {
ffgSourceEpoch = beaconState.PreviousJustifiedCheckpoint().Epoch
ffgSourceRoot = beaconState.PreviousJustifiedCheckpoint().Root
ffgTargetEpoch = prevEpoch
if err := beaconState.AppendPreviousEpochAttestations(pendingAtt); err != nil {
return nil, err
}
}
if data.Source.Epoch != ffgSourceEpoch {
return nil, fmt.Errorf("expected source epoch %d, received %d", ffgSourceEpoch, data.Source.Epoch)
}
if !bytes.Equal(data.Source.Root, ffgSourceRoot) {
return nil, fmt.Errorf("expected source root %#x, received %#x", ffgSourceRoot, data.Source.Root)
}
if data.Target.Epoch != ffgTargetEpoch {
return nil, fmt.Errorf("expected target epoch %d, received %d", ffgTargetEpoch, data.Target.Epoch)
}
return beaconState, nil
}
// VerifyIndexedAttestation determines the validity of an indexed attestation.
//
// Spec pseudocode definition:
// def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
// """
// Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature.
// """
// # Verify indices are sorted and unique
// indices = indexed_attestation.attesting_indices
// if len(indices) == 0 or not indices == sorted(set(indices)):
// return False
// # Verify aggregate signature
// pubkeys = [state.validators[i].pubkey for i in indices]
// domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch)
// signing_root = compute_signing_root(indexed_attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature)
func VerifyIndexedAttestation(ctx context.Context, beaconState *stateTrie.BeaconState, indexedAtt *ethpb.IndexedAttestation) error {
ctx, span := trace.StartSpan(ctx, "core.VerifyIndexedAttestation")
defer span.End()
if err := attestationutil.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
return err
}
domain, err := helpers.Domain(beaconState.Fork(), indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
if err != nil {
return err
}
indices := indexedAtt.AttestingIndices
pubkeys := []bls.PublicKey{}
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := beaconState.PubkeyAtIndex(indices[i])
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
if err != nil {
return errors.Wrap(err, "could not deserialize validator public key")
}
pubkeys = append(pubkeys, pk)
}
return attestationutil.VerifyIndexedAttestationSig(ctx, indexedAtt, pubkeys, domain)
}
// VerifyAttestation converts an attestation into an indexed attestation and verifies
// the signature in that attestation.
func VerifyAttestation(ctx context.Context, beaconState *stateTrie.BeaconState, att *ethpb.Attestation) error {
if att == nil || att.Data == nil {
return fmt.Errorf("nil or missing attestation data: %v", att)
}
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return err
}
indexedAtt := attestationutil.ConvertToIndexed(ctx, att, committee)
return VerifyIndexedAttestation(ctx, beaconState, indexedAtt)
}
// VerifyAttestations will verify the signatures of the provided attestations. This method performs
// a single BLS verification call to verify the signatures of all of the provided attestations. All
// of the provided attestations must have valid signatures or this method will return an error.
// This method does not determine which attestation signature is invalid, only that one or more
// attestation signatures were not valid.
func VerifyAttestations(ctx context.Context, beaconState *stateTrie.BeaconState, atts []*ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "core.VerifyAttestations")
defer span.End()
span.AddAttributes(trace.Int64Attribute("attestations", int64(len(atts))))
if len(atts) == 0 {
return nil
}
fork := beaconState.Fork()
gvr := beaconState.GenesisValidatorRoot()
dt := params.BeaconConfig().DomainBeaconAttester
// Split attestations by fork. Note: the signature domain will differ based on the fork.
var preForkAtts []*ethpb.Attestation
var postForkAtts []*ethpb.Attestation
for _, a := range atts {
if helpers.SlotToEpoch(a.Data.Slot) < fork.Epoch {
preForkAtts = append(preForkAtts, a)
} else {
postForkAtts = append(postForkAtts, a)
}
}
// Check attestations from before the fork.
if fork.Epoch > 0 { // Check to prevent underflow.
prevDomain, err := helpers.Domain(fork, fork.Epoch-1, dt, gvr)
if err != nil {
return err
}
if err := verifyAttestationsWithDomain(ctx, beaconState, preForkAtts, prevDomain); err != nil {
return err
}
} else if len(preForkAtts) > 0 {
// This is a sanity check that preForkAtts were not ignored when fork.Epoch == 0. This
// condition is not possible, but it doesn't hurt to check anyway.
return errors.New("some attestations were not verified from previous fork before genesis")
}
// Then check attestations from after the fork.
currDomain, err := helpers.Domain(fork, fork.Epoch, dt, gvr)
if err != nil {
return err
}
return verifyAttestationsWithDomain(ctx, beaconState, postForkAtts, currDomain)
}
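The pre/post-fork split above matters because the signing domain folds in the fork version, so identical attestation data produces different signing roots on either side of a fork. A hedged sketch using the helpers already imported in this package (assumes fork versions that actually differ and a nonzero fork epoch, per the underflow guard above):
// exampleForkDomainSplit returns true when the previous and current fork
// domains yield different signing roots for the same attestation data.
func exampleForkDomainSplit(st *stateTrie.BeaconState, data *ethpb.AttestationData) (bool, error) {
    f := st.Fork()
    dt := params.BeaconConfig().DomainBeaconAttester
    gvr := st.GenesisValidatorRoot()
    prevDomain, err := helpers.Domain(f, f.Epoch-1, dt, gvr) // pre-fork version
    if err != nil {
        return false, err
    }
    currDomain, err := helpers.Domain(f, f.Epoch, dt, gvr) // post-fork version
    if err != nil {
        return false, err
    }
    prevRoot, err := helpers.ComputeSigningRoot(data, prevDomain)
    if err != nil {
        return false, err
    }
    currRoot, err := helpers.ComputeSigningRoot(data, currDomain)
    if err != nil {
        return false, err
    }
    return prevRoot != currRoot, nil
}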
// Inner method to verify attestations. This abstraction allows for the domain to be provided as an
// argument.
func verifyAttestationsWithDomain(ctx context.Context, beaconState *stateTrie.BeaconState, atts []*ethpb.Attestation, domain []byte) error {
if len(atts) == 0 {
return nil
}
set, err := createAttestationSignatureSet(ctx, beaconState, atts, domain)
if err != nil {
return err
}
verify, err := bls.VerifyMultipleSignatures(set.Signatures, set.Messages, set.PublicKeys)
if err != nil {
return errors.Errorf("got error in multiple verification: %v", err)
}
if !verify {
return errors.New("one or more attestation signatures did not verify")
}
return nil
}
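For reference, the batch call above checks many independent (signature, message, public key) triples in one pass; it is cheaper than verifying each pair separately, but it reports only a single pass/fail bit, which is why the error cannot name the offending attestation. A minimal standalone sketch with the same bls helpers used in this package:
// exampleBatchVerify signs three distinct messages with three random keys
// and verifies all of them with a single VerifyMultipleSignatures call.
func exampleBatchVerify() (bool, error) {
    n := 3
    sigs := make([]bls.Signature, n)
    msgs := make([][32]byte, n)
    pks := make([]bls.PublicKey, n)
    for i := 0; i < n; i++ {
        sk := bls.RandKey()
        copy(msgs[i][:], fmt.Sprintf("message-%d", i))
        pks[i] = sk.PublicKey()
        sigs[i] = sk.Sign(msgs[i][:])
    }
    return bls.VerifyMultipleSignatures(sigs, msgs, pks)
}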


@@ -0,0 +1,959 @@
package blocks_test
import (
"context"
"fmt"
"reflect"
"strings"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/aggregation"
attaggregation "github.com/prysmaticlabs/prysm/shared/aggregation/attestations"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
attestations := []*ethpb.Attestation{
{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 0},
Slot: 5,
},
},
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Attestations: attestations,
},
}
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
want := fmt.Sprintf(
"attestation slot %d + inclusion delay %d > state slot %d",
attestations[0].Data.Slot,
params.BeaconConfig().MinAttestationInclusionDelay,
beaconState.Slot(),
)
_, err := blocks.ProcessAttestations(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
Target: &ethpb.Checkpoint{Epoch: 0}}}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{att},
},
}
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch*4 + params.BeaconConfig().MinAttestationInclusionDelay)
if err != nil {
t.Fatal(err)
}
pfc := beaconState.PreviousJustifiedCheckpoint()
pfc.Root = []byte("hello-world")
if err := beaconState.SetPreviousJustifiedCheckpoint(pfc); err != nil {
t.Fatal(err)
}
if err := beaconState.SetPreviousEpochAttestations([]*pb.PendingAttestation{}); err != nil {
t.Fatal(err)
}
want := fmt.Sprintf(
"expected target epoch (%d) to be the previous epoch (%d) or the current epoch (%d)",
att.Data.Target.Epoch,
helpers.PrevEpoch(beaconState),
helpers.CurrentEpoch(beaconState),
)
_, err = blocks.ProcessAttestations(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
attestations := []*ethpb.Attestation{
{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 0},
Source: &ethpb.Checkpoint{Epoch: 1},
},
AggregationBits: aggBits,
},
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Attestations: attestations,
},
}
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
if err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay); err != nil {
t.Fatal(err)
}
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = []byte("hello-world")
if err := beaconState.SetCurrentJustifiedCheckpoint(cfc); err != nil {
t.Fatal(err)
}
if err := beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}); err != nil {
t.Fatal(err)
}
want := fmt.Sprintf(
"expected source epoch %d, received %d",
helpers.CurrentEpoch(beaconState),
attestations[0].Data.Source.Epoch,
)
_, err := blocks.ProcessAttestations(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
block.Body.Attestations[0].Data.Source.Epoch = helpers.CurrentEpoch(beaconState)
block.Body.Attestations[0].Data.Source.Root = []byte{}
want = fmt.Sprintf(
"expected source root %#x, received %#x",
beaconState.CurrentJustifiedCheckpoint().Root,
attestations[0].Data.Source.Root,
)
_, err = blocks.ProcessAttestations(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
attestations := []*ethpb.Attestation{
{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
Target: &ethpb.Checkpoint{Epoch: 1},
Slot: params.BeaconConfig().SlotsPerEpoch,
},
AggregationBits: aggBits,
},
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Attestations: attestations,
},
}
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch + params.BeaconConfig().MinAttestationInclusionDelay)
if err != nil {
t.Fatal(err)
}
pfc := beaconState.PreviousJustifiedCheckpoint()
pfc.Root = []byte("hello-world")
if err := beaconState.SetPreviousJustifiedCheckpoint(pfc); err != nil {
t.Fatal(err)
}
if err := beaconState.SetPreviousEpochAttestations([]*pb.PendingAttestation{}); err != nil {
t.Fatal(err)
}
want := fmt.Sprintf(
"expected source epoch %d, received %d",
helpers.PrevEpoch(beaconState),
attestations[0].Data.Source.Epoch,
)
_, err = blocks.ProcessAttestations(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
block.Body.Attestations[0].Data.Source.Epoch = helpers.PrevEpoch(beaconState)
block.Body.Attestations[0].Data.Target.Epoch = helpers.CurrentEpoch(beaconState)
block.Body.Attestations[0].Data.Source.Root = []byte{}
want = fmt.Sprintf(
"expected source root %#x, received %#x",
beaconState.CurrentJustifiedCheckpoint().Root,
attestations[0].Data.Source.Root,
)
_, err = blocks.ProcessAttestations(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
aggBits := bitfield.NewBitlist(4)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
Target: &ethpb.Checkpoint{Epoch: 0}},
AggregationBits: aggBits,
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{att},
},
}
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
if err != nil {
t.Fatal(err)
}
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = []byte("hello-world")
if err := beaconState.SetCurrentJustifiedCheckpoint(cfc); err != nil {
t.Fatal(err)
}
if err := beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}); err != nil {
t.Fatal(err)
}
expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
_, err = blocks.ProcessAttestations(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), expected) {
t.Errorf("Did not receive wanted error")
}
}
func TestProcessAttestations_OK(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
},
AggregationBits: aggBits,
}
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
if err := beaconState.SetCurrentJustifiedCheckpoint(cfc); err != nil {
t.Fatal(err)
}
if err := beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}); err != nil {
t.Fatal(err)
}
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
t.Fatal(err)
}
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, domain)
if err != nil {
t.Error(err)
}
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sig := privKeys[indice].Sign(hashTreeRoot[:])
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{att},
},
}
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
if err != nil {
t.Fatal(err)
}
if _, err := blocks.ProcessAttestations(context.Background(), beaconState, block.Body); err != nil {
t.Errorf("Unexpected error: %v", err)
}
}
func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
data := &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
Target: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
}
aggBits1 := bitfield.NewBitlist(4)
aggBits1.SetBitAt(0, true)
aggBits1.SetBitAt(1, true)
aggBits1.SetBitAt(2, true)
att1 := &ethpb.Attestation{
Data: data,
AggregationBits: aggBits1,
}
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = []byte("hello-world")
if err := beaconState.SetCurrentJustifiedCheckpoint(cfc); err != nil {
t.Fatal(err)
}
if err := beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}); err != nil {
t.Fatal(err)
}
committee, err := helpers.BeaconCommitteeFromState(beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
if err != nil {
t.Fatal(err)
}
attestingIndices1 := attestationutil.AttestingIndices(att1.AggregationBits, committee)
hashTreeRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
if err != nil {
t.Error(err)
}
sigs := make([]bls.Signature, len(attestingIndices1))
for i, indice := range attestingIndices1 {
sig := privKeys[indice].Sign(hashTreeRoot[:])
sigs[i] = sig
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
aggBits2 := bitfield.NewBitlist(4)
aggBits2.SetBitAt(1, true)
aggBits2.SetBitAt(2, true)
aggBits2.SetBitAt(3, true)
att2 := &ethpb.Attestation{
Data: data,
AggregationBits: aggBits2,
}
committee, err = helpers.BeaconCommitteeFromState(beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
if err != nil {
t.Fatal(err)
}
attestingIndices2 := attestationutil.AttestingIndices(att2.AggregationBits, committee)
hashTreeRoot, err = helpers.ComputeSigningRoot(data, domain)
if err != nil {
t.Error(err)
}
sigs = make([]bls.Signature, len(attestingIndices2))
for i, indice := range attestingIndices2 {
sig := privKeys[indice].Sign(hashTreeRoot[:])
sigs[i] = sig
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
if _, err = attaggregation.AggregatePair(att1, att2); err != aggregation.ErrBitsOverlap {
t.Error("Did not receive wanted error")
}
}
func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 300)
domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
data := &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
}
aggBits1 := bitfield.NewBitlist(9)
aggBits1.SetBitAt(0, true)
aggBits1.SetBitAt(1, true)
att1 := &ethpb.Attestation{
Data: data,
AggregationBits: aggBits1,
}
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
if err := beaconState.SetCurrentJustifiedCheckpoint(cfc); err != nil {
t.Fatal(err)
}
if err := beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}); err != nil {
t.Fatal(err)
}
committee, err := helpers.BeaconCommitteeFromState(beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
if err != nil {
t.Fatal(err)
}
attestingIndices1 := attestationutil.AttestingIndices(att1.AggregationBits, committee)
hashTreeRoot, err := helpers.ComputeSigningRoot(data, domain)
if err != nil {
t.Error(err)
}
sigs := make([]bls.Signature, len(attestingIndices1))
for i, indice := range attestingIndices1 {
sig := privKeys[indice].Sign(hashTreeRoot[:])
sigs[i] = sig
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
aggBits2 := bitfield.NewBitlist(9)
aggBits2.SetBitAt(2, true)
aggBits2.SetBitAt(3, true)
att2 := &ethpb.Attestation{
Data: data,
AggregationBits: aggBits2,
}
committee, err = helpers.BeaconCommitteeFromState(beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
if err != nil {
t.Fatal(err)
}
attestingIndices2 := attestationutil.AttestingIndices(att2.AggregationBits, committee)
hashTreeRoot, err = helpers.ComputeSigningRoot(data, domain)
if err != nil {
t.Error(err)
}
sigs = make([]bls.Signature, len(attestingIndices2))
for i, indice := range attestingIndices2 {
sig := privKeys[indice].Sign(hashTreeRoot[:])
sigs[i] = sig
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
aggregatedAtt, err := attaggregation.AggregatePair(att1, att2)
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{aggregatedAtt},
},
}
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
if err != nil {
t.Fatal(err)
}
if _, err := blocks.ProcessAttestations(context.Background(), beaconState, block.Body); err != nil {
t.Error(err)
}
}
func TestProcessAttestationsNoVerify_IncorrectSlotTargetEpoch(t *testing.T) {
beaconState, _ := testutil.DeterministicGenesisState(t, 1)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Target: &ethpb.Checkpoint{},
},
}
wanted := fmt.Sprintf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(att.Data.Slot), att.Data.Target.Epoch)
_, err := blocks.ProcessAttestationNoVerify(context.TODO(), beaconState, att)
if err == nil || err.Error() != wanted {
t.Error("Did not get wanted error")
}
}
func TestProcessAttestationsNoVerify_OK(t *testing.T) {
// Attestation with an empty signature
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(1, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0},
},
AggregationBits: aggBits,
}
zeroSig := [96]byte{}
att.Signature = zeroSig[:]
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
if err != nil {
t.Fatal(err)
}
ckp := beaconState.CurrentJustifiedCheckpoint()
copy(ckp.Root, "hello-world")
if err := beaconState.SetCurrentJustifiedCheckpoint(ckp); err != nil {
t.Fatal(err)
}
if err := beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}); err != nil {
t.Fatal(err)
}
if _, err := blocks.ProcessAttestationNoVerify(context.TODO(), beaconState, att); err != nil {
t.Errorf("Unexpected error: %v", err)
}
}
func TestConvertToIndexed_OK(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 5,
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
tests := []struct {
aggregationBitfield bitfield.Bitlist
wantedAttestingIndices []uint64
}{
{
aggregationBitfield: bitfield.Bitlist{0x07},
wantedAttestingIndices: []uint64{43, 47},
},
{
aggregationBitfield: bitfield.Bitlist{0x03},
wantedAttestingIndices: []uint64{47},
},
{
aggregationBitfield: bitfield.Bitlist{0x01},
wantedAttestingIndices: []uint64{},
},
}
var sig [96]byte
copy(sig[:], "signed")
attestation := &ethpb.Attestation{
Signature: sig[:],
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0},
Target: &ethpb.Checkpoint{Epoch: 0},
},
}
for _, tt := range tests {
attestation.AggregationBits = tt.aggregationBitfield
wanted := &ethpb.IndexedAttestation{
AttestingIndices: tt.wantedAttestingIndices,
Data: attestation.Data,
Signature: attestation.Signature,
}
committee, err := helpers.BeaconCommitteeFromState(state, attestation.Data.Slot, attestation.Data.CommitteeIndex)
if err != nil {
t.Fatal(err)
}
ia := attestationutil.ConvertToIndexed(context.Background(), attestation, committee)
if !reflect.DeepEqual(wanted, ia) {
t.Error("convert attestation to indexed attestation didn't result as wanted")
}
}
}
func TestVerifyIndexedAttestation_OK(t *testing.T) {
numOfValidators := 4 * params.BeaconConfig().SlotsPerEpoch
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := testutil.DeterministicDepositsAndKeys(numOfValidators)
if err != nil {
t.Fatal(err)
}
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
}
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 5,
Validators: validators,
Fork: &pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
tests := []struct {
attestation *ethpb.IndexedAttestation
}{
{attestation: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Epoch: 2,
},
},
AttestingIndices: []uint64{1},
}},
{attestation: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Epoch: 1,
},
},
AttestingIndices: []uint64{47, 99, 101},
}},
{attestation: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Epoch: 4,
},
},
AttestingIndices: []uint64{21, 72},
}},
{attestation: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Epoch: 7,
},
},
AttestingIndices: []uint64{100, 121, 122},
}},
}
for _, tt := range tests {
domain, err := helpers.Domain(state.Fork(), tt.attestation.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
root, err := helpers.ComputeSigningRoot(tt.attestation.Data, domain)
if err != nil {
t.Error(err)
}
var sig []bls.Signature
for _, idx := range tt.attestation.AttestingIndices {
validatorSig := keys[idx].Sign(root[:])
sig = append(sig, validatorSig)
}
aggSig := bls.AggregateSignatures(sig)
marshalledSig := aggSig.Marshal()
tt.attestation.Signature = marshalledSig
err = blocks.VerifyIndexedAttestation(context.Background(), state, tt.attestation)
if err != nil {
t.Errorf("failed to verify indexed attestation: %v", err)
}
}
}
func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
indexedAtt1 := &ethpb.IndexedAttestation{
AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+5),
}
for i := uint64(0); i < params.BeaconConfig().MaxValidatorsPerCommittee+5; i++ {
indexedAtt1.AttestingIndices[i] = i
indexedAtt1.Data = &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Epoch: i,
},
}
}
want := "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE"
err := blocks.VerifyIndexedAttestation(context.Background(), &stateTrie.BeaconState{}, indexedAtt1)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected verification to fail return false, received: %v", err)
}
}
func TestVerifyAttestations_VerifiesMultipleAttestations(t *testing.T) {
ctx := context.Background()
numOfValidators := 4 * params.BeaconConfig().SlotsPerEpoch
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := testutil.DeterministicDepositsAndKeys(numOfValidators)
if err != nil {
t.Fatal(err)
}
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
}
}
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 5,
Validators: validators,
Fork: &pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
comm1, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 0 /*committeeIndex*/)
if err != nil {
t.Fatal(err)
}
att1 := &ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
},
Signature: nil,
}
domain, err := helpers.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
root, err := helpers.ComputeSigningRoot(att1.Data, domain)
if err != nil {
t.Fatal(err)
}
var sigs []bls.Signature
for i, u := range comm1 {
att1.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
comm2, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 1 /*committeeIndex*/)
if err != nil {
t.Fatal(err)
}
att2 := &ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
Signature: nil,
}
root, err = helpers.ComputeSigningRoot(att2.Data, domain)
if err != nil {
t.Fatal(err)
}
sigs = nil
for i, u := range comm2 {
att2.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
if err := blocks.VerifyAttestations(ctx, st, []*ethpb.Attestation{att1, att2}); err != nil {
t.Fatal(err)
}
}
func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
// In this test, att1 is from the prior fork and att2 is from the new fork.
ctx := context.Background()
numOfValidators := 4 * params.BeaconConfig().SlotsPerEpoch
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := testutil.DeterministicDepositsAndKeys(numOfValidators)
if err != nil {
t.Fatal(err)
}
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
}
}
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 35,
Validators: validators,
Fork: &pb.Fork{
Epoch: 1,
CurrentVersion: []byte{0, 1, 2, 3},
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
comm1, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 0 /*committeeIndex*/)
if err != nil {
t.Fatal(err)
}
att1 := &ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
},
Signature: nil,
}
prevDomain, err := helpers.Domain(st.Fork(), st.Fork().Epoch-1, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
root, err := helpers.ComputeSigningRoot(att1.Data, prevDomain)
if err != nil {
t.Fatal(err)
}
var sigs []bls.Signature
for i, u := range comm1 {
att1.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
comm2, err := helpers.BeaconCommitteeFromState(st, 1*params.BeaconConfig().SlotsPerEpoch+1 /*slot*/, 1 /*committeeIndex*/)
if err != nil {
t.Fatal(err)
}
att2 := &ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1*params.BeaconConfig().SlotsPerEpoch + 1,
CommitteeIndex: 1,
},
Signature: nil,
}
currDomain, err := helpers.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
root, err = helpers.ComputeSigningRoot(att2.Data, currDomain)
if err != nil {
t.Fatal(err)
}
sigs = nil
for i, u := range comm2 {
att2.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
if err := blocks.VerifyAttestations(ctx, st, []*ethpb.Attestation{att1, att2}); err != nil {
t.Fatal(err)
}
}
func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing.T) {
ctx := context.Background()
numOfValidators := 4 * params.BeaconConfig().SlotsPerEpoch
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := testutil.DeterministicDepositsAndKeys(numOfValidators)
if err != nil {
t.Fatal(err)
}
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
}
}
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 5,
Validators: validators,
Fork: &pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
comm1, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 0 /*committeeIndex*/)
if err != nil {
t.Fatal(err)
}
att1 := &ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
},
Signature: nil,
}
domain, err := helpers.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
root, err := helpers.ComputeSigningRoot(att1.Data, domain)
if err != nil {
t.Fatal(err)
}
var sigs []bls.Signature
for i, u := range comm1 {
att1.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
comm2, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 1 /*committeeIndex*/)
if err != nil {
t.Fatal(err)
}
att2 := &ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
Signature: nil,
}
root, err = helpers.ComputeSigningRoot(att2.Data, domain)
if err != nil {
t.Fatal(err)
}
sigs = nil
for i, u := range comm2 {
att2.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
set, err := blocks.AttestationSignatureSet(ctx, st, []*ethpb.Attestation{att1, att2})
if err != nil {
t.Fatal(err)
}
verified, err := set.Verify()
if err != nil {
t.Fatal(err)
}
if !verified {
t.Error("Multiple signatures were unable to be verified.")
}
}


@@ -0,0 +1,129 @@
package blocks
import (
"context"
"sort"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
// ProcessAttesterSlashings is one of the operations performed
// on each processed beacon block to slash attesters based on
// Casper FFG slashing conditions if any slashable events occurred.
//
// Spec pseudocode definition:
// def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None:
// attestation_1 = attester_slashing.attestation_1
// attestation_2 = attester_slashing.attestation_2
// assert is_slashable_attestation_data(attestation_1.data, attestation_2.data)
// assert is_valid_indexed_attestation(state, attestation_1)
// assert is_valid_indexed_attestation(state, attestation_2)
//
// slashed_any = False
// indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)
// for index in sorted(indices):
// if is_slashable_validator(state.validators[index], get_current_epoch(state)):
// slash_validator(state, index)
// slashed_any = True
// assert slashed_any
func ProcessAttesterSlashings(
ctx context.Context,
beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*stateTrie.BeaconState, error) {
for idx, slashing := range body.AttesterSlashings {
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrapf(err, "could not verify attester slashing %d", idx)
}
slashableIndices := slashableAttesterIndices(slashing)
sort.SliceStable(slashableIndices, func(i, j int) bool {
return slashableIndices[i] < slashableIndices[j]
})
currentEpoch := helpers.SlotToEpoch(beaconState.Slot())
var err error
var slashedAny bool
var val *ethpb.Validator
for _, validatorIndex := range slashableIndices {
val, err = beaconState.ValidatorAtIndex(validatorIndex)
if err != nil {
return nil, err
}
if helpers.IsSlashableValidator(val, currentEpoch) {
beaconState, err = v.SlashValidator(beaconState, validatorIndex)
if err != nil {
return nil, errors.Wrapf(err, "could not slash validator index %d",
validatorIndex)
}
slashedAny = true
}
}
if !slashedAny {
return nil, errors.New("unable to slash any validator despite confirmed attester slashing")
}
}
return beaconState, nil
}
// VerifyAttesterSlashing validates the attestation data in both attestations in the slashing object.
func VerifyAttesterSlashing(ctx context.Context, beaconState *stateTrie.BeaconState, slashing *ethpb.AttesterSlashing) error {
if slashing == nil {
return errors.New("nil slashing")
}
if slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
return errors.New("nil attestation")
}
if slashing.Attestation_1.Data == nil || slashing.Attestation_2.Data == nil {
return errors.New("nil attestation data")
}
att1 := slashing.Attestation_1
att2 := slashing.Attestation_2
data1 := att1.Data
data2 := att2.Data
if !IsSlashableAttestationData(data1, data2) {
return errors.New("attestations are not slashable")
}
if err := VerifyIndexedAttestation(ctx, beaconState, att1); err != nil {
return errors.Wrap(err, "could not validate indexed attestation")
}
if err := VerifyIndexedAttestation(ctx, beaconState, att2); err != nil {
return errors.Wrap(err, "could not validate indexed attestation")
}
return nil
}
// IsSlashableAttestationData verifies a slashing against the Casper Proof of Stake FFG rules.
//
// Spec pseudocode definition:
// def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool:
// """
// Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG rules.
// """
// return (
// # Double vote
// (data_1 != data_2 and data_1.target.epoch == data_2.target.epoch) or
// # Surround vote
// (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch)
// )
func IsSlashableAttestationData(data1 *ethpb.AttestationData, data2 *ethpb.AttestationData) bool {
if data1 == nil || data2 == nil || data1.Target == nil || data2.Target == nil || data1.Source == nil || data2.Source == nil {
return false
}
isDoubleVote := !attestationutil.AttDataIsEqual(data1, data2) && data1.Target.Epoch == data2.Target.Epoch
isSurroundVote := data1.Source.Epoch < data2.Source.Epoch && data2.Target.Epoch < data1.Target.Epoch
return isDoubleVote || isSurroundVote
}
func slashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
if slashing == nil || slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
return nil
}
indices1 := slashing.Attestation_1.AttestingIndices
indices2 := slashing.Attestation_2.AttestingIndices
return sliceutil.IntersectionUint64(indices1, indices2)
}
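As a worked illustration with hypothetical indices: if Attestation_1 was signed by validators {1, 2, 3} and Attestation_2 by {2, 3, 4}, only validators {2, 3} attested to both conflicting messages, so only they are candidates for slashing:
// exampleSlashableIntersection sketches the intersection semantics used
// above. The result's ordering is not guaranteed, which is why
// ProcessAttesterSlashings sorts the indices before slashing.
func exampleSlashableIntersection() []uint64 {
    indices1 := []uint64{1, 2, 3}
    indices2 := []uint64{2, 3, 4}
    return sliceutil.IntersectionUint64(indices1, indices2) // contains 2 and 3
}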


@@ -0,0 +1,201 @@
package blocks_test
import (
"context"
"fmt"
"strings"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestSlashableAttestationData_CanSlash(t *testing.T) {
att1 := &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 1},
Source: &ethpb.Checkpoint{Root: []byte{'A'}},
}
att2 := &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 1},
Source: &ethpb.Checkpoint{Root: []byte{'B'}},
}
if !blocks.IsSlashableAttestationData(att1, att2) {
t.Error("atts should have been slashable")
}
att1.Target.Epoch = 4
att1.Source.Epoch = 2
att2.Source.Epoch = 3
if !blocks.IsSlashableAttestationData(att1, att2) {
t.Error("atts should have been slashable")
}
}
func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
slashings := []*ethpb.AttesterSlashing{
{
Attestation_1: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0},
Target: &ethpb.Checkpoint{Epoch: 0},
},
},
Attestation_2: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
Target: &ethpb.Checkpoint{Epoch: 1},
},
},
},
}
registry := []*ethpb.Validator{}
currentSlot := uint64(0)
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Slot: currentSlot,
})
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: slashings,
},
}
want := fmt.Sprint("attestations are not slashable")
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
registry := []*ethpb.Validator{}
currentSlot := uint64(0)
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Slot: currentSlot,
})
if err != nil {
t.Fatal(err)
}
slashings := []*ethpb.AttesterSlashing{
{
Attestation_1: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
Target: &ethpb.Checkpoint{Epoch: 0},
},
AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
},
Attestation_2: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0},
Target: &ethpb.Checkpoint{Epoch: 0},
},
AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
},
},
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: slashings,
},
}
want := fmt.Sprint("validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE")
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
for _, vv := range beaconState.Validators() {
vv.WithdrawableEpoch = 1 * params.BeaconConfig().SlotsPerEpoch
}
att1 := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
Target: &ethpb.Checkpoint{Epoch: 0},
},
AttestingIndices: []uint64{0, 1},
}
domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
signingRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
if err != nil {
t.Errorf("Could not get signing root of beacon block header: %v", err)
}
sig0 := privKeys[0].Sign(signingRoot[:])
sig1 := privKeys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att1.Signature = aggregateSig.Marshal()[:]
att2 := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0},
Target: &ethpb.Checkpoint{Epoch: 0},
},
AttestingIndices: []uint64{0, 1},
}
signingRoot, err = helpers.ComputeSigningRoot(att2.Data, domain)
if err != nil {
t.Errorf("Could not get signing root of beacon block header: %v", err)
}
sig0 = privKeys[0].Sign(signingRoot[:])
sig1 = privKeys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att2.Signature = aggregateSig.Marshal()[:]
slashings := []*ethpb.AttesterSlashing{
{
Attestation_1: att1,
Attestation_2: att2,
},
}
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
if err := beaconState.SetSlot(currentSlot); err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: slashings,
},
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, block.Body)
if err != nil {
t.Fatal(err)
}
newRegistry := newState.Validators()
// Given the intersection of slashable indices is [1], only validator
// at index 1 should be slashed and exited. We confirm this below.
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
t.Errorf(
"Expected validator at index 1's exit epoch to match %d, received %d",
beaconState.Validators()[1].ExitEpoch,
newRegistry[1].ExitEpoch,
)
}
}

File diff suppressed because it is too large


@@ -90,8 +90,8 @@ func TestFuzzareEth1DataEqual_10000(t *testing.T) {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(eth1data)
fuzzer.Fuzz(eth1data2)
-areEth1DataEqual(eth1data, eth1data2)
-areEth1DataEqual(eth1data, eth1data)
+AreEth1DataEqual(eth1data, eth1data2)
+AreEth1DataEqual(eth1data, eth1data)
}
}
@@ -323,7 +323,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
-r, err := ProcessDeposits(ctx, s, blockBody)
+r, err := ProcessDeposits(ctx, s, blockBody.Deposits)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
}
@@ -340,7 +340,7 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
-r, err := ProcessPreGenesisDeposit(ctx, s, deposit)
+r, err := ProcessPreGenesisDeposits(ctx, s, []*eth.Deposit{deposit})
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
@@ -356,7 +356,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
-r, err := ProcessDeposit(s, deposit)
+r, err := ProcessDeposit(s, deposit, true)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}

File diff suppressed because it is too large


@@ -50,7 +50,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
if err != nil {
t.Errorf("Could not get signing root of beacon block header: %v", err)
}
-aggSigs := []*bls.Signature{}
+aggSigs := []bls.Signature{}
for _, index := range setA {
sig := privKeys[index].Sign(signingRoot[:])
aggSigs = append(aggSigs, sig)
@@ -70,7 +70,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
if err != nil {
t.Errorf("Could not get signing root of beacon block header: %v", err)
}
-aggSigs = []*bls.Signature{}
+aggSigs = []bls.Signature{}
for _, index := range setB {
sig := privKeys[index].Sign(signingRoot[:])
aggSigs = append(aggSigs, sig)


@@ -0,0 +1,294 @@
package blocks
import (
"context"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/mathutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/trieutil"
)
// ProcessPreGenesisDeposits processes deposits for the beacon state before chainstart.
func ProcessPreGenesisDeposits(
ctx context.Context,
beaconState *stateTrie.BeaconState,
deposits []*ethpb.Deposit,
) (*stateTrie.BeaconState, error) {
var err error
beaconState, err = ProcessDeposits(ctx, beaconState, deposits)
if err != nil {
return nil, errors.Wrap(err, "could not process deposit")
}
for _, deposit := range deposits {
pubkey := deposit.Data.PublicKey
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
if !ok {
return beaconState, nil
}
balance, err := beaconState.BalanceAtIndex(index)
if err != nil {
return nil, err
}
validator, err := beaconState.ValidatorAtIndex(index)
if err != nil {
return nil, err
}
validator.EffectiveBalance = mathutil.Min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
if validator.EffectiveBalance ==
params.BeaconConfig().MaxEffectiveBalance {
validator.ActivationEligibilityEpoch = 0
validator.ActivationEpoch = 0
}
if err := beaconState.UpdateValidatorAtIndex(index, validator); err != nil {
return nil, err
}
}
return beaconState, nil
}
// ProcessDeposits is one of the operations performed on each processed
// beacon block to verify deposits from the Ethereum 1.0 deposit contract
// and onboard queued validators into the beacon chain.
//
// Spec pseudocode definition:
// For each deposit in block.body.deposits:
// process_deposit(state, deposit)
func ProcessDeposits(
ctx context.Context,
beaconState *stateTrie.BeaconState,
deposits []*ethpb.Deposit,
) (*stateTrie.BeaconState, error) {
var err error
domain, err := helpers.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
if err != nil {
return nil, err
}
// Attempt to verify all deposit signatures at once, if this fails then fall back to processing
// individual deposits with signature verification enabled.
var verifySignature bool
if err := verifyDepositDataWithDomain(ctx, deposits, domain); err != nil {
log.WithError(err).Debug("Failed to verify deposit data, verifying signatures individually")
verifySignature = true
}
for _, deposit := range deposits {
if deposit == nil || deposit.Data == nil {
return nil, errors.New("got a nil deposit in block")
}
beaconState, err = ProcessDeposit(beaconState, deposit, verifySignature)
if err != nil {
return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
}
}
return beaconState, nil
}
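The flow above is an optimistic batching pattern: a single aggregate check covers the common all-valid case, and only a failing batch pays the per-deposit verification cost, where (per the spec pseudocode below) an individually invalid signature skips that deposit rather than failing the block. A generic sketch of the pattern, with hypothetical verifyAll and handleOne helpers:
// verifyBatchThenEach tries one batched check first; per-item verification
// is enabled only when the batch fails, mirroring ProcessDeposits above.
func verifyBatchThenEach(items []int, verifyAll func([]int) error, handleOne func(item int, verify bool) error) error {
    verifyEach := verifyAll(items) != nil // one bad item poisons the whole batch
    for _, it := range items {
        if err := handleOne(it, verifyEach); err != nil {
            return err
        }
    }
    return nil
}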
// ProcessDeposit takes in a deposit object and inserts it into the registry
// as a new validator, or increases the balance of an existing validator.
//
// Spec pseudocode definition:
// def process_deposit(state: BeaconState, deposit: Deposit) -> None:
// # Verify the Merkle branch
// assert is_valid_merkle_branch(
// leaf=hash_tree_root(deposit.data),
// branch=deposit.proof,
// depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
// index=state.eth1_deposit_index,
// root=state.eth1_data.deposit_root,
// )
//
// # Deposits must be processed in order
// state.eth1_deposit_index += 1
//
// pubkey = deposit.data.pubkey
// amount = deposit.data.amount
// validator_pubkeys = [v.pubkey for v in state.validators]
// if pubkey not in validator_pubkeys:
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
// deposit_message = DepositMessage(
// pubkey=deposit.data.pubkey,
// withdrawal_credentials=deposit.data.withdrawal_credentials,
// amount=deposit.data.amount,
// )
// domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
// signing_root = compute_signing_root(deposit_message, domain)
// if not bls.Verify(pubkey, signing_root, deposit.data.signature):
// return
//
// # Add validator and balance entries
// state.validators.append(get_validator_from_deposit(state, deposit))
// state.balances.append(amount)
// else:
// # Increase balance by deposit amount
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
// increase_balance(state, index, amount)
func ProcessDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (*stateTrie.BeaconState, error) {
if err := verifyDeposit(beaconState, deposit); err != nil {
if deposit == nil || deposit.Data == nil {
return nil, err
}
return nil, errors.Wrapf(err, "could not verify deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
}
if err := beaconState.SetEth1DepositIndex(beaconState.Eth1DepositIndex() + 1); err != nil {
return nil, err
}
pubKey := deposit.Data.PublicKey
amount := deposit.Data.Amount
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
if verifySignature {
domain, err := helpers.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
if err != nil {
return nil, err
}
depositSig := deposit.Data.Signature
if err := verifyDepositDataSigningRoot(deposit.Data, pubKey, depositSig, domain); err != nil {
// Ignore this error as in the spec pseudo code.
log.Debugf("Skipping deposit: could not verify deposit data signature: %v", err)
return beaconState, nil
}
}
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
if params.BeaconConfig().MaxEffectiveBalance < effectiveBalance {
effectiveBalance = params.BeaconConfig().MaxEffectiveBalance
}
if err := beaconState.AppendValidator(&ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: deposit.Data.WithdrawalCredentials,
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: effectiveBalance,
}); err != nil {
return nil, err
}
if err := beaconState.AppendBalance(amount); err != nil {
return nil, err
}
} else {
if err := helpers.IncreaseBalance(beaconState, index, amount); err != nil {
return nil, err
}
}
return beaconState, nil
}
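The effective balance assigned above rounds the deposit amount down to the nearest EffectiveBalanceIncrement and caps the result at MaxEffectiveBalance. A small sketch of that arithmetic, assuming mainnet-style values (1 ETH increment, 32 ETH cap, amounts in Gwei):
// exampleEffectiveBalance mirrors the rounding above: a 33.5 ETH deposit
// yields a 32 ETH effective balance, and a 31.7 ETH deposit yields 31 ETH.
func exampleEffectiveBalance(amountGwei uint64) uint64 {
    increment := params.BeaconConfig().EffectiveBalanceIncrement // 1e9 Gwei on mainnet
    maxBal := params.BeaconConfig().MaxEffectiveBalance          // 32e9 Gwei on mainnet
    effective := amountGwei - amountGwei%increment
    if effective > maxBal {
        effective = maxBal
    }
    return effective
}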
func verifyDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit) error {
// Verify Merkle proof of deposit and deposit trie root.
if deposit == nil || deposit.Data == nil {
return errors.New("received nil deposit or nil deposit data")
}
eth1Data := beaconState.Eth1Data()
if eth1Data == nil {
return errors.New("received nil eth1data in the beacon state")
}
receiptRoot := eth1Data.DepositRoot
leaf, err := ssz.HashTreeRoot(deposit.Data)
if err != nil {
return errors.Wrap(err, "could not tree hash deposit data")
}
if ok := trieutil.VerifyMerkleBranch(
receiptRoot,
leaf[:],
int(beaconState.Eth1DepositIndex()),
deposit.Proof,
); !ok {
return fmt.Errorf(
"deposit merkle branch of deposit root did not verify for root: %#x",
receiptRoot,
)
}
return nil
}
// Deprecated: This method uses deprecated ssz.SigningRoot.
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature []byte, domain []byte) error {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return errors.Wrap(err, "could not convert bytes to public key")
}
sig, err := bls.SignatureFromBytes(signature)
if err != nil {
return errors.Wrap(err, "could not convert bytes to signature")
}
root, err := ssz.SigningRoot(obj)
if err != nil {
return errors.Wrap(err, "could not get signing root")
}
signingData := &pb.SigningData{
ObjectRoot: root[:],
Domain: domain,
}
ctrRoot, err := ssz.HashTreeRoot(signingData)
if err != nil {
return errors.Wrap(err, "could not get container root")
}
if !sig.Verify(publicKey, ctrRoot[:]) {
return helpers.ErrSigFailedToVerify
}
return nil
}
func verifyDepositDataWithDomain(ctx context.Context, deps []*ethpb.Deposit, domain []byte) error {
if len(deps) == 0 {
return nil
}
pks := make([]bls.PublicKey, len(deps))
sigs := make([]bls.Signature, len(deps))
msgs := make([][32]byte, len(deps))
for i, dep := range deps {
if ctx.Err() != nil {
return ctx.Err()
}
if dep == nil || dep.Data == nil {
return errors.New("nil deposit")
}
dpk, err := bls.PublicKeyFromBytes(dep.Data.PublicKey)
if err != nil {
return err
}
pks[i] = dpk
dsig, err := bls.SignatureFromBytes(dep.Data.Signature)
if err != nil {
return err
}
sigs[i] = dsig
root, err := ssz.SigningRoot(dep.Data)
if err != nil {
return errors.Wrap(err, "could not get signing root")
}
signingData := &pb.SigningData{
ObjectRoot: root[:],
Domain: domain,
}
ctrRoot, err := ssz.HashTreeRoot(signingData)
if err != nil {
return errors.Wrap(err, "could not get container root")
}
msgs[i] = ctrRoot
}
verify, err := bls.VerifyMultipleSignatures(sigs, msgs, pks)
if err != nil {
return errors.Errorf("could not verify multiple signatures: %v", err)
}
if !verify {
return errors.New("one or more deposit signatures did not verify")
}
return nil
}


@@ -0,0 +1,328 @@
package blocks_test
import (
"context"
"strings"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/trieutil"
)
func TestProcessDeposits_SameValidatorMultipleDepositsSameBlock(t *testing.T) {
// Same validator created 3 valid deposits within the same block
testutil.ResetCache()
dep, _, err := testutil.DeterministicDepositsAndKeysSameValidator(3)
if err != nil {
t.Fatal(err)
}
eth1Data, err := testutil.DeterministicEth1Data(len(dep))
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
// 3 deposits from the same validator
Deposits: []*ethpb.Deposit{dep[0], dep[1], dep[2]},
},
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &pb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
if err != nil {
t.Fatal(err)
}
newState, err := blocks.ProcessDeposits(context.Background(), beaconState, block.Body.Deposits)
if err != nil {
t.Fatalf("Expected block deposits to process correctly, received: %v", err)
}
if len(newState.Validators()) != 2 {
t.Errorf("Incorrect validator count. Wanted %d, got %d", 2, len(newState.Validators()))
}
}
func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: []byte{1, 2, 3},
Signature: make([]byte, 96),
},
}
leaf, err := ssz.HashTreeRoot(deposit.Data)
if err != nil {
t.Fatal(err)
}
// We then create a merkle branch for the test.
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, int(params.BeaconConfig().DepositContractTreeDepth))
if err != nil {
t.Fatalf("Could not generate trie: %v", err)
}
proof, err := depositTrie.MerkleProof(0)
if err != nil {
t.Fatalf("Could not generate proof: %v", err)
}
deposit.Proof = proof
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Deposits: []*ethpb.Deposit{deposit},
},
}
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: []byte{0},
BlockHash: []byte{1},
},
})
if err != nil {
t.Fatal(err)
}
want := "deposit root did not verify"
_, err = blocks.ProcessDeposits(context.Background(), beaconState, block.Body.Deposits)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected error: %s, received %v", want, err)
}
}
func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
dep, _, err := testutil.DeterministicDepositsAndKeys(1)
if err != nil {
t.Fatal(err)
}
eth1Data, err := testutil.DeterministicEth1Data(len(dep))
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Deposits: []*ethpb.Deposit{dep[0]},
},
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &pb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
if err != nil {
t.Fatal(err)
}
newState, err := blocks.ProcessDeposits(context.Background(), beaconState, block.Body.Deposits)
if err != nil {
t.Fatalf("Expected block deposits to process correctly, received: %v", err)
}
if newState.Balances()[1] != dep[0].Data.Amount {
t.Errorf(
"Expected state validator balances index 0 to equal %d, received %d",
dep[0].Data.Amount,
newState.Balances()[1],
)
}
}
func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) {
sk := bls.RandKey()
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: sk.PublicKey().Marshal(),
Amount: 1000,
},
}
sr, err := helpers.ComputeSigningRoot(deposit.Data, bytesutil.ToBytes(3, 8))
if err != nil {
t.Fatal(err)
}
sig := sk.Sign(sr[:])
deposit.Data.Signature = sig.Marshal()
leaf, err := ssz.HashTreeRoot(deposit.Data)
if err != nil {
t.Fatal(err)
}
// We then create a merkle branch for the test.
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, int(params.BeaconConfig().DepositContractTreeDepth))
if err != nil {
t.Fatalf("Could not generate trie: %v", err)
}
proof, err := depositTrie.MerkleProof(0)
if err != nil {
t.Fatalf("Could not generate proof: %v", err)
}
deposit.Proof = proof
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Deposits: []*ethpb.Deposit{deposit},
},
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1, 2, 3},
},
{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: []byte{1},
},
}
balances := []uint64{0, 50}
root := depositTrie.Root()
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: &ethpb.Eth1Data{
DepositRoot: root[:],
BlockHash: root[:],
},
})
if err != nil {
t.Fatal(err)
}
newState, err := blocks.ProcessDeposits(context.Background(), beaconState, block.Body.Deposits)
if err != nil {
t.Fatalf("Process deposit failed: %v", err)
}
if newState.Balances()[1] != 1000+50 {
t.Errorf("Expected balance at index 1 to be 1050, received %d", newState.Balances()[1])
}
}
func TestProcessDeposit_AddsNewValidatorDeposit(t *testing.T) {
// Similar to TestProcessDeposits_AddsNewValidatorDeposit, except that this test directly calls ProcessDeposit.
dep, _, err := testutil.DeterministicDepositsAndKeys(1)
if err != nil {
t.Fatal(err)
}
eth1Data, err := testutil.DeterministicEth1Data(len(dep))
if err != nil {
t.Fatal(err)
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &pb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
if err != nil {
t.Fatal(err)
}
newState, err := blocks.ProcessDeposit(beaconState, dep[0], true)
if err != nil {
t.Fatalf("Process deposit failed: %v", err)
}
if len(newState.Validators()) != 2 {
t.Errorf("Expected validator list to have length 2, received: %v", len(newState.Validators()))
}
if len(newState.Balances()) != 2 {
t.Fatalf("Expected validator balances list to have length 2, received: %v", len(newState.Balances()))
}
if newState.Balances()[1] != dep[0].Data.Amount {
t.Errorf(
"Expected state validator balances index 1 to equal %d, received %d",
dep[0].Data.Amount,
newState.Balances()[1],
)
}
}
func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
// Same test settings as in TestProcessDeposit_AddsNewValidatorDeposit, except that we use an invalid signature
dep, _, err := testutil.DeterministicDepositsAndKeys(1)
if err != nil {
t.Fatal(err)
}
dep[0].Data.Signature = make([]byte, 96)
trie, _, err := testutil.DepositTrieFromDeposits(dep)
if err != nil {
t.Fatal(err)
}
root := trie.Root()
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
}
registry := []*ethpb.Validator{
{
PublicKey: []byte{1},
WithdrawalCredentials: []byte{1, 2, 3},
},
}
balances := []uint64{0}
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Balances: balances,
Eth1Data: eth1Data,
Fork: &pb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
})
if err != nil {
t.Fatal(err)
}
newState, err := blocks.ProcessDeposit(beaconState, dep[0], true)
if err != nil {
t.Fatalf("Expected invalid block deposit to be ignored without error, received: %v", err)
}
if newState.Eth1DepositIndex() != 1 {
t.Errorf(
"Expected Eth1DepositIndex to be increased by 1 after processing an invalid deposit, received change: %v",
newState.Eth1DepositIndex(),
)
}
if len(newState.Validators()) != 1 {
t.Errorf("Expected validator list to have length 1, received: %v", len(newState.Validators()))
}
if len(newState.Balances()) != 1 {
t.Errorf("Expected validator balances list to have length 1, received: %v", len(newState.Balances()))
}
if newState.Balances()[0] != 0 {
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
}
}

View File

@@ -0,0 +1,74 @@
package blocks
import (
"bytes"
"errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
)
// ProcessEth1DataInBlock is an operation performed on each
// beacon block to ensure the ETH1 data votes are processed
// into the beacon state.
//
// Official spec definition:
// def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None:
// state.eth1_data_votes.append(body.eth1_data)
// if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH:
// state.latest_eth1_data = body.eth1_data
func ProcessEth1DataInBlock(beaconState *stateTrie.BeaconState, block *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
if beaconState == nil {
return nil, errors.New("nil state")
}
if block == nil || block.Body == nil {
return nil, errors.New("nil block or block withought body")
}
if err := beaconState.AppendEth1DataVotes(block.Body.Eth1Data); err != nil {
return nil, err
}
hasSupport, err := Eth1DataHasEnoughSupport(beaconState, block.Body.Eth1Data)
if err != nil {
return nil, err
}
if hasSupport {
if err := beaconState.SetEth1Data(block.Body.Eth1Data); err != nil {
return nil, err
}
}
return beaconState, nil
}
// AreEth1DataEqual checks equality between two eth1 data objects.
func AreEth1DataEqual(a, b *ethpb.Eth1Data) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
return a.DepositCount == b.DepositCount &&
bytes.Equal(a.BlockHash, b.BlockHash) &&
bytes.Equal(a.DepositRoot, b.DepositRoot)
}
// Eth1DataHasEnoughSupport returns true when the given eth1data has more than 50% votes in the
// eth1 voting period. A vote is cast by including eth1data in a block and part of state processing
// appends eth1data to the state in the Eth1DataVotes list. Iterating through this list checks the
// votes to see if they match the eth1data.
func Eth1DataHasEnoughSupport(beaconState *stateTrie.BeaconState, data *ethpb.Eth1Data) (bool, error) {
voteCount := uint64(0)
data = stateTrie.CopyETH1Data(data)
for _, vote := range beaconState.Eth1DataVotes() {
if AreEth1DataEqual(vote, data) {
voteCount++
}
}
// If 50+% majority converged on the same eth1data, then it has enough support to update the
// state.
support := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch
return voteCount*2 > support, nil
}
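// Worked numbers for the threshold above — an illustrative sketch, not part of
// the changeset, assuming mainnet-style values (EpochsPerEth1VotingPeriod = 32,
// SlotsPerEpoch = 32), so a voting period spans 1024 slots:
func exampleEth1VoteThreshold() bool {
reveal := uint64(32 * 32)    // 1024 votes possible per voting period
voteCount := uint64(513)     // smallest count satisfying voteCount*2 > support
return voteCount*2 > reveal  // true
}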

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -11,9 +12,9 @@ import (
"github.com/prysmaticlabs/prysm/shared/params"
)
-func FakeDeposits(n int) []*ethpb.Eth1Data {
+func FakeDeposits(n uint64) []*ethpb.Eth1Data {
deposits := make([]*ethpb.Eth1Data, n)
for i := 0; i < n; i++ {
for i := uint64(0); i < n; i++ {
deposits[i] = &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: []byte("root"),
@@ -30,7 +31,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
votingPeriodLength uint64
}{
{
-stateVotes: FakeDeposits(4 * int(params.BeaconConfig().SlotsPerEpoch)),
+stateVotes: FakeDeposits(4 * params.BeaconConfig().SlotsPerEpoch),
data: &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: []byte("root"),
@@ -38,7 +39,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
hasSupport: true,
votingPeriodLength: 7,
}, {
-stateVotes: FakeDeposits(4 * int(params.BeaconConfig().SlotsPerEpoch)),
+stateVotes: FakeDeposits(4 * params.BeaconConfig().SlotsPerEpoch),
data: &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: []byte("root"),
@@ -46,7 +47,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
hasSupport: false,
votingPeriodLength: 8,
}, {
-stateVotes: FakeDeposits(4 * int(params.BeaconConfig().SlotsPerEpoch)),
+stateVotes: FakeDeposits(4 * params.BeaconConfig().SlotsPerEpoch),
data: &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: []byte("root"),
@@ -85,3 +86,113 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
})
}
}
func TestAreEth1DataEqual(t *testing.T) {
type args struct {
a *ethpb.Eth1Data
b *ethpb.Eth1Data
}
tests := []struct {
name string
args args
want bool
}{
{
name: "true when both are nil",
args: args{
a: nil,
b: nil,
},
want: true,
},
{
name: "false when only one is nil",
args: args{
a: nil,
b: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
},
want: false,
},
{
name: "true when real equality",
args: args{
a: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
b: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
},
want: true,
},
{
name: "false is field value differs",
args: args{
a: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
b: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 64,
BlockHash: make([]byte, 32),
},
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := blocks.AreEth1DataEqual(tt.args.a, tt.args.b); got != tt.want {
t.Errorf("AreEth1DataEqual() = %v, want %v", got, tt.want)
}
})
}
}
func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
beaconState, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Eth1DataVotes: []*ethpb.Eth1Data{},
})
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: []byte{2},
BlockHash: []byte{3},
},
},
}
period := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch
for i := uint64(0); i < period; i++ {
beaconState, err = blocks.ProcessEth1DataInBlock(beaconState, block)
if err != nil {
t.Fatal(err)
}
}
newETH1DataVotes := beaconState.Eth1DataVotes()
if len(newETH1DataVotes) <= 1 {
t.Error("Expected new ETH1 data votes to have length > 1")
}
if !proto.Equal(beaconState.Eth1Data(), beaconstate.CopyETH1Data(block.Body.Eth1Data)) {
t.Errorf(
"Expected latest eth1 data to have been set to %v, received %v",
block.Body.Eth1Data,
beaconState.Eth1Data(),
)
}
}

View File

@@ -0,0 +1,140 @@
package blocks
import (
"context"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
)
// ProcessVoluntaryExits is one of the operations performed
// on each processed beacon block to determine which validators
// should exit the state's validator registry.
//
// Spec pseudocode definition:
// def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None:
// """
// Process ``VoluntaryExit`` operation.
// """
// validator = state.validator_registry[exit.validator_index]
// # Verify the validator is active
// assert is_active_validator(validator, get_current_epoch(state))
// # Verify the validator has not yet exited
// assert validator.exit_epoch == FAR_FUTURE_EPOCH
// # Exits must specify an epoch when they become valid; they are not valid before then
// assert get_current_epoch(state) >= exit.epoch
// # Verify the validator has been active long enough
// assert get_current_epoch(state) >= validator.activation_epoch + PERSISTENT_COMMITTEE_PERIOD
// # Verify signature
// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, exit.epoch)
// assert bls_verify(validator.pubkey, signing_root(exit), exit.signature, domain)
// # Initiate exit
// initiate_validator_exit(state, exit.validator_index)
func ProcessVoluntaryExits(
ctx context.Context,
beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*stateTrie.BeaconState, error) {
exits := body.VoluntaryExits
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
}
val, err := beaconState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
return nil, err
}
if err := VerifyExit(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorRoot()); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, err = v.InitiateValidatorExit(beaconState, exit.Exit.ValidatorIndex)
if err != nil {
return nil, err
}
}
return beaconState, nil
}
// ProcessVoluntaryExitsNoVerify processes all the voluntary exits in
// a block body, without verifying their BLS signatures.
func ProcessVoluntaryExitsNoVerify(
beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*stateTrie.BeaconState, error) {
var err error
exits := body.VoluntaryExits
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil exit")
}
beaconState, err = v.InitiateValidatorExit(beaconState, exit.Exit.ValidatorIndex)
if err != nil {
return nil, errors.Wrapf(err, "failed to process voluntary exit at index %d", idx)
}
}
return beaconState, nil
}
// VerifyExit implements the spec defined validation for voluntary exits.
//
// Spec pseudocode definition:
// def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None:
// """
// Process ``VoluntaryExit`` operation.
// """
// validator = state.validator_registry[exit.validator_index]
// # Verify the validator is active
// assert is_active_validator(validator, get_current_epoch(state))
// # Verify the validator has not yet exited
// assert validator.exit_epoch == FAR_FUTURE_EPOCH
// # Exits must specify an epoch when they become valid; they are not valid before then
// assert get_current_epoch(state) >= exit.epoch
// # Verify the validator has been active long enough
// assert get_current_epoch(state) >= validator.activation_epoch + PERSISTENT_COMMITTEE_PERIOD
// # Verify signature
// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, exit.epoch)
// assert bls_verify(validator.pubkey, signing_root(exit), exit.signature, domain)
func VerifyExit(validator *stateTrie.ReadOnlyValidator, currentSlot uint64, fork *pb.Fork, signed *ethpb.SignedVoluntaryExit, genesisRoot []byte) error {
if signed == nil || signed.Exit == nil {
return errors.New("nil exit")
}
exit := signed.Exit
currentEpoch := helpers.SlotToEpoch(currentSlot)
// Verify the validator is active.
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
return errors.New("non-active validator cannot exit")
}
// Verify the validator has not yet exited.
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
return fmt.Errorf("validator has already exited at epoch: %v", validator.ExitEpoch())
}
// Exits must specify an epoch when they become valid; they are not valid before then.
if currentEpoch < exit.Epoch {
return fmt.Errorf("expected current epoch >= exit epoch, received %d < %d", currentEpoch, exit.Epoch)
}
// Verify the validator has been active long enough.
if currentEpoch < validator.ActivationEpoch()+params.BeaconConfig().ShardCommitteePeriod {
return fmt.Errorf(
"validator has not been active long enough to exit, wanted epoch %d >= %d",
currentEpoch,
validator.ActivationEpoch()+params.BeaconConfig().ShardCommitteePeriod,
)
}
domain, err := helpers.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
if err != nil {
return err
}
valPubKey := validator.PublicKey()
if err := helpers.VerifySigningRoot(exit, valPubKey[:], signed.Signature, domain); err != nil {
return helpers.ErrSigFailedToVerify
}
return nil
}
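// The "active long enough" rule above reduces to a simple earliest-exit
// formula; a sketch (not part of the changeset) using the same config value
// VerifyExit consults:
func earliestExitEpoch(activationEpoch uint64) uint64 {
// A voluntary exit can only verify once the current epoch reaches this value.
return activationEpoch + params.BeaconConfig().ShardCommitteePeriod
}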

View File

@@ -0,0 +1,183 @@
package blocks_test
import (
"context"
"strings"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestProcessVoluntaryExits_ValidatorNotActive(t *testing.T) {
exits := []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 0,
},
},
}
registry := []*ethpb.Validator{
{
ExitEpoch: 0,
},
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
})
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
VoluntaryExits: exits,
},
}
want := "non-active validator cannot exit"
_, err = blocks.ProcessVoluntaryExits(context.Background(), state, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessVoluntaryExits_InvalidExitEpoch(t *testing.T) {
exits := []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 10,
},
},
}
registry := []*ethpb.Validator{
{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Slot: 0,
})
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
VoluntaryExits: exits,
},
}
want := "expected current epoch >= exit epoch"
_, err = blocks.ProcessVoluntaryExits(context.Background(), state, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) {
exits := []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 0,
Epoch: 0,
},
},
}
registry := []*ethpb.Validator{
{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Slot: 10,
})
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
VoluntaryExits: exits,
},
}
want := "validator has not been active long enough to exit"
_, err = blocks.ProcessVoluntaryExits(context.Background(), state, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
exits := []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 0,
Epoch: 0,
},
},
}
registry := []*ethpb.Validator{
{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: 0,
},
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Fork: &pb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
})
if err != nil {
t.Fatal(err)
}
err = state.SetSlot(state.Slot() + (params.BeaconConfig().ShardCommitteePeriod * params.BeaconConfig().SlotsPerEpoch))
if err != nil {
t.Fatal(err)
}
priv := bls.RandKey()
val, err := state.ValidatorAtIndex(0)
if err != nil {
t.Fatal(err)
}
val.PublicKey = priv.PublicKey().Marshal()[:]
if err := state.UpdateValidatorAtIndex(0, val); err != nil {
t.Fatal(err)
}
domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainVoluntaryExit, state.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
signingRoot, err := helpers.ComputeSigningRoot(exits[0].Exit, domain)
if err != nil {
t.Error(err)
}
sig := priv.Sign(signingRoot[:])
exits[0].Signature = sig.Marshal()
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
VoluntaryExits: exits,
},
}
newState, err := blocks.ProcessVoluntaryExits(context.Background(), state, block.Body)
if err != nil {
t.Fatalf("Could not process exits: %v", err)
}
newRegistry := newState.Validators()
if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch) {
t.Errorf("Expected validator exit epoch to be %d, got %d",
helpers.ActivationExitEpoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch), newRegistry[0].ExitEpoch)
}
}

View File

@@ -0,0 +1,135 @@
package blocks
import (
"bytes"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
// ProcessBlockHeader validates a block by its header.
//
// Spec pseudocode definition:
//
// def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
// # Verify that the slots match
// assert block.slot == state.slot
// # Verify that proposer index is the correct index
// assert block.proposer_index == get_beacon_proposer_index(state)
// # Verify that the parent matches
// assert block.parent_root == hash_tree_root(state.latest_block_header)
// # Save current block as the new latest block
// state.latest_block_header = BeaconBlockHeader(
// slot=block.slot,
// parent_root=block.parent_root,
// # state_root: zeroed, overwritten in the next `process_slot` call
// body_root=hash_tree_root(block.body),
// # signature is always zeroed
// )
// # Verify proposer is not slashed
// proposer = state.validators[get_beacon_proposer_index(state)]
// assert not proposer.slashed
// # Verify proposer signature
// assert bls_verify(proposer.pubkey, signing_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER))
func ProcessBlockHeader(
beaconState *stateTrie.BeaconState,
block *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {
beaconState, err := ProcessBlockHeaderNoVerify(beaconState, block.Block)
if err != nil {
return nil, err
}
// Verify proposer signature.
if err := VerifyBlockSignature(beaconState, block); err != nil {
return nil, err
}
return beaconState, nil
}
// ProcessBlockHeaderNoVerify validates a block by its header but skips proposer
// signature verification.
//
// WARNING: This method does not verify the proposer's signature. It is used by the proposer to compute the
// state root of an unsigned block.
//
// Spec pseudocode definition:
// def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
// # Verify that the slots match
// assert block.slot == state.slot
// # Verify that proposer index is the correct index
// assert block.proposer_index == get_beacon_proposer_index(state)
// # Verify that the parent matches
// assert block.parent_root == hash_tree_root(state.latest_block_header)
// # Save current block as the new latest block
// state.latest_block_header = BeaconBlockHeader(
// slot=block.slot,
// parent_root=block.parent_root,
// # state_root: zeroed, overwritten in the next `process_slot` call
// body_root=hash_tree_root(block.body),
// # signature is always zeroed
// )
// # Verify proposer is not slashed
// proposer = state.validators[get_beacon_proposer_index(state)]
// assert not proposer.slashed
func ProcessBlockHeaderNoVerify(
beaconState *stateTrie.BeaconState,
block *ethpb.BeaconBlock,
) (*stateTrie.BeaconState, error) {
if block == nil {
return nil, errors.New("nil block")
}
if beaconState.Slot() != block.Slot {
return nil, fmt.Errorf("state slot: %d is different than block slot: %d", beaconState.Slot(), block.Slot)
}
idx, err := helpers.BeaconProposerIndex(beaconState)
if err != nil {
return nil, err
}
if block.ProposerIndex != idx {
return nil, fmt.Errorf("proposer index: %d is different than calculated: %d", block.ProposerIndex, idx)
}
parentHeader := beaconState.LatestBlockHeader()
if parentHeader.Slot >= block.Slot {
return nil, fmt.Errorf("block.Slot %d must be greater than state.LatestBlockHeader.Slot %d", block.Slot, parentHeader.Slot)
}
parentRoot, err := stateutil.BlockHeaderRoot(parentHeader)
if err != nil {
return nil, err
}
if !bytes.Equal(block.ParentRoot, parentRoot[:]) {
return nil, fmt.Errorf(
"parent root %#x does not match the latest block header signing root in state %#x",
block.ParentRoot, parentRoot)
}
proposer, err := beaconState.ValidatorAtIndexReadOnly(idx)
if err != nil {
return nil, err
}
if proposer.Slashed() {
return nil, fmt.Errorf("proposer at index %d was previously slashed", idx)
}
bodyRoot, err := stateutil.BlockBodyRoot(block.Body)
if err != nil {
return nil, err
}
if err := beaconState.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: block.Slot,
ProposerIndex: block.ProposerIndex,
ParentRoot: block.ParentRoot,
StateRoot: params.BeaconConfig().ZeroHash[:],
BodyRoot: bodyRoot[:],
}); err != nil {
return nil, err
}
return beaconState, nil
}
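// A sketch of the proposer-side use called out in the warning above: compute a
// post-state root for a block that has not been signed yet. The state's Copy
// and HashTreeRoot methods and a context import are assumptions here; names
// are illustrative, not part of the changeset:
func stateRootForUnsignedBlock(ctx context.Context, st *stateTrie.BeaconState, blk *ethpb.BeaconBlock) ([32]byte, error) {
post, err := ProcessBlockHeaderNoVerify(st.Copy(), blk)
if err != nil {
return [32]byte{}, err
}
return post.HashTreeRoot(ctx)
}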

View File

@@ -0,0 +1,489 @@
package blocks_test
import (
"io/ioutil"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/sirupsen/logrus"
)
func init() {
logrus.SetOutput(ioutil.Discard) // Ignore "validator activated" logs
}
func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 10,
LatestBlockHeader: &ethpb.BeaconBlockHeader{Slot: 10}, // Must be less than block.Slot
Fork: &pb.Fork{
PreviousVersion: []byte{0, 0, 0, 0},
CurrentVersion: []byte{0, 0, 0, 0},
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
latestBlockSignedRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader())
if err != nil {
t.Error(err)
}
currentEpoch := helpers.CurrentEpoch(state)
dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot())
if err != nil {
t.Fatalf("Failed to get domain form state: %v", err)
}
priv := bls.RandKey()
pID, err := helpers.BeaconProposerIndex(state)
if err != nil {
t.Error(err)
}
block := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ProposerIndex: pID,
Slot: 10,
Body: &ethpb.BeaconBlockBody{
RandaoReveal: []byte{'A', 'B', 'C'},
},
ParentRoot: latestBlockSignedRoot[:],
},
}
signingRoot, err := helpers.ComputeSigningRoot(block.Block, dt)
if err != nil {
t.Fatalf("Failed to get signing root of block: %v", err)
}
blockSig := priv.Sign(signingRoot[:])
block.Signature = blockSig.Marshal()[:]
proposerIdx, err := helpers.BeaconProposerIndex(state)
if err != nil {
t.Fatal(err)
}
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
if err != nil {
t.Fatal(err)
}
_, err = blocks.ProcessBlockHeader(state, block)
if err == nil || err.Error() != "block.Slot 10 must be greater than state.LatestBlockHeader.Slot 10" {
t.Fatalf("did not get expected error, got %v", err)
}
}
func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
testutil.ResetCache()
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
if err := beaconState.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{Slot: 9}); err != nil {
t.Fatal(err)
}
if err := beaconState.SetSlot(10); err != nil {
t.Error(err)
}
lbhdr, err := stateutil.BlockHeaderRoot(beaconState.LatestBlockHeader())
if err != nil {
t.Error(err)
}
proposerIdx, err := helpers.BeaconProposerIndex(beaconState)
if err != nil {
t.Error(err)
}
block := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ProposerIndex: proposerIdx,
Slot: 10,
Body: &ethpb.BeaconBlockBody{
RandaoReveal: []byte{'A', 'B', 'C'},
},
ParentRoot: lbhdr[:],
},
}
dt, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatalf("Failed to get domain form state: %v", err)
}
signingRoot, err := helpers.ComputeSigningRoot(block.Block, dt)
if err != nil {
t.Fatalf("Failed to get signing root of block: %v", err)
}
blockSig := privKeys[proposerIdx+1].Sign(signingRoot[:])
block.Signature = blockSig.Marshal()[:]
_, err = blocks.ProcessBlockHeader(beaconState, block)
want := "signature did not verify"
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 10,
LatestBlockHeader: &ethpb.BeaconBlockHeader{Slot: 9},
Fork: &pb.Fork{
PreviousVersion: []byte{0, 0, 0, 0},
CurrentVersion: []byte{0, 0, 0, 0},
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
lbhsr, err := ssz.HashTreeRoot(state.LatestBlockHeader())
if err != nil {
t.Error(err)
}
currentEpoch := helpers.CurrentEpoch(state)
dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot())
if err != nil {
t.Fatalf("Failed to get domain form state: %v", err)
}
priv := bls.RandKey()
root, err := helpers.ComputeSigningRoot([]byte("hello"), dt)
if err != nil {
t.Error(err)
}
blockSig := priv.Sign(root[:])
validators[5896].PublicKey = priv.PublicKey().Marshal()
block := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Slot: 1,
Body: &ethpb.BeaconBlockBody{
RandaoReveal: []byte{'A', 'B', 'C'},
},
ParentRoot: lbhsr[:],
},
Signature: blockSig.Marshal(),
}
_, err = blocks.ProcessBlockHeader(state, block)
want := "is different than block slot"
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 10,
LatestBlockHeader: &ethpb.BeaconBlockHeader{Slot: 9},
Fork: &pb.Fork{
PreviousVersion: []byte{0, 0, 0, 0},
CurrentVersion: []byte{0, 0, 0, 0},
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
currentEpoch := helpers.CurrentEpoch(state)
dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot())
if err != nil {
t.Fatalf("Failed to get domain form state: %v", err)
}
priv := bls.RandKey()
root, err := helpers.ComputeSigningRoot([]byte("hello"), dt)
if err != nil {
t.Error(err)
}
blockSig := priv.Sign(root[:])
validators[5896].PublicKey = priv.PublicKey().Marshal()
pID, err := helpers.BeaconProposerIndex(state)
if err != nil {
t.Error(err)
}
block := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ProposerIndex: pID,
Slot: 10,
Body: &ethpb.BeaconBlockBody{
RandaoReveal: []byte{'A', 'B', 'C'},
},
ParentRoot: []byte{'A'},
},
Signature: blockSig.Marshal(),
}
_, err = blocks.ProcessBlockHeader(state, block)
want := "does not match"
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 10,
LatestBlockHeader: &ethpb.BeaconBlockHeader{Slot: 9},
Fork: &pb.Fork{
PreviousVersion: []byte{0, 0, 0, 0},
CurrentVersion: []byte{0, 0, 0, 0},
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
parentRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader())
if err != nil {
t.Error(err)
}
currentEpoch := helpers.CurrentEpoch(state)
dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot())
if err != nil {
t.Fatalf("Failed to get domain form state: %v", err)
}
priv := bls.RandKey()
root, err := helpers.ComputeSigningRoot([]byte("hello"), dt)
if err != nil {
t.Error(err)
}
blockSig := priv.Sign(root[:])
validators[12683].PublicKey = priv.PublicKey().Marshal()
pID, err := helpers.BeaconProposerIndex(state)
if err != nil {
t.Error(err)
}
block := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ProposerIndex: pID,
Slot: 10,
Body: &ethpb.BeaconBlockBody{
RandaoReveal: []byte{'A', 'B', 'C'},
},
ParentRoot: parentRoot[:],
},
Signature: blockSig.Marshal(),
}
_, err = blocks.ProcessBlockHeader(state, block)
want := "was previously slashed"
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestProcessBlockHeader_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 10,
LatestBlockHeader: &ethpb.BeaconBlockHeader{Slot: 9},
Fork: &pb.Fork{
PreviousVersion: []byte{0, 0, 0, 0},
CurrentVersion: []byte{0, 0, 0, 0},
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
latestBlockSignedRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader())
if err != nil {
t.Error(err)
}
currentEpoch := helpers.CurrentEpoch(state)
dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot())
if err != nil {
t.Fatalf("Failed to get domain form state: %v", err)
}
priv := bls.RandKey()
pID, err := helpers.BeaconProposerIndex(state)
if err != nil {
t.Error(err)
}
block := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ProposerIndex: pID,
Slot: 10,
Body: &ethpb.BeaconBlockBody{
RandaoReveal: []byte{'A', 'B', 'C'},
},
ParentRoot: latestBlockSignedRoot[:],
},
}
signingRoot, err := helpers.ComputeSigningRoot(block.Block, dt)
if err != nil {
t.Fatalf("Failed to get signing root of block: %v", err)
}
blockSig := priv.Sign(signingRoot[:])
block.Signature = blockSig.Marshal()[:]
bodyRoot, err := stateutil.BlockBodyRoot(block.Block.Body)
if err != nil {
t.Fatalf("Failed to hash block bytes got: %v", err)
}
proposerIdx, err := helpers.BeaconProposerIndex(state)
if err != nil {
t.Fatal(err)
}
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
if err != nil {
t.Fatal(err)
}
newState, err := blocks.ProcessBlockHeader(state, block)
if err != nil {
t.Fatalf("Failed to process block header got: %v", err)
}
var zeroHash [32]byte
nsh := newState.LatestBlockHeader()
expected := &ethpb.BeaconBlockHeader{
ProposerIndex: pID,
Slot: block.Block.Slot,
ParentRoot: latestBlockSignedRoot[:],
BodyRoot: bodyRoot[:],
StateRoot: zeroHash[:],
}
if !proto.Equal(nsh, expected) {
t.Errorf("Expected %v, received %v", expected, nsh)
}
}
func TestBlockSignatureSet_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 10,
LatestBlockHeader: &ethpb.BeaconBlockHeader{Slot: 9},
Fork: &pb.Fork{
PreviousVersion: []byte{0, 0, 0, 0},
CurrentVersion: []byte{0, 0, 0, 0},
},
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
latestBlockSignedRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader())
if err != nil {
t.Error(err)
}
currentEpoch := helpers.CurrentEpoch(state)
dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot())
if err != nil {
t.Fatalf("Failed to get domain form state: %v", err)
}
priv := bls.RandKey()
pID, err := helpers.BeaconProposerIndex(state)
if err != nil {
t.Error(err)
}
block := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ProposerIndex: pID,
Slot: 10,
Body: &ethpb.BeaconBlockBody{
RandaoReveal: []byte{'A', 'B', 'C'},
},
ParentRoot: latestBlockSignedRoot[:],
},
}
signingRoot, err := helpers.ComputeSigningRoot(block.Block, dt)
if err != nil {
t.Fatalf("Failed to get signing root of block: %v", err)
}
blockSig := priv.Sign(signingRoot[:])
block.Signature = blockSig.Marshal()[:]
proposerIdx, err := helpers.BeaconProposerIndex(state)
if err != nil {
t.Fatal(err)
}
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
if err != nil {
t.Fatal(err)
}
set, err := blocks.BlockSignatureSet(state, block)
if err != nil {
t.Fatal(err)
}
verified, err := set.Verify()
if err != nil {
t.Fatal(err)
}
if !verified {
t.Error("Block signature set returned a set which was unable to be verified")
}
}

View File

@@ -0,0 +1,5 @@
package blocks
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "blocks")

View File

@@ -0,0 +1,98 @@
package blocks
import (
"context"
"fmt"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
)
// ProcessProposerSlashings is one of the operations performed
// on each processed beacon block to slash proposers based on
// slashing conditions if any slashable events occurred.
//
// Spec pseudocode definition:
// def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None:
// """
// Process ``ProposerSlashing`` operation.
// """
// proposer = state.validator_registry[proposer_slashing.proposer_index]
// # Verify slots match
// assert proposer_slashing.header_1.slot == proposer_slashing.header_2.slot
// # But the headers are different
// assert proposer_slashing.header_1 != proposer_slashing.header_2
// # Check proposer is slashable
// assert is_slashable_validator(proposer, get_current_epoch(state))
// # Signatures are valid
// for header in (proposer_slashing.header_1, proposer_slashing.header_2):
// domain = get_domain(state, DOMAIN_BEACON_PROPOSER, slot_to_epoch(header.slot))
// assert bls_verify(proposer.pubkey, signing_root(header), header.signature, domain)
//
// slash_validator(state, proposer_slashing.proposer_index)
func ProcessProposerSlashings(
ctx context.Context,
beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*stateTrie.BeaconState, error) {
var err error
for idx, slashing := range body.ProposerSlashings {
if slashing == nil {
return nil, errors.New("nil proposer slashings in block body")
}
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrapf(err, "could not verify proposer slashing %d", idx)
}
beaconState, err = v.SlashValidator(
beaconState, slashing.Header_1.Header.ProposerIndex,
)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
}
}
return beaconState, nil
}
// VerifyProposerSlashing verifies that the data provided from slashing is valid.
func VerifyProposerSlashing(
beaconState *stateTrie.BeaconState,
slashing *ethpb.ProposerSlashing,
) error {
if slashing.Header_1 == nil || slashing.Header_1.Header == nil || slashing.Header_2 == nil || slashing.Header_2.Header == nil {
return errors.New("nil header cannot be verified")
}
if slashing.Header_1.Header.Slot != slashing.Header_2.Header.Slot {
return fmt.Errorf("mismatched header slots, received %d and %d", slashing.Header_1.Header.Slot, slashing.Header_2.Header.Slot)
}
if slashing.Header_1.Header.ProposerIndex != slashing.Header_2.Header.ProposerIndex {
return fmt.Errorf("mismatched indices, received %d and %d", slashing.Header_1.Header.ProposerIndex, slashing.Header_2.Header.ProposerIndex)
}
if proto.Equal(slashing.Header_1, slashing.Header_2) {
return errors.New("expected slashing headers to differ")
}
proposer, err := beaconState.ValidatorAtIndexReadOnly(slashing.Header_1.Header.ProposerIndex)
if err != nil {
return err
}
if !helpers.IsSlashableValidatorUsingTrie(proposer, helpers.SlotToEpoch(beaconState.Slot())) {
return fmt.Errorf("validator with key %#x is not slashable", proposer.PublicKey())
}
// Use the epoch of Header_1 here: both headers are required to have the same slot, hence the same epoch.
domain, err := helpers.Domain(beaconState.Fork(), helpers.SlotToEpoch(slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
return err
}
headers := []*ethpb.SignedBeaconBlockHeader{slashing.Header_1, slashing.Header_2}
for _, header := range headers {
proposerPubKey := proposer.PublicKey()
if err := helpers.VerifySigningRoot(header.Header, proposerPubKey[:], header.Signature, domain); err != nil {
return errors.Wrap(err, "could not verify beacon block header")
}
}
return nil
}
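// For reference, the smallest input VerifyProposerSlashing accepts is a pair
// of distinct headers sharing a slot and proposer index — a sketch (proposer
// signatures elided; the tests below show the full signing flow):
func exampleSlashablePair(idx uint64) *ethpb.ProposerSlashing {
h1 := &ethpb.SignedBeaconBlockHeader{Header: &ethpb.BeaconBlockHeader{
ProposerIndex: idx, Slot: 0, StateRoot: []byte("A"),
}}
h2 := &ethpb.SignedBeaconBlockHeader{Header: &ethpb.BeaconBlockHeader{
ProposerIndex: idx, Slot: 0, StateRoot: []byte("B"),
}}
return &ethpb.ProposerSlashing{Header_1: h1, Header_2: h2}
}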

View File

@@ -0,0 +1,202 @@
package blocks_test
import (
"context"
"fmt"
"strings"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) {
testutil.ResetCache()
beaconState, _ := testutil.DeterministicGenesisState(t, 20)
currentSlot := uint64(0)
slashings := []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: params.BeaconConfig().SlotsPerEpoch + 1,
},
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 0,
},
},
},
}
if err := beaconState.SetSlot(currentSlot); err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
ProposerSlashings: slashings,
},
}
want := "mismatched header slots"
_, err := blocks.ProcessProposerSlashings(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessProposerSlashings_SameHeaders(t *testing.T) {
testutil.ResetCache()
beaconState, _ := testutil.DeterministicGenesisState(t, 2)
currentSlot := uint64(0)
slashings := []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 0,
},
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 0,
},
},
},
}
if err := beaconState.SetSlot(currentSlot); err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
ProposerSlashings: slashings,
},
}
want := "expected slashing headers to differ"
_, err := blocks.ProcessProposerSlashings(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) {
registry := []*ethpb.Validator{
{
PublicKey: []byte("key"),
Slashed: true,
ActivationEpoch: 0,
WithdrawableEpoch: 0,
},
}
currentSlot := uint64(0)
slashings := []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 0,
Slot: 0,
},
Signature: []byte("A"),
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 0,
Slot: 0,
},
Signature: []byte("B"),
},
},
}
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Slot: currentSlot,
})
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
ProposerSlashings: slashings,
},
}
want := fmt.Sprintf(
"validator with key %#x is not slashable",
bytesutil.ToBytes48(beaconState.Validators()[0].PublicKey),
)
_, err = blocks.ProcessProposerSlashings(context.Background(), beaconState, block.Body)
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %s, received %v", want, err)
}
}
func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
// We test the case when data is correct and verify the validator
// registry has been updated.
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
proposerIdx := uint64(1)
domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
header1 := &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: proposerIdx,
Slot: 0,
StateRoot: []byte("A"),
},
}
signingRoot, err := helpers.ComputeSigningRoot(header1.Header, domain)
if err != nil {
t.Errorf("Could not get signing root of beacon block header: %v", err)
}
header1.Signature = privKeys[proposerIdx].Sign(signingRoot[:]).Marshal()[:]
header2 := &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: proposerIdx,
Slot: 0,
StateRoot: []byte("B"),
},
}
signingRoot, err = helpers.ComputeSigningRoot(header2.Header, domain)
if err != nil {
t.Errorf("Could not get signing root of beacon block header: %v", err)
}
header2.Signature = privKeys[proposerIdx].Sign(signingRoot[:]).Marshal()[:]
slashings := []*ethpb.ProposerSlashing{
{
Header_1: header1,
Header_2: header2,
},
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
ProposerSlashings: slashings,
},
}
newState, err := blocks.ProcessProposerSlashings(context.Background(), beaconState, block.Body)
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
newStateVals := newState.Validators()
if newStateVals[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
t.Errorf("Proposer with index 1 did not correctly exit,"+"wanted slot:%d, got:%d",
newStateVals[1].ExitEpoch, beaconState.Validators()[1].ExitEpoch)
}
}

View File

@@ -0,0 +1,77 @@
package blocks
import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
// ProcessRandao checks the block proposer's
// randao commitment and generates a new randao mix to update
// in the beacon state's latest randao mixes slice.
//
// Spec pseudocode definition:
// def process_randao(state: BeaconState, body: BeaconBlockBody) -> None:
// epoch = get_current_epoch(state)
// # Verify RANDAO reveal
// proposer = state.validators[get_beacon_proposer_index(state)]
// signing_root = compute_signing_root(epoch, get_domain(state, DOMAIN_RANDAO))
// assert bls.Verify(proposer.pubkey, signing_root, body.randao_reveal)
// # Mix in RANDAO reveal
// mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal))
// state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix
func ProcessRandao(
beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*stateTrie.BeaconState, error) {
buf, proposerPub, domain, err := randaoSigningData(beaconState)
if err != nil {
return nil, err
}
if err := verifySignature(buf, proposerPub[:], body.RandaoReveal, domain); err != nil {
return nil, errors.Wrap(err, "could not verify block randao")
}
beaconState, err = ProcessRandaoNoVerify(beaconState, body)
if err != nil {
return nil, errors.Wrap(err, "could not process randao")
}
return beaconState, nil
}
// ProcessRandaoNoVerify generates a new randao mix to update
// in the beacon state's latest randao mixes slice.
//
// Spec pseudocode definition:
// # Mix it in
// state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = (
// xor(get_randao_mix(state, get_current_epoch(state)),
// hash(body.randao_reveal))
// )
func ProcessRandaoNoVerify(
beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*stateTrie.BeaconState, error) {
currentEpoch := helpers.SlotToEpoch(beaconState.Slot())
// If block randao passed verification, we XOR the state's latest randao mix with the block's
// randao and update the state's corresponding latest randao mix value.
latestMixesLength := params.BeaconConfig().EpochsPerHistoricalVector
latestMixSlice, err := beaconState.RandaoMixAtIndex(currentEpoch % latestMixesLength)
if err != nil {
return nil, err
}
blockRandaoReveal := hashutil.Hash(body.RandaoReveal)
if len(blockRandaoReveal) != len(latestMixSlice) {
return nil, errors.New("blockRandaoReveal length doesnt match latestMixSlice length")
}
for i, x := range blockRandaoReveal {
latestMixSlice[i] ^= x
}
if err := beaconState.UpdateRandaoMixesAtIndex(currentEpoch%latestMixesLength, latestMixSlice); err != nil {
return nil, err
}
return beaconState, nil
}
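// XOR is self-inverting, so mixing the same reveal hash in twice restores the
// previous mix — useful when reasoning about state rewinds. An illustrative
// sketch, not part of the changeset:
func exampleXorRoundTrip() bool {
reveal := hashutil.Hash([]byte("reveal"))
mix := make([]byte, 32)
for i := range mix {
mix[i] ^= reveal[i] // mix in
}
for i := range mix {
mix[i] ^= reveal[i] // mix out
}
return mix[0] == 0 // the zero mix is restored
}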

View File

@@ -0,0 +1,112 @@
package blocks_test
import (
"bytes"
"encoding/binary"
"strings"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
// We fetch the proposer's index as that is whom the RANDAO will be verified against.
proposerIdx, err := helpers.BeaconProposerIndex(beaconState)
if err != nil {
t.Fatal(err)
}
epoch := uint64(0)
buf := make([]byte, 32)
binary.LittleEndian.PutUint64(buf, epoch)
domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
root, err := ssz.HashTreeRoot(&pb.SigningData{ObjectRoot: buf, Domain: domain})
if err != nil {
t.Fatal(err)
}
// We make the previous validator's index sign the message instead of the proposer.
epochSignature := privKeys[proposerIdx-1].Sign(root[:])
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
RandaoReveal: epochSignature.Marshal(),
},
}
want := "block randao: signature did not verify"
if _, err := blocks.ProcessRandao(
beaconState,
block.Body,
); err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestProcessRandao_SignatureVerifiesAndUpdatesLatestStateMixes(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
epoch := helpers.CurrentEpoch(beaconState)
epochSignature, err := testutil.RandaoReveal(beaconState, epoch, privKeys)
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
RandaoReveal: epochSignature,
},
}
newState, err := blocks.ProcessRandao(
beaconState,
block.Body,
)
if err != nil {
t.Errorf("Unexpected error processing block randao: %v", err)
}
currentEpoch := helpers.CurrentEpoch(beaconState)
mix := newState.RandaoMixes()[currentEpoch%params.BeaconConfig().EpochsPerHistoricalVector]
if bytes.Equal(mix, params.BeaconConfig().ZeroHash[:]) {
t.Errorf(
"Expected empty signature to be overwritten by randao reveal, received %v",
params.BeaconConfig().EmptySignature,
)
}
}
func TestRandaoSignatureSet_OK(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
epoch := helpers.CurrentEpoch(beaconState)
epochSignature, err := testutil.RandaoReveal(beaconState, epoch, privKeys)
if err != nil {
t.Fatal(err)
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
RandaoReveal: epochSignature,
},
}
set, _, err := blocks.RandaoSignatureSet(beaconState, block.Body)
if err != nil {
t.Fatal(err)
}
verified, err := set.Verify()
if err != nil {
t.Fatal(err)
}
if !verified {
t.Error("Unable to verify randao signature set")
}
}

View File

@@ -0,0 +1,227 @@
package blocks
import (
"context"
"encoding/binary"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
)
// retrieveSignatureSet retrieves the signature set from the raw data, public key, signature and domain provided.
func retrieveSignatureSet(signedData []byte, pub []byte, signature []byte, domain []byte) (*bls.SignatureSet, error) {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
sig, err := bls.SignatureFromBytes(signature)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to signature")
}
signingData := &pb.SigningData{
ObjectRoot: signedData,
Domain: domain,
}
root, err := ssz.HashTreeRoot(signingData)
if err != nil {
return nil, errors.Wrap(err, "could not hash container")
}
return &bls.SignatureSet{
Signatures: []bls.Signature{sig},
PublicKeys: []bls.PublicKey{publicKey},
Messages: [][32]byte{root},
}, nil
}
// verifySignature verifies the signature over the raw data using the public key and domain provided.
func verifySignature(signedData []byte, pub []byte, signature []byte, domain []byte) error {
set, err := retrieveSignatureSet(signedData, pub, signature, domain)
if err != nil {
return err
}
if len(set.Signatures) != 1 {
return errors.Errorf("signature set contains %d signatures instead of 1", len(set.Signatures))
}
// We assume only one signature set is returned here.
sig := set.Signatures[0]
publicKey := set.PublicKeys[0]
root := set.Messages[0]
if !sig.Verify(publicKey, root[:]) {
return helpers.ErrSigFailedToVerify
}
return nil
}
// VerifyBlockSignature verifies the proposer signature of a beacon block.
func VerifyBlockSignature(beaconState *stateTrie.BeaconState, block *ethpb.SignedBeaconBlock) error {
currentEpoch := helpers.SlotToEpoch(beaconState.Slot())
domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(block.Block.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return helpers.VerifyBlockSigningRoot(block.Block, proposerPubKey[:], block.Signature, domain)
}
// BlockSignatureSet retrieves the block signature set from the provided block and its corresponding state.
func BlockSignatureSet(beaconState *stateTrie.BeaconState, block *ethpb.SignedBeaconBlock) (*bls.SignatureSet, error) {
currentEpoch := helpers.SlotToEpoch(beaconState.Slot())
domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
return nil, err
}
proposer, err := beaconState.ValidatorAtIndex(block.Block.ProposerIndex)
if err != nil {
return nil, err
}
proposerPubKey := proposer.PublicKey
return helpers.RetrieveBlockSignatureSet(block.Block, proposerPubKey, block.Signature, domain)
}
// RandaoSignatureSet retrieves the relevant randao specific signature set object
// from a block and its corresponding state.
func RandaoSignatureSet(beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*bls.SignatureSet, *stateTrie.BeaconState, error) {
buf, proposerPub, domain, err := randaoSigningData(beaconState)
if err != nil {
return nil, nil, err
}
set, err := retrieveSignatureSet(buf, proposerPub[:], body.RandaoReveal, domain)
if err != nil {
return nil, nil, err
}
return set, beaconState, nil
}
// randaoSigningData retrieves the RANDAO-related signing data from the state.
func randaoSigningData(beaconState *stateTrie.BeaconState) ([]byte, []byte, []byte, error) {
proposerIdx, err := helpers.BeaconProposerIndex(beaconState)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "could not get beacon proposer index")
}
proposerPub := beaconState.PubkeyAtIndex(proposerIdx)
currentEpoch := helpers.SlotToEpoch(beaconState.Slot())
buf := make([]byte, 32)
binary.LittleEndian.PutUint64(buf, currentEpoch)
domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
if err != nil {
return nil, nil, nil, err
}
return buf, proposerPub[:], domain, nil
}
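For context, the reveal that RandaoSignatureSet verifies is produced on the validator side by signing the same 32-byte little-endian epoch buffer under the RANDAO domain. A minimal sketch mirroring randaoSigningData from the signer's perspective, assuming the bls.SecretKey interface used elsewhere in this diff; signRandaoReveal is a hypothetical helper name:
func signRandaoReveal(epoch uint64, domain []byte, privKey bls.SecretKey) ([]byte, error) {
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint64(buf, epoch) // same message layout as randaoSigningData
	signingData := &pb.SigningData{ObjectRoot: buf, Domain: domain}
	root, err := ssz.HashTreeRoot(signingData)
	if err != nil {
		return nil, err
	}
	return privKey.Sign(root[:]).Marshal(), nil
}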
// createAttestationSignatureSet breaks down attestations of the same domain and collects them into a single signature set.
func createAttestationSignatureSet(ctx context.Context, beaconState *stateTrie.BeaconState, atts []*ethpb.Attestation, domain []byte) (*bls.SignatureSet, error) {
if len(atts) == 0 {
return nil, nil
}
sigs := make([]bls.Signature, len(atts))
pks := make([]bls.PublicKey, len(atts))
msgs := make([][32]byte, len(atts))
for i, a := range atts {
sig, err := bls.SignatureFromBytes(a.Signature)
if err != nil {
return nil, err
}
sigs[i] = sig
c, err := helpers.BeaconCommitteeFromState(beaconState, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return nil, err
}
ia := attestationutil.ConvertToIndexed(ctx, a, c)
indices := ia.AttestingIndices
var pk bls.PublicKey
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := beaconState.PubkeyAtIndex(indices[i])
p, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
if err != nil {
return nil, errors.Wrap(err, "could not deserialize validator public key")
}
if pk == nil {
pk = p
} else {
pk.Aggregate(p)
}
}
pks[i] = pk
root, err := helpers.ComputeSigningRoot(ia.Data, domain)
if err != nil {
return nil, errors.Wrap(err, "could not get signing root of object")
}
msgs[i] = root
}
return &bls.SignatureSet{
Signatures: sigs,
PublicKeys: pks,
Messages: msgs,
}, nil
}
// AttestationSignatureSet retrieves all the related attestation signature data such as the relevant public keys,
// signatures and attestation signing data and collates it into a signature set object.
func AttestationSignatureSet(ctx context.Context, beaconState *stateTrie.BeaconState, atts []*ethpb.Attestation) (*bls.SignatureSet, error) {
if len(atts) == 0 {
return bls.NewSet(), nil
}
fork := beaconState.Fork()
gvr := beaconState.GenesisValidatorRoot()
dt := params.BeaconConfig().DomainBeaconAttester
// Split attestations by fork. Note: the signature domain will differ based on the fork.
var preForkAtts []*ethpb.Attestation
var postForkAtts []*ethpb.Attestation
for _, a := range atts {
if helpers.SlotToEpoch(a.Data.Slot) < fork.Epoch {
preForkAtts = append(preForkAtts, a)
} else {
postForkAtts = append(postForkAtts, a)
}
}
set := bls.NewSet()
// Check attestations from before the fork.
if fork.Epoch > 0 { // Check to prevent underflow.
prevDomain, err := helpers.Domain(fork, fork.Epoch-1, dt, gvr)
if err != nil {
return nil, err
}
aSet, err := createAttestationSignatureSet(ctx, beaconState, preForkAtts, prevDomain)
if err != nil {
return nil, err
}
set.Join(aSet)
} else if len(preForkAtts) > 0 {
// This is a sanity check that preForkAtts were not ignored when fork.Epoch == 0. This
// condition is not possible, but it doesn't hurt to check anyway.
return nil, errors.New("some attestations were not verified from previous fork before genesis")
}
// Then check attestations from after the fork.
currDomain, err := helpers.Domain(fork, fork.Epoch, dt, gvr)
if err != nil {
return nil, err
}
aSet, err := createAttestationSignatureSet(ctx, beaconState, postForkAtts, currDomain)
if err != nil {
return nil, err
}
return set.Join(aSet), nil
}
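Taken together, these constructors let a caller collapse every signature check for a block into one batched verification. A minimal caller-side sketch, assuming this wiring is representative rather than the actual call site in this diff:
func verifyBlockBatch(ctx context.Context, st *stateTrie.BeaconState, blk *ethpb.SignedBeaconBlock) error {
	blockSet, err := BlockSignatureSet(st, blk)
	if err != nil {
		return err
	}
	randaoSet, _, err := RandaoSignatureSet(st, blk.Block.Body)
	if err != nil {
		return err
	}
	attSet, err := AttestationSignatureSet(ctx, st, blk.Block.Body.Attestations)
	if err != nil {
		return err
	}
	set := bls.NewSet().Join(blockSet).Join(randaoSet).Join(attSet)
	verified, err := set.Verify()
	if err != nil {
		return err
	}
	if !verified {
		return errors.New("signature batch did not verify")
	}
	return nil
}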

View File

@@ -1,11 +1,13 @@
package spectest
import (
"context"
"path"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params/spectest"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
@@ -29,7 +31,9 @@ func runDepositTest(t *testing.T, config string) {
}
body := &ethpb.BeaconBlockBody{Deposits: []*ethpb.Deposit{deposit}}
testutil.RunBlockOperationTest(t, folderPath, body, blocks.ProcessDeposits)
testutil.RunBlockOperationTest(t, folderPath, body, func(ctx context.Context, state *state.BeaconState, body *ethpb.BeaconBlockBody) (*state.BeaconState, error) {
return blocks.ProcessDeposits(ctx, state, body.Deposits)
})
})
}
}

View File

@@ -113,7 +113,7 @@ func ProcessRegistryUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconStat
sort.Sort(sortableIndices{indices: activationQ, validators: vals})
// Only activate just enough validators according to the activation churn limit.
limit := len(activationQ)
limit := uint64(len(activationQ))
activeValidatorCount, err := helpers.ActiveValidatorCount(state, currentEpoch)
if err != nil {
return nil, errors.Wrap(err, "could not get active validator count")
@@ -125,8 +125,8 @@ func ProcessRegistryUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconStat
}
// Prevent the churn limit from causing an index out of bounds.
if int(churnLimit) < limit {
limit = int(churnLimit)
if churnLimit < limit {
limit = churnLimit
}
activationExitEpoch := helpers.ActivationExitEpoch(currentEpoch)
@@ -209,6 +209,7 @@ func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, err
// balance + DOWNWARD_THRESHOLD < validator.effective_balance
// or validator.effective_balance + UPWARD_THRESHOLD < balance
// ):
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
// index_epoch = Epoch(next_epoch + ACTIVATION_EXIT_DELAY)
// index_root_position = index_epoch % EPOCHS_PER_HISTORICAL_VECTOR
// indices_list = List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](get_active_validator_indices(state, index_epoch))
@@ -273,22 +274,22 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState,
// Set total slashed balances.
slashedExitLength := params.BeaconConfig().EpochsPerSlashingsVector
slashedEpoch := int(nextEpoch % slashedExitLength)
slashedEpoch := nextEpoch % slashedExitLength
slashings := state.Slashings()
if len(slashings) != int(slashedExitLength) {
if uint64(len(slashings)) != slashedExitLength {
return nil, fmt.Errorf(
"state slashing length %d different than EpochsPerHistoricalVector %d",
len(slashings),
slashedExitLength,
)
}
if err := state.UpdateSlashingsAtIndex(uint64(slashedEpoch) /* index */, 0 /* value */); err != nil {
if err := state.UpdateSlashingsAtIndex(slashedEpoch /* index */, 0 /* value */); err != nil {
return nil, err
}
// Set RANDAO mix.
randaoMixLength := params.BeaconConfig().EpochsPerHistoricalVector
if state.RandaoMixesLength() != int(randaoMixLength) {
if uint64(state.RandaoMixesLength()) != randaoMixLength {
return nil, fmt.Errorf(
"state randao length %d different than EpochsPerHistoricalVector %d",
state.RandaoMixesLength(),
@@ -350,14 +351,12 @@ func unslashedAttestingIndices(state *stateTrie.BeaconState, atts []*pb.PendingA
}
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
// Create a set for attesting indices
set := make([]uint64, 0, len(attestingIndices))
for _, index := range attestingIndices {
if !seen[index] {
set = append(set, index)
setIndices = append(setIndices, index)
}
seen[index] = true
}
setIndices = append(setIndices, set...)
}
// Sort the attesting set indices by increasing order.
sort.Slice(setIndices, func(i, j int) bool { return setIndices[i] < setIndices[j] })

View File

@@ -399,7 +399,7 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
if err != nil {
t.Error(err)
}
for i := 0; i < int(limit)+10; i++ {
for i := uint64(0); i < limit+10; i++ {
base.Validators = append(base.Validators, &ethpb.Validator{
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
@@ -417,11 +417,11 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
t.Errorf("Could not update registry %d, wanted activation eligibility epoch %d got %d",
i, currentEpoch, validator.ActivationEligibilityEpoch)
}
if i < int(limit) && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) {
if uint64(i) < limit && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) {
t.Errorf("Could not update registry %d, validators failed to activate: wanted activation epoch %d, got %d",
i, helpers.ActivationExitEpoch(currentEpoch), validator.ActivationEpoch)
}
if i >= int(limit) && validator.ActivationEpoch != params.BeaconConfig().FarFutureEpoch {
if uint64(i) >= limit && validator.ActivationEpoch != params.BeaconConfig().FarFutureEpoch {
t.Errorf("Could not update registry %d, validators should not have been activated, wanted activation epoch: %d, got %d",
i, params.BeaconConfig().FarFutureEpoch, validator.ActivationEpoch)
}

View File

@@ -10,6 +10,44 @@ import (
// ProcessJustificationAndFinalizationPreCompute processes justification and finalization during
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
// Note: this is an optimized version that uses precomputed total and attesting balances.
// def process_justification_and_finalization(state: BeaconState) -> None:
// if get_current_epoch(state) <= GENESIS_EPOCH + 1:
// return
//
// previous_epoch = get_previous_epoch(state)
// current_epoch = get_current_epoch(state)
// old_previous_justified_checkpoint = state.previous_justified_checkpoint
// old_current_justified_checkpoint = state.current_justified_checkpoint
//
// # Process justifications
// state.previous_justified_checkpoint = state.current_justified_checkpoint
// state.justification_bits[1:] = state.justification_bits[:-1]
// state.justification_bits[0] = 0b0
// matching_target_attestations = get_matching_target_attestations(state, previous_epoch) # Previous epoch
// if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
// state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
// root=get_block_root(state, previous_epoch))
// state.justification_bits[1] = 0b1
// matching_target_attestations = get_matching_target_attestations(state, current_epoch) # Current epoch
// if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
// state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
// root=get_block_root(state, current_epoch))
// state.justification_bits[0] = 0b1
//
// # Process finalizations
// bits = state.justification_bits
// # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
// if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
// state.finalized_checkpoint = old_previous_justified_checkpoint
// # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
// if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
// state.finalized_checkpoint = old_previous_justified_checkpoint
// # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
// if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
// state.finalized_checkpoint = old_current_justified_checkpoint
// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
// state.finalized_checkpoint = old_current_justified_checkpoint
func ProcessJustificationAndFinalizationPreCompute(state *stateTrie.BeaconState, pBal *Balance) (*stateTrie.BeaconState, error) {
if state.Slot() <= helpers.StartSlot(2) {
return state, nil
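The bit shift in the pseudocode above (justification_bits[1:] = justification_bits[:-1]; justification_bits[0] = 0b0) ages each epoch's justification flag by one position. A toy sketch of the same operation on a 4-bit vector packed into a uint8, with bit 0 as the most recent epoch (illustrative only, not Prysm's actual representation):
func shiftJustificationBits(bits uint8) uint8 {
	// Shifting left moves each epoch's flag to the next-older slot and
	// leaves bit 0 clear for the epoch about to be evaluated; masking
	// keeps only JUSTIFICATION_BITS_LENGTH (4) bits.
	return (bits << 1) & 0x0F
}
Per the pseudocode, bit 1 (previous epoch) or bit 0 (current epoch) is then set when matching target attestations reach a 2/3 supermajority of the total active balance.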

View File

@@ -10,7 +10,6 @@ import (
func TestMain(m *testing.M) {
prevConfig := params.BeaconConfig().Copy()
c := params.BeaconConfig()
// TODO(2312): remove this and use the mainnet count.
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)

View File

@@ -21,6 +21,7 @@ go_library(
"//shared/benchutil/benchmark_files:__subpackages__",
"//shared/interop:__pkg__",
"//shared/keystore:__pkg__",
"//shared/depositutil:__pkg__",
"//shared/p2putils:__pkg__",
"//shared/attestationutil:__pkg__",
"//shared/testutil:__pkg__",
@@ -63,6 +64,7 @@ go_test(
embed = [":go_default_library"],
shard_count = 2,
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
@@ -76,30 +78,5 @@ go_test(
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
],
)
# gazelle:exclude attestation_aggregation_bench_test.go
go_test(
name = "go_benchmark_test",
size = "medium",
srcs = ["attestation_aggregation_bench_test.go"],
args = [
"-test.bench=.",
"-test.benchmem",
"-test.v",
],
embed = [":go_default_library"],
local = True,
tags = [
"benchmark",
"manual",
"no-cache",
],
deps = [
"//shared/bls:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -2,127 +2,25 @@ package helpers
import (
"encoding/binary"
"fmt"
"time"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
)
var (
// ErrAttestationAggregationBitsOverlap is returned when two attestations aggregation
// bits overlap with each other.
ErrAttestationAggregationBitsOverlap = errors.New("overlapping aggregation bits")
// ErrAttestationAggregationBitsDifferentLen is returned when two attestation aggregation bits
// have different lengths.
ErrAttestationAggregationBitsDifferentLen = errors.New("different bitlist lengths")
)
// AggregateAttestations aggregates a list of attestations such that the minimal number of attestations is returned.
// Note: this is currently a naive implementation with O(n^2) time complexity.
func AggregateAttestations(atts []*ethpb.Attestation) ([]*ethpb.Attestation, error) {
if len(atts) <= 1 {
return atts, nil
}
// Naive aggregation. O(n^2) time.
for i, a := range atts {
if i >= len(atts) {
break
}
for j := i + 1; j < len(atts); j++ {
b := atts[j]
if a.AggregationBits.Len() == b.AggregationBits.Len() && !a.AggregationBits.Overlaps(b.AggregationBits) {
var err error
a, err = AggregateAttestation(a, b)
if err != nil {
return nil, err
}
// Delete b
atts = append(atts[:j], atts[j+1:]...)
j--
atts[i] = a
}
}
}
// Naive deduplication of identical aggregations. O(n^2) time.
for i, a := range atts {
for j := i + 1; j < len(atts); j++ {
b := atts[j]
if a.AggregationBits.Len() != b.AggregationBits.Len() {
continue
}
if a.AggregationBits.Contains(b.AggregationBits) {
// If b is fully contained in a, then b can be removed.
atts = append(atts[:j], atts[j+1:]...)
j--
} else if b.AggregationBits.Contains(a.AggregationBits) {
// if a is fully contained in b, then a can be removed.
atts = append(atts[:i], atts[i+1:]...)
i--
break // Stop the inner loop, advance a.
}
}
}
return atts, nil
}
// BLS aggregate signature aliases for testing / benchmark substitution. These methods are
// significantly more expensive than the inner logic of AggregateAttestations so they must be
// substituted for benchmarks which analyze AggregateAttestations.
var aggregateSignatures = bls.AggregateSignatures
var signatureFromBytes = bls.SignatureFromBytes
// AggregateAttestation aggregates attestations a1 and a2 together.
func AggregateAttestation(a1 *ethpb.Attestation, a2 *ethpb.Attestation) (*ethpb.Attestation, error) {
if a1.AggregationBits.Len() != a2.AggregationBits.Len() {
return nil, ErrAttestationAggregationBitsDifferentLen
}
if a1.AggregationBits.Overlaps(a2.AggregationBits) {
return nil, ErrAttestationAggregationBitsOverlap
}
baseAtt := stateTrie.CopyAttestation(a1)
newAtt := stateTrie.CopyAttestation(a2)
if newAtt.AggregationBits.Count() > baseAtt.AggregationBits.Count() {
baseAtt, newAtt = newAtt, baseAtt
}
if baseAtt.AggregationBits.Contains(newAtt.AggregationBits) {
return baseAtt, nil
}
newBits := baseAtt.AggregationBits.Or(newAtt.AggregationBits)
newSig, err := signatureFromBytes(newAtt.Signature)
if err != nil {
return nil, err
}
baseSig, err := signatureFromBytes(baseAtt.Signature)
if err != nil {
return nil, err
}
aggregatedSig := aggregateSignatures([]*bls.Signature{baseSig, newSig})
baseAtt.Signature = aggregatedSig.Marshal()
baseAtt.AggregationBits = newBits
return baseAtt, nil
}
// SlotSignature returns the BLS signature over the signing root of the input slot.
//
// Spec pseudocode definition:
// def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
// domain = get_domain(state, DOMAIN_BEACON_ATTESTER, compute_epoch_at_slot(slot))
// return bls_sign(privkey, hash_tree_root(slot), domain)
func SlotSignature(state *stateTrie.BeaconState, slot uint64, privKey *bls.SecretKey) (*bls.Signature, error) {
// domain = get_domain(state, DOMAIN_SELECTION_PROOF, compute_epoch_at_slot(slot))
// signing_root = compute_signing_root(slot, domain)
// return bls.Sign(privkey, signing_root)
func SlotSignature(state *stateTrie.BeaconState, slot uint64, privKey bls.SecretKey) (bls.Signature, error) {
d, err := Domain(state.Fork(), CurrentEpoch(state), params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot())
if err != nil {
return nil, err
@@ -159,16 +57,16 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
// signatures = [attestation.signature for attestation in attestations]
// return bls_aggregate_signatures(signatures)
func AggregateSignature(attestations []*ethpb.Attestation) (*bls.Signature, error) {
sigs := make([]*bls.Signature, len(attestations))
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
sigs := make([]bls.Signature, len(attestations))
var err error
for i := 0; i < len(sigs); i++ {
sigs[i], err = signatureFromBytes(attestations[i].Signature)
sigs[i], err = bls.SignatureFromBytes(attestations[i].Signature)
if err != nil {
return nil, err
}
}
return aggregateSignatures(sigs), nil
return bls.AggregateSignatures(sigs), nil
}
// IsAggregated returns true if the attestation is an aggregated attestation,
@@ -213,3 +111,48 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount, comIdx, attSlot uint64) u
computedSubnet := (commsSinceStart + comIdx) % params.BeaconNetworkConfig().AttestationSubnetCount
return computedSubnet
}
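As a worked example of the subnet formula (all values assumed for illustration): if 30 committees have elapsed since the start of the epoch (commsSinceStart = 30), the attestation targets committee index 6, and AttestationSubnetCount is 64, the computed subnet is (30 + 6) % 64 = 36, so the attestation is gossiped on subnet 36.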
// ValidateAttestationTime validates that the incoming attestation is in the desired time range.
// An attestation is valid only if received within the last ATTESTATION_PROPAGATION_SLOT_RANGE
// slots.
//
// Example:
// ATTESTATION_PROPAGATION_SLOT_RANGE = 5
// current_slot = 100
// invalid_attestation_slot = 92 (too old)
// invalid_attestation_slot = 101 (in the future)
// valid_attestation_slot = 98
// In the example above, an attestation must be within the slot range of 95 to 100.
func ValidateAttestationTime(attSlot uint64, genesisTime time.Time) error {
attTime := genesisTime.Add(time.Duration(attSlot*params.BeaconConfig().SecondsPerSlot) * time.Second)
currentSlot := SlotsSince(genesisTime)
// A clock disparity allows for minor tolerances outside of the expected range. This value is
// usually small, less than 1 second.
clockDisparity := params.BeaconNetworkConfig().MaximumGossipClockDisparity
// An attestation cannot be from the future, so the upper bound is set to now, with a minor
// tolerance for peer clock disparity.
upperBounds := roughtime.Now().Add(clockDisparity)
// An attestation cannot be older than the current slot - attestation propagation slot range
// with a minor tolerance for peer clock disparity.
lowerBoundsSlot := uint64(0)
if currentSlot > params.BeaconNetworkConfig().AttestationPropagationSlotRange {
lowerBoundsSlot = currentSlot - params.BeaconNetworkConfig().AttestationPropagationSlotRange
}
lowerBounds := genesisTime.Add(
time.Duration(lowerBoundsSlot*params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(-clockDisparity)
// Verify attestation slot within the time range.
if attTime.Before(lowerBounds) || attTime.After(upperBounds) {
return fmt.Errorf(
"attestation slot %d not within attestation propagation range of %d to %d (current slot)",
attSlot,
currentSlot-params.BeaconNetworkConfig().AttestationPropagationSlotRange,
currentSlot,
)
}
return nil
}

View File

@@ -1,110 +0,0 @@
package helpers
import (
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/shared/bls"
)
func bitlistWithAllBitsSet(length uint64) bitfield.Bitlist {
b := bitfield.NewBitlist(length)
for i := uint64(0); i < length; i++ {
b.SetBitAt(i, true)
}
return b
}
func bitlistsWithSingleBitSet(length uint64) []bitfield.Bitlist {
lists := make([]bitfield.Bitlist, length)
for i := uint64(0); i < length; i++ {
b := bitfield.NewBitlist(length)
b.SetBitAt(i, true)
lists[i] = b
}
return lists
}
func BenchmarkAggregateAttestations(b *testing.B) {
// Override expensive BLS aggregation method with cheap no-op such that this benchmark profiles
// the logic of aggregation selection rather than BLS logic.
aggregateSignatures = func(sigs []*bls.Signature) *bls.Signature {
return sigs[0]
}
signatureFromBytes = func(sig []byte) (*bls.Signature, error) {
return bls.NewAggregateSignature(), nil
}
defer func() {
aggregateSignatures = bls.AggregateSignatures
signatureFromBytes = bls.SignatureFromBytes
}()
// Each test defines the aggregation bitfield inputs and the wanted output result.
tests := []struct {
name string
inputs []bitfield.Bitlist
want []bitfield.Bitlist
}{
{
name: "64 attestations with single bit set",
inputs: bitlistsWithSingleBitSet(64),
want: []bitfield.Bitlist{
bitlistWithAllBitsSet(64),
},
},
{
name: "128 attestations with single bit set",
inputs: bitlistsWithSingleBitSet(128),
want: []bitfield.Bitlist{
bitlistWithAllBitsSet(128),
},
},
{
name: "256 attestations with single bit set",
inputs: bitlistsWithSingleBitSet(256),
want: []bitfield.Bitlist{
bitlistWithAllBitsSet(256),
},
},
{
name: "512 attestations with single bit set",
inputs: bitlistsWithSingleBitSet(512),
want: []bitfield.Bitlist{
bitlistWithAllBitsSet(512),
},
},
{
name: "1024 attestations with single bit set",
inputs: bitlistsWithSingleBitSet(1024),
want: []bitfield.Bitlist{
bitlistWithAllBitsSet(1024),
},
},
}
var makeAttestationsFromBitlists = func(bl []bitfield.Bitlist) []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, len(bl))
for i, b := range bl {
atts[i] = &ethpb.Attestation{
AggregationBits: b,
Data: nil,
Signature: bls.NewAggregateSignature().Marshal(),
}
}
return atts
}
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
atts := makeAttestationsFromBitlists(tt.inputs)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := AggregateAttestations(atts)
if err != nil {
b.Fatal(err)
}
}
})
}
}

View File

@@ -1,244 +1,22 @@
package helpers_test
import (
"bytes"
"sort"
"strconv"
"testing"
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestAggregateAttestation(t *testing.T) {
tests := []struct {
a1 *ethpb.Attestation
a2 *ethpb.Attestation
want *ethpb.Attestation
}{
{a1: &ethpb.Attestation{AggregationBits: []byte{}},
a2: &ethpb.Attestation{AggregationBits: []byte{}},
want: &ethpb.Attestation{AggregationBits: []byte{}}},
{a1: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x03}},
a2: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x02}},
want: &ethpb.Attestation{AggregationBits: []byte{0x03}}},
{a1: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x02}},
a2: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x03}},
want: &ethpb.Attestation{AggregationBits: []byte{0x03}}},
}
for _, tt := range tests {
got, err := helpers.AggregateAttestation(tt.a1, tt.a2)
if err != nil {
t.Fatal(err)
}
if !ssz.DeepEqual(got, tt.want) {
t.Errorf("AggregateAttestation() = %v, want %v", got, tt.want)
}
}
}
func TestAggregateAttestation_OverlapFails(t *testing.T) {
tests := []struct {
a1 *ethpb.Attestation
a2 *ethpb.Attestation
}{
{a1: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x1F}},
a2: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x11}}},
{a1: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0xFF, 0x85}},
a2: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x13, 0x8F}}},
}
for _, tt := range tests {
_, err := helpers.AggregateAttestation(tt.a1, tt.a2)
if err != helpers.ErrAttestationAggregationBitsOverlap {
t.Error("Did not receive wanted error")
}
}
}
func TestAggregateAttestation_DiffLengthFails(t *testing.T) {
tests := []struct {
a1 *ethpb.Attestation
a2 *ethpb.Attestation
}{
{a1: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x0F}},
a2: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x11}}},
}
for _, tt := range tests {
_, err := helpers.AggregateAttestation(tt.a1, tt.a2)
if err != helpers.ErrAttestationAggregationBitsDifferentLen {
t.Error("Did not receive wanted error")
}
}
}
func bitlistWithAllBitsSet(length uint64) bitfield.Bitlist {
b := bitfield.NewBitlist(length)
for i := uint64(0); i < length; i++ {
b.SetBitAt(i, true)
}
return b
}
func bitlistsWithSingleBitSet(length uint64) []bitfield.Bitlist {
lists := make([]bitfield.Bitlist, length)
for i := uint64(0); i < length; i++ {
b := bitfield.NewBitlist(length)
b.SetBitAt(i, true)
lists[i] = b
}
return lists
}
func TestAggregateAttestations(t *testing.T) {
// Each test defines the aggregation bitfield inputs and the wanted output result.
tests := []struct {
name string
inputs []bitfield.Bitlist
want []bitfield.Bitlist
}{
{
name: "two attestations with no overlap",
inputs: []bitfield.Bitlist{
{0b00000001, 0b1},
{0b00000010, 0b1},
},
want: []bitfield.Bitlist{
{0b00000011, 0b1},
},
},
{
name: "256 attestations with single bit set",
inputs: bitlistsWithSingleBitSet(256),
want: []bitfield.Bitlist{
bitlistWithAllBitsSet(256),
},
},
{
name: "1024 attestations with single bit set",
inputs: bitlistsWithSingleBitSet(1024),
want: []bitfield.Bitlist{
bitlistWithAllBitsSet(1024),
},
},
{
name: "two attestations with overlap",
inputs: []bitfield.Bitlist{
{0b00000101, 0b1},
{0b00000110, 0b1},
},
want: []bitfield.Bitlist{
{0b00000101, 0b1},
{0b00000110, 0b1},
},
},
{
name: "some attestations overlap",
inputs: []bitfield.Bitlist{
{0b00001001, 0b1},
{0b00010110, 0b1},
{0b00001010, 0b1},
{0b00110001, 0b1},
},
want: []bitfield.Bitlist{
{0b00111011, 0b1},
{0b00011111, 0b1},
},
},
{
name: "some attestations produce duplicates which are removed",
inputs: []bitfield.Bitlist{
{0b00000101, 0b1},
{0b00000110, 0b1},
{0b00001010, 0b1},
{0b00001001, 0b1},
},
want: []bitfield.Bitlist{
{0b00001111, 0b1}, // both 0&1 and 2&3 produce this bitlist
},
},
{
name: "two attestations where one is fully contained within the other",
inputs: []bitfield.Bitlist{
{0b00000001, 0b1},
{0b00000011, 0b1},
},
want: []bitfield.Bitlist{
{0b00000011, 0b1},
},
},
{
name: "two attestations where one is fully contained within the other reversed",
inputs: []bitfield.Bitlist{
{0b00000011, 0b1},
{0b00000001, 0b1},
},
want: []bitfield.Bitlist{
{0b00000011, 0b1},
},
},
{
name: "attestations with different bitlist lengths",
inputs: []bitfield.Bitlist{
{0b00000011, 0b10},
{0b00000111, 0b100},
{0b00000100, 0b1},
},
want: []bitfield.Bitlist{
{0b00000011, 0b10},
{0b00000111, 0b100},
{0b00000100, 0b1},
},
},
}
var makeAttestationsFromBitlists = func(bl []bitfield.Bitlist) []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, len(bl))
for i, b := range bl {
sk := bls.RandKey()
sig := sk.Sign([]byte("dummy_test_data"))
atts[i] = &ethpb.Attestation{
AggregationBits: b,
Data: nil,
Signature: sig.Marshal(),
}
}
return atts
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := helpers.AggregateAttestations(makeAttestationsFromBitlists(tt.inputs))
if err != nil {
t.Fatal(err)
}
sort.Slice(got, func(i, j int) bool {
return got[i].AggregationBits.Bytes()[0] < got[j].AggregationBits.Bytes()[0]
})
sort.Slice(tt.want, func(i, j int) bool {
return tt.want[i].Bytes()[0] < tt.want[j].Bytes()[0]
})
if len(got) != len(tt.want) {
t.Logf("got=%v", got)
t.Fatalf("Wrong number of responses. Got %d, wanted %d", len(got), len(tt.want))
}
for i, w := range tt.want {
if !bytes.Equal(got[i].AggregationBits.Bytes(), w.Bytes()) {
t.Errorf("Unexpected bitlist at index %d, got %b, wanted %b", i, got[i].AggregationBits.Bytes(), w.Bytes())
}
}
})
}
}
func TestSlotSignature_Verify(t *testing.T) {
func TestAttestation_SlotSignature(t *testing.T) {
priv := bls.RandKey()
pub := priv.PublicKey()
state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
@@ -259,7 +37,8 @@ func TestSlotSignature_Verify(t *testing.T) {
t.Fatal(err)
}
domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot())
domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state),
params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
@@ -272,85 +51,88 @@ func TestSlotSignature_Verify(t *testing.T) {
}
}
func TestIsAggregator_True(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
func TestAttestation_IsAggregator(t *testing.T) {
t.Run("aggregator", func(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
committee, err := helpers.BeaconCommitteeFromState(beaconState, 0, 0)
if err != nil {
t.Fatal(err)
}
sig := privKeys[0].Sign([]byte{'A'})
agg, err := helpers.IsAggregator(uint64(len(committee)), sig.Marshal())
if err != nil {
t.Fatal(err)
}
if !agg {
t.Error("Wanted aggregator true, got false")
}
})
committee, err := helpers.BeaconCommitteeFromState(beaconState, 0, 0)
if err != nil {
t.Fatal(err)
}
sig := privKeys[0].Sign([]byte{'A'})
agg, err := helpers.IsAggregator(uint64(len(committee)), sig.Marshal())
if err != nil {
t.Fatal(err)
}
if !agg {
t.Error("Wanted aggregator true, got false")
}
t.Run("not aggregator", func(t *testing.T) {
params.UseMinimalConfig()
defer params.UseMainnetConfig()
beaconState, privKeys := testutil.DeterministicGenesisState(t, 2048)
committee, err := helpers.BeaconCommitteeFromState(beaconState, 0, 0)
if err != nil {
t.Fatal(err)
}
sig := privKeys[0].Sign([]byte{'A'})
agg, err := helpers.IsAggregator(uint64(len(committee)), sig.Marshal())
if err != nil {
t.Fatal(err)
}
if agg {
t.Error("Wanted aggregator false, got true")
}
})
}
func TestIsAggregator_False(t *testing.T) {
params.UseMinimalConfig()
defer params.UseMainnetConfig()
beaconState, privKeys := testutil.DeterministicGenesisState(t, 2048)
func TestAttestation_AggregateSignature(t *testing.T) {
t.Run("verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := bytesutil.ToBytes32([]byte("hello"))
for i := 0; i < 100; i++ {
priv := bls.RandKey()
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts)
if err != nil {
t.Fatal(err)
}
if !aggSig.FastAggregateVerify(pubkeys, msg) {
t.Error("Signature did not verify")
}
})
committee, err := helpers.BeaconCommitteeFromState(beaconState, 0, 0)
if err != nil {
t.Fatal(err)
}
sig := privKeys[0].Sign([]byte{'A'})
agg, err := helpers.IsAggregator(uint64(len(committee)), sig.Marshal())
if err != nil {
t.Fatal(err)
}
if agg {
t.Error("Wanted aggregator false, got true")
}
t.Run("not verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := []byte("hello")
for i := 0; i < 100; i++ {
priv := bls.RandKey()
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts[0 : len(atts)-2])
if err != nil {
t.Fatal(err)
}
if aggSig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msg)) {
t.Error("Signature not suppose to verify")
}
})
}
func TestAggregateSignature_True(t *testing.T) {
pubkeys := make([]*bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := bytesutil.ToBytes32([]byte("hello"))
for i := 0; i < 100; i++ {
priv := bls.RandKey()
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts)
if err != nil {
t.Fatal(err)
}
if !aggSig.FastAggregateVerify(pubkeys, msg) {
t.Error("Signature did not verify")
}
}
func TestAggregateSignature_False(t *testing.T) {
pubkeys := make([]*bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := []byte("hello")
for i := 0; i < 100; i++ {
priv := bls.RandKey()
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts[0 : len(atts)-2])
if err != nil {
t.Fatal(err)
}
if aggSig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msg)) {
t.Error("Signature not suppose to verify")
}
}
func TestComputeSubnetForAttestation_ComputeForAttestation(t *testing.T) {
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
// Create 10 committees
committeeCount := uint64(10)
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
@@ -399,3 +181,89 @@ func TestComputeSubnetForAttestation_ComputeForAttestation(t *testing.T) {
t.Errorf("Did not get correct subnet for attestation, wanted %d but got %d", 6, sub)
}
}
func Test_ValidateAttestationTime(t *testing.T) {
if params.BeaconNetworkConfig().MaximumGossipClockDisparity < 200*time.Millisecond {
t.Fatal("This test expects the maximum clock disparity to be at least 200ms")
}
type args struct {
attSlot uint64
genesisTime time.Time
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "attestation.slot == current_slot",
args: args{
attSlot: 15,
genesisTime: roughtime.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantErr: false,
},
{
name: "attestation.slot == current_slot, received in middle of slot",
args: args{
attSlot: 15,
genesisTime: roughtime.Now().Add(
-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(-(time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second)),
},
wantErr: false,
},
{
name: "attestation.slot == current_slot, received 200ms early",
args: args{
attSlot: 16,
genesisTime: roughtime.Now().Add(
-16 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(-200 * time.Millisecond),
},
wantErr: false,
},
{
name: "attestation.slot > current_slot",
args: args{
attSlot: 16,
genesisTime: roughtime.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantErr: true,
},
{
name: "attestation.slot < current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE",
args: args{
attSlot: 100 - params.BeaconNetworkConfig().AttestationPropagationSlotRange - 1,
genesisTime: roughtime.Now().Add(-100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantErr: true,
},
{
name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE",
args: args{
attSlot: 100 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
genesisTime: roughtime.Now().Add(-100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantErr: false,
},
{
name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE, received 200ms late",
args: args{
attSlot: 100 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
genesisTime: roughtime.Now().Add(
-100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := helpers.ValidateAttestationTime(tt.args.attSlot, tt.args.genesisTime); (err != nil) != tt.wantErr {
t.Errorf("validateAggregateAttTime() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

View File

@@ -30,7 +30,7 @@ func BlockRootAtSlot(state *stateTrie.BeaconState, slot uint64) ([]byte, error)
// """
// Return the block root at the start of a recent ``epoch``.
// """
// return get_block_root_at_slot(state, compute_start_slot_of_epoch(epoch))
// return get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch))
func BlockRoot(state *stateTrie.BeaconState, epoch uint64) ([]byte, error) {
return BlockRootAtSlot(state, StartSlot(epoch))
}

View File

@@ -59,12 +59,11 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
// Return the beacon committee at ``slot`` for ``index``.
// """
// epoch = compute_epoch_at_slot(slot)
// committees_per_slot = get_committee_count_at_slot(state, slot)
// epoch_offset = index + (slot % SLOTS_PER_EPOCH) * committees_per_slot
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// return compute_committee(
// indices=get_active_validator_indices(state, epoch),
// seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
// index=epoch_offset,
// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index,
// count=committees_per_slot * SLOTS_PER_EPOCH,
// )
func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot uint64, committeeIndex uint64) ([]uint64, error) {
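As a worked example of the updated index arithmetic (values assumed for illustration): with SLOTS_PER_EPOCH = 32 and committees_per_slot = 4, an attestation at slot offset 5 with committee index 2 selects compute_committee index 5 * 4 + 2 = 22 out of count = 4 * 32 = 128 committees for the epoch.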
@@ -134,40 +133,20 @@ func ComputeCommittee(
start := sliceutil.SplitOffset(validatorCount, count, index)
end := sliceutil.SplitOffset(validatorCount, count, index+1)
if start > validatorCount || end > validatorCount {
return nil, errors.New("index out of range")
}
// Save the shuffled indices in cache, this is only needed once per epoch or once per new committee index.
shuffledIndices := make([]uint64, len(indices))
copy(shuffledIndices, indices)
// UnshuffleList is used here as it is an optimized implementation created
// for fast computation of committees.
// Reference implementation: https://github.com/protolambda/eth2-shuffle
shuffledList, err := UnshuffleList(shuffledIndices, seed)
return shuffledList[start:end], err
}
// AttestingIndices returns the attesting participants indices from the attestation data. The
// committee is provided as an argument rather than a direct implementation from the spec definition.
// Having the committee as an argument allows for re-use of beacon committees when possible.
//
// Spec pseudocode definition:
// def get_attesting_indices(state: BeaconState,
// data: AttestationData,
// bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> Set[ValidatorIndex]:
// """
// Return the set of attesting indices corresponding to ``data`` and ``bits``.
// """
// committee = get_beacon_committee(state, data.slot, data.index)
// return set(index for i, index in enumerate(committee) if bits[i])
func AttestingIndices(bf bitfield.Bitfield, committee []uint64) ([]uint64, error) {
indices := make([]uint64, 0, len(committee))
indicesSet := make(map[uint64]bool, len(committee))
for i, idx := range committee {
if !indicesSet[idx] {
if bf.BitAt(uint64(i)) {
indices = append(indices, idx)
}
}
indicesSet[idx] = true
}
return indices, nil
}
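A minimal usage sketch (committee contents invented for illustration): bit i of the aggregation bitfield selects committee[i], and repeated committee entries are only counted once:
committee := []uint64{40, 11, 33} // hypothetical committee
bits := bitfield.Bitlist{0x0D}    // 0b1101: length 3, bits 0 and 2 set
indices, err := helpers.AttestingIndices(bits, committee)
// indices == []uint64{40, 33}, err == nil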
// CommitteeAssignmentContainer represents a committee, index, and attester slot for a given epoch.
type CommitteeAssignmentContainer struct {
Committee []uint64
@@ -199,8 +178,12 @@ func CommitteeAssignments(
// Some validators may need to propose multiple times per epoch, so
// we use a map of proposer idx -> []slot to keep track of this possibility.
startSlot := StartSlot(epoch)
proposerIndexToSlots := make(map[uint64][]uint64)
proposerIndexToSlots := make(map[uint64][]uint64, params.BeaconConfig().SlotsPerEpoch)
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
}
if err := state.SetSlot(slot); err != nil {
return nil, nil, err
}
@@ -218,7 +201,7 @@ func CommitteeAssignments(
// Each slot in an epoch has a different set of committees. This value is derived from the
// active validator set, which does not change.
numCommitteesPerSlot := SlotCommitteeCount(uint64(len(activeValidatorIndices)))
validatorIndexToCommittee := make(map[uint64]*CommitteeAssignmentContainer)
validatorIndexToCommittee := make(map[uint64]*CommitteeAssignmentContainer, numCommitteesPerSlot*params.BeaconConfig().SlotsPerEpoch)
// Compute all committees for all slots.
for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -291,6 +274,7 @@ func ShuffledIndices(state *stateTrie.BeaconState, epoch uint64) ([]uint64, erro
return nil, err
}
// UnshuffleList is used as an optimized implementation for raw speed.
return UnshuffleList(indices, seed)
}
@@ -341,11 +325,12 @@ func UpdateProposerIndicesInCache(state *stateTrie.BeaconState, epoch uint64) er
if err != nil {
return nil
}
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
proposerIndices, err := precomputeProposerIndices(state, indices)
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices)
// The committee cache uses attester domain seed as key.
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}

View File

@@ -85,98 +85,13 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
}
}
func TestAttestationParticipants_NoCommitteeCache(t *testing.T) {
committeeSize := uint64(16)
validators := make([]*ethpb.Validator, committeeSize*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Slot: params.BeaconConfig().SlotsPerEpoch,
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
attestationData := &ethpb.AttestationData{}
tests := []struct {
attestationSlot uint64
bitfield bitfield.Bitlist
wanted []uint64
}{
{
attestationSlot: 3,
bitfield: bitfield.Bitlist{0x07},
wanted: []uint64{344, 221},
},
{
attestationSlot: 2,
bitfield: bitfield.Bitlist{0x05},
wanted: []uint64{207},
},
{
attestationSlot: 11,
bitfield: bitfield.Bitlist{0x07},
wanted: []uint64{409, 213},
},
}
for _, tt := range tests {
attestationData.Target = &ethpb.Checkpoint{Epoch: 0}
attestationData.Slot = tt.attestationSlot
committee, err := BeaconCommitteeFromState(state, tt.attestationSlot, 0 /* committee index */)
if err != nil {
t.Error(err)
}
result, err := AttestingIndices(tt.bitfield, committee)
if err != nil {
t.Errorf("Failed to get attestation participants: %v", err)
}
if !reflect.DeepEqual(tt.wanted, result) {
t.Errorf(
"Result indices was an unexpected value. Wanted %d, got %d",
tt.wanted,
result,
)
}
}
}
func TestAttestationParticipants_EmptyBitfield(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
attestationData := &ethpb.AttestationData{Target: &ethpb.Checkpoint{}}
committee, err := BeaconCommitteeFromState(state, attestationData.Slot, attestationData.CommitteeIndex)
if err != nil {
t.Fatal(err)
}
indices, err := AttestingIndices(bitfield.NewBitlist(128), committee)
if err != nil {
t.Fatal(err)
}
if len(indices) != 0 {
t.Errorf("Attesting indices are non-zero despite an empty bitfield being provided; Size %d", len(indices))
func TestComputeCommittee_RegressionTest(t *testing.T) {
indices := []uint64{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
index := uint64(215)
count := uint64(32)
if _, err := ComputeCommittee(indices, seed, index, count); err == nil {
t.Fatal("expected an error")
}
}
@@ -212,6 +127,40 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
}
}
func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
var activationEpoch uint64
if i >= len(validators)/2 {
activationEpoch = 3
}
validators[i] = &ethpb.Validator{
ActivationEpoch: activationEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
if err != nil {
t.Fatal(err)
}
ClearCache()
_, proposerIndexToSlots, err := CommitteeAssignments(state, 0)
if err != nil {
t.Fatalf("failed to determine CommitteeAssignments: %v", err)
}
for _, slots := range proposerIndexToSlots {
for _, s := range slots {
if s == 0 {
t.Error("No proposer should be assigned to slot 0")
}
}
}
}
func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -501,14 +450,14 @@ func TestShuffledIndices_ShuffleRightLength(t *testing.T) {
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
ClearCache()
validatorCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]uint64, validatorCount)
for i := 0; i < validatorCount; i++ {
for i := uint64(0); i < validatorCount; i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
indices[i] = uint64(i)
indices[i] = i
}
state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Validators: validators,
@@ -533,7 +482,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if len(indices) != int(params.BeaconConfig().TargetCommitteeSize) {
if uint64(len(indices)) != params.BeaconConfig().TargetCommitteeSize {
t.Errorf("Did not save correct indices lengths, got %d wanted %d", len(indices), params.BeaconConfig().TargetCommitteeSize)
}
}

View File

@@ -45,12 +45,5 @@ func Seed(state *stateTrie.BeaconState, epoch uint64, domain [bls.DomainByteLeng
// """
// return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR]
func RandaoMix(state *stateTrie.BeaconState, epoch uint64) ([]byte, error) {
currMix, err := state.RandaoMixAtIndex(epoch % params.BeaconConfig().EpochsPerHistoricalVector)
if err != nil {
return nil, err
}
newMixLength := len(currMix)
newMix := make([]byte, newMixLength)
copy(newMix, currMix)
return newMix, nil
return state.RandaoMixAtIndex(epoch % params.BeaconConfig().EpochsPerHistoricalVector)
}

View File

@@ -26,8 +26,8 @@ func TotalBalance(state *stateTrie.BeaconState, indices []uint64) uint64 {
total += val.EffectiveBalance()
}
// Return EFFECTIVE_BALANCE_INCREMENT to avoid divisions by zero.
if total == 0 {
// EFFECTIVE_BALANCE_INCREMENT is the lower bound for total balance.
if total < params.BeaconConfig().EffectiveBalanceIncrement {
return params.BeaconConfig().EffectiveBalanceIncrement
}
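To make the floor concrete (spec value assumed here): EFFECTIVE_BALANCE_INCREMENT is 1,000,000,000 Gwei (1 ETH), so an empty or all-zero-balance index set now yields 1,000,000,000 rather than 0, and downstream reward and penalty formulas that divide by the total balance can never divide by zero.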

View File

@@ -53,18 +53,18 @@ func TestShuffleList_OK(t *testing.T) {
func TestSplitIndices_OK(t *testing.T) {
var l []uint64
validators := 64000
for i := 0; i < validators; i++ {
l = append(l, uint64(i))
numValidators := uint64(64000)
for i := uint64(0); i < numValidators; i++ {
l = append(l, i)
}
split := SplitIndices(l, params.BeaconConfig().SlotsPerEpoch)
if len(split) != int(params.BeaconConfig().SlotsPerEpoch) {
if uint64(len(split)) != params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Split list failed due to incorrect length, wanted:%v, got:%v", params.BeaconConfig().SlotsPerEpoch, len(split))
}
for _, s := range split {
if len(s) != validators/int(params.BeaconConfig().SlotsPerEpoch) {
t.Errorf("Split list failed due to incorrect length, wanted:%v, got:%v", validators/int(params.BeaconConfig().SlotsPerEpoch), len(s))
if uint64(len(s)) != numValidators/params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Split list failed due to incorrect length, wanted:%v, got:%v", numValidators/params.BeaconConfig().SlotsPerEpoch, len(s))
}
}
}

Some files were not shown because too many files have changed in this diff.