Compare commits

...

203 Commits

Author SHA1 Message Date
Nishant Das
4bc7cb6959 exit better (#7697) 2020-11-02 13:25:44 +08:00
Victor Farazdagi
3584bcba8e Fix param naming in BestNonFinalized (#7693) 2020-11-01 00:03:44 +00:00
Victor Farazdagi
926d3b9b34 Init-sync: more tests + minor refactoring (#7692)
* cherry-pick commits

* re-arrange calls
2020-10-31 21:33:57 +00:00
terence tsao
817c16a2f4 Use a short cut in migration step (#7686)
* Don't generate state if it's already in DB

* Use proper missing root

* Update logic for clarity
2020-10-31 19:01:05 +00:00
terence tsao
92b6e0b6af Add and use HasStateInCache (#7691) 2020-10-31 11:38:01 -07:00
Raul Jordan
b3155a04f5 Revert "Revert "Add In Progress Checker For Checkpoint Cache"" (#7690)
This reverts commit 46c04b98d9.
2020-10-31 15:59:50 +00:00
Nishant Das
df762bbfee cleaner logging (#7689) 2020-10-31 19:10:08 +08:00
terence tsao
f79b168ab2 Update to correct ordering (#7688) 2020-10-31 10:08:22 +08:00
Nishant Das
211d9bc0b9 Update BLST And Herumi (#7632)
* fix build from source

* clean up

* update again

* change everything

* workaround for now

* fix versioning

* all passing now

* fix build issues

* clean up

* revert use of MulVerify

* gaz

* stub

* Apply suggestions from code review

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* fix all

* fix test

* todo

* fix stub

* revert back

* make deep source happy

* Update shared/bls/herumi/public_key.go

* Update shared/bls/blst/signature.go

* Update shared/bls/blst/signature_test.go

* imports

* move iface to common, export errors

* rm iface build

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-10-30 19:06:33 +00:00
terence tsao
386bfdd6eb Remove deprecated headstate method in DB (#7680)
* Remove head state in DB

* Clean up tests

* Fix pow tests

* Add stateGen to pow service
2020-10-30 15:11:08 +00:00
Raul Jordan
ddc8dc36f8 remove use of recover (#7683)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2020-10-30 14:40:26 +08:00
Preston Van Loon
99f15943a8 Fuzz: sigp/beaconfuzz#78 (#7684)
* Add failing test to verify https://github.com/sigp/beacon-fuzz/issues/78

* revert beacon-chain/core/blocks/spectest/attestation_test.go

* Describe bug in comments, fix bug

* 1
2020-10-30 02:06:53 +00:00
Raul Jordan
581bed2017 Load in JSON Validators for Genesis State Generation (#7643)
* load in genesis state from JSON validators

* completed feature with tests

* bazel change

* fix docker image

* deep source
2020-10-29 22:37:16 +00:00
Raul Jordan
2d4bfbbe31 Web UI Security Improvements (#7676)
* remove delete accounts

* check if user has not yet signed up
2020-10-29 16:38:47 -05:00
terence tsao
fb2dfec1f4 Use state by root to get finalized state (#7677) 2020-10-29 14:04:06 -05:00
Nishant Das
2e4dee5aeb Stategen Bug Fixes (#7674)
* bug fixes

* fix

* terence's review

* Make comments more explicit

* Update variable names

* Update tests

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-10-29 16:14:57 +00:00
Nishant Das
46c04b98d9 Revert "Add In Progress Checker For Checkpoint Cache" (#7672)
This reverts commit 37bf6617c0.

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-29 14:35:30 +00:00
terence tsao
301499d134 Fill in blocks to fork choice store during init (#7665)
* Fill in blocks to fork choice store during init

* Use format option

* Use format option

* Use correct head block. Thanks Nishant

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-29 05:27:30 +00:00
Victor Farazdagi
4fc0a50569 Turn init-sync FSM logs to trace level (#7670)
* trace

* go fmt

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-29 04:55:07 +00:00
Preston Van Loon
c1c0b53c25 validateCommitteeIndexBeaconAttestation: Reorder boolean logic to reduce db calls (#7671)
* reorder boolean logic to shortcut

* same issue, in another place

* refactor for the greater good
2020-10-29 04:14:16 +00:00
Nishant Das
37bf6617c0 Add In Progress Checker For Checkpoint Cache (#7659)
* add inprogress checker

* fix test

* fix

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-29 01:21:49 +00:00
Preston Van Loon
149d3b84fa Check attestation target checkpoint epoch matches attestation slot (#7667)
* Check attestation target checkpoint is within current or previous epoch

* reject bad att where slot does not match target

* Add test, reduce redundant check
2020-10-29 00:52:18 +00:00
Preston Van Loon
5092093389 Revert "Remove redundant checks in processAttestation" (#7666)
This reverts commit 3b34954e75.
2020-10-28 22:46:26 +00:00
terence tsao
3b34954e75 Remove continue checks (#7663) 2020-10-28 13:04:28 -07:00
Preston Van Loon
ec5e59e212 remove unused and untested method (#7662)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-28 12:03:30 -07:00
Shay Zluf
7d1a1643ee Add validator datadir use (#7660)
* fix empty db prompt

* wording

* fix merge

* prompt only on datadir use

* remove move datadir function

* remove flag

* remove prompt text

* remove added prompt function

* gaz
2020-10-28 12:09:44 -05:00
Preston Van Loon
5f80754013 Revert "Remove Top Jaeger Span Offenders" (#7657)
This reverts commit ec8eab21ae.

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-28 06:07:14 +00:00
Raul Jordan
d9e4084d6d Remove Lock Contention in getAttPreState (#7656)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-28 05:42:54 +00:00
Raul Jordan
ec8eab21ae Remove Top Jaeger Span Offenders (#7655) 2020-10-28 05:14:54 +00:00
terence tsao
0cbd8bc03d Use root + slot for skip slot cache key (#7654)
* Update skip slot cache's key

* Add tests

* Refactor into its own function

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-28 03:09:05 +00:00
Victor Farazdagi
e57770bd0a Fix slasher ReadOnlyDatabase.BlockHeaders parameter names (#7652)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-28 02:36:02 +00:00
Raul Jordan
8c2fff3a75 Allow for 25th Word Passphrases in Mnemonics (#7645)
* advanced functionality, enable 25th word mnemonic passphrase

* 25th word passphrase

* add test

* added test to ensure differences when using the mnemonic 25th word

* better message

* passing tests

* fix up logic
2020-10-27 20:51:29 +00:00
Nishant Das
4f5726b3af Initialize State Correctly In Powchain (#7648)
* fix bug

* Update beacon-chain/powchain/service.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* return error

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-27 17:50:46 +00:00
Preston Van Loon
424488bf8a Deprecate db.HeadState (#7653)
* Deprecate db.HeadState

* Deprecate db.HeadState
2020-10-27 16:25:52 +00:00
Radosław Kapka
21c5ba8ed8 display that all validators are exited (#7651) 2020-10-27 09:29:58 -05:00
Victor Farazdagi
dbbbc7586f Remove redundant attribute in validator test (#7649)
* remove redundant attribute in validator test

* remove other attribs
2020-10-27 12:15:35 +00:00
Preston Van Loon
fcbb168c76 Code health: review map usage (#7635)
* remove unused cache states map

* correct typo

* Remove unused array

* Add lock around deposits cache chainstart pubkeys

* Copy attestation before grabbing lock. This may reduce lock contention time as other callers wanting the lock do not need to wait as long for the lock to become available.

* Copy attestation before grabbing lock. This may reduce lock contention time as other callers wanting the lock do not need to wait as long for the lock to become available.

* Set capacity to 1 since it is known that the slice will be 1 after insertion

* require validatorSlashingPreconditionCheck caller to hold lock

* Add lock for voluntary exits pool HasBeenIncluded

* Require rate limiter retrieveCollector to hold lock

* Add lock requirement assertions in sync

* Remove unused struct

* remove ClearCachedStates API

* field initSyncState is unused

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-26 21:17:07 +00:00
Nishant Das
f1bce1001d Restrict Proposer Lookup to the Current Epoch (#7542)
* checkpoint

* add test

* Update beacon-chain/core/helpers/committee_test.go

* preston's review

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-26 13:40:38 -07:00
terence tsao
ec77196197 Remove checkpoint info cache and usages (#7642)
* Remove checkpoint info cache and usages

* Gazelle
2020-10-26 14:09:19 -05:00
Raul Jordan
a468a12ef0 Allow Specifying Output Directory When Creating DB Backups (#7630)
* move flags

* backup db output dir flag

* fix build

* fix up broken backup test

* Radek's feedback

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-26 18:20:54 +00:00
Radosław Kapka
b0dff891fc Invert enable-eth1data-majority-vote (#7362)
* make flag opt-out

* fix flags

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-26 17:36:47 +00:00
Raul Jordan
687251fedc keep asking for account pass if fails decrypt (#7631) 2020-10-26 17:56:57 +01:00
Alon Muroch
a04b7c2e4f Slasher highest source target (#7604)
* WIP - slasher highest attestation start

* fixed previous

* highest source and target

* highest attestation cache

* cleanup

* persist + fixes

* PR fixes and cleanup

* slashing proto

* highest att. api

* cleanup + tests

* increased highest att. cache to 300K

* removed highest att. api (for a separate PR)

* fixed linting

* bazel build fix

* highest att. kv test

* slasher highest att. test + purge + fix on eviction persist performance

* cleanup + linting

* linting + test fixes

* bazel gazelle run

* PR fixes

* run goimports

* go mod tidy

* ineffectual assignment fix

* run gazelle

* bazel gazelle run

* test fixes

* linter fix

* Apply suggestions from code review

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* goimports run

* cache tests

* A bunch of small fixes

* gazelle fix + gofmt

* merge fixes

* kv ordering fix

* small typos and text fixes

* capital letter fix

Co-authored-by: Shay Zluf <thezluf@gmail.com>
2020-10-26 14:15:42 +02:00
Victor Farazdagi
e6d688f6d5 Init-sync: re-arrange tests (#7641)
* move tests

* formatting

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-26 08:54:06 +00:00
Victor Farazdagi
8a3b75e9e3 Update init-sync FSM (#7640)
* update fsm

* more tests
2020-10-26 05:26:35 +00:00
terence tsao
a73c539fab Forkchoice: update read only lock to regular lock (#7633) 2020-10-24 16:35:18 +00:00
Nishant Das
92efe64b8a Fix Gossip Message ID (#7624)
* fix snappy errors

* gaz

* Update beacon-chain/p2p/pubsub.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-24 03:38:05 +00:00
Raul Jordan
b1c047b9ee Prevent Panics in Field Trie Helpers (#7613)
* add tests to prevent panics

* if > 0

* fix typ
2020-10-23 21:41:45 +00:00
Nishant Das
ff50ea2e0d Better Logging When Using Head Sync (#7626)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-23 21:00:53 +00:00
terence tsao
ebb3fa71f1 VerifyFinalizedConsistency - return early when canonical (#7628)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-23 19:02:20 +00:00
Preston Van Loon
9ea69a070e Fix attestation broadcast recovery metric (#7629) 2020-10-23 18:41:17 +00:00
Raul Jordan
c59edb3358 Revert "Use validator protection datadir" (#7627)
This reverts commit ab76bdad15.
2020-10-23 17:19:38 +00:00
Radosław Kapka
7e2112b4ba Fix panic issues on beacon node shutdown (#7625)
* allow service cleanup on node shutdown

* revert error to fatal

* remove unused struct field
2020-10-23 12:43:48 +00:00
bidlocode
cdbbf66027 Fix pregenesis countdown msg wording (#7477)
* fix wording

* fix gofmt

* fix msg

* gofmt

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2020-10-23 05:21:52 +00:00
Victor Farazdagi
e5e51e66e1 Handle voting ties in BestFinalized() (#7622) 2020-10-23 04:43:35 +00:00
Nishant Das
6eb022ffa1 Clean Up Block Retrieval Methods (#7593)
* fix and add tests

* return for 0

* add test case

* fix

* fix fail

* add comment

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-23 04:23:15 +00:00
Nishant Das
e776eb5409 Fix Powchain Error In Startup (#7621)
* fix error

* add test case
2020-10-23 03:57:19 +00:00
Victor Farazdagi
6a2bb65fe2 Fix BestFinalized method (#7619)
* turn tests to use testing vector

* add regression test case

* complete tests
2020-10-23 02:49:42 +00:00
Preston Van Loon
ecc25d2b8c Release and remove feature flag --enable-att-broadcast-discovery-attempts (#7610)
* Release and remove feature flag --enable-att-broadcast-discovery-attempts

* fix test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-23 01:53:50 +00:00
terence tsao
e07a12e6b7 Move IsCanonical implementation to forkchoice package (#7602)
* Move IsCanonical implementation to forkchoice pkg

* Remove debug print log

* Add tests

* Fixed current tests

* Fixed a test

* Fixed a test

* Add tests for cov
2020-10-23 01:32:13 +00:00
terence tsao
840ffc84ac Save state to DB during long non-finality (#7597)
* Starting saving state during hot

* Add a log

* Add helpers to turn on/off mode

* Add locks

* Add missing return

* Clean up

* Add logic to migration to handle db roots

* Add tests for on and off

* Add more tests

* Add test for migrate

* @prestonvanloon's feedback

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-23 00:35:30 +00:00
Raul Jordan
ca081e8639 Fatal If No TLS Found in RPC Service (#7614)
* fatal if no tls found rpc

* tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 23:58:53 +00:00
Raul Jordan
e54ac48f9d Deep Source Security Issues (#7617)
* deep source issues

* fix broken build

* fix beacon chain build

* gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-22 23:31:52 +00:00
Raul Jordan
075f1458b4 Do Not Recommend --ssl-no-revoke When Downloading Prysm Scripts (#7618)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 22:25:22 +00:00
Raul Jordan
be6481e178 Amend Cross Compile README (#7615)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 21:41:46 +00:00
Shay Zluf
ab76bdad15 Use validator protection datadir (#7355)
* Add validator protection db flag

* fix nil handling

* reuse datadir

* add datadir default config

* Add handling for moving account dir datafile to new set dir

* naming conditionals

* add tests

* fix test

* fix logic to default to wallet dir

* raul feedback

* nishant feedback

* gaz

* revert site_data changes

* fix formatting

* fix formatting

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-22 21:05:08 +00:00
Raul Jordan
e7723c4d1f Remove Deprecated Participation Metrics (#7616)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 20:14:58 +00:00
Raul Jordan
a688b9e030 Miscellaneous Docs Improvements (#7606)
* misc docs fixes

* revert readme

* fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 19:45:31 +00:00
Raul Jordan
ff15621fe1 Fix Nil Pointer in RPC List Peers (#7612)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 18:48:58 +00:00
Raul Jordan
4a78071e41 Explicit Password Requirements (#7607)
* explicit password requirements

* explicit reqs

* explicit reqs

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 18:02:30 +00:00
Raul Jordan
113b2cd6cf Clarify No Wallet Found Error Messages (#7609)
* clarify no wallet found error messages

* extract error

* fix tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 17:31:03 +00:00
Raul Jordan
13af8a7a37 Remove Unused Base Parameter in SubscribeWithBase (#7608)
* remove base in sub with base param

* refactor subscribeWithBase signature
2020-10-22 16:56:24 +00:00
Nishant Das
7131cd991c Ignore PreGenesis Messages Instead Of Rejecting Them (#7550)
* ignorePreGenesisMessages

* fix test + handelaar's review

* fix issues

* change back

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 15:43:35 +00:00
Radosław Kapka
17a08a75ea Move voluntary exit logging to accounts_exit (#7603)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-22 15:01:11 +00:00
Nishant Das
ddbece5988 Fix Range Request Responses (#7531)
* checkpoint

* fix tests

* fix all tests

* imports

* gazelle

* reviews

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-22 09:33:01 -05:00
bidlocode
ff658ba641 Fix w3endpoint file relative path (#7557)
* add expandpath

* fix error description

* fix bazel & deepsource

* fix

* fix gofmt

* fix refactor

* add tests

* gofmt

* fix

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-22 05:53:03 +00:00
Victor Farazdagi
483f7f8177 Fix InitWithReset - rely on previously updated flags (#7598)
* Fix initWithReset to use previously set configuration

* further cleanup

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 05:20:56 +00:00
Preston Van Loon
b4c1c1db9b Add lighthouse p2p Good Bye codes (#7601)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-22 00:35:10 +00:00
Preston Van Loon
70d923cf85 Change log level from panicf to errorf so that all services can be shutdown (#7600) 2020-10-21 23:41:01 +00:00
Raul Jordan
e4e8dd4838 Fix Web UI Integration With Prysm Beta (#7591)
* fix change password endpoint

* attempt same port as gateway

* update site to latest release

* final fixes

* rem gorilla mux

* gaz fix

* gaz fix

* fmt

* fix build

* fix flags
2020-10-21 17:32:00 -05:00
Daniel Pittman
553492e6e9 Update TERMS_OF_SERVICE.md (#7590)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-21 20:34:40 +00:00
terence tsao
9554ad3221 Exit early if there's no validator slashing (#7587)
* Add a logic to exit early if there's no slashing

* Apply the same idea to final update

* Add conditions to satisfy fuzz

* Revert final updates changes

* Add comments

* Add a new test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-21 19:12:39 +00:00
Raul Jordan
d2f7240255 Context Cancelation Handling for Hot States (#7596)
* attempt deadline for state info

* cancel check
2020-10-21 17:06:48 +00:00
terence tsao
42b7a37281 Add span for AncestorRoot (#7595)
* Add spans for AncestorRoot

* Add span for ancestor by DB

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-21 16:26:19 +00:00
Potuz
9a6a70e804 Add journald to log-format help (#7572)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-21 15:47:31 +00:00
bidlocode
9bd3cede23 Checking ToS cli flag in subcommands (#7588)
* check tos in subcmds

* fix
2020-10-21 10:13:46 -05:00
terence tsao
544dac298a Check ListValidatorBalances response length (#7583)
* Check length

* Add regression test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-20 20:45:38 +00:00
Raul Jordan
78ca8c9265 Add Accept Terms of Use Flags to Accounts Commands (#7584) 2020-10-20 20:12:49 +00:00
terence tsao
a5ce6db38e Update participation metrics (#7582)
* Update participation metrics

* Add unhappy tests
2020-10-20 14:14:24 -05:00
Victor Farazdagi
0b53a89d00 More verbose output on init-sync (#7578)
* more verbose output on init-sync

* log slot

* no error stack

* remove redundant data

* less verbose
2020-10-20 14:59:38 +00:00
Raul Jordan
fdef581e02 Invalidate JWT in Backend Logout (#7574)
* logout by invalidating in backend

* gaz

* regen pb.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-20 11:41:19 +00:00
Shay Zluf
7c5ee0a806 prune slasher attestations (#7335)
* prune slasher attestations

* remove unused code

* gaz

* remove unused methods from interface

* fix benchmark

* fix benchmark

* go.mod tidy

* preston feedback

* add test

* goimports

* fix test

* rm

* working version

* revert changes

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-20 14:09:01 +03:00
Nishant Das
81b553a00a Fix Overflow Bugs Introduced (#7579) 2020-10-20 06:18:03 +00:00
Raul Jordan
816eb94adf Validate JWT Signing Type in RPC (#7576)
* validate jwt signing type

* lint

* gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-20 05:37:12 +00:00
Radosław Kapka
ec0af98a9e Cache ETH1 headers on startup for eth1data voting (#7541)
* initial commit

* branch cleanup

* gzl

* underscore for unused parameters

* caching implemented

* BlockExistsWithCache - tests

* add missing method to mock POWChain

* add missing method to faulty mock POWChain

* fix failing test

* add underflow checks

* change warning to error

* use helper to calculate end block

Co-authored-by: nisdas <nishdas93@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-20 05:10:27 +00:00
Raul Jordan
a39db494eb Remove Accounts Backup Validator RPC Functionality (#7575)
* remove accounts backup via RPC

* fmt

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-20 04:33:22 +00:00
Nishant Das
05678b6724 Update to Discovery V5.1 (#7302)
* discoveryV5.1

* add seed node

* fix up

* checkpoint

* Add workaround for discv5.1 signature curve. Add discv5.1 catdog ENR

* remove dead code

* Add another catdog

* Fix bootnode

* fix docker img

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-20 04:05:48 +00:00
Raul Jordan
bec91d348e Use Password Specific to Web UI Instead of Wallet Password (#7569)
* use password specific to web UI

* fix up a few more tests

* tests passing

* gaz

* fix fmt
2020-10-20 02:26:31 +00:00
Raul Jordan
1bc86d2658 Remove Account Creation Privilege For Imported Keymanager (#7555)
* rem create

* remove create account privilege for nonhd wallets

* fix bazel

* radek feedback

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-20 01:22:36 +00:00
terence tsao
9db6c0042b Add LMD FFG consistency check to aggregated attestation (#7573)
* Add lmd and ffg check

* Update tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-19 23:19:44 +00:00
terence tsao
3d70d757a1 Add checking for finalized consistency (#7568)
* Add checking for finalized consistency

* Add tests

* @prestonvanloon's feedback

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-19 21:52:18 +00:00
terence tsao
e25cd08049 Rename unsafe-sync to head-sync (#7570)
* Rename unsafe-sync to head-sync

* Update comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-19 20:03:31 +00:00
Radosław Kapka
dccf0992e5 Verify eth1data vote in E2E (#7551)
* add majority vote to e2e

* extract policies to a separate package

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-19 19:35:34 +00:00
Raul Jordan
f6ed3f141a Remove Default Wallet Endpoint (#7571)
* remove default endpoint

* remove from required auth endpoints

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-19 19:05:51 +00:00
Radosław Kapka
88b2a4c905 Invert enable-pruning-deposit-proofs (#7565)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-19 18:22:44 +00:00
Preston Van Loon
ab40a112c5 Print the underlying error when attestation fails to get pre state (#7567)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-19 17:03:27 +00:00
Nishant Das
329fbff814 Use StateRoot as Key for Cache (#7540)
* checkpoint

* review

* comment

* fix bad bug

* fix up
2020-10-19 09:13:47 -07:00
Nishant Das
f31d40cf34 Fix Unsafe Sync Flag (#7563)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-18 22:28:44 +00:00
Nishant Das
49909ce351 Remove checkpoint cache flag from --dev (#7560)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-10-18 21:56:55 +00:00
bidlocode
53ab1dff6d Make ToS non-interactive err message more descriptive (#7562)
* add descriptive err msg

* Update shared/tos/tos.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-18 17:08:57 +00:00
Nishant Das
b502876e98 Fix Mismatched BlockRoots In State Replay (#7559)
* fix mismatched roots

* alternate approach

* preston's review

* comment
2020-10-18 09:39:27 -07:00
Radosław Kapka
f474c4b1c5 Don't show stack trace for certain voluntary exit failure scenarios (#7554)
* handle some scenarios more gracefully

* allow accounts access to core/blocks package
2020-10-17 10:08:58 +00:00
bidlocode
78fd3b8a87 Fix import cannot find the file on windows (#7556) 2020-10-17 06:00:13 +00:00
Raul Jordan
7e44d1eec7 Rename Direct Keymanager to Imported (#7549)
* rename direct to imported

* km

* fix more refs

* rename all instances appropriately

* rename instances of nonhd
2020-10-16 13:45:14 -05:00
bidlocode
9a0d579607 Fix voluntary exit --tls-cert flag (#7547)
* fix getting flag values from ctx

* fix

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2020-10-16 14:27:01 +00:00
Preston Van Loon
07e7e030d9 Update PubSub and include topic filter (#7496)
* Update pubsub and fix topicIDs

* WIP filter

* Add suggested code from @bidlocode

* add tests and fix bugs

* more tests

* Wait until state initialized to accept pubsub filtering

* rename for clarity and clarify comment

* fix test builds

* Autofix issues in 2 files

Resolved issues in the following files via DeepSource Autofix:
1. beacon-chain/p2p/pubsub_filter.go
2. beacon-chain/p2p/pubsub_filter_test.go

* @nisdas pr feedback

* pr feedback and fuzz fix

* Update beacon-chain/p2p/pubsub_filter.go

* Must have protocol suffix

* Must have protocol suffix

* gofmt

* rm test, fix panic

* Fix tests

* Add isInitialized check

* Add a few more tests for better coverage

* cache fork digest, make pubsub filter part of the p2p service

* rename service

* gofmt

* Add comment

* fix

Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-16 07:05:40 +00:00
Raul Jordan
a81c863ddb Rename Accounts-V2 to Accounts (#7545)
* rename accounts v2

* rename keymanager and fix imports

* rename accounts-v2 instances

* imports

* build

* build fix

* deepsource

* fix up broken aliases

* imports

* gaz

* Update validator/accounts/accounts_import_test.go

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>

* fmt

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
2020-10-15 22:31:52 +00:00
bidlocode
7aaefd123e Accept ToS: fix (#7539)
* fix create datadir on fresh run

* add test

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-15 21:30:38 +00:00
Raul Jordan
7abc1feaf5 Update Ethdo Keystore Deps (#7537)
* include old crypto code

* update gomod

* delete new util

* add versioning for determining derivation strategy

* add versioning

* tidy and gazelle viz

* fix tests

* fix broken

* workspace fix

* fix workspace

* revert

* fix test

* derived test fix
2020-10-15 15:33:10 -05:00
Shay Zluf
acf201428e Use new proposal protection format (#7518)
* Use new proposal protection format

* Update comments

* Split and merge with new db

* fix tests

* fix test

* optimize domain

* fix validation

* fix validation

* check import error

* fix e2e

* fix old propose tests add ign block test

* constant secret key

* static test for signing

* test domain

* fix testsplit

* gaz

* gaz

* tidy

* raul feedback

* fix tests

* tidy

* added info log for the migration

* gaz

* Update validator/client/propose_protect.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>

* nishant feedback

* import fix

* fix

* remove propose protection flag

* fix block sign test

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-15 19:35:31 +00:00
Ivan Martinez
daf0b51361 Eth2 API: Implement block endpoints (#7433)
* Start Beacon API outline

* Rename to v1

* Add impl

* Change to outline

* Add comments

* Remove unneeded items

* Fix linting

* tidy

* Fix visibility

* go.sum

* Fix deps

* Tidy

* Implement blocks API endpoints

* Add check for interface type and fix pointers

* Fix pointer name

* gaz

* Fix comments

* Fix imports

* Fix analysis

* Add more coverage

* Add coverage and fix errors

* Fix head test

* Fix test remove println

* Fix error text and cleanup

* Change tests to TDD

* Add tests for finalized

* Fix att test

* Fix analysis

* Fix go mo d

* Fix proto

* fix go mod

* Extend testing

* Fix tests

* Move migration to package and test block atts

* Fix migration

* Gaz

* Check for block canonical before returning

* Fix text

* Gaz

* Fix tests

* Fix tests

* Fix canonical

* Fix test again

* Fix tests

* Remove unneeded comment

* Plug in RPC service

* Fix err msg
2020-10-15 18:00:49 +00:00
Potuz
1462b7e57e Add show-private-keys flag to accounts-v2 list (#7487)
* Add show-private-keys flag to accounts-v2 list

* fix fmt

* added tests and fixed return to 32 bytes

* deferred a lock and removed unused method

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-15 16:08:52 +00:00
Nishant Das
ec9c6f30bf Remove Local Protection Flag (#7543) 2020-10-15 14:52:45 +00:00
Nishant Das
0b64a335c8 check chainstart (#7494)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-15 22:03:48 +08:00
bidlocode
1caf2ca96f Accept ToS: fix paths for windows (#7535)
* fix paths for windows

* gofmt
2020-10-15 03:58:57 +00:00
bidlocode
5f9ea35b3f Add terms of use acceptance requirement (#7527)
* add accept tos

* fix typos

* add fixes

* check tos after config file loaded

* extend test

* add TERMS_OF_SERVICE.md

* fix

* fix typos

* add accept tos flag to e2e

* add flag to help

* add VerifyTosAcceptedOrPrompt to slasher

* fix gofmt

* fix import

* fix bazel
2020-10-15 02:05:30 +00:00
terence tsao
7076a1ec4a More feature flag deletions (#7533)
* Delete disable state lock and init sync verbose flags

* Delete disable slashing broadcast

* Remove disable wait for sync, noise, eth1 cache, static subnet

* Remove enable broadcast recovery attempt and make it the default

* Remove disable head update on per attestation

* Revert disable att broadcast discovery attempt

* gazelle

* Fixed an anti pattern

* Add enableAttBroadcastDiscoveryAttempts back

* Add back WaitForSync

* Remove extra lines

* Use DisableDynamicCommitteeSubnets path per @prestonvanloon feedback

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-14 23:28:49 +00:00
Raul Jordan
a840fa563d Remove Accounts-V1 (#7532)
* remove accounts-v1

* get all tests to not panic

* all client tests passing

* fix node test

* eliminate old flags

* tidy up
2020-10-14 22:20:20 +00:00
Ivan Martinez
803d7c9bd2 Add functionality to retrieve all pending items from pools (#7530)
* Allow slashing pools to retrieve all items

* Add functionality to exits too to retrieve all exits

* Rename to noLimit

* ndo err

* Fix tests

* Fix test

* Fix test again

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-14 21:08:24 +00:00
Potuz
76300cef09 Recommend non-HD wallets (#7524)
* Recommend non-HD wallets

* Fix ordering

* set non-HD wallet as first option

* fix go fmt

* changed keymanager-types-Kind enum

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-14 19:55:47 +00:00
terence tsao
e5ed2cd141 Remove feature flags within the scope of pkg (#7511)
* Remove feature flags within the scope of pkg

* Remove DisableForkChoice

* Remove e2e usages

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-14 18:34:49 +00:00
Victor Farazdagi
a005c77b3f Update error usage patterns to go1.13+ (#7529)
* rely on errors.Is

* wrap errors
2020-10-14 17:39:52 +00:00
terence tsao
8f04c555fc More feature flags removal (#7526)
* Remove disable domain cache

* Remove don't verify att sig flag

* Remove batch verify and attester copies

* Remove batch verify usage

* Update tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-14 16:18:40 +00:00
Radosław Kapka
0a007384c8 E2E cleanup (#7519)
* e2e cleanup

* export EpochTicker

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-14 15:17:33 +00:00
Nishant Das
022b6667e5 Use Custom SSZ for P2P Types (#7436)
* checkpoint progress

* add roundtrip tests

* change all

* remove error response

* clean up

* Update beacon-chain/sync/error_test.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* gaz

* fix tests

* fmt

* gaz

* change back

* fix again

* clean up

* deep source

* fix all tests

* add gaz

* fix tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-14 07:55:28 +00:00
Ivan Martinez
022b09f2e2 Eth2 API: node endpoints outline (#7522)
* Nodev1 outline

* Gaz

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-14 06:11:11 +00:00
Raul Jordan
2f6f79724f Include Policy Bot Application (#7523)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-14 04:14:38 +00:00
terence tsao
7de161e917 Remove disable ssz cache feature flag (#7513)
* Remove ssz cache flag and usages

* Fix TestBeaconState_ProtoBeaconStateCompatibility

* gazelle

* Add cache flag back for tests

* Revert previous changes

* Revert previous changes

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-14 03:13:59 +00:00
Victor Farazdagi
b9844024b4 Update comments describing init-sync process (#7521)
* updates comments

* fetcher mode from config

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-14 01:44:46 +00:00
terence tsao
5cd6f65a2c Remove skip BLS verify flag (#7516)
* Remove skip BLS verify flag

* Update tests to use correct values

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-14 00:53:17 +00:00
Potuz
88083d1000 Add journald option for logger (#7463)
* Add journald option for logger

Fixes #7353

* fix docker images

* go mod tidy

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-14 00:10:57 +00:00
terence tsao
9e712e4598 Add justified block insertion to forkchoice when missed (#7520)
* Add justified block insertion to fork choice when missed

* Add regression test
2020-10-13 18:26:43 +00:00
Radosław Kapka
b742511193 Numeric conversion cleanup (#7465)
* validator count

* fix build errors

* fix test error

* randao mixes

* active indices

* lastReceivedMerkleIndex

* redundant conversions

* eth1FollowDistance

* refs

* fix compile error

* slasher

* validator

* revert changes to lastReceivedMerkleIndex

* remove todo

* fix references test

* fix committee test

* Revert "validator count"

This reverts commit 19b376e399.

# Conflicts:
#	beacon-chain/rpc/beacon/validators.go

* Revert "fix build errors"

This reverts commit f4acd6e977.

* Revert "fix test error"

This reverts commit 2a5c9eec63.

* Revert "randao mixes"

This reverts commit 2863f9c24d.

* Revert "active indices"

This reverts commit 6e8385f395.

* Revert "refs"

This reverts commit c64a153f67.

* Revert "fix references test"

This reverts commit fe773b55a6.

* Revert "fix committee test"

This reverts commit 7a0931c448.

* fix compilation errors

* Revert "slasher"

This reverts commit 8b34137931.

* trieutil

* some int() simplifications

* fix deepsource issues

* removed redundant fmt.Sprint

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-13 12:43:49 +00:00
terence tsao
aaabec5cb7 Remove current deprecated flags (#7512)
* Remove deprecated flags

* Remove usages in config

* Gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-13 06:38:41 +00:00
terence tsao
e9c23673c5 Remove inital sync don't verify att sig flag (#7517) 2020-10-13 06:08:21 +00:00
Radosław Kapka
06d16a24b9 Reduce deposit memory (#7478)
* remove deposit proof and pending deposits

* wip

* wip

* wip

* clean up logs/comments

* function for pruning deposits and feature flag

* remove log

* add flag to beacon flags

* addressed PR feedback from terence

* add test assertions for errors

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-12 18:10:49 +00:00
Victor Farazdagi
ac1a4a078c Fix reported anti patterns (#7501)
* merge var decl

* single append

* replace bytes.Compare -> bytes.Equal

* remove redundant spritnf

* remove redundant spritnf

* trimprefix

* remove redundant nil check

* remove redundant return

* plain channel or unblock on context closing

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-12 16:12:00 +00:00
Victor Farazdagi
a019a0db4c Combines func params of the same type (#7500)
* combines func params

* update leftovers

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-12 15:43:19 +00:00
terence tsao
db48e12270 Update "Requesting parent block..." to debug level (#7508)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2020-10-12 14:41:59 +00:00
terence tsao
9434d66ad0 Update Zinken deployment block (#7507)
* Simplify comparisons

* Update Zinken deployment block
2020-10-12 13:53:56 +00:00
Shay Zluf
3d0fc8bc64 Import old attestation store (#7466)
* import attestation to new data structure

* add tests

* add failure messages

* added signing root to data

* added signing root to data

* public keys 48 length

* remove redundant loop

* fix proposals

* fix manage dir name

* Omit redundant nil check on slices

* nishant feedback

* add test
2020-10-12 13:43:42 +03:00
Preston Van Loon
7cc32c4dda Various code inspection resolutions (#7438)
* remove unused code

* remove defer use in loop

* Remove unused methods and constants

* gofmt and gaz

* nilness check

* remove unused args

* Add TODO for refactoring subscribeWithBase to remove unused arg. It seems too involved to include in this sweeping PR. https://github.com/prysmaticlabs/prysm/issues/7437

* replace empty slice declaration

* Remove unnecessary type conversions

* remove redundant type declaration

* rename receivers to be consistent

* Remove bootnode query tool. It is now obsolete by discv5

* Remove relay node. It is no longer used or supported

* Revert "Remove relay node. It is no longer used or supported"

This reverts commit 4bd7717334.

* Delete unused test directory

* Delete unsupported gcp startup script

* Delete old k8s script

* build fixes

* fix build

* go mod tidy

* revert slasher/db/kv/block_header.go

* fix build

* remove redundant nil check

* combine func args

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-10-12 08:11:05 +00:00
bidlocode
0214553415 Fix config file flags in subcommands (#7475) 2020-10-11 08:26:59 -07:00
terence tsao
b5a913d862 Simplify comparisons (X-Y) == 0 (#7495) 2020-10-10 16:19:59 +00:00
Nishant Das
43765b5cb0 Register Subscribers After Node Is Synced (#7468)
* wait for synced

* fix again

* add test

* fix all

* fixes deepsource reported issue

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-10-10 16:50:28 +08:00
Victor Farazdagi
4c09e59b3b Remove duplicate imports (#7491)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-10 03:54:31 +00:00
Raul Jordan
551b03d6e6 Resolve Web UI Beta Testing Bugs (#7471)
* more descriptive password validation error

* include change wallet password fixes

* balance and jwt improvements

* allow for different wallet dirs specified on startup

* ensure wallet password is always validated

* fix up prysm tests

* gaz

* test pass

* pass balances tests

* wrap up fixes

* radek feedback

* fix up tests

* cors fix

* add tests for validator status

* pass tests

* fix n

* skip content type test

* package level cache and send over feed

* package level cache for derived

* all tests passing

* gofmt

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-10 02:07:28 +00:00
Potuz
c4e64afd07 Report on duplicate validator keys during imports (#7459)
* Fix #7393

* fix go fmt

* Add a test

* Convert to map of strings

* @terencechain fixes

* Update validator/keymanager/v2/direct/import.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-10-10 01:45:26 +00:00
Victor Farazdagi
d98a6dda8f Verify interface compliance using typed nil instead of dereferencing and conversion (#7489)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-10 00:36:48 +00:00
terence tsao
8aaa5b6ad0 Refactor ancestor lookup (#7472)
* Add ancestor root fall back to DB check

* Add a warning message

* Refactor ancestor lookups into separate functions

* Update error string

* Fix has parent check and add a regression test

* Add a context error test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-10 00:07:56 +00:00
Victor Farazdagi
f92244d497 Remove redundant err checking (#7488)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-09 23:39:23 +00:00
Preston Van Loon
e91165b0b4 Update validator UI to 0.0.1-alpha (#7486)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-09 22:06:23 +00:00
terence tsao
e15a0b08aa Remove duplicated StartSlot (#7484)
* Remove duplicated StartSlot

* Update existing tests

* Gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-09 21:23:49 +00:00
Raul Jordan
a3a77ab5a8 Open Web UI in Browser Automatically (#7481)
* open web

* Update shared/browser/browser_test.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update shared/browser/browser.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* add to depz.bazel

* run gazelle

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-09 19:51:46 +00:00
terence tsao
650ec797da Add missing mainnet spec test (#7482) 2020-10-09 18:58:19 +00:00
Nishant Das
f629c72107 Tighten Up Snappy Framing (#7479)
* fix framing

* fix up conditions

* fix

* clean up

* change back

* simpler

* no need to cast

* Use math.MaxInt64

* gaz, gofmt

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-09 16:50:18 +00:00
Preston Van Loon
98a20766c9 Enable coverage for deepsource (#7441)
* Enable coverage for deepsource

* Add gocovmerge tool

* go mod tidy
2020-10-09 09:15:55 -07:00
Preston Van Loon
1f707842d2 Serve Prysm Web UI from Validator (#7470)
* Prysm web UI basic idea

* Refactor to use shared.Service interface, add sanity test

* register web server

* Determine mimetype

* Use 4242 as port for web

* Allow localhost or 127.0.0.1 for CORS. More tests, commentary

* Add flags, add site_data.go

* ignore site data

* Add sha

* gofmt

* gofmt

* fix script

* Lints

* fix vis

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-10-09 09:58:30 -05:00
dv8silencer
c944f29c7c Fix --clear-db on Windows (#7474)
* fix with text

* fix also for validator and add test

* handle error

* fix another test

* handle error

* gazelle

* fix bug in slasher as well

* gazelle
2020-10-09 09:28:35 +00:00
terence tsao
6b5265d2d4 Update slashing precompute to use config instead of hardcoded value (#7473) 2020-10-08 19:49:20 -07:00
terence tsao
8f64eb622e Optimize IsValidAttestationIndices unique sorted indices check (#7458)
* Optimize unique sorted indices check

* Add err handling for benchmark

* Gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-08 23:20:18 +00:00
Preston Van Loon
4ddacd57c6 fix blst build tag (#7443)
* fix blst build tag

* fix fuzz build

* remove buildkite specific bazelrc line

* remove conflicting kafka gotag in CI

* fix again

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-10-08 18:37:12 +00:00
Nishant Das
a66f434236 Listen to all Network Interfaces for discovery (#7434)
* listen to all ifaces for discovery

* fix test

* fix tests

* fix all tests

* Revert "fix all tests"

This reverts commit a5d6fffd45.

* Revert "fix tests"

This reverts commit b7a0cf5609.

* Revert "fix test"

This reverts commit 57e6218c68.

* fix and use loopback

* do properly

* fix

* listen to all ifaces

* fix bootnode

* make it trace

* make it loopback

* swap out

* change back

* fix e2e

* fix

* Update beacon-chain/p2p/discovery.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-08 15:16:18 +00:00
dv8silencer
796c336a29 Fix wallet check for Windows by addresses differences in error message text (#7461)
* fix

* test

Co-authored-by: dv8silencer <15720668+dv8silencer@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-10-08 14:30:08 +00:00
Nishant Das
48fcb08ebc Eth1 Handling Cleanup (#7467)
* clean up

* remove test
2020-10-08 13:47:16 +00:00
Shay Zluf
1315a15d9d Import proposal protection (#7430)
* Import proposals protection db

* Import old proposal protection data structure to new one

* test empty slots

* Update validator/db/kv/new_proposal_history.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

* Update validator/db/kv/new_proposal_history_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-10-08 11:51:50 +00:00
Radosław Kapka
25ebed9a70 Cache deposits only up to finalized block (#7453)
* cache deposits until finalized block

* take deposit index from state instead of block

* use stateByRoot

* use eth1data deposit count

* use depositCount-1

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-08 10:12:21 +00:00
terence tsao
703907bd99 Move ComputeProposerIndexWithValidators to test (#7460)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-08 09:24:17 +00:00
Preston Van Loon
d4e6ce6998 Fix deepsource findings (#7457)
* remove unused code

* Incomplete condition fix

* club append to single call

* anti-pattern fix 'should use for range instead of for { select {} }'

* use strings.ReplaceAll

* replace `len(parts[0]) == 0` with `parts[0] == ""`

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-08 08:36:47 +00:00
Preston Van Loon
390a589afb Disable BES in CI (#7462)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-08 07:17:35 +00:00
terence tsao
d34156bfe6 Add close to the body for e2e test (#7450)
* Add close to the body for e2e test

* Add error handling to defer

* Remove extra line

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-08 05:55:53 +00:00
bidlocode
7c8492e83f Add medalla flag (#7399)
* add --medalla flag for uniformity

* add enforcing testnet flag

* fix

* fix comments

* fix sufficient err msg

* fix shared method

* fix

* fix name

* fix name

* add test

* fix e2e

* fix gofmt

* fix test

* add warning

* fix warning

* fix warning test

* fix

* fix

* fix

* fix

* fix gofmt

* fix refactor to func

* add func description

* fix exported

* fix

* fix

* fix

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-07 23:04:19 +00:00
Preston Van Loon
668163d740 Revert PR #7429 to fix building on windows (#7456)
* Revert PR #7429 to fix building on windows

* Revert PR #7429 to fix building on windows

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-07 19:09:40 +00:00
terence tsao
7ad2929f0f Revamp proposer cache to fix lookahead bug (#7442)
* Add and test proposer indices cache

* Remove proposer indices usages from committee cache

* Adopt the new proposer indices cache

* Add comments on why genesis epoch is skipped

* Fix the failing tests

* Update beacon-chain/blockchain/process_block.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/core/helpers/committee.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/core/helpers/slot_epoch.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/core/helpers/slot_epoch.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/core/helpers/slot_epoch_test.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/core/helpers/validators.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Address additional feedbacks from @prestonvanloon

* Add comments on why genesis epoch is skipped

* Refactor EndSlot to use StartSlot within

* Add proposer indices disabled

* Add libfuzz tags

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-07 17:25:08 +00:00
Victor Farazdagi
9ce64e2428 Refactor peer scorer into peerdata + scorers (#7452)
* updates comment

* manager -> service

* rename receiver

* refacgtor bad_responses

* refactor store

* update status service

* extends data service

* status service test

* refactor block provider scorer

* misc updates

* fix tests

* data -> peerdata

* gazelle

* peerdata/store test

* limit the visibility scope

* Nishant's suggestion

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-07 13:08:51 +00:00
terence tsao
ae78546323 Add Zinken flag to slasher (#7451)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-07 09:44:50 +00:00
Raul Jordan
23bce8d0c5 Include Deposit Data JSON in Wallet Create RPC Response (#7444)
* return deposit data for hd wallet create

* test added for deposit data json

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-07 02:15:54 +00:00
terence tsao
29137f7b39 Remove unused SlotSignature function (#7447)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-06 23:55:37 +00:00
Raul Jordan
bf4a8dcee9 Fix E2E Readme to Include Bazel Define Flag (#7449)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-06 20:49:29 +00:00
Shay Zluf
7b5f71229e New attestation store for local protection (#7248)
* Update attestation with new marshal unmarshal

* Add db methods to handle new slashing protection schema

* lint

* add tests

* goimports

* Apply suggestions from code review

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>

* const fix

* fix ineffectual assignments

* goimports

* fix import issue

* victor feedback fixes

* victor feedback fixes

* receiver methods

* receiver methods

* Update validator/db/kv/attestation_history_new.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

* Update validator/db/kv/attestation_history_new.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

* Update validator/db/kv/attestation_history_new.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

* Update validator/db/kv/attestation_history_new.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

* Update validator/db/kv/attestation_history_new_test.go

* type change

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-10-06 19:59:36 +00:00
Raul Jordan
4d7797827e Concurrent Websocket Handlers for Log Streaming (#7428)
* concurrent conn with mutex

* add buffer size

* only localhost

* clarify caller needs to hold lock

* Update shared/logutil/stream.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update shared/logutil/stream.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* fix up tests

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-06 13:32:48 +00:00
terence tsao
c0ed43d920 Refactor head info for better usages for lock (#7432)
* Refactor head info for better usages for lock

* Fix headroot test to use [32]byte{}

* go fmt: issue introduced in #7429

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-10-06 10:57:18 +00:00
Victor Farazdagi
842c15856b Apply go fmt + tidy: issue introduced in #7429 (#7446) 2020-10-06 09:05:32 +00:00
Potuz
20ac925ee4 Add a journald log format option. (#7429)
* Add a journald log format option.

Add an accepted value "journald" for the --log-format flag so that
logs are directed to journald with the correct error level.

* fix docker images

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-06 04:37:16 +00:00
Raul Jordan
6e8ff10003 deep source toml (#7439) 2020-10-05 14:07:49 -07:00
Victor Farazdagi
419fad07cd Handle nil header properly in powchain service tests (#7435) 2020-10-05 10:10:31 +00:00
Ivan Martinez
cf1c346beb Eth2 API: Add outline for beacon endpoints (#7408)
* Start Beacon API outline

* Rename to v1

* Add impl

* Change to outline

* Add comments

* Remove unneeded items

* Fix linting

* tidy

* Fix visibility

* go.sum

* Fix deps

* Tidy

* Add check for interface type and fix pointers

* Fix pointer name

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-10-05 00:15:27 +00:00
Radosław Kapka
3e0b20529b Miscellaneous code quality improvements (#7414)
* anti-patterns

* performance issues

* handle skipped defer

* gazelle fix

* misc bug risks

* make logging of proposer slashings more robust

* simplify calling span.End()

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-04 15:03:10 +00:00
Raul Jordan
d9ae2073e2 Pagination Added to ListAccounts Validator RPC Call (#7422)
* Pagination tests to list accounts

* fetch all

* gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-03 17:26:39 -05:00
terence tsao
7b3efcf62b Move read lock after hasHeadState (#7427)
* Move read lock before hasHeadState

* Export the locked version

* Update beacon-chain/blockchain/head.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

* Update beacon-chain/blockchain/chain_info.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

* Update beacon-chain/blockchain/chain_info.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

* Update beacon-chain/blockchain/chain_info.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-10-03 20:53:05 +00:00
terence tsao
c7d01fd73c Add lock around fork choice store votes (#7426)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-03 05:48:34 +00:00
Preston Van Loon
2916d183e8 Pubsub: ensure better TopicIDs validation (#7418)
* Ensure better topicIDs validation, add tests

* go mod tidy

* fix tests

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-10-03 01:51:58 +00:00
terence tsao
c24fb792cb Add locks around head getters (#7423) 2020-10-03 00:45:59 +00:00
754 changed files with 19490 additions and 16279 deletions

View File

@@ -5,6 +5,9 @@ test --test_verbose_timeout_warnings
test --build_tests_only
test --test_output=errors
# E2E run with debug gotag
test:e2e --define gotags=debug
# Clearly indicate that coverage is enabled to disable certain nogo checks.
coverage --define=coverage_enabled=1
@@ -42,6 +45,7 @@ build:kafka_enabled --define kafka_enabled=true
build:kafka_enabled --define gotags=kafka_enabled
build:blst_enabled --define blst_enabled=true
build:blst_enabled --define gotags=blst_enabled
# Release flags
build:release --workspace_status_command=./scripts/workspace_status.sh

View File

@@ -34,19 +34,5 @@ build --flaky_test_attempts=5
# Disabled race detection due to unstable test results under constrained environment build kite
# build --features=race
# Enable kafka for CI tests only.
test --config=kafka_enabled
# Disable blst for CI tests.
test --define blst_enabled=true
build --bes_backend=grpcs://builds.prylabs.net:1985
build --bes_results_url=https://builds.prylabs.net/invocation/
# Disable flaky test detection for fuzzing.
test:fuzz --flaky_test_attempts=1
# Expose test environment variables in CI
test:fuzzit --test_env=GITHUB_REF
test:fuzzit --test_env=GITHUB_SHA
test:fuzzit --test_env=DOCKER_HOST

.deepsource.toml (new file, 12 lines)
View File

@@ -0,0 +1,12 @@
version = 1
[[analyzers]]
name = "go"
enabled = true
[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm"]
[[analyzers]]
name = "test-coverage"
enabled = true

.policy.yml (new file, 91 lines)
View File

@@ -0,0 +1,91 @@
policy:
  approval:
    - or:
        - only test files are changed
        - only proto files are changed
        - touches consensus critical code
        - touches sync/blockchain/p2p code
        - large line count
        - is critical priority

approval_rules:
  - name: only test files are changed
    if:
      only_changed_files:
        paths:
          - "*_test.go"
          - "*.bazel"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 1
      teams:
        - "prysmaticlabs/core-team"

  - name: only proto files are changed
    if:
      only_changed_files:
        paths:
          - "*pb.go"
          - "*pb.gw.go"
          - "*.bazel"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 1
      teams:
        - "prysmaticlabs/core-team"

  - name: touches consensus critical code
    if:
      only_changed_files:
        paths:
          - "beacon-chain/core/*"
          - "beacon-chain/state/*"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 2
      teams:
        - "prysmaticlabs/core-team"

  - name: touches sync/blockchain/p2p code
    if:
      only_changed_files:
        paths:
          - "beacon-chain/blockchain/*"
          - "beacon-chain/sync/*"
          - "beacon-chain/p2p/*"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 2
      teams:
        - "prysmaticlabs/core-team"

  - name: large line count
    if:
      modified_lines:
        total: "> 1000"
      changed_files:
        ignore:
          - "*pb.go"
          - "*pb.gw.go"
          - "*.bazel"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 2
      teams:
        - "prysmaticlabs/core-team"

  - name: is critical priority
    if:
      has_labels:
        - "Priority: Critical"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 3
      teams:
        - "prysmaticlabs/core-team"

TERMS_OF_SERVICE.md (new file, 45 lines)
View File

@@ -0,0 +1,45 @@
## Terms of Use
Effective as of Oct 14, 2020
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.
### About Prysm
Prysm is a client implementation for Ethereum 2.0 Phase 0 protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
### Licensing Terms
Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.
### Risks of Operating Prysm
The use of Prysm and acting as a validator on the Ethereum 2.0 network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with local laws.
Some Internet plans will charge an additional amount for any excess upload bandwidth used that isn't included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to such limitations and monitor your bandwidth use so that you can stop Prysm before you reach your upload limit.
### Warranty Disclaimer
PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. PRYSMATIC LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.
### Limitation of Liability
In no event will Prysmatic Labs or any of its contributors be liable, whether in contract, warranty, tort (including negligence, whether active, passive or imputed), product liability, strict liability or other theory, breach of statutory duty or otherwise arising out of, or in connection with, your use of Prysm, for any direct, indirect, incidental, special or consequential damages (including any loss of profits or data, business interruption or other pecuniary loss, or damage, loss or other compromise of data, in each case whether direct, indirect, incidental, special or consequential) arising out of use Prysm, even if we or other users have been advised of the possibility of such damages. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by applicable law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
### Indemnification
To the maximum extent permitted by law, you will defend, indemnify and hold Prysmatic Labs and its contributors harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction on Prysm), as well as any and all losses, liabilities,
damages, costs, and expenses (including reasonable attorneys' fees) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.
### Compliance with Laws and Tax Obligations
Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal,
regional, state, county, municipal or other governmental authority and you agree to comply with all such laws in your use of Prysm. The users of Prysm are solely responsible to determinate what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.
### Miscellaneous
These Terms will be construed and enforced in accordance with the laws of the state of Illinois as applied to agreements entered into and completely performed in Illinois. You agree to the personal jurisdiction by and venue in Illinois and waive any objection to such jurisdiction or venue.
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your continued use of Prysm constitutes acceptance of such revised Terms.
These Terms constitute the entire agreement between you and Prysmatic Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.
If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party's right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.

View File

@@ -161,6 +161,10 @@ load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
gazelle_dependencies()
load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")
go_embed_data_dependencies()
load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")
gometalinter_dependencies()
@@ -215,8 +219,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "7e5f838e0f9110471ef8be9401ea687a8ed4d499664dc0eac34ecfdfd03c2ac3",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.12.3/general.tar.gz",
sha256 = "6b3498001de98c477aa2c256beffc20a85ce1b12b8e0f8e88502a5c3a18c01de",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0-rc.0/general.tar.gz",
)
http_archive(
@@ -336,6 +340,21 @@ go_binary(
urls = ["https://github.com/ferranbt/fastssz/archive/06015a5d84f9e4eefe2c21377ca678fa8f1a1b09.tar.gz"],
)
http_archive(
name = "prysm_web_ui",
build_file_content = """
filegroup(
name = "site",
srcs = glob(["**/*"]),
visibility = ["//visibility:public"],
)
""",
sha256 = "6bb16ff0dc9348090cc31a9ea453643d32b617e66ac6e7bb38985d530070631b",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/0.0.2-alpha/prysm-web-ui.tar.gz",
],
)
load("//:deps.bzl", "prysm_deps")
# gazelle:repository_macro deps.bzl%prysm_deps

View File

@@ -19,15 +19,16 @@ go_library(
"//shared/cmd:go_default_library",
"//shared/debug:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/journald:go_default_library",
"//shared/logutil:go_default_library",
"//shared/maxprocs:go_default_library",
"//shared/tos:go_default_library",
"//shared/version:go_default_library",
"@com_github_ethereum_go_ethereum//log:go_default_library",
"@com_github_ipfs_go_log_v2//:go_default_library",
"@com_github_joonix_log//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@com_github_urfave_cli_v2//altsrc:go_default_library",
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
],
)
@@ -53,19 +54,20 @@ go_image(
deps = [
"//beacon-chain/flags:go_default_library",
"//beacon-chain/node:go_default_library",
"//shared/tos:go_default_library",
"//shared/cmd:go_default_library",
"//shared/debug:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/journald:go_default_library",
"//shared/logutil:go_default_library",
"//shared/maxprocs:go_default_library",
"//shared/version:go_default_library",
"@com_github_ethereum_go_ethereum//log:go_default_library",
"@com_github_ipfs_go_log_v2//:go_default_library",
"@com_github_joonix_log//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@com_github_urfave_cli_v2//altsrc:go_default_library",
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
"//shared/maxprocs:go_default_library",
],
)

View File

@@ -5,7 +5,6 @@ go_library(
name = "go_default_library",
srcs = [
"chain_info.go",
"checkpoint_info_cache.go",
"head.go",
"info.go",
"init_sync_process_block.go",
@@ -51,13 +50,11 @@ go_library(
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/slotutil:go_default_library",
"//shared/timeutils:go_default_library",
"//shared/traceutil:go_default_library",
"@com_github_emicklei_dot//:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -80,9 +77,9 @@ go_test(
size = "medium",
srcs = [
"chain_info_test.go",
"checkpoint_info_cache_test.go",
"head_test.go",
"info_test.go",
"metrics_test.go",
"process_attestation_test.go",
"process_block_test.go",
"receive_attestation_test.go",

View File

@@ -97,6 +97,9 @@ func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
// HeadSlot returns the slot of the head of the chain.
func (s *Service) HeadSlot() uint64 {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return 0
}
@@ -106,6 +109,9 @@ func (s *Service) HeadSlot() uint64 {
// HeadRoot returns the root of the head of the chain.
func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if s.headRoot() != params.BeaconConfig().ZeroHash {
r := s.headRoot()
return r[:], nil
@@ -131,6 +137,9 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
// If the head is nil from service struct,
// it will attempt to get the head block from DB.
func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if s.hasHeadState() {
return s.headBlock(), nil
}
@@ -144,6 +153,8 @@ func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, erro
func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.HeadState")
defer span.End()
s.headLock.RLock()
defer s.headLock.RUnlock()
ok := s.hasHeadState()
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
@@ -152,11 +163,14 @@ func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
return s.headState(ctx), nil
}
return s.beaconDB.HeadState(ctx)
return s.stateGen.StateByRoot(ctx, s.headRoot())
}
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return []uint64{}, nil
}
@@ -165,6 +179,9 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]ui
// HeadSeed returns the seed from the head view of a given epoch.
func (s *Service) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [32]byte{}, nil
}
@@ -174,6 +191,9 @@ func (s *Service) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error)
// HeadGenesisValidatorRoot returns genesis validator root of the head state.
func (s *Service) HeadGenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [32]byte{}
}
@@ -183,6 +203,9 @@ func (s *Service) HeadGenesisValidatorRoot() [32]byte {
// HeadETH1Data returns the eth1data of the current head state.
func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return &ethpb.Eth1Data{}
}
@@ -202,6 +225,9 @@ func (s *Service) GenesisTime() time.Time {
// GenesisValidatorRoot returns the genesis validator
// root of the chain.
func (s *Service) GenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [32]byte{}
}
@@ -210,6 +236,9 @@ func (s *Service) GenesisValidatorRoot() [32]byte {
// CurrentFork retrieves the latest fork information of the beacon chain.
func (s *Service) CurrentFork() *pb.Fork {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return &pb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
@@ -226,9 +255,6 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
return true, nil
}
// If the block has not been finalized, the block must be recent. Check recent canonical roots
// mapping which uses proto array fork choice.
s.recentCanonicalBlocksLock.RLock()
defer s.recentCanonicalBlocksLock.RUnlock()
return s.recentCanonicalBlocks[blockRoot], nil
// If the block has not been finalized, check fork choice store to see if the block is canonical
return s.forkChoiceStore.IsCanonical(blockRoot), nil
}
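Taken together, the hunks above apply one pattern to every head accessor: take the head read lock before touching head-derived data, return a harmless zero value when no head state has been set yet, and (in HeadState) fall back to stategen's StateByRoot instead of the removed DB head state. A minimal sketch of that guarded-getter shape, using a simplified stand-in type rather than the real Service struct (all names below are illustrative only), might look like this:

package blockchain

import "sync"

// headView is a hypothetical, trimmed-down stand-in for the service's head
// tracking data; the real struct also carries the head root, block and state.
type headView struct {
    slot     uint64
    hasState bool
}

type service struct {
    headLock sync.RWMutex
    head     *headView
}

// HeadSlot illustrates the guarded getter shape from the diff above: acquire
// the read lock first, return a zero value if there is no head state yet,
// and only then read the field.
func (s *service) HeadSlot() uint64 {
    s.headLock.RLock()
    defer s.headLock.RUnlock()
    if s.head == nil || !s.head.hasState {
        return 0
    }
    return s.head.slot
}

The same shape repeats for HeadRoot, HeadBlock, the seed/fork/eth1 getters and GenesisValidatorRoot; only the zero value returned on a missing head differs.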

View File

@@ -18,9 +18,9 @@ import (
)
// Ensure Service implements chain info interface.
var _ = ChainInfoFetcher(&Service{})
var _ = TimeFetcher(&Service{})
var _ = ForkFetcher(&Service{})
var _ ChainInfoFetcher = (*Service)(nil)
var _ TimeFetcher = (*Service)(nil)
var _ ForkFetcher = (*Service)(nil)
func TestFinalizedCheckpt_Nil(t *testing.T) {
db, sc := testDB.SetupDB(t)
@@ -103,13 +103,15 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
s, err := state.InitializeFromProto(&pb.BeaconState{})
require.NoError(t, err)
c.head = &head{slot: 100, state: s}
assert.Equal(t, uint64(100), c.headSlot())
assert.Equal(t, uint64(100), c.HeadSlot())
}
func TestHeadRoot_CanRetrieve(t *testing.T) {
c := &Service{}
c.head = &head{root: [32]byte{'A'}}
assert.Equal(t, [32]byte{'A'}, c.headRoot())
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, [32]byte{'A'}, bytesutil.ToBytes32(r))
}
func TestHeadBlock_CanRetrieve(t *testing.T) {
@@ -179,3 +181,23 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
t.Error("Received incorrect eth1 data")
}
}
func TestIsCanonical_Ok(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 0
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
can, err := c.IsCanonical(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, can)
can, err = c.IsCanonical(ctx, [32]byte{'a'})
require.NoError(t, err)
assert.Equal(t, false, can)
}
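The first hunk in this test file also swaps the interface assertions from the form var _ = ChainInfoFetcher(&Service{}) to var _ ChainInfoFetcher = (*Service)(nil). Both fail compilation if *Service stops satisfying the interface, but the second form is the more common Go idiom: it uses a typed nil pointer, so no zero-value Service has to be built just to be thrown away. A tiny self-contained illustration of the idiom, with made-up names that are not from the Prysm codebase:

package example

// Fetcher is a hypothetical interface used only to demonstrate the assertion idiom.
type Fetcher interface {
    Fetch() ([]byte, error)
}

type fileFetcher struct{}

func (f *fileFetcher) Fetch() ([]byte, error) { return nil, nil }

// Compile-time assertion that *fileFetcher implements Fetcher. The typed nil
// pointer costs nothing at runtime; if the method set ever stops matching,
// the package no longer builds.
var _ Fetcher = (*fileFetcher)(nil)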

View File

@@ -1,83 +0,0 @@
package blockchain
import (
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
)
var (
// This defines the max number of checkpoint info this cache can store.
// Each cache is calculated at 3MB(30K validators), the total cache size is around 100MB.
// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
maxInfoSize = 32
// This tracks the number of check point info requests that aren't present in the cache.
infoMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "check_point_info_cache_miss",
Help: "The number of check point info requests that aren't present in the cache.",
})
// This tracks the number of check point info requests that are in the cache.
infoHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "check_point_info_cache_hit",
Help: "The number of check point info requests that are present in the cache.",
})
)
// checkPtInfoCache is a struct with 1 LRU cache for looking up check point info by checkpoint.
type checkPtInfoCache struct {
cache *lru.Cache
lock sync.RWMutex
}
// newCheckPointInfoCache creates a new checkpoint info cache for storing/accessing processed check point info object.
func newCheckPointInfoCache() *checkPtInfoCache {
cache, err := lru.New(maxInfoSize)
if err != nil {
panic(err)
}
return &checkPtInfoCache{
cache: cache,
}
}
// get fetches check point info by check point. Returns the reference of the CheckPtInfo, nil if doesn't exist.
func (c *checkPtInfoCache) get(cp *ethpb.Checkpoint) (*pb.CheckPtInfo, error) {
c.lock.RLock()
defer c.lock.RUnlock()
h, err := hashutil.HashProto(cp)
if err != nil {
return nil, err
}
item, exists := c.cache.Get(h)
if exists && item != nil {
infoHit.Inc()
// Copy here is unnecessary since the returned check point info object
// will only be used to verify attestation signature.
return item.(*pb.CheckPtInfo), nil
}
infoMiss.Inc()
return nil, nil
}
// put adds CheckPtInfo object to the cache. This method also trims the least
// recently added CheckPtInfo object if the cache size has ready the max cache size limit.
func (c *checkPtInfoCache) put(cp *ethpb.Checkpoint, info *pb.CheckPtInfo) error {
c.lock.Lock()
defer c.lock.Unlock()
h, err := hashutil.HashProto(cp)
if err != nil {
return err
}
c.cache.Add(h, info)
return nil
}

View File

@@ -1,41 +0,0 @@
package blockchain
import (
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestHotStateCache_RoundTrip(t *testing.T) {
c := newCheckPointInfoCache()
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte{'a'}, 32),
}
info, err := c.get(cp)
require.NoError(t, err)
require.Equal(t, (*pb.CheckPtInfo)(nil), info)
i := &pb.CheckPtInfo{
Seed: bytesutil.PadTo([]byte{'c'}, 32),
GenesisRoot: bytesutil.PadTo([]byte{'d'}, 32),
ActiveIndices: []uint64{0, 1, 2, 3},
}
require.NoError(t, c.put(cp, i))
info, err = c.get(cp)
require.NoError(t, err)
require.DeepEqual(t, info, i)
}
func TestHotStateCache_CanPrune(t *testing.T) {
c := newCheckPointInfoCache()
for i := 0; i < maxInfoSize+1; i++ {
cp := &ethpb.Checkpoint{Epoch: uint64(i), Root: make([]byte, 32)}
require.NoError(t, c.put(cp, &pb.CheckPtInfo{}))
}
require.Equal(t, len(c.cache.Keys()), maxInfoSize)
}

View File

@@ -11,7 +11,6 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -24,7 +23,7 @@ type head struct {
slot uint64 // current head slot.
root [32]byte // current head root.
block *ethpb.SignedBeaconBlock // current head block.
state *state.BeaconState // current head state.
state *stateTrie.BeaconState // current head state.
}
// Determined the head from the fork choice service and saves its new data
@@ -52,12 +51,24 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
if headStartRoot == params.BeaconConfig().ZeroHash {
headStartRoot = s.genesisRoot
}
headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
if err != nil {
return err
// In order to process head, fork choice store requires justified info.
// If the fork choice store is missing justified block info, a node should
// re-initiate fork choice store using the latest justified info.
// This recovers a fatal condition and should not happen in run time.
if !s.forkChoiceStore.HasNode(headStartRoot) {
jb, err := s.beaconDB.Block(ctx, headStartRoot)
if err != nil {
return err
}
s.forkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block, headStartRoot, f, j); err != nil {
return err
}
}
if err := s.updateRecentCanonicalBlocks(ctx, headRoot); err != nil {
headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
if err != nil {
return err
}
@@ -72,7 +83,11 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
defer span.End()
// Do nothing if head hasn't changed.
if headRoot == s.headRoot() {
r, err := s.HeadRoot(ctx)
if err != nil {
return err
}
if headRoot == bytesutil.ToBytes32(r) {
return nil
}
@@ -101,16 +116,17 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
}
// A chain re-org occurred, so we fire an event notifying the rest of the services.
if bytesutil.ToBytes32(newHeadBlock.Block.ParentRoot) != s.headRoot() {
headSlot := s.HeadSlot()
if bytesutil.ToBytes32(newHeadBlock.Block.ParentRoot) != bytesutil.ToBytes32(r) {
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadBlock.Block.Slot),
"oldSlot": fmt.Sprintf("%d", s.headSlot()),
"oldSlot": fmt.Sprintf("%d", headSlot),
}).Debug("Chain reorg occurred")
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &statefeed.ReorgData{
NewSlot: newHeadBlock.Block.Slot,
OldSlot: s.headSlot(),
OldSlot: headSlot,
},
})
@@ -131,7 +147,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte, hs *state.BeaconState) error {
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte, hs *stateTrie.BeaconState) error {
cachedHeadRoot, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get head root from cache")
@@ -149,7 +165,7 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock,
}
// This sets head view object which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *state.BeaconState) {
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -165,7 +181,7 @@ func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlock, state *state.BeaconState) {
func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -179,89 +195,57 @@ func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlo
}
// This returns the head slot.
// This is a lock free version.
func (s *Service) headSlot() uint64 {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.head.slot
}
// This returns the head root.
// It does a full copy on head root for immutability.
// This is a lock free version.
func (s *Service) headRoot() [32]byte {
if s.head == nil {
return params.BeaconConfig().ZeroHash
}
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.head.root
}
// This returns the head block.
// It does a full copy on head block for immutability.
// This is a lock free version.
func (s *Service) headBlock() *ethpb.SignedBeaconBlock {
s.headLock.RLock()
defer s.headLock.RUnlock()
return stateTrie.CopySignedBeaconBlock(s.head.block)
}
// This returns the head state.
// It does a full copy on head state for immutability.
// This is a lock free version.
func (s *Service) headState(ctx context.Context) *stateTrie.BeaconState {
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
defer span.End()
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.head.state.Copy()
}
// This returns the genesis validator root of the head state.
// This is a lock free version.
func (s *Service) headGenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}
// Returns true if head state exists.
func (s *Service) hasHeadState() bool {
// HasHeadState returns true if head state exists.
func (s *Service) HasHeadState() bool {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.head != nil && s.head.state != nil
return s.hasHeadState()
}
// This updates recent canonical block mapping. It uses input head root and retrieves
// all the canonical block roots that are ancestor of the input head block root.
func (s *Service) updateRecentCanonicalBlocks(ctx context.Context, headRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateRecentCanonicalBlocks")
defer span.End()
s.recentCanonicalBlocksLock.Lock()
defer s.recentCanonicalBlocksLock.Unlock()
s.recentCanonicalBlocks = make(map[[32]byte]bool)
s.recentCanonicalBlocks[headRoot] = true
nodes := s.forkChoiceStore.Nodes()
node := s.forkChoiceStore.Node(headRoot)
if node == nil {
return nil
}
for node.Parent() != protoarray.NonExistentNode {
if ctx.Err() != nil {
return ctx.Err()
}
node = nodes[node.Parent()]
s.recentCanonicalBlocks[node.Root()] = true
}
return nil
// Returns true if head state exists.
// This is the lock free version.
func (s *Service) hasHeadState() bool {
return s.head != nil && s.head.state != nil
}
// This caches justified state balances to be used for fork choice.
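One subtle convention change in this file: the unexported helpers (headSlot, headRoot, headBlock, headState, hasHeadState) are documented as lock-free versions that expect the caller to already hold headLock, while exported entry points such as HasHeadState take the lock themselves and delegate. That split lets internal code that already holds the read lock reuse the helpers without re-acquiring it, which matters because re-acquiring a sync.RWMutex read lock can deadlock if a writer is queued in between. A compact sketch of the convention, again with hypothetical names rather than the real Service fields:

package blockchain

import "sync"

type chainStore struct {
    mu   sync.RWMutex
    head *struct{ slot uint64 }
}

// HasHead is the exported, locking entry point.
func (s *chainStore) HasHead() bool {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.hasHead()
}

// hasHead is the lock-free variant; callers must already hold mu, so code
// that has taken the lock can use it without a second RLock.
func (s *chainStore) hasHead() bool {
    return s.head != nil
}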

View File

@@ -5,6 +5,7 @@ import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -93,39 +94,6 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.LogsContain(t, hook, "Chain reorg occurred")
}
func TestUpdateRecentCanonicalBlocks_CanUpdateWithoutParent(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
r := [32]byte{'a'}
require.NoError(t, service.updateRecentCanonicalBlocks(context.Background(), r))
canonical, err := service.IsCanonical(context.Background(), r)
require.NoError(t, err)
assert.Equal(t, true, canonical, "Block should be canonical")
}
func TestUpdateRecentCanonicalBlocks_CanUpdateWithParent(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
oldHead := [32]byte{'a'}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 1, oldHead, [32]byte{'g'}, [32]byte{}, 0, 0))
currentHead := [32]byte{'b'}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 3, currentHead, oldHead, [32]byte{}, 0, 0))
forkedRoot := [32]byte{'c'}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 2, forkedRoot, oldHead, [32]byte{}, 0, 0))
require.NoError(t, service.updateRecentCanonicalBlocks(context.Background(), currentHead))
canonical, err := service.IsCanonical(context.Background(), currentHead)
require.NoError(t, err)
assert.Equal(t, true, canonical, "Block should be canonical")
canonical, err = service.IsCanonical(context.Background(), oldHead)
require.NoError(t, err)
assert.Equal(t, true, canonical, "Block should be canonical")
canonical, err = service.IsCanonical(context.Background(), forkedRoot)
require.NoError(t, err)
assert.Equal(t, false, canonical, "Block should not be canonical")
}
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
@@ -137,3 +105,19 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
}
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
b := testutil.NewBeaconBlock()
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), b))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, service.updateHead(context.Background(), []uint64{}))
}

View File

@@ -34,7 +34,12 @@ const template = `<html>
// TreeHandler is a handler to serve /tree page in metrics.
func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
if s.headState(r.Context()) == nil {
headState, err := s.HeadState(r.Context())
if err != nil {
log.WithError(err).Error("Could not get head state")
return
}
if headState == nil {
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
log.WithError(err).Error("Failed to render p2p info page")
}
@@ -47,7 +52,7 @@ func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
graph.Attr("labeljust", "l")
dotNodes := make([]*dot.Node, len(nodes))
avgBalance := uint64(averageBalance(s.headState(r.Context()).Balances()))
avgBalance := uint64(averageBalance(headState.Balances()))
for i := len(nodes) - 1; i >= 0; i-- {
// Construct label for each node.
@@ -63,7 +68,7 @@ func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
dotN = graph.Node(index).Box().Attr("label", label)
}
if nodes[i].Slot() == s.headSlot() &&
if nodes[i].Slot() == s.HeadSlot() &&
nodes[i].BestDescendant() == ^uint64(0) &&
nodes[i].Parent() != ^uint64(0) {
dotN = dotN.Attr("color", "green")

View File

@@ -1,17 +1,15 @@
package blockchain
import (
"time"
"context"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/timeutils"
)
var (
@@ -100,13 +98,6 @@ var (
Name: "beacon_reorg_total",
Help: "Count the number of times beacon chain has a reorg",
})
sentBlockPropagationHistogram = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "block_sent_latency_milliseconds",
Help: "Captures blocks broadcast time. Blocks sent in milliseconds distribution",
Buckets: []float64{1000, 2000, 3000, 4000, 5000, 6000},
},
)
attestationInclusionDelay = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "attestation_inclusion_delay_slots",
@@ -114,19 +105,10 @@ var (
Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
},
)
// TODO(7141): Remove deprecated metrics below.
totalEligibleBalances = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_eligible_balances",
Help: "The total amount of ether, in gwei, that is eligible for voting of previous epoch",
})
totalVotedTargetBalances = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_voted_target_balances",
Help: "The total amount of ether, in gwei, that has been used in voting attestation target of previous epoch",
})
)
// reportSlotMetrics reports slot related metrics.
func reportSlotMetrics(stateSlot uint64, headSlot uint64, clockSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
func reportSlotMetrics(stateSlot, headSlot, clockSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
clockTimeSlot.Set(float64(clockSlot))
beaconSlot.Set(float64(stateSlot))
beaconHeadSlot.Set(float64(headSlot))
@@ -137,8 +119,8 @@ func reportSlotMetrics(stateSlot uint64, headSlot uint64, clockSlot uint64, fina
}
// reportEpochMetrics reports epoch related metrics.
func reportEpochMetrics(state *stateTrie.BeaconState) {
currentEpoch := state.Slot() / params.BeaconConfig().SlotsPerEpoch
func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.BeaconState) error {
currentEpoch := postState.Slot() / params.BeaconConfig().SlotsPerEpoch
// Validator instances
pendingInstances := 0
@@ -156,8 +138,8 @@ func reportEpochMetrics(state *stateTrie.BeaconState) {
slashingBalance := uint64(0)
slashingEffectiveBalance := uint64(0)
for i, validator := range state.Validators() {
bal, err := state.BalanceAtIndex(uint64(i))
for i, validator := range postState.Validators() {
bal, err := postState.BalanceAtIndex(uint64(i))
if err != nil {
log.Errorf("Could not load validator balance: %v", err)
continue
@@ -206,41 +188,36 @@ func reportEpochMetrics(state *stateTrie.BeaconState) {
validatorsEffectiveBalance.WithLabelValues("Slashing").Set(float64(slashingEffectiveBalance))
// Last justified slot
beaconCurrentJustifiedEpoch.Set(float64(state.CurrentJustifiedCheckpoint().Epoch))
beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.CurrentJustifiedCheckpoint().Root)))
beaconCurrentJustifiedEpoch.Set(float64(postState.CurrentJustifiedCheckpoint().Epoch))
beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(postState.CurrentJustifiedCheckpoint().Root)))
// Last previous justified slot
beaconPrevJustifiedEpoch.Set(float64(state.PreviousJustifiedCheckpoint().Epoch))
beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.PreviousJustifiedCheckpoint().Root)))
beaconPrevJustifiedEpoch.Set(float64(postState.PreviousJustifiedCheckpoint().Epoch))
beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(postState.PreviousJustifiedCheckpoint().Root)))
// Last finalized slot
beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpointEpoch()))
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint().Root)))
beaconFinalizedEpoch.Set(float64(postState.FinalizedCheckpointEpoch()))
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(postState.FinalizedCheckpoint().Root)))
currentEth1DataDepositCount.Set(float64(postState.Eth1Data().DepositCount))
currentEth1DataDepositCount.Set(float64(state.Eth1Data().DepositCount))
if precompute.Balances != nil {
totalEligibleBalances.Set(float64(precompute.Balances.ActivePrevEpoch))
totalVotedTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttested))
prevEpochActiveBalances.Set(float64(precompute.Balances.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(precompute.Balances.PrevEpochAttested))
prevEpochTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttested))
prevEpochHeadBalances.Set(float64(precompute.Balances.PrevEpochHeadAttested))
}
refMap := state.FieldReferencesCount()
for name, val := range refMap {
stateTrieReferences.WithLabelValues(name).Set(float64(val))
}
}
// This captures metrics for block sent time by subtracts slot start time.
func captureSentTimeMetric(genesisTime uint64, currentSlot uint64) error {
startTime, err := helpers.SlotToTime(genesisTime, currentSlot)
// Validator participation should be viewed on the canonical chain.
v, b, err := precompute.New(ctx, headState)
if err != nil {
return err
}
diffMs := timeutils.Now().Sub(startTime) / time.Millisecond
sentBlockPropagationHistogram.Observe(float64(diffMs))
_, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
if err != nil {
return err
}
prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
prevEpochTargetBalances.Set(float64(b.PrevEpochTargetAttested))
prevEpochHeadBalances.Set(float64(b.PrevEpochHeadAttested))
refMap := postState.FieldReferencesCount()
for name, val := range refMap {
stateTrieReferences.WithLabelValues(name).Set(float64(val))
}
return nil
}

View File

@@ -0,0 +1,26 @@
package blockchain
import (
"context"
"testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestReportEpochMetrics_BadHeadState(t *testing.T) {
s := testutil.NewBeaconState()
h := testutil.NewBeaconState()
require.NoError(t, h.SetValidators(nil))
err := reportEpochMetrics(context.Background(), s, h)
require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
}
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
s := testutil.NewBeaconState()
h := testutil.NewBeaconState()
require.NoError(t, h.SetCurrentEpochAttestations([]*pb.PendingAttestation{{InclusionDelay: 0}}))
err := reportEpochMetrics(context.Background(), s, h)
require.ErrorContains(t, "attestation with inclusion delay of 0", err)
}

View File

@@ -8,10 +8,7 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/timeutils"
"go.opencensus.io/trace"
@@ -69,53 +66,6 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]ui
return nil, ErrTargetRootNotInDB
}
if featureconfig.Get().UseCheckPointInfoCache {
c, err := s.AttestationCheckPtInfo(ctx, a)
if err != nil {
return nil, err
}
if err := s.verifyAttTargetEpoch(ctx, uint64(s.genesisTime.Unix()), uint64(timeutils.Now().Unix()), tgt); err != nil {
return nil, err
}
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
return nil, errors.Wrap(err, "could not verify attestation beacon block")
}
if err := s.verifyLMDFFGConsistent(ctx, a.Data.Target.Epoch, a.Data.Target.Root, a.Data.BeaconBlockRoot); err != nil {
return nil, errors.Wrap(err, "could not verify attestation beacon block")
}
if err := helpers.VerifySlotTime(uint64(s.genesisTime.Unix()), a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, err
}
committee, err := helpers.BeaconCommittee(c.ActiveIndices, bytesutil.ToBytes32(c.Seed), a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return nil, err
}
indexedAtt := attestationutil.ConvertToIndexed(ctx, a, committee)
if err := attestationutil.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
return nil, err
}
domain, err := helpers.Domain(c.Fork, indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, c.GenesisRoot)
if err != nil {
return nil, err
}
indices := indexedAtt.AttestingIndices
pubkeys := []bls.PublicKey{}
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := c.PubKeys[indices[i]]
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx)
if err != nil {
return nil, err
}
pubkeys = append(pubkeys, pk)
}
if err := attestationutil.VerifyIndexedAttestationSig(ctx, indexedAtt, pubkeys, domain); err != nil {
return nil, err
}
s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
return indexedAtt.AttestingIndices, nil
}
// Retrieve attestation's data beacon block pre state. Advance pre state to latest epoch if necessary and
// save it to the cache.
baseState, err := s.getAttPreState(ctx, tgt)

View File

@@ -7,11 +7,11 @@ import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -19,10 +19,7 @@ import (
// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
s.checkpointStateLock.Lock()
defer s.checkpointStateLock.Unlock()
cachedState, err := s.checkpointState.StateByCheckpoint(c)
cachedState, err := s.checkpointStateCache.StateByCheckpoint(ctx, c)
if err != nil {
return nil, errors.Wrap(err, "could not get cached checkpoint state")
}
@@ -30,103 +27,59 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
return cachedState, nil
}
if err := s.checkpointStateCache.MarkInProgress(c); err != nil {
if errors.Is(err, cache.ErrAlreadyInProgress) {
cachedState, err = s.checkpointStateCache.StateByCheckpoint(ctx, c)
if err != nil {
return nil, errors.Wrap(err, "could not get cached checkpoint state")
}
if cachedState != nil {
return cachedState, nil
}
} else {
return nil, err
}
}
defer func() {
if err := s.checkpointStateCache.MarkNotInProgress(c); err != nil {
log.WithError(err).Error("Failed to mark cache not in progress")
}
}()
baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
}
epochStartSlot, err := helpers.StartSlot(c.Epoch)
if err != nil {
return nil, err
}
if epochStartSlot > baseState.Slot() {
baseState = baseState.Copy()
baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
}
if err := s.checkpointState.AddCheckpointState(c, baseState); err != nil {
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return baseState, nil
}
has, err := s.stateGen.HasState(ctx, bytesutil.ToBytes32(c.Root))
// To avoid sharing the same state across checkpoint state cache and hot state cache,
// we don't add the state to check point cache.
has, err := s.stateGen.HasStateInCache(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, err
}
if !has {
if err := s.checkpointState.AddCheckpointState(c, baseState); err != nil {
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
}
return baseState, nil
}
// getAttCheckPtInfo retrieves the check point info given a check point. Check point info enables the node
// to efficiently verify attestation signature without using beacon state. This function utilizes
// the checkpoint info cache and will update the check point info cache on miss.
func (s *Service) getAttCheckPtInfo(ctx context.Context, c *ethpb.Checkpoint, e uint64) (*pb.CheckPtInfo, error) {
// Return checkpoint info if exists in cache.
info, err := s.checkPtInfoCache.get(c)
if err != nil {
return nil, errors.Wrap(err, "could not get cached checkpoint state")
}
if info != nil {
return info, nil
}
// Retrieve checkpoint state to compute checkpoint info.
baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
}
epochStartSlot, err := helpers.StartSlot(c.Epoch)
if err != nil {
return nil, err
}
if epochStartSlot > baseState.Slot() {
baseState = baseState.Copy()
baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
}
}
f := baseState.Fork()
g := bytesutil.ToBytes32(baseState.GenesisValidatorRoot())
seed, err := helpers.Seed(baseState, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return nil, err
}
indices, err := helpers.ActiveValidatorIndices(baseState, e)
if err != nil {
return nil, err
}
validators := baseState.ValidatorsReadOnly()
pks := make([][]byte, len(validators))
for i := 0; i < len(pks); i++ {
pk := validators[i].PublicKey()
pks[i] = pk[:]
}
// Cache and return the checkpoint info.
info = &pb.CheckPtInfo{
Fork: f,
GenesisRoot: g[:],
Seed: seed[:],
ActiveIndices: indices,
PubKeys: pks,
}
if err := s.checkPtInfoCache.put(c, info); err != nil {
return nil, err
}
return info, nil
}
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
func (s *Service) verifyAttTargetEpoch(ctx context.Context, genesisTime uint64, nowTime uint64, c *ethpb.Checkpoint) error {
func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
currentEpoch := helpers.SlotToEpoch(currentSlot)
var prevEpoch uint64
@@ -162,7 +115,7 @@ func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.Attestation
}
// verifyLMDFFGConsistent verifies LMD GHOST and FFG votes are consistent with each other.
func (s *Service) verifyLMDFFGConsistent(ctx context.Context, ffgEpoch uint64, ffgRoot []byte, lmdRoot []byte) error {
func (s *Service) verifyLMDFFGConsistent(ctx context.Context, ffgEpoch uint64, ffgRoot, lmdRoot []byte) error {
ffgSlot, err := helpers.StartSlot(ffgEpoch)
if err != nil {
return err
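The getAttPreState rewrite at the top of this diff drops the dedicated checkpointStateLock and leans on the cache's own in-progress marker instead: a caller that loses the race gets cache.ErrAlreadyInProgress back from MarkInProgress, re-checks the cache in case the winner already finished, and the winner clears the marker in a deferred MarkNotInProgress. A rough sketch of that check / mark / re-check / defer-unmark flow, written against a hypothetical cache interface rather than the real checkpointStateCache:

package blockchain

import (
    "context"
    "errors"
)

// errAlreadyInProgress mirrors the sentinel error the real cache package exposes.
var errAlreadyInProgress = errors.New("already in progress")

// buildCache is a hypothetical minimal cache used only for illustration.
type buildCache interface {
    Get(ctx context.Context, key string) (string, error)
    Put(key, value string) error
    MarkInProgress(key string) error
    MarkNotInProgress(key string) error
}

// getOrBuild returns the cached value for key, building it at most once even
// under concurrent callers, following the shape of getAttPreState above.
func getOrBuild(ctx context.Context, c buildCache, key string, build func() (string, error)) (string, error) {
    if v, err := c.Get(ctx, key); err != nil {
        return "", err
    } else if v != "" {
        return v, nil
    }
    // Try to claim the build; if another goroutine already claimed it,
    // re-check the cache because it may have finished in the meantime.
    if err := c.MarkInProgress(key); err != nil {
        if !errors.Is(err, errAlreadyInProgress) {
            return "", err
        }
        if v, err := c.Get(ctx, key); err != nil {
            return "", err
        } else if v != "" {
            return v, nil
        }
    }
    defer func() {
        // Always release the claim, even if the build below fails.
        _ = c.MarkNotInProgress(key)
    }()
    v, err := build()
    if err != nil {
        return "", err
    }
    return v, c.Put(key, v)
}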

View File

@@ -2,11 +2,12 @@ package blockchain
import (
"context"
"sync"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -14,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -130,118 +130,6 @@ func TestStore_OnAttestation(t *testing.T) {
}
}
func TestStore_OnAttestationUsingCheckptCache(t *testing.T) {
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{UseCheckPointInfoCache: true})
defer resetCfg()
ctx := context.Background()
db, sc := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(db, sc),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
_, err = blockTree1(db, []byte{'g'})
require.NoError(t, err)
BlkWithOutState := testutil.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, db.SaveBlock(ctx, BlkWithOutState))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
require.NoError(t, err)
BlkWithStateBadAtt := testutil.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, db.SaveBlock(ctx, BlkWithStateBadAtt))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
s := testutil.NewBeaconState()
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
BlkWithValidState := testutil.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, db.SaveBlock(ctx, BlkWithValidState))
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
s = testutil.NewBeaconState()
err = s.SetFork(&pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
})
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
tests := []struct {
name string
a *ethpb.Attestation
wantedErr string
}{
{
name: "attestation's data slot not aligned with target vote",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}},
wantedErr: "data slot is not in the same epoch as target 1 != 0",
},
{
name: "attestation's target root not in db",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)}}},
wantedErr: "target root does not exist in db",
},
{
name: "no pre state for attestations's target block",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}},
wantedErr: "target epoch 100 does not match current epoch",
},
{
name: "process nil attestation",
a: nil,
wantedErr: "nil attestation",
},
{
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantedErr: "nil attestation.Data field",
},
{
name: "process nil field (a.Target) in attestation",
a: &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, 32),
Target: nil,
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
AggregationBits: make([]byte, 1),
Signature: make([]byte, 96),
},
wantedErr: "nil attestation.Data.Target field",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := service.onAttestation(ctx, tt.a)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
@@ -292,11 +180,11 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
s1, err = service.checkpointState.StateByCheckpoint(cp1)
s1, err = service.checkpointStateCache.StateByCheckpoint(ctx, cp1)
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
s2, err = service.checkpointState.StateByCheckpoint(cp2)
s2, err = service.checkpointStateCache.StateByCheckpoint(ctx, cp2)
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
@@ -332,7 +220,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, returned.Slot(), checkpoint.Epoch*params.BeaconConfig().SlotsPerEpoch, "Incorrectly returned base state")
cached, err := service.checkpointState.StateByCheckpoint(checkpoint)
cached, err := service.checkpointStateCache.StateByCheckpoint(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
@@ -347,13 +235,53 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, returned.Slot(), baseState.Slot(), "Incorrectly returned base state")
cached, err = service.checkpointState.StateByCheckpoint(newCheckpoint)
cached, err = service.checkpointStateCache.StateByCheckpoint(ctx, newCheckpoint)
require.NoError(t, err)
if !proto.Equal(returned.InnerStateUnsafe(), cached.InnerStateUnsafe()) {
t.Error("Incorrectly cached base state")
}
}
func TestStore_CheckpointState_InProgress(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, sc),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
epoch := uint64(1)
baseState, _ := testutil.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), 32)}
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
require.NoError(t, service.checkpointStateCache.MarkInProgress(checkpoint))
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
response, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
if !proto.Equal(baseState.InnerStateUnsafe(), response.InnerStateUnsafe()) {
t.Error("Expected equal responses from cache")
}
}()
wg.Add(1)
go func() {
defer wg.Done()
assert.NoError(t, service.checkpointStateCache.AddCheckpointState(checkpoint, baseState))
assert.NoError(t, service.checkpointStateCache.MarkNotInProgress(checkpoint))
}()
testutil.WaitTimeout(&wg, time.Second*2)
}
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
@@ -491,46 +419,86 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
assert.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
}
func TestGetAttCheckptInfo(t *testing.T) {
func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, StateGen: stategen.New(db, cache.NewStateSummaryCache())}
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
baseState, _ := testutil.DeterministicGenesisState(t, 128)
b := testutil.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, r))
require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, r))
checkpoint := &ethpb.Checkpoint{Root: r[:]}
returned, err := service.getAttCheckPtInfo(ctx, checkpoint, 0)
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
seed, err := helpers.Seed(baseState, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
indices, err := helpers.ActiveValidatorIndices(baseState, 0)
require.NoError(t, err)
validators := baseState.ValidatorsReadOnly()
pks := make([][]byte, len(validators))
for i := 0; i < len(pks); i++ {
pk := validators[i].PublicKey()
pks[i] = pk[:]
}
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}
wanted := &pb.CheckPtInfo{
Fork: baseState.Fork(),
GenesisRoot: baseState.GenesisValidatorRoot(),
Seed: seed[:],
ActiveIndices: indices,
PubKeys: pks,
}
require.DeepEqual(t, wanted, returned)
cached, err := service.checkPtInfoCache.get(checkpoint)
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.ErrorContains(t, "Root and finalized store are not consistent", err)
}
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1, Root: r32[:]}
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)
}
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1, Root: r32[:]}
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
_, err = service.forkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)
require.DeepEqual(t, wanted, cached)
}

View File

@@ -121,12 +121,26 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
}
// Update deposit cache.
s.depositCache.InsertFinalizedDeposits(ctx, int64(postState.Eth1DepositIndex()))
finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not fetch finalized state")
}
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
s.depositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
if featureconfig.Get().EnablePruningDepositProofs {
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.depositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
}
}
defer reportAttestationInclusion(b)
return s.handleEpochBoundary(postState)
return s.handleEpochBoundary(ctx, postState)
}
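A minimal, self-contained Go sketch of the finalized-deposit bookkeeping above. The depositCache type, its fields, and its method signatures here are hypothetical stand-ins for illustration only; they are not Prysm's actual depositcache API. The key point is the indexing: DepositCount is a 1-based count, so the last finalized deposit sits at index DepositCount-1, and proofs up to that index can be dropped.
package main

import "fmt"

type depositCache struct {
	finalizedIndex int64
	proofs         map[int64][]byte
}

// InsertFinalizedDeposits advances the highest finalized deposit index.
func (d *depositCache) InsertFinalizedDeposits(idx int64) {
	if idx > d.finalizedIndex {
		d.finalizedIndex = idx
	}
}

// PruneProofs drops Merkle proofs at or below idx; proofs are only needed
// during state transition, so finalized ones can be removed to save space.
func (d *depositCache) PruneProofs(idx int64) {
	for i := range d.proofs {
		if i <= idx {
			delete(d.proofs, i)
		}
	}
}

func main() {
	dc := &depositCache{finalizedIndex: -1, proofs: map[int64][]byte{0: {0x1}, 1: {0x2}, 2: {0x3}}}
	depositCount := uint64(2) // Eth1Data().DepositCount of the finalized state
	lastIndex := int64(depositCount) - 1
	dc.InsertFinalizedDeposits(lastIndex)
	dc.PruneProofs(lastIndex)
	fmt.Println(dc.finalizedIndex, len(dc.proofs)) // prints: 1 1
}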
// onBlockInitialSyncStateTransition is called when an initial sync block is received.
@@ -161,12 +175,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
return nil
}
var postState *stateTrie.BeaconState
if featureconfig.Get().InitSyncNoVerify {
postState, err = state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
} else {
postState, err = state.ExecuteStateTransition(ctx, preState, signed)
}
postState, err := state.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}
@@ -200,7 +209,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
}
}
return s.handleEpochBoundary(postState)
return s.handleEpochBoundary(ctx, postState)
}
func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock,
@@ -245,8 +254,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
// Save potential boundary states.
if helpers.IsEpochStart(preState.Slot()) {
boundaries[blockRoots[i]] = preState.Copy()
if err := s.handleEpochBoundary(preState); err != nil {
return nil, nil, fmt.Errorf("could not handle epoch boundary state")
if err := s.handleEpochBoundary(ctx, preState); err != nil {
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
}
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
@@ -280,7 +289,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
// handles a block after the block's batch has been verified, where we can save blocks,
// their state summaries, and split them off into the relevant hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
b := signed.Block
s.saveInitSyncBlock(blockRoot, signed)
@@ -313,9 +322,11 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
}
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(postState *stateTrie.BeaconState) error {
func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.BeaconState) error {
if postState.Slot() >= s.nextEpochBoundarySlot {
reportEpochMetrics(postState)
if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
return err
}
var err error
s.nextEpochBoundarySlot, err = helpers.StartSlot(helpers.NextEpoch(postState))
if err != nil {
@@ -354,7 +365,7 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
}
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock,
root [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}

View File

@@ -25,7 +25,7 @@ func (s *Service) CurrentSlot() uint64 {
// to retrieve the state in the DB. It verifies the pre-state's validity and that the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "forkChoice.getBlockPreState")
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
defer span.End()
// Verify incoming block has a valid pre state.
@@ -56,7 +56,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
// verifyBlkPreState validates that the input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "chainService.verifyBlkPreState")
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
defer span.End()
parentRoot := bytesutil.ToBytes32(b.ParentRoot)
@@ -87,7 +87,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) e
// VerifyBlkDescendant validates that the input block root is a descendant of the
// current finalized block root.
func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "forkChoice.VerifyBlkDescendant")
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyBlkDescendant")
defer span.End()
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.finalizedCheckpt.Root))
finalizedBlkSigned, err := s.beaconDB.Block(ctx, fRoot)
@@ -255,7 +255,39 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
// return root
func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "forkChoice.ancestor")
ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
defer span.End()
r := bytesutil.ToBytes32(root)
// Get the ancestor root from the fork choice store instead of recursively looking up blocks in the DB.
// This is the most optimal outcome.
ar, err := s.ancestorByForkChoiceStore(ctx, r, slot)
if err != nil {
// Fall back to the DB when the root could not be retrieved from the fork choice store.
// This is the second line of defense for retrieving the ancestor root.
ar, err = s.ancestorByDB(ctx, r, slot)
if err != nil {
return nil, err
}
}
return ar, nil
}
// This retrieves an ancestor root using the fork choice store. The lookup loops through a flat array structure.
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByForkChoiceStore")
defer span.End()
if !s.forkChoiceStore.HasParent(r) {
return nil, errors.New("could not find root in fork choice store")
}
return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
}
// This retrieves an ancestor root using the DB. The lookup recursively walks the DB and is slower than `ancestorByForkChoiceStore`.
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByDB")
defer span.End()
// Stop recursive ancestry lookup if context is cancelled.
@@ -263,13 +295,6 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
return nil, ctx.Err()
}
r := bytesutil.ToBytes32(root)
// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
// This is most optimal outcome.
if s.forkChoiceStore.HasParent(r) {
return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
}
signed, err := s.beaconDB.Block(ctx, r)
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor block")
@@ -284,10 +309,10 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
}
b := signed.Block
if b.Slot == slot || b.Slot < slot {
return root, nil
return r[:], nil
}
return s.ancestor(ctx, b.ParentRoot, slot)
return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot), slot)
}
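A minimal sketch of the two-tier ancestor lookup above, using hypothetical in-memory maps in place of the fork choice store and the block DB. The real Prysm types and method signatures differ; this only illustrates the fast-path-then-DB-fallback control flow.
package main

import (
	"errors"
	"fmt"
)

type blockInfo struct {
	slot   uint64
	parent [32]byte
}

// forkChoiceBlocks stands in for the flat fork choice store (fast path).
var forkChoiceBlocks = map[[32]byte]blockInfo{}

// blockDB stands in for the on-disk block store (slow fallback).
var blockDB = map[[32]byte]blockInfo{}

// ancestorByForkChoice walks the flat store iteratively.
func ancestorByForkChoice(r [32]byte, slot uint64) ([32]byte, error) {
	for {
		b, ok := forkChoiceBlocks[r]
		if !ok {
			return [32]byte{}, errors.New("root not in fork choice store")
		}
		if b.slot <= slot {
			return r, nil // reached the requested slot or a skip slot before it
		}
		r = b.parent
	}
}

// ancestorByDB walks the DB recursively; slower, used only as a fallback.
func ancestorByDB(r [32]byte, slot uint64) ([32]byte, error) {
	b, ok := blockDB[r]
	if !ok {
		return [32]byte{}, errors.New("root not in db")
	}
	if b.slot <= slot {
		return r, nil
	}
	return ancestorByDB(b.parent, slot)
}

// ancestor tries the fast path first and only touches the DB on a miss.
func ancestor(r [32]byte, slot uint64) ([32]byte, error) {
	if a, err := ancestorByForkChoice(r, slot); err == nil {
		return a, nil
	}
	return ancestorByDB(r, slot)
}

func main() {
	genesis, child := [32]byte{1}, [32]byte{2}
	blockDB[genesis] = blockInfo{slot: 0}
	blockDB[child] = blockInfo{slot: 5, parent: genesis}
	root, err := ancestor(child, 0)   // fork choice store is empty, so this falls back to the DB
	fmt.Println(root == genesis, err) // prints: true <nil>
}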
// This updates justified check point in store, if the new justified is later than stored justified or
@@ -310,10 +335,7 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *state
if !attestationutil.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
return nil
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
}
// Update justified if store justified is not in chain with finalized check point.
@@ -339,7 +361,7 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *state
// This retrieves missing blocks from the DB (i.e. the blocks that couldn't be received over sync) and inserts them into the fork choice store.
// This is useful for the block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock,
fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*ethpb.BeaconBlock, 0)
parentRoot := bytesutil.ToBytes32(blk.ParentRoot)

View File

@@ -146,8 +146,8 @@ func TestStore_OnBlockBatch(t *testing.T) {
bState := st.Copy()
blks := []*ethpb.SignedBeaconBlock{}
blkRoots := [][32]byte{}
var blks []*ethpb.SignedBeaconBlock
var blkRoots [][32]byte
var firstState *stateTrie.BeaconState
for i := 1; i < 10; i++ {
b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), uint64(i))
@@ -319,7 +319,6 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
service.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
st := testutil.NewBeaconState()
service.initSyncState[r] = st.Copy()
require.NoError(t, db.SaveState(ctx, st.Copy(), r))
// Could update
@@ -516,6 +515,15 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
slot := svc.CurrentSlot()
require.Equal(t, uint64(0), slot, "Unexpected slot")
}
func TestAncestorByDB_CtxErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
service, err := NewService(ctx, &Config{})
require.NoError(t, err)
cancel()
_, err = service.ancestorByDB(ctx, [32]byte{}, 0)
require.ErrorContains(t, "context canceled", err)
}
func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
@@ -562,6 +570,82 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
}
}
func TestAncestor_CanUseForkchoice(t *testing.T) {
ctx := context.Background()
cfg := &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b100 := testutil.NewBeaconBlock()
b100.Block.Slot = 100
b100.Block.ParentRoot = r1[:]
r100, err := b100.Block.HashTreeRoot()
require.NoError(t, err)
b200 := testutil.NewBeaconBlock()
b200.Block.Slot = 200
b200.Block.ParentRoot = r100[:]
r200, err := b200.Block.HashTreeRoot()
require.NoError(t, err)
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
}
r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)
if bytesutil.ToBytes32(r) != r100 {
t.Error("Did not get correct root")
}
}
func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b100 := testutil.NewBeaconBlock()
b100.Block.Slot = 100
b100.Block.ParentRoot = r1[:]
r100, err := b100.Block.HashTreeRoot()
require.NoError(t, err)
b200 := testutil.NewBeaconBlock()
b200.Block.Slot = 200
b200.Block.ParentRoot = r100[:]
r200, err := b200.Block.HashTreeRoot()
require.NoError(t, err)
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, db.SaveBlock(context.Background(), beaconBlock)) // Saves blocks to DB.
}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)
if bytesutil.ToBytes32(r) != r100 {
t.Error("Did not get correct root")
}
}
func TestEnsureRootNotZeroHashes(t *testing.T) {
ctx := context.Background()
cfg := &Config{}
@@ -752,3 +836,15 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
require.NoError(t, err)
assert.DeepEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
}
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
ctx := context.Background()
cfg := &Config{}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
s := testutil.NewBeaconState()
require.NoError(t, s.SetSlot(1))
service.head = &head{}
require.ErrorContains(t, "failed to initialize precompute: nil inner state", service.handleEpochBoundary(ctx, s))
}

View File

@@ -1,21 +1,18 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/shared/timeutils"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -23,10 +20,9 @@ import (
// AttestationReceiver interface defines the methods of the chain service for receiving and processing new attestations.
type AttestationReceiver interface {
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool
AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error)
AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error)
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
}
// ReceiveAttestationNoPubsub is a function that defines the operations that are performed on
@@ -43,35 +39,14 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
return errors.Wrap(err, "could not process attestation")
}
if !featureconfig.Get().DisableUpdateHeadPerAttestation {
// This updates fork choice head, if a new head could not be updated due to
// long range or intermediate forking. It simply logs a warning and returns nil
// as that's more appropriate than returning errors.
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
return nil
}
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
return nil
}
return nil
}
// IsValidAttestation returns true if the attestation can be verified against its pre-state.
func (s *Service) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
baseState, err := s.AttestationPreState(ctx, att)
if err != nil {
log.WithError(err).Error("Failed to get attestation pre state")
return false
}
if err := blocks.VerifyAttestationSignature(ctx, baseState, att); err != nil {
log.WithError(err).Error("Failed to validate attestation")
return false
}
return true
}
// AttestationPreState returns the pre state of attestation.
func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error) {
ss, err := helpers.StartSlot(att.Data.Target.Epoch)
@@ -84,24 +59,37 @@ func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestatio
return s.getAttPreState(ctx, att.Data.Target)
}
// AttestationCheckPtInfo returns the checkpoint info of an attestation, which can be used to verify the attestation's
// contents and signatures.
func (s *Service) AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
ss, err := helpers.StartSlot(att.Data.Target.Epoch)
if err != nil {
return nil, err
}
if err := helpers.ValidateSlotClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
return s.getAttCheckPtInfo(ctx, att.Data.Target, helpers.SlotToEpoch(att.Data.Slot))
}
// VerifyLmdFfgConsistency verifies that an attestation's LMD and FFG votes are consistent with each other.
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
return s.verifyLMDFFGConsistent(ctx, a.Data.Target.Epoch, a.Data.Target.Root, a.Data.BeaconBlockRoot)
}
// VerifyFinalizedConsistency verifies the input root is consistent with the finalized store.
// When the input root is not consistent with the finalized store, we know it is not
// on the finalized checkpoint that leads to the current canonical chain, and it should be rejected accordingly.
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root has an ancestor that aligns with the finalized checkpoint.
// In this case, we can exit early to save on additional computation.
if s.forkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
return nil
}
f := s.FinalizedCheckpt()
ss, err := helpers.StartSlot(f.Epoch)
if err != nil {
return err
}
r, err := s.ancestor(ctx, root, ss)
if err != nil {
return err
}
if !bytes.Equal(f.Root, r) {
return errors.New("Root and finalized store are not consistent")
}
return nil
}
// This processes attestations from the attestation pool to account for validator votes and fork choice.
func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
// Wait for state to be initialized.
@@ -138,7 +126,7 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}
if !s.verifyCheckpointEpoch(a.Data.Target) {
if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
continue
}
@@ -155,23 +143,3 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
}
}
}
// This verifies the epoch of the input checkpoint is within the current or previous epoch
// with respect to the current time. Returns true if it is, false otherwise.
func (s *Service) verifyCheckpointEpoch(c *ethpb.Checkpoint) bool {
now := uint64(timeutils.Now().Unix())
genesisTime := uint64(s.genesisTime.Unix())
currentSlot := (now - genesisTime) / params.BeaconConfig().SecondsPerSlot
currentEpoch := helpers.SlotToEpoch(currentSlot)
var prevEpoch uint64
if currentEpoch > 1 {
prevEpoch = currentEpoch - 1
}
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
return false
}
return true
}
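A minimal sketch of the epoch-window check that the removed verifyCheckpointEpoch performed and that helpers.VerifyCheckpointEpoch now covers: the checkpoint epoch must equal the current or previous epoch derived from wall-clock time. The constants below are illustrative assumptions, not the mainnet config.
package main

import (
	"fmt"
	"time"
)

// Illustrative values only; the real values come from params.BeaconConfig().
const (
	secondsPerSlot = 12
	slotsPerEpoch  = 32
)

// checkpointEpochWithinWindow reports whether cpEpoch equals the current or
// previous epoch derived from wall-clock time, mirroring the check above.
func checkpointEpochWithinWindow(cpEpoch uint64, genesis, now time.Time) bool {
	currentSlot := uint64(now.Unix()-genesis.Unix()) / secondsPerSlot
	currentEpoch := currentSlot / slotsPerEpoch
	var prevEpoch uint64
	if currentEpoch > 1 {
		prevEpoch = currentEpoch - 1
	}
	return cpEpoch == currentEpoch || cpEpoch == prevEpoch
}

func main() {
	now := time.Now()
	genesis := now.Add(-3 * slotsPerEpoch * secondsPerSlot * time.Second) // ~3 epochs ago
	fmt.Println(checkpointEpochWithinWindow(2, genesis, now))             // previous epoch: true
	fmt.Println(checkpointEpochWithinWindow(0, genesis, now))             // too old: false
}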

View File

@@ -9,34 +9,10 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
helpers.ClearCache()
db, sc := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db, sc)
chainService.genesisTime = time.Now()
assert.Equal(t, true, chainService.verifyCheckpointEpoch(&ethpb.Checkpoint{Root: make([]byte, 32)}))
assert.Equal(t, false, chainService.verifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 1}))
}
func TestAttestationPreState_FarFutureSlot(t *testing.T) {
helpers.ClearCache()
db, sc := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db, sc)
chainService.genesisTime = time.Now()
e := helpers.MaxSlotBuffer/params.BeaconConfig().SlotsPerEpoch + 1
_, err := chainService.AttestationCheckPtInfo(context.Background(), &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Epoch: e}}})
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}
func TestAttestationCheckPtInfo_FarFutureSlot(t *testing.T) {
func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
helpers.ClearCache()
db, sc := testDB.SetupDB(t)

View File

@@ -7,12 +7,16 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// This defines how many epochs of non-finality must pass before the runtime begins saving hot states to the DB.
var epochsSinceFinalitySaveHotStateDB = 100
// BlockReceiver interface defines the methods of the chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
@@ -58,8 +62,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlo
return err
}
// Have we been finalizing? Should we start saving hot states to db?
if err := s.checkSaveHotStateDB(ctx); err != nil {
return err
}
// Reports on block and fork choice metrics.
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
// Log block sync status.
logBlockSyncStatus(blockCopy.Block, blockRoot, s.finalizedCheckpt)
@@ -95,7 +104,7 @@ func (s *Service) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.Sign
})
// Reports on blockCopy and fork choice metrics.
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
// Log state transition data.
log.WithFields(logrus.Fields{
@@ -139,10 +148,12 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedB
})
// Reports on blockCopy and fork choice metrics.
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
}
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
// log.Fatalf will prevent defer from being called
span.End()
// Exit the runtime if the node failed to verify the weak subjectivity checkpoint.
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
}
@@ -177,3 +188,21 @@ func (s *Service) handlePostBlockOperations(b *ethpb.BeaconBlock) error {
}
return nil
}
// This checks whether it's time to start saving hot states to the DB.
// It's time when there have been `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := helpers.SlotToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` from underflowing.
var sinceFinality uint64
if currentEpoch > s.finalizedCheckpt.Epoch {
sinceFinality = currentEpoch - s.finalizedCheckpt.Epoch
}
if sinceFinality >= uint64(epochsSinceFinalitySaveHotStateDB) {
s.stateGen.EnableSaveHotStateToDB(ctx)
return nil
}
return s.stateGen.DisableSaveHotStateToDB(ctx)
}
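A minimal sketch of the toggle implemented by checkSaveHotStateDB: after a configurable number of epochs without finality, switch the state generator into save-hot-states-to-DB mode, and switch back once finality resumes. The hotStateSaver interface below is a hypothetical stand-in for stategen, not Prysm's actual API.
package main

import "fmt"

const epochsSinceFinalitySaveHotStateDB = 100

type hotStateSaver interface {
	EnableSaveHotStateToDB()
	DisableSaveHotStateToDB()
}

type logSaver struct{}

func (logSaver) EnableSaveHotStateToDB()  { fmt.Println("Entering mode to save hot states in DB") }
func (logSaver) DisableSaveHotStateToDB() { fmt.Println("Exiting mode to save hot states in DB") }

func checkSaveHotStateDB(currentEpoch, finalizedEpoch uint64, s hotStateSaver) {
	// Guard against underflow when the finalized epoch is ahead of the clock.
	var sinceFinality uint64
	if currentEpoch > finalizedEpoch {
		sinceFinality = currentEpoch - finalizedEpoch
	}
	if sinceFinality >= epochsSinceFinalitySaveHotStateDB {
		s.EnableSaveHotStateToDB()
		return
	}
	s.DisableSaveHotStateToDB()
}

func main() {
	checkSaveHotStateDB(250, 100, logSaver{}) // 150 epochs of non-finality: enable
	checkSaveHotStateDB(101, 100, logSaver{}) // finality resumed: disable
}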

View File

@@ -4,6 +4,7 @@ import (
"context"
"sync"
"testing"
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
blockchainTesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
@@ -17,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestService_ReceiveBlock(t *testing.T) {
@@ -362,7 +364,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}
func TestService_HasInitSyncBlock(t *testing.T) {
s, err := NewService(context.Background(), &Config{})
s, err := NewService(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
require.NoError(t, err)
r := [32]byte{'a'}
if s.HasInitSyncBlock(r) {
@@ -373,3 +375,41 @@ func TestService_HasInitSyncBlock(t *testing.T) {
t.Error("Should have block")
}
}
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(db, stateSummaryCache)})
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch * uint64(epochsSinceFinalitySaveHotStateDB)
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.finalizedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
}
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(db, stateSummaryCache)})
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Exiting mode to save hot states in DB")
}
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(db, stateSummaryCache)})
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 10000000}
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}

View File

@@ -34,50 +34,46 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
ctx context.Context
cancel context.CancelFunc
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
chainStartFetcher powchain.ChainStartFetcher
attPool attestations.Pool
slashingPool *slashings.Pool
exitPool *voluntaryexits.Pool
genesisTime time.Time
p2p p2p.Broadcaster
maxRoutines int
head *head
headLock sync.RWMutex
stateNotifier statefeed.Notifier
genesisRoot [32]byte
forkChoiceStore f.ForkChoicer
justifiedCheckpt *ethpb.Checkpoint
prevJustifiedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
prevFinalizedCheckpt *ethpb.Checkpoint
nextEpochBoundarySlot uint64
initSyncState map[[32]byte]*stateTrie.BeaconState
boundaryRoots [][32]byte
checkpointState *cache.CheckpointStateCache
checkpointStateLock sync.Mutex
stateGen *stategen.State
opsService *attestations.Service
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
recentCanonicalBlocks map[[32]byte]bool
recentCanonicalBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
checkPtInfoCache *checkPtInfoCache
wsEpoch uint64
wsRoot []byte
wsVerified bool
ctx context.Context
cancel context.CancelFunc
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
chainStartFetcher powchain.ChainStartFetcher
attPool attestations.Pool
slashingPool *slashings.Pool
exitPool *voluntaryexits.Pool
genesisTime time.Time
p2p p2p.Broadcaster
maxRoutines int
head *head
headLock sync.RWMutex
stateNotifier statefeed.Notifier
genesisRoot [32]byte
forkChoiceStore f.ForkChoicer
justifiedCheckpt *ethpb.Checkpoint
prevJustifiedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
prevFinalizedCheckpt *ethpb.Checkpoint
nextEpochBoundarySlot uint64
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
stateGen *stategen.State
opsService *attestations.Service
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
wsEpoch uint64
wsRoot []byte
wsVerified bool
}
// Config options for the service.
@@ -104,39 +100,31 @@ type Config struct {
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
return &Service{
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
depositCache: cfg.DepositCache,
chainStartFetcher: cfg.ChainStartFetcher,
attPool: cfg.AttPool,
exitPool: cfg.ExitPool,
slashingPool: cfg.SlashingPool,
p2p: cfg.P2p,
maxRoutines: cfg.MaxRoutines,
stateNotifier: cfg.StateNotifier,
forkChoiceStore: cfg.ForkChoiceStore,
initSyncState: make(map[[32]byte]*stateTrie.BeaconState),
boundaryRoots: [][32]byte{},
checkpointState: cache.NewCheckpointStateCache(),
opsService: cfg.OpsService,
stateGen: cfg.StateGen,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
recentCanonicalBlocks: make(map[[32]byte]bool),
justifiedBalances: make([]uint64, 0),
checkPtInfoCache: newCheckPointInfoCache(),
wsEpoch: cfg.WspEpoch,
wsRoot: cfg.WspBlockRoot,
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
depositCache: cfg.DepositCache,
chainStartFetcher: cfg.ChainStartFetcher,
attPool: cfg.AttPool,
exitPool: cfg.ExitPool,
slashingPool: cfg.SlashingPool,
p2p: cfg.P2p,
maxRoutines: cfg.MaxRoutines,
stateNotifier: cfg.StateNotifier,
forkChoiceStore: cfg.ForkChoiceStore,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
opsService: cfg.OpsService,
stateGen: cfg.StateGen,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
justifiedBalances: make([]uint64, 0),
wsEpoch: cfg.WspEpoch,
wsRoot: cfg.WspBlockRoot,
}, nil
}
// Start a blockchain service's main event loop.
func (s *Service) Start() {
beaconState, err := s.beaconDB.HeadState(s.ctx)
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
}
// For running initial sync with the state cache, in the event of a restart, we use the
// last finalized checkpoint as the starting point to sync instead of the head
// state. This is because we no longer save the state every slot during sync.
@@ -145,27 +133,25 @@ func (s *Service) Start() {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if beaconState == nil {
r := bytesutil.ToBytes32(cp.Root)
// Before the first finalized epoch, in the current epoch,
// the finalized root is defined as zero hashes instead of genesis root hash.
// We want to use genesis root to retrieve for state.
if r == params.BeaconConfig().ZeroHash {
genesisBlock, err := s.beaconDB.GenesisBlock(s.ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if genesisBlock != nil {
r, err = genesisBlock.Block.HashTreeRoot()
if err != nil {
log.Fatalf("Could not tree hash genesis block: %v", err)
}
}
}
beaconState, err = s.stateGen.StateByRoot(s.ctx, r)
r := bytesutil.ToBytes32(cp.Root)
// Before the first finalized epoch, in the current epoch,
// the finalized root is defined as zero hashes instead of the genesis root hash.
// We want to use the genesis root to retrieve the state.
if r == params.BeaconConfig().ZeroHash {
genesisBlock, err := s.beaconDB.GenesisBlock(s.ctx)
if err != nil {
log.Fatalf("Could not fetch beacon state by root: %v", err)
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if genesisBlock != nil {
r, err = genesisBlock.Block.HashTreeRoot()
if err != nil {
log.Fatalf("Could not tree hash genesis block: %v", err)
}
}
}
beaconState, err := s.stateGen.StateByRoot(s.ctx, r)
if err != nil {
log.Fatalf("Could not fetch beacon state by root: %v", err)
}
// Make sure that attestation processor is subscribed and ready for state initializing event.
@@ -207,6 +193,19 @@ func (s *Service) Start() {
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)
ss, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err != nil {
log.Fatalf("Could not get start slot of finalized epoch: %v", err)
}
h := s.headBlock().Block
log.WithFields(logrus.Fields{
"startSlot": ss,
"endSlot": h.Slot,
}).Info("Loading blocks to fork choice store, this may take a while.")
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
}
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
// Exit the runtime if the node failed to verify the weak subjectivity checkpoint.
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
@@ -330,11 +329,7 @@ func (s *Service) Stop() error {
}
// Save initial sync cached blocks to the DB before stop.
if err := s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks()); err != nil {
return err
}
return nil
return s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}
// Status always returns nil unless there is an error condition that causes
@@ -346,12 +341,6 @@ func (s *Service) Status() error {
return nil
}
// ClearCachedStates removes all stored cached states. This is done after the node
// is synced.
func (s *Service) ClearCachedStates() {
s.initSyncState = map[[32]byte]*stateTrie.BeaconState{}
}
// This gets called when the beacon chain is first initialized to save genesis data (state, block, and more) in the DB.
func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.BeaconState) error {
stateRoot, err := genesisState.HashTreeRoot(ctx)
@@ -429,7 +418,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
}
s.genesisRoot = genesisBlkRoot
if flags.Get().UnsafeSync {
if flags.Get().HeadSync {
headBlock, err := s.beaconDB.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not retrieve head block")
@@ -438,7 +427,13 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not hash head block")
}
headState, err := s.beaconDB.HeadState(ctx)
finalizedState, err := s.stateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d."+
"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block.Slot)
headState, err := s.stateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state")
}
@@ -478,7 +473,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
// This is called when a client starts from a non-genesis slot. It passes the last justified and finalized
// information to the fork choice service to initialize the fork choice store.
func (s *Service) resumeForkChoice(justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) {
func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *ethpb.Checkpoint) {
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
s.forkChoiceStore = store
}

View File

@@ -67,7 +67,7 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
return nil
}
var _ = p2p.Broadcaster(&mockBroadcaster{})
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummaryCache) *Service {
endpoint := "http://127.0.0.1"
@@ -218,7 +218,9 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
if headBlk == nil {
t.Error("Head state can't be nil after initialize beacon chain")
}
if bc.headRoot() == params.BeaconConfig().ZeroHash {
r, err := bc.HeadRoot(ctx)
require.NoError(t, err)
if bytesutil.ToBytes32(r) == params.BeaconConfig().ZeroHash {
t.Error("Canonical root for slot 0 can't be zeros after initialize beacon chain")
}
}

View File

@@ -38,6 +38,7 @@ type ChainService struct {
Balance *precompute.Balance
Genesis time.Time
ValidatorsRoot [32]byte
CanonicalRoots map[[32]byte]bool
Fork *pb.Fork
ETH1Data *ethpb.Eth1Data
DB db.Database
@@ -80,7 +81,8 @@ func (msn *MockBlockNotifier) BlockFeed() *event.Feed {
// MockStateNotifier mocks the state notifier.
type MockStateNotifier struct {
feed *event.Feed
feed *event.Feed
feedLock sync.Mutex
recv []*feed.Event
recvLock sync.Mutex
@@ -98,6 +100,9 @@ func (msn *MockStateNotifier) ReceivedEvents() []*feed.Event {
// StateFeed returns a state feed.
func (msn *MockStateNotifier) StateFeed() *event.Feed {
msn.feedLock.Lock()
defer msn.feedLock.Unlock()
if msn.feed == nil && msn.recvCh == nil {
msn.feed = new(event.Feed)
if msn.RecordEvents {
@@ -141,7 +146,7 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
}
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
if ms.State == nil {
ms.State = &stateTrie.BeaconState{}
}
@@ -168,7 +173,7 @@ func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethp
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, roots [][32]byte) error {
func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, _ [][32]byte) error {
if ms.State == nil {
ms.State = &stateTrie.BeaconState{}
}
@@ -197,7 +202,7 @@ func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.Sig
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
if ms.State == nil {
ms.State = &stateTrie.BeaconState{}
}
@@ -232,7 +237,7 @@ func (ms *ChainService) HeadSlot() uint64 {
}
// HeadRoot mocks HeadRoot method in chain service.
func (ms *ChainService) HeadRoot(ctx context.Context) ([]byte, error) {
func (ms *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
if len(ms.Root) > 0 {
return ms.Root, nil
}
@@ -270,7 +275,7 @@ func (ms *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
}
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
func (ms *ChainService) ReceiveAttestation(context.Context, *ethpb.Attestation) error {
func (ms *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
return nil
}
@@ -280,12 +285,12 @@ func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attes
}
// AttestationPreState mocks AttestationPreState method in chain service.
func (ms *ChainService) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*stateTrie.BeaconState, error) {
func (ms *ChainService) AttestationPreState(_ context.Context, _ *ethpb.Attestation) (*stateTrie.BeaconState, error) {
return ms.State, nil
}
// HeadValidatorsIndices mocks the same method in the chain service.
func (ms *ChainService) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
func (ms *ChainService) HeadValidatorsIndices(_ context.Context, epoch uint64) ([]uint64, error) {
if ms.State == nil {
return []uint64{}, nil
}
@@ -293,7 +298,7 @@ func (ms *ChainService) HeadValidatorsIndices(ctx context.Context, epoch uint64)
}
// HeadSeed mocks the same method in the chain service.
func (ms *ChainService) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
func (ms *ChainService) HeadSeed(_ context.Context, epoch uint64) ([32]byte, error) {
return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
}
@@ -323,26 +328,27 @@ func (ms *ChainService) CurrentSlot() uint64 {
}
// Participation mocks the same method in the chain service.
func (ms *ChainService) Participation(epoch uint64) *precompute.Balance {
func (ms *ChainService) Participation(_ uint64) *precompute.Balance {
return ms.Balance
}
// IsValidAttestation always returns true.
func (ms *ChainService) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
func (ms *ChainService) IsValidAttestation(_ context.Context, _ *ethpb.Attestation) bool {
return ms.ValidAttestation
}
// IsCanonical returns whether a block with the provided root is part of
// the canonical chain.
func (ms *ChainService) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
func (ms *ChainService) IsCanonical(_ context.Context, r [32]byte) (bool, error) {
if ms.CanonicalRoots != nil {
_, ok := ms.CanonicalRoots[r]
return ok, nil
}
return true, nil
}
// ClearCachedStates does nothing.
func (ms *ChainService) ClearCachedStates() {}
// HasInitSyncBlock mocks the same method in the chain service.
func (ms *ChainService) HasInitSyncBlock(root [32]byte) bool {
func (ms *ChainService) HasInitSyncBlock(_ [32]byte) bool {
return false
}
@@ -352,17 +358,28 @@ func (ms *ChainService) HeadGenesisValidatorRoot() [32]byte {
}
// VerifyBlkDescendant mocks VerifyBlkDescendant and always returns nil.
func (ms *ChainService) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
func (ms *ChainService) VerifyBlkDescendant(_ context.Context, _ [32]byte) error {
return ms.VerifyBlkDescendantErr
}
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
func (ms *ChainService) VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error {
func (ms *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
return errors.New("LMD and FFG miss matched")
}
return nil
}
// VerifyFinalizedConsistency mocks VerifyFinalizedConsistency in the chain service.
func (ms *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) error {
if !bytes.Equal(r, ms.FinalizedCheckPoint.Root) {
return errors.New("Root and finalized store are not consistent")
}
return nil
}
// AttestationCheckPtInfo mocks AttestationCheckPtInfo in the chain service.
func (ms *ChainService) AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
func (ms *ChainService) AttestationCheckPtInfo(_ context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
f := ms.State.Fork()
g := bytesutil.ToBytes32(ms.State.GenesisValidatorRoot())
seed, err := helpers.Seed(ms.State, helpers.SlotToEpoch(att.Data.Slot), params.BeaconConfig().DomainBeaconAttester)

View File

@@ -2,6 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
# gazelle:ignore committee_disabled.go
# gazelle:ignore proposer_indices_disabled.go
go_library(
name = "go_default_library",
srcs = [
@@ -14,12 +15,15 @@ go_library(
"skip_slot_cache.go",
"state_summary.go",
"subnet_ids.go",
"proposer_indices_type.go",
] + select({
"//fuzz:fuzzing_enabled": [
"committee_disabled.go",
"proposer_indices_disabled.go"
],
"//conditions:default": [
"committee.go",
"proposer_indices.go",
],
}),
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
@@ -57,6 +61,7 @@ go_test(
"hot_state_cache_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",
"proposer_indices_test.go"
],
embed = [":go_default_library"],
deps = [

View File

@@ -12,7 +12,6 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"k8s.io/client-go/tools/cache"
)
@@ -99,10 +98,7 @@ func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRe
if exists && item != nil && item.(*attestationReqResWrapper).res != nil {
attestationCacheHit.Inc()
if featureconfig.Get().ReduceAttesterStateCopy {
return state.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
}
return item.(*attestationReqResWrapper).res, nil
return state.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
}
attestationCacheMiss.Inc()
return nil, nil
@@ -138,7 +134,7 @@ func (c *AttestationCache) MarkNotInProgress(req *ethpb.AttestationDataRequest)
}
// Put the response in the cache.
func (c *AttestationCache) Put(ctx context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
func (c *AttestationCache) Put(_ context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
data := &attestationReqResWrapper{
req,
res,
@@ -167,10 +163,7 @@ func wrapperToKey(i interface{}) (string, error) {
}
func reqToKey(req *ethpb.AttestationDataRequest) (string, error) {
if featureconfig.Get().ReduceAttesterStateCopy {
return fmt.Sprintf("%d", req.Slot), nil
}
return fmt.Sprintf("%d-%d", req.CommitteeIndex, req.Slot), nil
return fmt.Sprintf("%d", req.Slot), nil
}
type attestationReqResWrapper struct {

View File

@@ -1,8 +1,10 @@
package cache
import (
"errors"
"context"
"math"
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
@@ -13,10 +15,6 @@ import (
)
var (
// ErrNotCheckpointState will be returned when a cache object is not a pointer to
// a CheckpointState struct.
ErrNotCheckpointState = errors.New("object is not a state by check point struct")
// maxCheckpointStateSize defines the max number of entries the checkpoint-to-state cache can contain.
// Choosing 10 accounts for multiple forks: it allows 5 forks per epoch boundary with a 2-epoch
// window to accept attestations based on the latest spec.
@@ -35,8 +33,9 @@ var (
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
type CheckpointStateCache struct {
cache *lru.Cache
lock sync.RWMutex
cache *lru.Cache
lock sync.RWMutex
inProgress map[string]bool
}
// NewCheckpointStateCache creates a new checkpoint state cache for storing/accessing processed state.
@@ -46,22 +45,45 @@ func NewCheckpointStateCache() *CheckpointStateCache {
panic(err)
}
return &CheckpointStateCache{
cache: cache,
cache: cache,
inProgress: map[string]bool{},
}
}
// StateByCheckpoint fetches the state for a given checkpoint. Returns a reference to the
// cached state if it exists. Otherwise returns nil, nil.
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
c.lock.RLock()
defer c.lock.RUnlock()
h, err := hashutil.HashProto(cp)
func (c *CheckpointStateCache) StateByCheckpoint(ctx context.Context, cp *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
k, err := checkpointKey(cp)
if err != nil {
return nil, err
}
item, exists := c.cache.Get(h)
delay := minDelay
// Another identical request may already be in progress. Wait until that
// in-progress request resolves or the context is cancelled.
for {
if ctx.Err() != nil {
return nil, ctx.Err()
}
c.lock.RLock()
if !c.inProgress[k] {
c.lock.RUnlock()
break
}
c.lock.RUnlock()
// This increasing backoff reduces wasted CPU cycles while waiting for the
// in-progress flag to flip to false; a standalone sketch of this wait pattern follows this file's diff.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = math.Min(delay, maxDelay)
}
c.lock.RLock()
defer c.lock.RUnlock()
item, exists := c.cache.Get(k)
if exists && item != nil {
checkpointStateHit.Inc()
// Copy here is unnecessary since the return will only be used to verify attestation signature.
@@ -77,10 +99,47 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTr
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s *stateTrie.BeaconState) error {
c.lock.Lock()
defer c.lock.Unlock()
h, err := hashutil.HashProto(cp)
k, err := checkpointKey(cp)
if err != nil {
return err
}
c.cache.Add(h, s)
c.cache.ContainsOrAdd(k, s)
return nil
}
// MarkInProgress marks a request as in progress so that any other identical request
// blocks in StateByCheckpoint until MarkNotInProgress is called.
func (c *CheckpointStateCache) MarkInProgress(cp *ethpb.Checkpoint) error {
c.lock.Lock()
defer c.lock.Unlock()
k, err := checkpointKey(cp)
if err != nil {
return err
}
if c.inProgress[k] {
return ErrAlreadyInProgress
}
c.inProgress[k] = true
return nil
}
// MarkNotInProgress clears the in-progress marker for a given request. This should be
// called after AddCheckpointState.
func (c *CheckpointStateCache) MarkNotInProgress(cp *ethpb.Checkpoint) error {
c.lock.Lock()
defer c.lock.Unlock()
k, err := checkpointKey(cp)
if err != nil {
return err
}
delete(c.inProgress, k)
return nil
}
func checkpointKey(cp *ethpb.Checkpoint) (string, error) {
h, err := hashutil.HashProto(cp)
if err != nil {
return "", err
}
return string(h[:]), nil
}
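
The mark-in-progress guard and increasing backoff used by CheckpointStateCache (and by SkipSlotCache later in this diff) deduplicate identical expensive requests: one caller claims the key, the others wait for it to finish and then read the cached result. Below is a minimal, self-contained sketch of the same pattern with illustrative names (requestCache, markInProgress, a string-valued result); it is not the Prysm API.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

// errAlreadyInProgress mirrors the sentinel used by the caches in this diff.
var errAlreadyInProgress = errors.New("already in progress")

// requestCache is a simplified stand-in for CheckpointStateCache: one map for
// results and one map marking keys that are currently being computed.
type requestCache struct {
	lock       sync.RWMutex
	results    map[string]string
	inProgress map[string]bool
}

func newRequestCache() *requestCache {
	return &requestCache{
		results:    map[string]string{},
		inProgress: map[string]bool{},
	}
}

// markInProgress claims a key so that concurrent callers wait instead of
// repeating the same expensive work.
func (c *requestCache) markInProgress(key string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.inProgress[key] {
		return errAlreadyInProgress
	}
	c.inProgress[key] = true
	return nil
}

func (c *requestCache) markNotInProgress(key string) {
	c.lock.Lock()
	defer c.lock.Unlock()
	delete(c.inProgress, key)
}

func (c *requestCache) put(key, val string) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.results[key] = val
}

// get waits, with an increasing backoff, for any in-progress computation of
// the same key to finish, then reads the cached value.
func (c *requestCache) get(ctx context.Context, key string) (string, bool, error) {
	delay := 10 * time.Microsecond
	for {
		if ctx.Err() != nil {
			return "", false, ctx.Err()
		}
		c.lock.RLock()
		busy := c.inProgress[key]
		c.lock.RUnlock()
		if !busy {
			break
		}
		time.Sleep(delay)
		// Back off so the wait burns fewer CPU cycles.
		if delay < 10*time.Millisecond {
			delay *= 2
		}
	}
	c.lock.RLock()
	defer c.lock.RUnlock()
	v, ok := c.results[key]
	return v, ok, nil
}

func main() {
	c := newRequestCache()
	key := "checkpoint-42"
	if err := c.markInProgress(key); err == nil {
		go func() {
			defer c.markNotInProgress(key)
			time.Sleep(5 * time.Millisecond) // stand-in for an expensive state computation
			c.put(key, "computed-state")
		}()
	}
	v, ok, err := c.get(context.Background(), key)
	fmt.Println(v, ok, err) // computed-state true <nil>
}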

View File

@@ -1,6 +1,7 @@
package cache
import (
"context"
"testing"
"github.com/gogo/protobuf/proto"
@@ -23,13 +24,13 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
})
require.NoError(t, err)
state, err := cache.StateByCheckpoint(cp1)
state, err := cache.StateByCheckpoint(context.Background(), cp1)
require.NoError(t, err)
assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Expected state not to exist in empty cache")
require.NoError(t, cache.AddCheckpointState(cp1, st))
state, err = cache.StateByCheckpoint(cp1)
state, err = cache.StateByCheckpoint(context.Background(), cp1)
require.NoError(t, err)
if !proto.Equal(state.InnerStateUnsafe(), st.InnerStateUnsafe()) {
@@ -43,11 +44,11 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
require.NoError(t, err)
require.NoError(t, cache.AddCheckpointState(cp2, st2))
state, err = cache.StateByCheckpoint(cp2)
state, err = cache.StateByCheckpoint(context.Background(), cp2)
require.NoError(t, err)
assert.DeepEqual(t, st2.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
state, err = cache.StateByCheckpoint(cp1)
state, err = cache.StateByCheckpoint(context.Background(), cp1)
require.NoError(t, err)
assert.DeepEqual(t, st.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
}

View File

@@ -104,35 +104,6 @@ func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error
return nil
}
// AddProposerIndicesList updates the committee shuffled list with proposer indices.
func (c *CommitteeCache) AddProposerIndicesList(seed [32]byte, indices []uint64) error {
c.lock.Lock()
defer c.lock.Unlock()
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return err
}
if !exists {
committees := &Committees{ProposerIndices: indices}
if err := c.CommitteeCache.Add(committees); err != nil {
return err
}
} else {
committees, ok := obj.(*Committees)
if !ok {
return ErrNotCommittee
}
committees.ProposerIndices = indices
if err := c.CommitteeCache.Add(committees); err != nil {
return err
}
}
trim(c.CommitteeCache, maxCommitteesCacheSize)
return nil
}
// ActiveIndices returns the active indices of a given seed stored in cache.
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
c.lock.RLock()
@@ -181,30 +152,6 @@ func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
return len(item.SortedIndices), nil
}
// ProposerIndices returns the proposer indices of a given seed.
func (c *CommitteeCache) ProposerIndices(seed [32]byte) ([]uint64, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return nil, err
}
if exists {
CommitteeCacheHit.Inc()
} else {
CommitteeCacheMiss.Inc()
return nil, nil
}
item, ok := obj.(*Committees)
if !ok {
return nil, ErrNotCommittee
}
return item.ProposerIndices, nil
}
// HasEntry returns true if the committee cache has a value.
func (c *CommitteeCache) HasEntry(seed string) bool {
_, ok, err := c.CommitteeCache.GetByKey(seed)

View File

@@ -87,35 +87,6 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
assert.Equal(t, len(item.SortedIndices), count)
}
func TestCommitteeCache_AddProposerIndicesList(t *testing.T) {
cache := NewCommitteesCache()
seed := [32]byte{'A'}
indices, err := cache.ProposerIndices(seed)
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee count not to exist in empty cache")
}
require.NoError(t, cache.AddProposerIndicesList(seed, indices))
received, err := cache.ProposerIndices(seed)
require.NoError(t, err)
assert.DeepEqual(t, received, indices)
item := &Committees{Seed: [32]byte{'B'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
require.NoError(t, cache.AddCommitteeShuffledList(item))
indices, err = cache.ProposerIndices(item.Seed)
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee count not to exist in empty cache")
}
require.NoError(t, cache.AddProposerIndicesList(item.Seed, indices))
received, err = cache.ProposerIndices(item.Seed)
require.NoError(t, err)
assert.DeepEqual(t, received, indices)
}
func TestCommitteeCache_CanRotate(t *testing.T) {
cache := NewCommitteesCache()
@@ -150,7 +121,6 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
Seed: seed,
ShuffledIndices: []uint64{0},
SortedIndices: []uint64{},
ProposerIndices: []uint64{},
})
require.NoError(t, err)

View File

@@ -12,5 +12,4 @@ type Committees struct {
Seed [32]byte
ShuffledIndices []uint64
SortedIndices []uint64
ProposerIndices []uint64
}

View File

@@ -24,6 +24,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
}
// popProcessNoopFunc is a no-op function that never returns an error.
func popProcessNoopFunc(obj interface{}) error {
func popProcessNoopFunc(_ interface{}) error {
return nil
}

View File

@@ -50,17 +50,17 @@ type FinalizedDeposits struct {
// stores all the deposit related data that is required by the beacon-node.
type DepositCache struct {
// Beacon chain deposits in memory.
pendingDeposits []*dbpb.DepositContainer
deposits []*dbpb.DepositContainer
finalizedDeposits *FinalizedDeposits
depositsLock sync.RWMutex
chainStartDeposits []*ethpb.Deposit
chainStartPubkeys map[string]bool
pendingDeposits []*dbpb.DepositContainer
deposits []*dbpb.DepositContainer
finalizedDeposits *FinalizedDeposits
depositsLock sync.RWMutex
chainStartPubkeys map[string]bool
chainStartPubkeysLock sync.RWMutex
}
// New instantiates a new deposit cache
func New() (*DepositCache, error) {
finalizedDepositsTrie, err := trieutil.NewTrie(int(params.BeaconConfig().DepositContractTreeDepth))
finalizedDepositsTrie, err := trieutil.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
@@ -68,11 +68,10 @@ func New() (*DepositCache, error) {
// finalizedDeposits.MerkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
// Inserting the first item into the trie will set the value of the index to 0.
return &DepositCache{
pendingDeposits: []*dbpb.DepositContainer{},
deposits: []*dbpb.DepositContainer{},
finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
chainStartPubkeys: make(map[string]bool),
chainStartDeposits: make([]*ethpb.Deposit, 0),
pendingDeposits: []*dbpb.DepositContainer{},
deposits: []*dbpb.DepositContainer{},
finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
chainStartPubkeys: make(map[string]bool),
}, nil
}
@@ -119,7 +118,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
defer dc.depositsLock.Unlock()
depositTrie := dc.finalizedDeposits.Deposits
insertIndex := dc.finalizedDeposits.MerkleTrieIndex + 1
insertIndex := int(dc.finalizedDeposits.MerkleTrieIndex + 1)
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
continue
@@ -132,7 +131,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
log.WithError(err).Error("Could not hash deposit data. Finalized deposit cache not updated.")
return
}
depositTrie.Insert(depHash[:], int(insertIndex))
depositTrie.Insert(depHash[:], insertIndex)
insertIndex++
}
@@ -156,6 +155,8 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.Deposi
func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey string) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.MarkPubkeyForChainstart")
defer span.End()
dc.chainStartPubkeysLock.Lock()
defer dc.chainStartPubkeysLock.Unlock()
dc.chainStartPubkeys[pubkey] = true
}
@@ -163,6 +164,8 @@ func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey stri
func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) bool {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PubkeyInChainstart")
defer span.End()
dc.chainStartPubkeysLock.RLock()
defer dc.chainStartPubkeysLock.RUnlock()
if dc.chainStartPubkeys != nil {
return dc.chainStartPubkeys[pubkey]
}
@@ -258,3 +261,28 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
return deposits
}
// PruneProofs removes proofs from all deposits whose index is equal to or less than untilDepositIndex.
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
if untilDepositIndex >= int64(len(dc.deposits)) {
untilDepositIndex = int64(len(dc.deposits) - 1)
}
for i := untilDepositIndex; i >= 0; i-- {
if ctx.Err() != nil {
return ctx.Err()
}
// Finding a nil proof means that all proofs up to this deposit have already been pruned.
if dc.deposits[i].Deposit.Proof == nil {
break
}
dc.deposits[i].Deposit.Proof = nil
}
return nil
}
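
PruneProofs walks backwards from untilDepositIndex, clamps the index to the last valid entry, and stops at the first nil proof because earlier prune calls always clear a contiguous prefix. A simplified, standalone sketch of that walk over a plain slice (not the DepositCache API):

package depositsketch

// pruneProofs nils out proofs[0..until], walking backwards and stopping at the
// first already-nil proof, since earlier prune calls always clear a contiguous
// prefix. Simplified sketch only; the real method also guards with a context
// and a mutex.
func pruneProofs(proofs [][][]byte, until int) {
	if until >= len(proofs) {
		until = len(proofs) - 1
	}
	for i := until; i >= 0; i-- {
		if proofs[i] == nil {
			break // everything at or below i was pruned previously
		}
		proofs[i] = nil
	}
}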

View File

@@ -19,7 +19,7 @@ import (
const nilDepositErr = "Ignoring nil deposit insertion"
var _ = DepositFetcher(&DepositCache{})
var _ DepositFetcher = (*DepositCache)(nil)
func TestInsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
hook := logTest.NewGlobal()
@@ -387,7 +387,7 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
}
trie, err := trieutil.GenerateTrieFromItems(deps, int(params.BeaconConfig().DepositContractTreeDepth))
trie, err := trieutil.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}
@@ -445,7 +445,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
}
trie, err := trieutil.GenerateTrieFromItems(deps, int(params.BeaconConfig().DepositContractTreeDepth))
trie, err := trieutil.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}
@@ -573,3 +573,137 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
assert.Equal(t, 1, len(deps))
}
func TestPruneProofs_Ok(t *testing.T) {
dc, err := New()
require.NoError(t, err)
deposits := []struct {
blkNum uint64
deposit *ethpb.Deposit
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 2,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 3,
},
}
for _, ins := range deposits {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
}
require.NoError(t, dc.PruneProofs(context.Background(), 1))
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[1].Deposit.Proof)
assert.NotNil(t, dc.deposits[2].Deposit.Proof)
assert.NotNil(t, dc.deposits[3].Deposit.Proof)
}
func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
dc, err := New()
require.NoError(t, err)
deposits := []struct {
blkNum uint64
deposit *ethpb.Deposit
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: nil},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: nil},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 2,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 3,
},
}
for _, ins := range deposits {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
}
require.NoError(t, dc.PruneProofs(context.Background(), 2))
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[2].Deposit.Proof)
}
func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
dc, err := New()
require.NoError(t, err)
deposits := []struct {
blkNum uint64
deposit *ethpb.Deposit
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 2,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 3,
},
}
for _, ins := range deposits {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
}
require.NoError(t, dc.PruneProofs(context.Background(), 99))
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[1].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[2].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[3].Deposit.Proof)
}
func makeDepositProof() [][]byte {
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
for i := range proof {
proof[i] = make([]byte, 32)
}
return proof
}

View File

@@ -9,11 +9,10 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
)
var _ = PendingDepositsFetcher(&DepositCache{})
var _ PendingDepositsFetcher = (*DepositCache)(nil)
func TestInsertPendingDeposit_OK(t *testing.T) {
dc := DepositCache{}
@@ -29,14 +28,6 @@ func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
assert.Equal(t, 0, len(dc.pendingDeposits))
}
func makeDepositProof() [][]byte {
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
for i := range proof {
proof[i] = make([]byte, 32)
}
return proof
}
func TestRemovePendingDeposit_OK(t *testing.T) {
db := DepositCache{}
proof1 := makeDepositProof()

View File

@@ -10,5 +10,8 @@ import (
func TestMain(m *testing.M) {
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
defer resetCfg()
os.Exit(m.Run())
code := m.Run()
// os.Exit will prevent defer from being called
resetCfg()
os.Exit(code)
}
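
The TestMain change above matters because os.Exit terminates the process before deferred functions run, so a cleanup registered with defer would never execute. A minimal, self-contained sketch of the corrected pattern (package example and setUp are illustrative stand-ins, not Prysm code):

package example

import (
	"os"
	"testing"
)

// setUp stands in for whatever initialization returns a cleanup function
// (featureconfig.InitWithReset in the file above); it is illustrative only.
func setUp() (cleanup func()) {
	return func() {}
}

func TestMain(m *testing.M) {
	cleanup := setUp()
	code := m.Run()
	// os.Exit skips deferred calls, so run the cleanup explicitly before exiting.
	cleanup()
	os.Exit(code)
}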

beacon-chain/cache/proposer_indices.go (new file, 88 lines)
View File

@@ -0,0 +1,88 @@
// +build !libfuzzer
package cache
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"k8s.io/client-go/tools/cache"
)
var (
// maxProposerIndicesCacheSize defines the max number of proposer indices entries, keyed by block root, that the cache can hold.
// Due to reorgs and long finality, it's good to keep old entries around so the node can quickly switch over.
maxProposerIndicesCacheSize = uint64(8)
// ProposerIndicesCacheMiss tracks the number of proposerIndices requests that aren't present in the cache.
ProposerIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "proposer_indices_cache_miss",
Help: "The number of proposer indices requests that aren't present in the cache.",
})
// ProposerIndicesCacheHit tracks the number of proposerIndices requests that are in the cache.
ProposerIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "proposer_indices_cache_hit",
Help: "The number of proposer indices requests that are present in the cache.",
})
)
// ProposerIndicesCache is a struct with 1 queue for looking up proposer indices by root.
type ProposerIndicesCache struct {
ProposerIndicesCache *cache.FIFO
lock sync.RWMutex
}
// proposerIndicesKeyFn takes the block root as the key to retrieve proposer indices in a given epoch.
func proposerIndicesKeyFn(obj interface{}) (string, error) {
info, ok := obj.(*ProposerIndices)
if !ok {
return "", ErrNotProposerIndices
}
return key(info.BlockRoot), nil
}
// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
func NewProposerIndicesCache() *ProposerIndicesCache {
return &ProposerIndicesCache{
ProposerIndicesCache: cache.NewFIFO(proposerIndicesKeyFn),
}
}
// AddProposerIndices adds a ProposerIndices object to the cache.
// This method also trims the least recently added entries if the cache size has reached the max cache size limit.
func (c *ProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.ProposerIndicesCache.AddIfNotPresent(p); err != nil {
return err
}
trim(c.ProposerIndicesCache, maxProposerIndicesCacheSize)
return nil
}
// ProposerIndices returns the proposer indices for a given block root.
func (c *ProposerIndicesCache) ProposerIndices(r [32]byte) ([]uint64, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.ProposerIndicesCache.GetByKey(key(r))
if err != nil {
return nil, err
}
if exists {
ProposerIndicesCacheHit.Inc()
} else {
ProposerIndicesCacheMiss.Inc()
return nil, nil
}
item, ok := obj.(*ProposerIndices)
if !ok {
return nil, ErrNotProposerIndices
}
return item.ProposerIndices, nil
}
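
A hedged sketch of how a consumer of this cache might combine the hit/miss paths above: read by block root first, and only fall back to recomputation on a miss. proposerIndicesFor and computeProposerIndices are hypothetical names for illustration; the real computation is not part of this file.

func proposerIndicesFor(c *ProposerIndicesCache, blockRoot [32]byte) ([]uint64, error) {
	indices, err := c.ProposerIndices(blockRoot)
	if err != nil {
		return nil, err
	}
	if indices != nil {
		return indices, nil // cache hit
	}
	// computeProposerIndices is a hypothetical stand-in for the real (expensive)
	// shuffle-based computation, which lives elsewhere in Prysm.
	indices = computeProposerIndices(blockRoot)
	if err := c.AddProposerIndices(&ProposerIndices{BlockRoot: blockRoot, ProposerIndices: indices}); err != nil {
		return nil, err
	}
	return indices, nil
}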

View File

@@ -0,0 +1,24 @@
// +build libfuzzer
// This file is used in fuzzer builds to bypass proposer indices caches.
package cache
// FakeProposerIndicesCache is a no-op stand-in for ProposerIndicesCache used in libfuzzer builds.
type FakeProposerIndicesCache struct {
}
// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
func NewProposerIndicesCache() *FakeProposerIndicesCache {
return &FakeProposerIndicesCache{}
}
// AddProposerIndices is a no-op in libfuzzer builds; it exists so callers can use the same API as the real cache.
func (c *FakeProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error {
return nil
}
// ProposerIndices always returns nil in libfuzzer builds.
func (c *FakeProposerIndicesCache) ProposerIndices(r [32]byte) ([]uint64, error) {
return nil, nil
}
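
The two files above pair `// +build !libfuzzer` with `// +build libfuzzer` so that exactly one implementation is compiled, depending on whether the build runs with `-tags libfuzzer`. A minimal sketch of the same technique with illustrative names (two separate files shown in one listing; note that a `// +build` constraint must be followed by a blank line before the package clause):

// file 1 of 2: counter.go — compiled in normal builds
// +build !libfuzzer

package counter

// Count is the real implementation.
func Count(items []int) int { return len(items) }

// file 2 of 2: counter_libfuzzer.go — compiled only with `go build -tags libfuzzer`
// +build libfuzzer

package counter

// Count is a no-op stand-in for fuzzing builds.
func Count(_ []int) int { return 0 }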

View File

@@ -0,0 +1,63 @@
package cache
import (
"strconv"
"testing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestProposerKeyFn_OK(t *testing.T) {
item := &ProposerIndices{
BlockRoot: [32]byte{'A'},
ProposerIndices: []uint64{1, 2, 3, 4, 5},
}
k, err := proposerIndicesKeyFn(item)
require.NoError(t, err)
assert.Equal(t, key(item.BlockRoot), k)
}
func TestProposerKeyFn_InvalidObj(t *testing.T) {
_, err := proposerIndicesKeyFn("bad")
assert.Equal(t, ErrNotProposerIndices, err)
}
func TestProposerCache_AddProposerIndicesList(t *testing.T) {
cache := NewProposerIndicesCache()
bRoot := [32]byte{'A'}
indices, err := cache.ProposerIndices(bRoot)
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee count not to exist in empty cache")
}
require.NoError(t, cache.AddProposerIndices(&ProposerIndices{
ProposerIndices: indices,
BlockRoot: bRoot,
}))
received, err := cache.ProposerIndices(bRoot)
require.NoError(t, err)
assert.DeepEqual(t, received, indices)
item := &ProposerIndices{BlockRoot: [32]byte{'B'}, ProposerIndices: []uint64{1, 2, 3, 4, 5, 6}}
require.NoError(t, cache.AddProposerIndices(item))
received, err = cache.ProposerIndices(item.BlockRoot)
require.NoError(t, err)
assert.DeepEqual(t, item.ProposerIndices, received)
}
func TestProposerCache_CanRotate(t *testing.T) {
cache := NewProposerIndicesCache()
for i := 0; i < int(maxProposerIndicesCacheSize)+1; i++ {
s := []byte(strconv.Itoa(i))
item := &ProposerIndices{BlockRoot: bytesutil.ToBytes32(s)}
require.NoError(t, cache.AddProposerIndices(item))
}
k := cache.ProposerIndicesCache.ListKeys()
assert.Equal(t, maxProposerIndicesCacheSize, uint64(len(k)))
}

View File

@@ -0,0 +1,13 @@
package cache
import "errors"
// ErrNotProposerIndices will be returned when a cache object is not a pointer to
// a ProposerIndices struct.
var ErrNotProposerIndices = errors.New("object is not a proposer indices struct")
// ProposerIndices defines the cached struct for proposer indices.
type ProposerIndices struct {
BlockRoot [32]byte
ProposerIndices []uint64
}

View File

@@ -30,7 +30,7 @@ type SkipSlotCache struct {
cache *lru.Cache
lock sync.RWMutex
disabled bool // Allow for programmatic toggling of the cache, useful during initial sync.
inProgress map[uint64]bool
inProgress map[[32]byte]bool
}
// NewSkipSlotCache initializes the map and underlying cache.
@@ -41,7 +41,7 @@ func NewSkipSlotCache() *SkipSlotCache {
}
return &SkipSlotCache{
cache: cache,
inProgress: make(map[uint64]bool),
inProgress: make(map[[32]byte]bool),
}
}
@@ -57,7 +57,7 @@ func (c *SkipSlotCache) Disable() {
// Get waits for any in progress calculation to complete before returning a
// cached response, if any.
func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.BeaconState, error) {
func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "skipSlotCache.Get")
defer span.End()
if c.disabled {
@@ -77,7 +77,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
}
c.lock.RLock()
if !c.inProgress[slot] {
if !c.inProgress[r] {
c.lock.RUnlock()
break
}
@@ -92,7 +92,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
}
span.AddAttributes(trace.BoolAttribute("inProgress", inProgress))
item, exists := c.cache.Get(slot)
item, exists := c.cache.Get(r)
if exists && item != nil {
skipSlotCacheHit.Inc()
@@ -106,7 +106,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
// MarkInProgress a request so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
if c.disabled {
return nil
}
@@ -114,16 +114,16 @@ func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
c.lock.Lock()
defer c.lock.Unlock()
if c.inProgress[slot] {
if c.inProgress[r] {
return ErrAlreadyInProgress
}
c.inProgress[slot] = true
c.inProgress[r] = true
return nil
}
// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) error {
if c.disabled {
return nil
}
@@ -131,18 +131,18 @@ func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.inProgress, slot)
delete(c.inProgress, r)
return nil
}
// Put the response in the cache.
func (c *SkipSlotCache) Put(ctx context.Context, slot uint64, state *stateTrie.BeaconState) error {
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state *stateTrie.BeaconState) error {
if c.disabled {
return nil
}
// Copy state so cached value is not mutated.
c.cache.Add(slot, state.Copy())
c.cache.Add(r, state.Copy())
return nil
}
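
Put stores state.Copy() rather than the caller's pointer so later mutations of that state cannot corrupt the cached entry. A minimal, standalone illustration of that design choice (types and names here are illustrative, not the Prysm state API):

package cachecopy

// entry is an illustrative cached value holding a mutable slice.
type entry struct {
	balances []uint64
}

// clone deep-copies the slice so the cached value is isolated from the caller.
func (e *entry) clone() *entry {
	b := make([]uint64, len(e.balances))
	copy(b, e.balances)
	return &entry{balances: b}
}

// putSafely stores a copy, so the caller may keep mutating e without
// corrupting the cached entry.
func putSafely(cache map[[32]byte]*entry, key [32]byte, e *entry) {
	cache[key] = e.clone()
}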

View File

@@ -15,21 +15,22 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
ctx := context.Background()
c := cache.NewSkipSlotCache()
state, err := c.Get(ctx, 5)
r := [32]byte{'a'}
state, err := c.Get(ctx, r)
require.NoError(t, err)
assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Empty cache returned an object")
require.NoError(t, c.MarkInProgress(5))
require.NoError(t, c.MarkInProgress(r))
state, err = stateTrie.InitializeFromProto(&pb.BeaconState{
Slot: 10,
})
require.NoError(t, err)
require.NoError(t, c.Put(ctx, 5, state))
require.NoError(t, c.MarkNotInProgress(5))
require.NoError(t, c.Put(ctx, r, state))
require.NoError(t, c.MarkNotInProgress(r))
res, err := c.Get(ctx, 5)
res, err := c.Get(ctx, r)
require.NoError(t, err)
assert.DeepEqual(t, res.CloneInnerState(), state.CloneInnerState(), "Expected equal protos to return from cache")
}

View File

@@ -50,11 +50,11 @@ func (s *StateSummaryCache) GetAll() []*pb.StateSummary {
s.initSyncStateSummariesLock.RLock()
defer s.initSyncStateSummariesLock.RUnlock()
blks := make([]*pb.StateSummary, 0, len(s.initSyncStateSummaries))
summaries := make([]*pb.StateSummary, 0, len(s.initSyncStateSummaries))
for _, b := range s.initSyncStateSummaries {
blks = append(blks, b)
summaries = append(summaries, b)
}
return blks
return summaries
}
// Clear clears out the initial sync state summaries cache.

View File

@@ -25,12 +25,12 @@ var SubnetIDs = newSubnetIDs()
func newSubnetIDs() *subnetIDs {
// A node can calculate committee assignments for the current and the next epoch,
// so the max size is set to two epochs' worth of entries (a worked example follows this file's diff).
cacheSize := params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2
attesterCache, err := lru.New(int(cacheSize))
cacheSize := int(params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2)
attesterCache, err := lru.New(cacheSize)
if err != nil {
panic(err)
}
aggregatorCache, err := lru.New(int(cacheSize))
aggregatorCache, err := lru.New(cacheSize)
if err != nil {
panic(err)
}
@@ -41,7 +41,7 @@ func newSubnetIDs() *subnetIDs {
}
// AddAttesterSubnetID adds the subnet index for subscribing subnet for the attester of a given slot.
func (c *subnetIDs) AddAttesterSubnetID(slot uint64, subnetID uint64) {
func (c *subnetIDs) AddAttesterSubnetID(slot, subnetID uint64) {
c.attesterLock.Lock()
defer c.attesterLock.Unlock()
@@ -69,7 +69,7 @@ func (c *subnetIDs) GetAttesterSubnetIDs(slot uint64) []uint64 {
}
// AddAggregatorSubnetID adds the subnet ID for subscribing subnet for the aggregator of a given slot.
func (c *subnetIDs) AddAggregatorSubnetID(slot uint64, subnetID uint64) {
func (c *subnetIDs) AddAggregatorSubnetID(slot, subnetID uint64) {
c.aggregatorLock.Lock()
defer c.aggregatorLock.Unlock()
@@ -113,7 +113,7 @@ func (c *subnetIDs) GetAllSubnets() []uint64 {
defer c.subnetsLock.RUnlock()
itemsMap := c.persistentSubnets.Items()
committees := []uint64{}
var committees []uint64
for _, v := range itemsMap {
if v.Expired() {
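
As a worked example of the sizing comment above, assuming mainnet-style parameters (MaxCommitteesPerSlot = 64 and SlotsPerEpoch = 32, values assumed here rather than taken from this diff), two epochs of assignments give 64 * 32 * 2 = 4096 cache entries:

const (
	maxCommitteesPerSlotMainnet = 64 // assumed mainnet value
	slotsPerEpochMainnet        = 32 // assumed mainnet value

	// Two epochs of attester/aggregator assignments.
	exampleSubnetCacheSize = maxCommitteesPerSlotMainnet * slotsPerEpochMainnet * 2 // 4096
)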

View File

@@ -42,12 +42,10 @@ func TestSubnetIDsCache_RoundTrip(t *testing.T) {
}
func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
pubkeySet := [][48]byte{}
c := newSubnetIDs()
for i := 0; i < 20; i++ {
pubkey := [48]byte{byte(i)}
pubkeySet = append(pubkeySet, pubkey)
c.AddPersistentCommittee(pubkey[:], []uint64{uint64(i)}, 0)
}

View File

@@ -21,6 +21,7 @@ go_library(
"//beacon-chain:__subpackages__",
"//fuzz:__pkg__",
"//shared/testutil:__pkg__",
"//validator/accounts:__pkg__",
],
deps = [
"//beacon-chain/core/helpers:go_default_library",
@@ -49,6 +50,7 @@ go_test(
name = "go_default_test",
size = "medium",
srcs = [
"attestation_regression_test.go",
"attestation_test.go",
"attester_slashing_test.go",
"block_operations_fuzz_test.go",
@@ -61,6 +63,7 @@ go_test(
"proposer_slashing_test.go",
"randao_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
shard_count = 2,
deps = [

View File

@@ -158,7 +158,7 @@ func ProcessAttestationNoVerifySignature(
return nil, err
}
c := helpers.SlotCommitteeCount(activeValidatorCount)
if att.Data.CommitteeIndex > c {
if att.Data.CommitteeIndex >= c {
return nil, fmt.Errorf("committee index %d >= committee count %d", att.Data.CommitteeIndex, c)
}
@@ -314,7 +314,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState *stateTrie.Beacon
return err
}
indices := indexedAtt.AttestingIndices
pubkeys := []bls.PublicKey{}
var pubkeys []bls.PublicKey
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := beaconState.PubkeyAtIndex(indices[i])
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
@@ -365,7 +365,7 @@ func VerifyAttSigUseCheckPt(ctx context.Context, c *pb.CheckPtInfo, att *ethpb.A
return err
}
indices := indexedAtt.AttestingIndices
pubkeys := []bls.PublicKey{}
var pubkeys []bls.PublicKey
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := c.PubKeys[indices[i]]
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx)
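
Several hunks in this file (and elsewhere in this diff) replace `x := []T{}` with `var x []T`. Both forms behave the same under append; the `var` form simply declares a nil slice and avoids allocating an empty backing slice, which is the form linters prefer. A small standalone program showing the only observable difference:

package main

import "fmt"

func main() {
	var a []int  // nil slice: len 0, no backing array allocated
	b := []int{} // empty but non-nil slice: a zero-length allocation

	fmt.Println(a == nil, b == nil) // true false
	fmt.Println(len(a), len(b))     // 0 0

	// append treats both the same, so the switch to `var` is behavior-preserving
	// in loops like the pubkeys collection above.
	a = append(a, 1)
	b = append(b, 1)
	fmt.Println(a, b) // [1] [1]
}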

View File

@@ -0,0 +1,44 @@
package blocks_test
import (
"context"
"io/ioutil"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
// Beaconfuzz discovered an off-by-one issue where an attestation could be produced that would pass
// validation when att.Data.CommitteeIndex is 1 and the committee count per slot is also 1. The only
// valid att.Data.CommitteeIndex would be 0, so this was an off-by-one error.
// See: https://github.com/sigp/beacon-fuzz/issues/78
func TestProcessAttestationNoVerifySignature_BeaconFuzzIssue78(t *testing.T) {
attData, err := ioutil.ReadFile("testdata/beaconfuzz_78_attestation.ssz")
if err != nil {
t.Fatal(err)
}
att := &ethpb.Attestation{}
if err := att.UnmarshalSSZ(attData); err != nil {
t.Fatal(err)
}
stateData, err := ioutil.ReadFile("testdata/beaconfuzz_78_beacon.ssz")
if err != nil {
t.Fatal(err)
}
spb := &pb.BeaconState{}
if err := spb.UnmarshalSSZ(stateData); err != nil {
t.Fatal(err)
}
st, err := state.InitializeFromProtoUnsafe(spb)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
_, err = blocks.ProcessAttestationNoVerifySignature(ctx, st, att)
require.ErrorContains(t, "committee index 1 >= committee count 1", err)
}
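
This regression test exercises the bounds check fixed earlier in this diff: for a slot with committeeCount committees, the valid indices are 0 through committeeCount-1, so the guard must reject index >= committeeCount. A tiny standalone restatement of that check:

// validCommitteeIndex restates the corrected bound: for committeeCount committees
// in a slot, only indices 0 .. committeeCount-1 are valid.
func validCommitteeIndex(index, committeeCount uint64) bool {
	return index < committeeCount
}

// validCommitteeIndex(0, 1) == true  — the only legal index when there is one committee
// validCommitteeIndex(1, 1) == false — the case beacon-fuzz found slipping past the old `>` check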

View File

@@ -639,7 +639,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
list := bitfield.Bitlist{0b11111111}
atts := []*ethpb.Attestation{}
var atts []*ethpb.Attestation
for i := uint64(0); i < 1000; i++ {
atts = append(atts, &ethpb.Attestation{
Data: &ethpb.AttestationData{

View File

@@ -115,7 +115,7 @@ func VerifyAttesterSlashing(ctx context.Context, beaconState *stateTrie.BeaconSt
// # Surround vote
// (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch)
// )
func IsSlashableAttestationData(data1 *ethpb.AttestationData, data2 *ethpb.AttestationData) bool {
func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
if data1 == nil || data2 == nil || data1.Target == nil || data2.Target == nil || data1.Source == nil || data2.Source == nil {
return false
}

View File

@@ -2,7 +2,6 @@ package blocks_test
import (
"context"
"fmt"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -57,7 +56,7 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
},
},
}
registry := []*ethpb.Validator{}
var registry []*ethpb.Validator
currentSlot := uint64(0)
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
@@ -71,13 +70,12 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
AttesterSlashings: slashings,
},
}
want := fmt.Sprint("attestations are not slashable")
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b)
assert.ErrorContains(t, want, err)
assert.ErrorContains(t, "attestations are not slashable", err)
}
func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
registry := []*ethpb.Validator{}
var registry []*ethpb.Validator
currentSlot := uint64(0)
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
@@ -116,9 +114,8 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
},
}
want := fmt.Sprint("validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE")
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b)
assert.ErrorContains(t, want, err)
assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
}
func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {

View File

@@ -6,9 +6,7 @@ import (
fuzz "github.com/google/gofuzz"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
@@ -17,13 +15,13 @@ import (
func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ctx := context.Background()
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
att := &eth.Attestation{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(att)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
_, err = ProcessAttestationNoVerifySignature(ctx, s, att)
_ = err
@@ -32,14 +30,14 @@ func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
func TestFuzzProcessBlockHeader_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
block := &eth.SignedBeaconBlock{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(block)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
_, err = ProcessBlockHeader(context.Background(), s, block)
_ = err
@@ -48,13 +46,13 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ba := []byte{}
var ba []byte
pubkey := [48]byte{}
sig := [96]byte{}
domain := [4]byte{}
p := []byte{}
s := []byte{}
d := []byte{}
var p []byte
var s []byte
var d []byte
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(&ba)
fuzzer.Fuzz(&pubkey)
@@ -100,11 +98,11 @@ func TestFuzzareEth1DataEqual_10000(t *testing.T) {
func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
eth1data := &eth.Eth1Data{}
stateVotes := []*eth.Eth1Data{}
var stateVotes []*eth.Eth1Data
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(eth1data)
fuzzer.Fuzz(&stateVotes)
s, err := beaconstate.InitializeFromProto(&ethereum_beacon_p2p_v1.BeaconState{
s, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Eth1DataVotes: stateVotes,
})
require.NoError(t, err)
@@ -116,13 +114,13 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
block := &eth.BeaconBlock{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(block)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
_, err = ProcessBlockHeaderNoVerify(s, block)
_ = err
@@ -131,13 +129,13 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
func TestFuzzProcessRandao_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
b := &eth.SignedBeaconBlock{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessRandao(context.Background(), s, b)
if err != nil && r != nil {
@@ -148,13 +146,13 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
blockBody := &eth.BeaconBlockBody{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessRandaoNoVerify(s, blockBody)
if err != nil && r != nil {
@@ -165,13 +163,13 @@ func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
b := &eth.SignedBeaconBlock{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessProposerSlashings(ctx, s, b)
if err != nil && r != nil {
@@ -182,12 +180,12 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
proposerSlashing := &eth.ProposerSlashing{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(proposerSlashing)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
err = VerifyProposerSlashing(s, proposerSlashing)
_ = err
@@ -196,13 +194,13 @@ func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
b := &eth.SignedBeaconBlock{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessAttesterSlashings(ctx, s, b)
if err != nil && r != nil {
@@ -213,13 +211,13 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
attesterSlashing := &eth.AttesterSlashing{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attesterSlashing)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
err = VerifyAttesterSlashing(ctx, s, attesterSlashing)
_ = err
@@ -250,13 +248,13 @@ func TestFuzzslashableAttesterIndices_10000(t *testing.T) {
func TestFuzzProcessAttestations_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
b := &eth.SignedBeaconBlock{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessAttestations(ctx, s, b)
if err != nil && r != nil {
@@ -267,13 +265,13 @@ func TestFuzzProcessAttestations_10000(t *testing.T) {
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
b := &eth.SignedBeaconBlock{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessAttestationsNoVerifySignature(ctx, s, b)
if err != nil && r != nil {
@@ -284,13 +282,13 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
func TestFuzzProcessAttestation_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
attestation := &eth.Attestation{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attestation)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessAttestation(ctx, s, attestation)
if err != nil && r != nil {
@@ -301,13 +299,13 @@ func TestFuzzProcessAttestation_10000(t *testing.T) {
func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
idxAttestation := &eth.IndexedAttestation{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(idxAttestation)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
err = VerifyIndexedAttestation(ctx, s, idxAttestation)
_ = err
@@ -316,13 +314,13 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
func TestFuzzVerifyAttestation_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
attestation := &eth.Attestation{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attestation)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
err = VerifyAttestationSignature(ctx, s, attestation)
_ = err
@@ -331,13 +329,13 @@ func TestFuzzVerifyAttestation_10000(t *testing.T) {
func TestFuzzProcessDeposits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
b := &eth.SignedBeaconBlock{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessDeposits(ctx, s, b)
if err != nil && r != nil {
@@ -348,14 +346,14 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
deposit := &eth.Deposit{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessPreGenesisDeposits(ctx, s, []*eth.Deposit{deposit})
if err != nil && r != nil {
@@ -366,13 +364,13 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
func TestFuzzProcessDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
deposit := &eth.Deposit{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessDeposit(s, deposit, true)
if err != nil && r != nil {
@@ -383,12 +381,12 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
func TestFuzzverifyDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
deposit := &eth.Deposit{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
err = verifyDeposit(s, deposit)
_ = err
@@ -397,13 +395,13 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
b := &eth.SignedBeaconBlock{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(ctx, s, b)
if err != nil && r != nil {
@@ -414,12 +412,12 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethereum_beacon_p2p_v1.BeaconState{}
state := &pb.BeaconState{}
b := &eth.SignedBeaconBlock{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := beaconstate.InitializeFromProtoUnsafe(state)
s, err := stateTrie.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(context.Background(), s, b)
if err != nil && r != nil {
@@ -431,7 +429,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
func TestFuzzVerifyExit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ve := &eth.SignedVoluntaryExit{}
val := &beaconstate.ReadOnlyValidator{}
val := &stateTrie.ReadOnlyValidator{}
fork := &pb.Fork{}
var slot uint64

View File

@@ -50,7 +50,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
require.NoError(t, err)
signingRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err, "Could not get signing root of beacon block header")
aggSigs := []bls.Signature{}
var aggSigs []bls.Signature
for _, index := range setA {
sig := privKeys[index].Sign(signingRoot[:])
aggSigs = append(aggSigs, sig)

View File

@@ -158,8 +158,7 @@ func ProcessDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit,
if err != nil {
return nil, err
}
depositSig := deposit.Data.Signature
if err := verifyDepositDataSigningRoot(deposit.Data, pubKey, depositSig, domain); err != nil {
if err := verifyDepositDataSigningRoot(deposit.Data, domain); err != nil {
// Ignore this error as in the spec pseudo code.
log.Debugf("Skipping deposit: could not verify deposit data signature: %v", err)
return beaconState, nil
@@ -223,7 +222,7 @@ func verifyDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit) e
}
// Deprecated: This method uses deprecated ssz.SigningRoot.
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature []byte, domain []byte) error {
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, domain []byte) error {
return depositutil.VerifyDepositSignature(obj, domain)
}

View File

@@ -67,7 +67,7 @@ func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
require.NoError(t, err)
// We then create a merkle branch for the test.
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, int(params.BeaconConfig().DepositContractTreeDepth))
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")
@@ -132,7 +132,8 @@ func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
}
func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) {
sk := bls.RandKey()
sk, err := bls.RandKey()
require.NoError(t, err)
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: sk.PublicKey().Marshal(),
@@ -149,7 +150,7 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
require.NoError(t, err)
// We then create a merkle branch for the test.
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, int(params.BeaconConfig().DepositContractTreeDepth))
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")

View File

@@ -19,7 +19,7 @@ import (
// state.eth1_data_votes.append(body.eth1_data)
// if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH:
// state.latest_eth1_data = body.eth1_data
func ProcessEth1DataInBlock(ctx context.Context, beaconState *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
func ProcessEth1DataInBlock(_ context.Context, beaconState *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
block := b.Block
if beaconState == nil {
return nil, errors.New("nil state")

View File

@@ -13,6 +13,13 @@ import (
"github.com/prysmaticlabs/prysm/shared/params"
)
// ValidatorAlreadyExitedMsg defines a message saying that a validator has already submitted an exit.
var ValidatorAlreadyExitedMsg = "validator has already submitted an exit, which will take place at epoch"
// ValidatorCannotExitYetMsg defines a message saying that a validator cannot exit
// because it has not been active long enough.
var ValidatorCannotExitYetMsg = "validator has not been active long enough to exit"
// ProcessVoluntaryExits is one of the operations performed
// on each processed beacon block to determine which validators
// should exit the state's validator registry.
@@ -37,7 +44,7 @@ import (
// # Initiate exit
// initiate_validator_exit(state, exit.validator_index)
func ProcessVoluntaryExits(
ctx context.Context,
_ context.Context,
beaconState *stateTrie.BeaconState,
b *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {
@@ -162,9 +169,7 @@ func verifyExitConditions(validator *stateTrie.ReadOnlyValidator, currentSlot ui
}
// Verify the validator has not yet submitted an exit.
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
return fmt.Errorf(
"validator has already submitted an exit, which will take place at epoch: %v",
validator.ExitEpoch())
return fmt.Errorf("%s: %v", ValidatorAlreadyExitedMsg, validator.ExitEpoch())
}
// Exits must specify an epoch when they become valid; they are not valid before then.
if currentEpoch < exit.Epoch {
@@ -173,7 +178,8 @@ func verifyExitConditions(validator *stateTrie.ReadOnlyValidator, currentSlot ui
// Verify the validator has been active long enough.
if currentEpoch < validator.ActivationEpoch()+params.BeaconConfig().ShardCommitteePeriod {
return fmt.Errorf(
"validator has not been active long enough to exit: %d epochs vs required %d epochs",
"%s: %d epochs vs required %d epochs",
ValidatorCannotExitYetMsg,
currentEpoch,
validator.ActivationEpoch()+params.BeaconConfig().ShardCommitteePeriod,
)
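
Exporting ValidatorAlreadyExitedMsg and ValidatorCannotExitYetMsg lets tests in other packages match on the error text without copying the literal. A self-contained sketch of that kind of match, using an illustrative local constant and helper rather than the real blocks package:

package exits

import (
	"fmt"
	"strings"
	"testing"
)

// validatorAlreadyExitedMsg mirrors the exported ValidatorAlreadyExitedMsg above.
const validatorAlreadyExitedMsg = "validator has already submitted an exit, which will take place at epoch"

// alreadyExitedErr is an illustrative stand-in for the error produced by
// verifyExitConditions when a validator has already submitted an exit.
func alreadyExitedErr(exitEpoch uint64) error {
	return fmt.Errorf("%s: %v", validatorAlreadyExitedMsg, exitEpoch)
}

func TestMatchOnExportedMessage(t *testing.T) {
	err := alreadyExitedErr(42)
	if err == nil || !strings.Contains(err.Error(), validatorAlreadyExitedMsg) {
		t.Fatalf("unexpected error: %v", err)
	}
}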

View File

@@ -171,7 +171,9 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
err = state.SetSlot(state.Slot() + (params.BeaconConfig().ShardCommitteePeriod * params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, err)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
val, err := state.ValidatorAtIndex(0)
require.NoError(t, err)
val.PublicKey = priv.PublicKey().Marshal()

View File

@@ -37,7 +37,7 @@ import (
// # Verify proposer signature
// assert bls_verify(proposer.pubkey, signing_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER))
func ProcessBlockHeader(
ctx context.Context,
_ context.Context,
beaconState *stateTrie.BeaconState,
block *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {

View File

@@ -48,7 +48,8 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
require.NoError(t, err)
currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(state)
require.NoError(t, err)
block := testutil.NewBeaconBlock()
@@ -126,7 +127,8 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
require.NoError(t, err)
currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, []byte("hello"), params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
validators[5896].PublicKey = priv.PublicKey().Marshal()
@@ -164,7 +166,8 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
bh.Slot = 9
require.NoError(t, state.SetLatestBlockHeader(bh))
currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, []byte("hello"), params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
validators[5896].PublicKey = priv.PublicKey().Marshal()
@@ -202,7 +205,8 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
parentRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, []byte("hello"), params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
@@ -247,7 +251,8 @@ func TestProcessBlockHeader_OK(t *testing.T) {
require.NoError(t, err)
currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(state)
require.NoError(t, err)
block := testutil.NewBeaconBlock()
@@ -307,7 +312,8 @@ func TestBlockSignatureSet_OK(t *testing.T) {
require.NoError(t, err)
currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(state)
require.NoError(t, err)
block := testutil.NewBeaconBlock()

View File

@@ -36,7 +36,7 @@ import (
//
// slash_validator(state, proposer_slashing.proposer_index)
func ProcessProposerSlashings(
ctx context.Context,
_ context.Context,
beaconState *stateTrie.BeaconState,
b *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {

View File

@@ -194,6 +194,13 @@ func TestVerifyProposerSlashing(t *testing.T) {
beaconState, sks := testutil.DeterministicGenesisState(t, 2)
currentSlot := uint64(0)
require.NoError(t, beaconState.SetSlot(currentSlot))
rand1, err := bls.RandKey()
require.NoError(t, err)
sig1 := rand1.Sign([]byte("foo")).Marshal()
rand2, err := bls.RandKey()
require.NoError(t, err)
sig2 := rand2.Sign([]byte("bar")).Marshal()
tests := []struct {
name string
@@ -239,7 +246,7 @@ func TestVerifyProposerSlashing(t *testing.T) {
BodyRoot: bytesutil.PadTo([]byte{}, 32),
ParentRoot: bytesutil.PadTo([]byte{}, 32),
},
Signature: bls.RandKey().Sign([]byte("foo")).Marshal(),
Signature: sig1,
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
@@ -249,7 +256,7 @@ func TestVerifyProposerSlashing(t *testing.T) {
BodyRoot: bytesutil.PadTo([]byte{}, 32),
ParentRoot: bytesutil.PadTo([]byte{}, 32),
},
Signature: bls.RandKey().Sign([]byte("bar")).Marshal(),
Signature: sig2,
},
},
beaconState: beaconState,

View File

@@ -26,7 +26,7 @@ import (
// mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal))
// state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix
func ProcessRandao(
ctx context.Context,
_ context.Context,
beaconState *stateTrie.BeaconState,
b *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {

View File

@@ -15,7 +15,7 @@ import (
)
// retrieves the signature set from the raw data, public key, signature and domain provided.
func retrieveSignatureSet(signedData []byte, pub []byte, signature []byte, domain []byte) (*bls.SignatureSet, error) {
func retrieveSignatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, error) {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
@@ -36,7 +36,7 @@ func retrieveSignatureSet(signedData []byte, pub []byte, signature []byte, domai
}
// verifies the signature from the raw data, public key and domain provided.
func verifySignature(signedData []byte, pub []byte, signature []byte, domain []byte) error {
func verifySignature(signedData, pub, signature, domain []byte) error {
set, err := retrieveSignatureSet(signedData, pub, signature, domain)
if err != nil {
return err

View File

@@ -1,13 +1,11 @@
package spectest
import (
"context"
"path"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params/spectest"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
@@ -26,9 +24,7 @@ func runDepositTest(t *testing.T, config string) {
require.NoError(t, deposit.UnmarshalSSZ(depositFile), "Failed to unmarshal")
body := &ethpb.BeaconBlockBody{Deposits: []*ethpb.Deposit{deposit}}
testutil.RunBlockOperationTest(t, folderPath, body, func(ctx context.Context, state *state.BeaconState, b *ethpb.SignedBeaconBlock) (*state.BeaconState, error) {
return blocks.ProcessDeposits(ctx, state, b)
})
testutil.RunBlockOperationTest(t, folderPath, body, blocks.ProcessDeposits)
})
}
}
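The wrapping closure above could be dropped because blocks.ProcessDeposits already has the exact signature that testutil.RunBlockOperationTest expects for its callback, so the function value can be passed directly. A self-contained sketch of the same idiom, with hypothetical types rather than this repository's API:

package main

import "fmt"

// operation is the callback type; process has the exact same signature,
// so it can be passed directly without an anonymous wrapper.
type operation func(state int, delta int) (int, error)

func process(state, delta int) (int, error) { return state + delta, nil }

func run(op operation) {
	out, err := op(10, 5)
	fmt.Println(out, err)
}

func main() {
	// Equivalent to run(func(s, d int) (int, error) { return process(s, d) }),
	// but without the redundant closure.
	run(process)
}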

Binary file not shown.

Binary file not shown.

View File

@@ -419,7 +419,7 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) {
}
}
func buildState(t testing.TB, slot uint64, validatorCount uint64) *state.BeaconState {
func buildState(t testing.TB, slot, validatorCount uint64) *state.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{

View File

@@ -14,10 +14,6 @@ import (
"go.opencensus.io/trace"
)
// Balances stores balances such as prev/current total validator balances, attested balances and more.
// It's used for metrics reporting.
var Balances *Balance
// ProcessAttestations process the attestations in state and update individual validator's pre computes,
// it also tracks and updates epoch attesting balances.
func ProcessAttestations(
@@ -56,7 +52,6 @@ func ProcessAttestations(
}
pBal = UpdateBalance(vp, pBal)
Balances = pBal
return vp, pBal, nil
}

View File

@@ -68,7 +68,7 @@ func AttestationsDelta(state *stateTrie.BeaconState, pBal *Balance, vp []*Valida
return rewards, penalties, nil
}
func attestationDelta(pBal *Balance, v *Validator, prevEpoch uint64, finalizedEpoch uint64) (uint64, uint64) {
func attestationDelta(pBal *Balance, v *Validator, prevEpoch, finalizedEpoch uint64) (uint64, uint64) {
eligible := v.IsActivePrevEpoch || (v.IsSlashed && !v.IsWithdrawableCurrentEpoch)
if !eligible || pBal.ActiveCurrentEpoch == 0 {
return 0, 0
@@ -182,7 +182,7 @@ func ProposersDelta(state *stateTrie.BeaconState, pBal *Balance, vp []*Validator
// Spec code:
// def is_in_inactivity_leak(state: BeaconState) -> bool:
// return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY
func isInInactivityLeak(prevEpoch uint64, finalizedEpoch uint64) bool {
func isInInactivityLeak(prevEpoch, finalizedEpoch uint64) bool {
return finalityDelay(prevEpoch, finalizedEpoch) > params.BeaconConfig().MinEpochsToInactivityPenalty
}
@@ -191,6 +191,6 @@ func isInInactivityLeak(prevEpoch uint64, finalizedEpoch uint64) bool {
// Spec code:
// def get_finality_delay(state: BeaconState) -> uint64:
// return get_previous_epoch(state) - state.finalized_checkpoint.epoch
func finalityDelay(prevEpoch uint64, finalizedEpoch uint64) uint64 {
func finalityDelay(prevEpoch, finalizedEpoch uint64) uint64 {
return prevEpoch - finalizedEpoch
}
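For a quick sanity check of the two helpers above: the inactivity leak kicks in only once the gap between the previous epoch and the finalized epoch exceeds MIN_EPOCHS_TO_INACTIVITY_PENALTY (assumed here to be 4, the mainnet value). A small sketch:

package main

import "fmt"

// finalityDelay and isInInactivityLeak as in the diff above, with the
// config constant inlined for the example.
const minEpochsToInactivityPenalty = 4 // assumed mainnet value

func finalityDelay(prevEpoch, finalizedEpoch uint64) uint64 {
	return prevEpoch - finalizedEpoch
}

func isInInactivityLeak(prevEpoch, finalizedEpoch uint64) bool {
	return finalityDelay(prevEpoch, finalizedEpoch) > minEpochsToInactivityPenalty
}

func main() {
	// Finalized checkpoint at epoch 10, previous epoch 13: delay of 3, no leak yet.
	fmt.Println(isInInactivityLeak(13, 10)) // false
	// Previous epoch 15: delay of 5 exceeds the threshold, the leak is active.
	fmt.Println(isInInactivityLeak(15, 10)) // true
}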

View File

@@ -122,7 +122,7 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
base, err := epoch.BaseReward(state, i)
assert.NoError(t, err, "Could not get base reward")
assert.Equal(t, uint64(0), rewards[i], "Unexpected slashed indices reward balance")
assert.Equal(t, uint64(3*base), penalties[i], "Unexpected slashed indices penalty balance")
assert.Equal(t, 3*base, penalties[i], "Unexpected slashed indices penalty balance")
}
nonAttestedIndices := []uint64{434, 677, 872, 791}
@@ -254,7 +254,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
}
}
func buildState(slot uint64, validatorCount uint64) *pb.BeaconState {
func buildState(slot, validatorCount uint64) *pb.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{

View File

@@ -1,6 +1,8 @@
package precompute
import (
"errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -21,8 +23,27 @@ func ProcessSlashingsPrecompute(state *stateTrie.BeaconState, pBal *Balance) err
totalSlashing += slashing
}
minSlashing := mathutil.Min(totalSlashing*3, pBal.ActiveCurrentEpoch)
minSlashing := mathutil.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
epochToWithdraw := currentEpoch + exitLength/2
vs := state.ValidatorsReadOnly()
var hasSlashing bool
// Iterate through the validator list in state, stopping once a validator satisfies the slashing condition for the current epoch.
for _, v := range vs {
if v == nil {
return errors.New("nil validator in state")
}
correctEpoch := epochToWithdraw == v.WithdrawableEpoch()
if v.Slashed() && correctEpoch {
hasSlashing = true
break
}
}
// Exit early if there's no meaningful slashing to process.
if !hasSlashing {
return nil
}
increment := params.BeaconConfig().EffectiveBalanceIncrement
validatorFunc := func(idx int, val *ethpb.Validator) (bool, error) {
correctEpoch := epochToWithdraw == val.WithdrawableEpoch

View File
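Two things change in the slashing precompute above: the total slashings are scaled by the configurable ProportionalSlashingMultiplier rather than a hard-coded 3, and the function now returns early when no validator's withdrawable epoch matches the epochToWithdraw computed above, since there is then nothing to penalize. A stripped-down sketch of just the capped penalty numerator, with assumed example balances (not taken from a real state):

package main

import "fmt"

func min(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

func main() {
	// Assumed example values, in Gwei.
	totalSlashing := uint64(64e9)               // sum of the slashings vector
	activeBalance := uint64(1_000_000e9)        // total active balance
	proportionalSlashingMultiplier := uint64(3) // config value used by the diff
	// The penalty numerator is capped so it can never exceed the active balance.
	minSlashing := min(totalSlashing*proportionalSlashingMultiplier, activeBalance)
	fmt.Println(minSlashing) // 192000000000, well under the cap
}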

@@ -13,7 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestProcessSlashingsPrecompute_NotSlashed(t *testing.T) {
func TestProcessSlashingsPrecompute_NotSlashedWithSlashedTrue(t *testing.T) {
s, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Slot: 0,
Validators: []*ethpb.Validator{{Slashed: true}},
@@ -28,6 +28,21 @@ func TestProcessSlashingsPrecompute_NotSlashed(t *testing.T) {
assert.Equal(t, wanted, s.Balances()[0], "Unexpected slashed balance")
}
func TestProcessSlashingsPrecompute_NotSlashedWithSlashedFalse(t *testing.T) {
s, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Slot: 0,
Validators: []*ethpb.Validator{{}},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance},
Slashings: []uint64{0, 1e9},
})
require.NoError(t, err)
pBal := &precompute.Balance{ActiveCurrentEpoch: params.BeaconConfig().MaxEffectiveBalance}
require.NoError(t, precompute.ProcessSlashingsPrecompute(s, pBal))
wanted := params.BeaconConfig().MaxEffectiveBalance
assert.Equal(t, wanted, s.Balances()[0], "Unexpected slashed balance")
}
func TestProcessSlashingsPrecompute_SlashedLess(t *testing.T) {
tests := []struct {
state *pb.BeaconState

View File

@@ -0,0 +1,9 @@
package spectest
import (
"testing"
)
func TestRewardsAndPenaltiesMainnet(t *testing.T) {
runRewardsAndPenaltiesTests(t, "mainnet")
}

View File

@@ -36,7 +36,6 @@ go_library(
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/mathutil:go_default_library",
"//shared/params:go_default_library",

View File

@@ -6,34 +6,14 @@ import (
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/timeutils"
)
// SlotSignature returns the signed signature of the hash tree root of input slot.
//
// Spec pseudocode definition:
// def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
// domain = get_domain(state, DOMAIN_SELECTION_PROOF, compute_epoch_at_slot(slot))
// signing_root = compute_signing_root(slot, domain)
// return bls.Sign(privkey, signing_root)
func SlotSignature(state *stateTrie.BeaconState, slot uint64, privKey bls.SecretKey) (bls.Signature, error) {
d, err := Domain(state.Fork(), CurrentEpoch(state), params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot())
if err != nil {
return nil, err
}
s, err := ComputeSigningRoot(slot, d)
if err != nil {
return nil, err
}
return privKey.Sign(s[:]), nil
}
// IsAggregator returns true if the signature is from the input validator. The committee
// count is provided as an argument rather than direct implementation from spec. Having
// count is provided as an argument rather than an imported implementation from the spec. Having
// committee count as an argument allows cheaper computation at run time.
//
// Spec pseudocode definition:
@@ -164,3 +144,23 @@ func ValidateAttestationTime(attSlot uint64, genesisTime time.Time) error {
}
return nil
}
// VerifyCheckpointEpoch verifies that the checkpoint's epoch is within the current or previous epoch
// with respect to the current wall-clock time. Returns true if it is, false if it's not.
func VerifyCheckpointEpoch(c *ethpb.Checkpoint, genesis time.Time) bool {
now := uint64(timeutils.Now().Unix())
genesisTime := uint64(genesis.Unix())
currentSlot := (now - genesisTime) / params.BeaconConfig().SecondsPerSlot
currentEpoch := SlotToEpoch(currentSlot)
var prevEpoch uint64
if currentEpoch > 1 {
prevEpoch = currentEpoch - 1
}
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
return false
}
return true
}
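The helper above accepts a checkpoint only if its epoch equals the wall-clock current or previous epoch. A worked sketch of the arithmetic (assuming mainnet-style SECONDS_PER_SLOT = 12 and SLOTS_PER_EPOCH = 32, matching the test further down):

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot = 12 // assumed mainnet value
	slotsPerEpoch  = 32 // assumed mainnet value
)

func main() {
	// Genesis exactly 6 epochs ago, as in TestVerifyCheckpointEpoch_Ok.
	genesis := time.Now().Add(-time.Duration(secondsPerSlot*slotsPerEpoch*6) * time.Second)
	now := uint64(time.Now().Unix())
	currentSlot := (now - uint64(genesis.Unix())) / secondsPerSlot // 192
	currentEpoch := currentSlot / slotsPerEpoch                    // 6
	prevEpoch := currentEpoch - 1                                  // 5
	// Only checkpoints at epoch 5 or 6 would be accepted.
	fmt.Println(currentEpoch, prevEpoch)
}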

View File

@@ -18,27 +18,6 @@ import (
"github.com/prysmaticlabs/prysm/shared/timeutils"
)
func TestAttestation_SlotSignature(t *testing.T) {
priv := bls.RandKey()
pub := priv.PublicKey()
state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Validators: []*ethpb.Validator{{PublicKey: pub.Marshal()}},
Fork: &pb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: 0,
},
Slot: 100,
})
require.NoError(t, err)
slot := uint64(101)
sig, err := helpers.SlotSignature(state, slot, priv)
require.NoError(t, err)
require.NoError(t, helpers.ComputeDomainVerifySigningRoot(state, 0, helpers.CurrentEpoch(state), slot,
params.BeaconConfig().DomainBeaconAttester, sig.Marshal()))
}
func TestAttestation_IsAggregator(t *testing.T) {
t.Run("aggregator", func(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
@@ -70,7 +49,8 @@ func TestAttestation_AggregateSignature(t *testing.T) {
atts := make([]*ethpb.Attestation, 0, 100)
msg := bytesutil.ToBytes32([]byte("hello"))
for i := 0; i < 100; i++ {
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
@@ -87,7 +67,8 @@ func TestAttestation_AggregateSignature(t *testing.T) {
atts := make([]*ethpb.Attestation, 0, 100)
msg := []byte("hello")
for i := 0; i < 100; i++ {
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg)
pubkeys = append(pubkeys, pub)
@@ -235,3 +216,12 @@ func Test_ValidateAttestationTime(t *testing.T) {
})
}
}
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
// Genesis was 6 epochs ago exactly.
genesis := time.Now().Add(-1 * time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*params.BeaconConfig().SlotsPerEpoch*6))
assert.Equal(t, true, helpers.VerifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 6}, genesis))
assert.Equal(t, true, helpers.VerifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 5}, genesis))
assert.Equal(t, false, helpers.VerifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 4}, genesis))
assert.Equal(t, false, helpers.VerifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 2}, genesis))
}

View File

@@ -23,6 +23,15 @@ func BlockRootAtSlot(state *stateTrie.BeaconState, slot uint64) ([]byte, error)
return state.BlockRootAtIndex(slot % params.BeaconConfig().SlotsPerHistoricalRoot)
}
// StateRootAtSlot returns the cached state root at that particular slot. If no state
// root has been cached it will return a zero-hash.
func StateRootAtSlot(state *stateTrie.BeaconState, slot uint64) ([]byte, error) {
if slot >= state.Slot() || state.Slot() > slot+params.BeaconConfig().SlotsPerHistoricalRoot {
return []byte{}, errors.Errorf("slot %d out of bounds", slot)
}
return state.StateRootAtIndex(slot % params.BeaconConfig().SlotsPerHistoricalRoot)
}
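
The bounds check above only allows slots strictly below the state's slot and no more than SLOTS_PER_HISTORICAL_ROOT slots behind it; anything outside that window is an error rather than a silent zero root. A small sketch of the same window check with the constant inlined (8192 is the assumed mainnet SLOTS_PER_HISTORICAL_ROOT):

package main

import "fmt"

const slotsPerHistoricalRoot = 8192 // assumed mainnet value

// inWindow mirrors the bounds check in StateRootAtSlot: the requested slot
// must lie in (stateSlot - SLOTS_PER_HISTORICAL_ROOT, stateSlot).
func inWindow(stateSlot, slot uint64) bool {
	return slot < stateSlot && stateSlot <= slot+slotsPerHistoricalRoot
}

func main() {
	fmt.Println(inWindow(100, 99))   // true: recent slot
	fmt.Println(inWindow(100, 100))  // false: the state's own slot is not cached yet
	fmt.Println(inWindow(9000, 100)) // false: older than the historical roots window
}
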
// BlockRoot returns the block root stored in the BeaconState for epoch start slot.
//
// Spec pseudocode definition:

View File

@@ -3,6 +3,7 @@
package helpers
import (
"bytes"
"fmt"
"sort"
@@ -12,16 +13,16 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
var committeeCache = cache.NewCommitteesCache()
var proposerIndicesCache = cache.NewProposerIndicesCache()
// SlotCommitteeCount returns the number of crosslink committees of a slot. The
// active validator count is provided as an argument rather than a direct implementation
// active validator count is provided as an argument rather than an imported implementation
// from the spec definition. Having the active validator count as an argument allows for
// cheaper computation, instead of retrieving head state, one can retrieve the validator
// count.
@@ -66,7 +67,7 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index,
// count=committees_per_slot * SLOTS_PER_EPOCH,
// )
func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot uint64, committeeIndex uint64) ([]uint64, error) {
func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot, committeeIndex uint64) ([]uint64, error) {
epoch := SlotToEpoch(slot)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
@@ -90,9 +91,9 @@ func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot uint64, committ
}
// BeaconCommittee returns the crosslink committee of a given slot and committee index. The
// validator indices and seed are provided as an argument rather than a direct implementation
// validator indices and seed are provided as an argument rather than an imported implementation
// from the spec definition. Having them as an argument allows for cheaper computation run time.
func BeaconCommittee(validatorIndices []uint64, seed [32]byte, slot uint64, committeeIndex uint64) ([]uint64, error) {
func BeaconCommittee(validatorIndices []uint64, seed [32]byte, slot, committeeIndex uint64) ([]uint64, error) {
indices, err := committeeCache.Committee(slot, seed, committeeIndex)
if err != nil {
return nil, errors.Wrap(err, "could not interface with committee cache")
@@ -126,8 +127,7 @@ func BeaconCommittee(validatorIndices []uint64, seed [32]byte, slot uint64, comm
func ComputeCommittee(
indices []uint64,
seed [32]byte,
index uint64,
count uint64,
index, count uint64,
) ([]uint64, error) {
validatorCount := uint64(len(indices))
start := sliceutil.SplitOffset(validatorCount, count, index)
@@ -148,25 +148,6 @@ func ComputeCommittee(
return nil, err
}
// This updates the cache on a miss.
if featureconfig.Get().UseCheckPointInfoCache {
sortedIndices := make([]uint64, len(indices))
copy(sortedIndices, indices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
count = SlotCommitteeCount(uint64(len(shuffledIndices)))
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
ShuffledIndices: shuffledList,
CommitteeCount: count * params.BeaconConfig().SlotsPerEpoch,
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return nil, err
}
}
return shuffledList[start:end], nil
}
@@ -205,7 +186,9 @@ func CommitteeAssignments(
return nil, nil, err
}
proposerIndexToSlots := make(map[uint64][]uint64, params.BeaconConfig().SlotsPerEpoch)
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Proposal epochs do not have a lookahead, so we skip future epochs here.
validProposalEpoch := epoch < nextEpoch
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch && validProposalEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
@@ -348,6 +331,12 @@ func UpdateCommitteeCache(state *stateTrie.BeaconState, epoch uint64) error {
// UpdateProposerIndicesInCache updates proposer indices entry of the committee cache.
func UpdateProposerIndicesInCache(state *stateTrie.BeaconState, epoch uint64) error {
// The cache uses the state root at the last slot of (current epoch - 2) as its key (e.g. for epoch 2, the key is the root at slot 31).
// This is why the genesis epoch is skipped.
if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
return nil
}
indices, err := ActiveValidatorIndices(state, epoch)
if err != nil {
return err
@@ -356,21 +345,33 @@ func UpdateProposerIndicesInCache(state *stateTrie.BeaconState, epoch uint64) er
if err != nil {
return err
}
// The committee cache uses attester domain seed as key.
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
// Use the state root from (current_epoch - 1 - lookahead).
wantedEpoch := PrevEpoch(state)
if wantedEpoch >= params.BeaconConfig().MinSeedLookahead {
wantedEpoch -= params.BeaconConfig().MinSeedLookahead
}
s, err := EndSlot(wantedEpoch)
if err != nil {
return err
}
if err := committeeCache.AddProposerIndicesList(seed, proposerIndices); err != nil {
r, err := StateRootAtSlot(state, s)
if err != nil {
return err
}
return nil
// Skip caching if the key is invalid.
if r == nil || bytes.Equal(r, params.BeaconConfig().ZeroHash[:]) {
return nil
}
return proposerIndicesCache.AddProposerIndices(&cache.ProposerIndices{
BlockRoot: bytesutil.ToBytes32(r),
ProposerIndices: proposerIndices,
})
}
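
To make the new keying concrete: the cache entry is stored under the state root at the end slot of (previous epoch - MIN_SEED_LOOKAHEAD). A worked sketch of just the slot arithmetic (assuming mainnet SLOTS_PER_EPOCH = 32 and MIN_SEED_LOOKAHEAD = 1, and only epochs past the genesis-epoch guard above):

package main

import "fmt"

const (
	slotsPerEpoch    = 32 // assumed mainnet value
	minSeedLookahead = 1  // assumed mainnet value
)

// keySlot returns the slot whose state root keys the proposer indices cache
// for the given current epoch, mirroring the wantedEpoch/EndSlot logic above.
// Callers are assumed to be past the genesis-epoch guard, so currentEpoch > 1.
func keySlot(currentEpoch uint64) uint64 {
	prevEpoch := currentEpoch - 1
	wantedEpoch := prevEpoch
	if wantedEpoch >= minSeedLookahead {
		wantedEpoch -= minSeedLookahead
	}
	return (wantedEpoch+1)*slotsPerEpoch - 1 // EndSlot(wantedEpoch)
}

func main() {
	// For epoch 2 the key is the state root at slot 31, matching the comment in the diff.
	fmt.Println(keySlot(2)) // 31
	fmt.Println(keySlot(3)) // 63
}
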
// ClearCache clears the committee cache
func ClearCache() {
committeeCache = cache.NewCommitteesCache()
proposerIndicesCache = cache.NewProposerIndicesCache()
}
// This computes proposer indices of the current epoch and returns a list of proposer indices,

View File

@@ -203,6 +203,36 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}
}
func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
// For the first 2 epochs, only half of the validators are activated.
var activationEpoch uint64
if i >= len(validators)/2 {
activationEpoch = 3
}
validators[i] = &ethpb.Validator{
ActivationEpoch: activationEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndxs, err := CommitteeAssignments(state, CurrentEpoch(state))
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
_, proposerIndxs, err = CommitteeAssignments(state, CurrentEpoch(state)+1)
require.NoError(t, err)
require.Equal(t, 0, len(proposerIndxs), "wanted empty proposer index set")
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -630,7 +660,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
}
func TestUpdateProposerIndicesInCache_CouldNotGetActiveIndices(t *testing.T) {
err := UpdateProposerIndicesInCache(&beaconstate.BeaconState{}, 0)
err := UpdateProposerIndicesInCache(&beaconstate.BeaconState{}, 2)
want := "nil inner state"
require.ErrorContains(t, want, err)
}

View File

@@ -64,7 +64,7 @@ func TotalActiveBalance(state *stateTrie.BeaconState) (uint64, error) {
// Increase the validator balance at index ``index`` by ``delta``.
// """
// state.balances[index] += delta
func IncreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) error {
func IncreaseBalance(state *stateTrie.BeaconState, idx, delta uint64) error {
balAtIdx, err := state.BalanceAtIndex(idx)
if err != nil {
return err
@@ -82,7 +82,7 @@ func IncreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) err
// Increase the validator balance at index ``index`` by ``delta``.
// """
// state.balances[index] += delta
func IncreaseBalanceWithVal(currBalance uint64, delta uint64) uint64 {
func IncreaseBalanceWithVal(currBalance, delta uint64) uint64 {
return currBalance + delta
}
@@ -94,7 +94,7 @@ func IncreaseBalanceWithVal(currBalance uint64, delta uint64) uint64 {
// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
// """
// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
func DecreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) error {
func DecreaseBalance(state *stateTrie.BeaconState, idx, delta uint64) error {
balAtIdx, err := state.BalanceAtIndex(idx)
if err != nil {
return err
@@ -112,7 +112,7 @@ func DecreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) err
// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
// """
// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
func DecreaseBalanceWithVal(currBalance uint64, delta uint64) uint64 {
func DecreaseBalanceWithVal(currBalance, delta uint64) uint64 {
if delta > currBalance {
return 0
}

View File

@@ -34,13 +34,13 @@ func SplitIndices(l []uint64, n uint64) [][]uint64 {
// We utilize 'swap or not' shuffling in this implementation; we are allocating the memory with the seed that stays
// constant between iterations instead of reallocating it each iteration as in the spec. This implementation is based
// on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle
func ShuffledIndex(index uint64, indexCount uint64, seed [32]byte) (uint64, error) {
func ShuffledIndex(index, indexCount uint64, seed [32]byte) (uint64, error) {
return ComputeShuffledIndex(index, indexCount, seed, true /* shuffle */)
}
// UnShuffledIndex returns the inverse of ShuffledIndex. This implementation is based
// on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle
func UnShuffledIndex(index uint64, indexCount uint64, seed [32]byte) (uint64, error) {
func UnShuffledIndex(index, indexCount uint64, seed [32]byte) (uint64, error) {
return ComputeShuffledIndex(index, indexCount, seed, false /* un-shuffle */)
}
@@ -64,7 +64,7 @@ func UnShuffledIndex(index uint64, indexCount uint64, seed [32]byte) (uint64, er
// index = flip if bit else index
//
// return ValidatorIndex(index)
func ComputeShuffledIndex(index uint64, indexCount uint64, seed [32]byte, shuffle bool) (uint64, error) {
func ComputeShuffledIndex(index, indexCount uint64, seed [32]byte, shuffle bool) (uint64, error) {
if params.BeaconConfig().ShuffleRoundCount == 0 {
return index, nil
}

View File

@@ -60,7 +60,7 @@ func TestSplitIndices_OK(t *testing.T) {
}
func TestShuffleList_Vs_ShuffleIndex(t *testing.T) {
list := []uint64{}
var list []uint64
listSize := uint64(1000)
seed := [32]byte{123, 42}
for i := uint64(0); i < listSize; i++ {
@@ -124,7 +124,7 @@ func BenchmarkShuffleList(b *testing.B) {
}
func TestShuffledIndex(t *testing.T) {
list := []uint64{}
var list []uint64
listSize := uint64(399)
for i := uint64(0); i < listSize; i++ {
list = append(list, i)

View File

@@ -6,7 +6,6 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -66,7 +65,7 @@ func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, er
if err != nil {
return [32]byte{}, err
}
container := &p2ppb.SigningData{
container := &pb.SigningData{
ObjectRoot: objRoot[:],
Domain: domain,
}
@@ -74,7 +73,7 @@ func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, er
}
// ComputeDomainVerifySigningRoot computes domain and verifies signing root of an object given the beacon state, validator index and signature.
func ComputeDomainVerifySigningRoot(state *state.BeaconState, index uint64, epoch uint64, obj interface{}, domain [4]byte, sig []byte) error {
func ComputeDomainVerifySigningRoot(state *state.BeaconState, index, epoch uint64, obj interface{}, domain [4]byte, sig []byte) error {
v, err := state.ValidatorAtIndex(index)
if err != nil {
return err
@@ -87,7 +86,7 @@ func ComputeDomainVerifySigningRoot(state *state.BeaconState, index uint64, epoc
}
// VerifySigningRoot verifies the signing root of an object given it's public key, signature and domain.
func VerifySigningRoot(obj interface{}, pub []byte, signature []byte, domain []byte) error {
func VerifySigningRoot(obj interface{}, pub, signature, domain []byte) error {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return errors.Wrap(err, "could not convert bytes to public key")
@@ -107,7 +106,7 @@ func VerifySigningRoot(obj interface{}, pub []byte, signature []byte, domain []b
}
// VerifyBlockSigningRoot verifies the signing root of a block given it's public key, signature and domain.
func VerifyBlockSigningRoot(blk *ethpb.BeaconBlock, pub []byte, signature []byte, domain []byte) error {
func VerifyBlockSigningRoot(blk *ethpb.BeaconBlock, pub, signature, domain []byte) error {
set, err := RetrieveBlockSignatureSet(blk, pub, signature, domain)
if err != nil {
return err
@@ -129,15 +128,13 @@ func VerifyBlockSigningRoot(blk *ethpb.BeaconBlock, pub []byte, signature []byte
// RetrieveBlockSignatureSet retrieves the relevant signature, message and pubkey data from a block and collating it
// into a signature set object.
func RetrieveBlockSignatureSet(blk *ethpb.BeaconBlock, pub []byte, signature []byte, domain []byte) (*bls.SignatureSet, error) {
func RetrieveBlockSignatureSet(blk *ethpb.BeaconBlock, pub, signature, domain []byte) (*bls.SignatureSet, error) {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
root, err := signingData(func() ([32]byte, error) {
// utilize custom block hashing function
return blk.HashTreeRoot()
}, domain)
// utilize custom block hashing function
root, err := signingData(blk.HashTreeRoot, domain)
if err != nil {
return nil, errors.Wrap(err, "could not compute signing root")
}
@@ -149,7 +146,7 @@ func RetrieveBlockSignatureSet(blk *ethpb.BeaconBlock, pub []byte, signature []b
}
// VerifyBlockHeaderSigningRoot verifies the signing root of a block header given it's public key, signature and domain.
func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub []byte, signature []byte, domain []byte) error {
func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signature, domain []byte) error {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return errors.Wrap(err, "could not convert bytes to public key")
@@ -158,9 +155,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub []byte, s
if err != nil {
return errors.Wrap(err, "could not convert bytes to signature")
}
root, err := signingData(func() ([32]byte, error) {
return blkHdr.HashTreeRoot()
}, domain)
root, err := signingData(blkHdr.HashTreeRoot, domain)
if err != nil {
return errors.Wrap(err, "could not compute signing root")
}
@@ -183,7 +178,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub []byte, s
// genesis_validators_root = Root() # all bytes zero by default
// fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root)
// return Domain(domain_type + fork_data_root[:28])
func ComputeDomain(domainType [DomainByteLength]byte, forkVersion []byte, genesisValidatorsRoot []byte) ([]byte, error) {
func ComputeDomain(domainType [DomainByteLength]byte, forkVersion, genesisValidatorsRoot []byte) ([]byte, error) {
if forkVersion == nil {
forkVersion = params.BeaconConfig().GenesisForkVersion
}
@@ -203,7 +198,7 @@ func ComputeDomain(domainType [DomainByteLength]byte, forkVersion []byte, genesi
// This returns the bls domain given by the domain type and fork data root.
func domain(domainType [DomainByteLength]byte, forkDataRoot []byte) []byte {
b := []byte{}
var b []byte
b = append(b, domainType[:4]...)
b = append(b, forkDataRoot[:28]...)
return b
@@ -222,7 +217,7 @@ func domain(domainType [DomainByteLength]byte, forkDataRoot []byte) []byte {
// current_version=current_version,
// genesis_validators_root=genesis_validators_root,
// ))
func computeForkDataRoot(version []byte, root []byte) ([32]byte, error) {
func computeForkDataRoot(version, root []byte) ([32]byte, error) {
r, err := (&pb.ForkData{
CurrentVersion: version,
GenesisValidatorsRoot: root,
@@ -243,7 +238,7 @@ func computeForkDataRoot(version []byte, root []byte) ([32]byte, error) {
// 4-bytes suffices for practical separation of forks/chains.
// """
// return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4])
func ComputeForkDigest(version []byte, genesisValidatorsRoot []byte) ([4]byte, error) {
func ComputeForkDigest(version, genesisValidatorsRoot []byte) ([4]byte, error) {
dataRoot, err := computeForkDataRoot(version, genesisValidatorsRoot)
if err != nil {
return [4]byte{}, err

View File

@@ -63,9 +63,9 @@ func TestFuzzverifySigningRoot_10000(t *testing.T) {
pubkey := [48]byte{}
sig := [96]byte{}
domain := [4]byte{}
p := []byte{}
s := []byte{}
d := []byte{}
var p []byte
var s []byte
var d []byte
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(&pubkey)

View File

@@ -2,6 +2,7 @@ package helpers
import (
"fmt"
"math"
"time"
"github.com/pkg/errors"
@@ -82,6 +83,19 @@ func StartSlot(epoch uint64) (uint64, error) {
return slot, nil
}
// EndSlot returns the last slot number of the given epoch.
func EndSlot(epoch uint64) (uint64, error) {
if epoch == math.MaxUint64 {
return 0, errors.New("start slot calculation overflows")
}
slot, err := StartSlot(epoch + 1)
if err != nil {
return 0, err
}
return slot - 1, nil
}
// IsEpochStart returns true if the given slot number is an epoch starting slot
// number.
func IsEpochStart(slot uint64) bool {
@@ -100,7 +114,7 @@ func SlotsSinceEpochStarts(slot uint64) uint64 {
}
// VerifySlotTime validates the input slot is not from the future.
func VerifySlotTime(genesisTime uint64, slot uint64, timeTolerance time.Duration) error {
func VerifySlotTime(genesisTime, slot uint64, timeTolerance time.Duration) error {
slotTime, err := SlotToTime(genesisTime, slot)
if err != nil {
return err
@@ -122,14 +136,14 @@ func VerifySlotTime(genesisTime uint64, slot uint64, timeTolerance time.Duration
}
// SlotToTime takes the given slot and genesis time to determine the start time of the slot.
func SlotToTime(genesisTimeSec uint64, slot uint64) (time.Time, error) {
func SlotToTime(genesisTimeSec, slot uint64) (time.Time, error) {
timeSinceGenesis, err := mathutil.Mul64(slot, params.BeaconConfig().SecondsPerSlot)
if err != nil {
return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %v", slot, err)
return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %w", slot, err)
}
sTime, err := mathutil.Add64(genesisTimeSec, timeSinceGenesis)
if err != nil {
return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %v", slot, err)
return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %w", slot, err)
}
return time.Unix(int64(sTime), 0), nil
}
@@ -153,7 +167,7 @@ func CurrentSlot(genesisTimeSec uint64) uint64 {
// ValidateSlotClock validates a provided slot against the local
// clock to ensure slots that are unreasonable are returned with
// an error.
func ValidateSlotClock(slot uint64, genesisTimeSec uint64) error {
func ValidateSlotClock(slot, genesisTimeSec uint64) error {
maxPossibleSlot := CurrentSlot(genesisTimeSec) + MaxSlotBuffer
// Defensive check to ensure that we only process slots up to a hard limit
// from our local clock.
@@ -204,3 +218,13 @@ func WeakSubjectivityCheckptEpoch(valCount uint64) (uint64, error) {
}
return wsp, nil
}
// VotingPeriodStartTime returns the current voting period's start time
// depending on the provided genesis and current slot.
func VotingPeriodStartTime(genesis, slot uint64) uint64 {
startTime := genesis
startTime +=
(slot - (slot % (params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch))) *
params.BeaconConfig().SecondsPerSlot
return startTime
}
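The new VotingPeriodStartTime helper rounds the slot down to the start of its eth1 voting period and converts the result into a Unix timestamp. A worked sketch with assumed mainnet constants (12 s slots, 32 slots per epoch, 64 epochs per eth1 voting period):

package main

import "fmt"

const (
	secondsPerSlot            = 12 // assumed mainnet value
	slotsPerEpoch             = 32 // assumed mainnet value
	epochsPerEth1VotingPeriod = 64 // assumed mainnet value
)

// votingPeriodStartTime mirrors the helper above: round the slot down to the
// start of its eth1 voting period, then convert that many slots into seconds
// past genesis.
func votingPeriodStartTime(genesis, slot uint64) uint64 {
	periodSlots := uint64(epochsPerEth1VotingPeriod * slotsPerEpoch) // 2048 slots per period
	return genesis + (slot-(slot%periodSlots))*secondsPerSlot
}

func main() {
	genesis := uint64(1_600_000_000)
	// Slot 2050 is 2 slots into the second voting period, which started at slot 2048.
	fmt.Println(votingPeriodStartTime(genesis, 2050)) // 1600000000 + 2048*12 = 1600024576
}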

View File

@@ -96,11 +96,34 @@ func TestEpochStartSlot_OK(t *testing.T) {
{epoch: 1 << 60, startSlot: 1 << 63, error: true},
}
for _, tt := range tests {
state := &pb.BeaconState{Slot: tt.epoch}
ss, err := StartSlot(tt.epoch)
if !tt.error {
require.NoError(t, err)
assert.Equal(t, tt.startSlot, ss, "StartSlot(%d)", state.Slot)
assert.Equal(t, tt.startSlot, ss, "StartSlot(%d)", tt.epoch)
} else {
require.ErrorContains(t, "start slot calculation overflow", err)
}
}
}
func TestEpochEndSlot_OK(t *testing.T) {
tests := []struct {
epoch uint64
startSlot uint64
error bool
}{
{epoch: 0, startSlot: 1*params.BeaconConfig().SlotsPerEpoch - 1, error: false},
{epoch: 1, startSlot: 2*params.BeaconConfig().SlotsPerEpoch - 1, error: false},
{epoch: 10, startSlot: 11*params.BeaconConfig().SlotsPerEpoch - 1, error: false},
{epoch: 1 << 59, startSlot: 1 << 63, error: true},
{epoch: 1 << 60, startSlot: 1 << 63, error: true},
{epoch: math.MaxUint64, startSlot: 0, error: true},
}
for _, tt := range tests {
ss, err := EndSlot(tt.epoch)
if !tt.error {
require.NoError(t, err)
assert.Equal(t, tt.startSlot, ss, "StartSlot(%d)", tt.epoch)
} else {
require.ErrorContains(t, "start slot calculation overflow", err)
}

View File

@@ -1,6 +1,8 @@
package helpers
import (
"bytes"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -29,7 +31,7 @@ func IsActiveValidatorUsingTrie(validator *stateTrie.ReadOnlyValidator, epoch ui
return checkValidatorActiveStatus(validator.ActivationEpoch(), validator.ExitEpoch(), epoch)
}
func checkValidatorActiveStatus(activationEpoch uint64, exitEpoch uint64, epoch uint64) bool {
func checkValidatorActiveStatus(activationEpoch, exitEpoch, epoch uint64) bool {
return activationEpoch <= epoch && epoch < exitEpoch
}
@@ -42,7 +44,7 @@ func checkValidatorActiveStatus(activationEpoch uint64, exitEpoch uint64, epoch
// Check if ``validator`` is slashable.
// """
// return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch)
func IsSlashableValidator(activationEpoch uint64, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
func IsSlashableValidator(activationEpoch, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
return checkValidatorSlashable(activationEpoch, withdrawableEpoch, slashed, epoch)
}
@@ -51,7 +53,7 @@ func IsSlashableValidatorUsingTrie(val *stateTrie.ReadOnlyValidator, epoch uint6
return checkValidatorSlashable(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), epoch)
}
func checkValidatorSlashable(activationEpoch uint64, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
func checkValidatorSlashable(activationEpoch, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
active := activationEpoch <= epoch
beforeWithdrawable := epoch < withdrawableEpoch
return beforeWithdrawable && active && !slashed
@@ -175,20 +177,39 @@ func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) {
// return compute_proposer_index(state, indices, seed)
func BeaconProposerIndex(state *stateTrie.BeaconState) (uint64, error) {
e := CurrentEpoch(state)
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return 0, errors.Wrap(err, "could not generate seed")
}
proposerIndices, err := committeeCache.ProposerIndices(seed)
if err != nil {
return 0, errors.Wrap(err, "could not interface with committee cache")
}
if proposerIndices != nil {
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
// The cache uses the state root at the last slot of (previous epoch - MIN_SEED_LOOKAHEAD) as its key (e.g. starting epoch 1 at slot 32, the key is the state root at slot 31).
// For simplicity, the node skips caching for the genesis epoch.
if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
wantedEpoch := PrevEpoch(state)
if wantedEpoch >= params.BeaconConfig().MinSeedLookahead {
wantedEpoch -= params.BeaconConfig().MinSeedLookahead
}
s, err := EndSlot(wantedEpoch)
if err != nil {
return 0, err
}
r, err := StateRootAtSlot(state, s)
if err != nil {
return 0, err
}
if r != nil && !bytes.Equal(r, params.BeaconConfig().ZeroHash[:]) {
proposerIndices, err := proposerIndicesCache.ProposerIndices(bytesutil.ToBytes32(r))
if err != nil {
return 0, errors.Wrap(err, "could not interface with committee cache")
}
if proposerIndices != nil {
if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
return 0, errors.Errorf("length of proposer indices is not equal %d to slots per epoch", len(proposerIndices))
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
if err := UpdateProposerIndicesInCache(state, e); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
}
}
seed, err = Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return 0, errors.Wrap(err, "could not generate seed")
}
@@ -201,19 +222,11 @@ func BeaconProposerIndex(state *stateTrie.BeaconState) (uint64, error) {
return 0, errors.Wrap(err, "could not get active indices")
}
if err := UpdateProposerIndicesInCache(state, e); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
return ComputeProposerIndex(state, indices, seedWithSlotHash)
}
// ComputeProposerIndex returns the index sampled by effective balance, which is used to calculate proposer.
//
// This method is more efficient than ComputeProposerIndexWithValidators as it uses the read only validator
// abstraction to retrieve validator related data. Whereas the other method requires a whole copy of the validator
// set.
//
// Spec pseudocode definition:
// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Hash) -> ValidatorIndex:
// """
@@ -260,57 +273,6 @@ func ComputeProposerIndex(bState *stateTrie.BeaconState, activeIndices []uint64,
}
}
// ComputeProposerIndexWithValidators returns the index sampled by effective balance, which is used to calculate proposer.
//
// Note: This method signature deviates slightly from the spec recommended definition. The full
// state object is not required to compute the proposer index.
//
// Spec pseudocode definition:
// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Hash) -> ValidatorIndex:
// """
// Return from ``indices`` a random index sampled by effective balance.
// """
// assert len(indices) > 0
// MAX_RANDOM_BYTE = 2**8 - 1
// i = 0
// while True:
// candidate_index = indices[compute_shuffled_index(ValidatorIndex(i % len(indices)), len(indices), seed)]
// random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
// effective_balance = state.validators[candidate_index].effective_balance
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
// return ValidatorIndex(candidate_index)
// i += 1
// Deprecated: Prefer using the beacon state with ComputeProposerIndex to avoid an unnecessary copy of the validator set.
func ComputeProposerIndexWithValidators(validators []*ethpb.Validator, activeIndices []uint64, seed [32]byte) (uint64, error) {
length := uint64(len(activeIndices))
if length == 0 {
return 0, errors.New("empty active indices list")
}
maxRandomByte := uint64(1<<8 - 1)
hashFunc := hashutil.CustomSHA256Hasher()
for i := uint64(0); ; i++ {
candidateIndex, err := ComputeShuffledIndex(i%length, length, seed, true /* shuffle */)
if err != nil {
return 0, err
}
candidateIndex = activeIndices[candidateIndex]
if candidateIndex >= uint64(len(validators)) {
return 0, errors.New("active index out of range")
}
b := append(seed[:], bytesutil.Bytes8(i/32)...)
randomByte := hashFunc(b)[i%32]
v := validators[candidateIndex]
var effectiveBal uint64
if v != nil {
effectiveBal = v.EffectiveBalance
}
if effectiveBal*maxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
return candidateIndex, nil
}
}
}
// Domain returns the domain version for BLS private key to sign and verify.
//
// Spec pseudocode definition:
@@ -362,7 +324,7 @@ func IsEligibleForActivationQueueUsingTrie(validator *stateTrie.ReadOnlyValidato
}
// isEligibleForActivationQueue carries out the logic for IsEligibleForActivationQueue*
func isEligibileForActivationQueue(activationEligibilityEpoch uint64, effectiveBalance uint64) bool {
func isEligibileForActivationQueue(activationEligibilityEpoch, effectiveBalance uint64) bool {
return activationEligibilityEpoch == params.BeaconConfig().FarFutureEpoch &&
effectiveBalance == params.BeaconConfig().MaxEffectiveBalance
}
@@ -395,7 +357,7 @@ func IsEligibleForActivationUsingTrie(state *stateTrie.BeaconState, validator *s
}
// isEligibleForActivation carries out the logic for IsEligibleForActivation*
func isEligibleForActivation(activationEligibilityEpoch uint64, activationEpoch uint64, finalizedEpoch uint64) bool {
func isEligibleForActivation(activationEligibilityEpoch, activationEpoch, finalizedEpoch uint64) bool {
return activationEligibilityEpoch <= finalizedEpoch &&
activationEpoch == params.BeaconConfig().FarFutureEpoch
}
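Tying back to the compute_proposer_index pseudocode earlier in this file: a candidate is accepted when effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte, so a validator at the full effective balance always passes while lighter validators pass in proportion to their balance. A tiny sketch of just that acceptance rule (32e9 Gwei assumed for MAX_EFFECTIVE_BALANCE):

package main

import "fmt"

const (
	maxRandomByte       = 255  // 2**8 - 1
	maxEffectiveBalance = 32e9 // assumed MAX_EFFECTIVE_BALANCE in Gwei
)

// accepted mirrors the acceptance test inside compute_proposer_index:
// a candidate with effective balance b and random byte r is chosen
// iff b*255 >= MAX_EFFECTIVE_BALANCE*r.
func accepted(effectiveBalanceGwei uint64, randomByte byte) bool {
	return effectiveBalanceGwei*maxRandomByte >= uint64(maxEffectiveBalance)*uint64(randomByte)
}

func main() {
	fmt.Println(accepted(32e9, 255)) // true: full effective balance always passes
	fmt.Println(accepted(16e9, 255)) // false: half balance fails the worst random byte
	fmt.Println(accepted(16e9, 100)) // true: half balance passes roughly half of random bytes
}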

View File

@@ -1,18 +1,18 @@
package helpers
import (
"errors"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestIsActiveValidator_OK(t *testing.T) {
@@ -275,6 +275,39 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
}
}
func TestBeaconProposerIndex_BadState(t *testing.T) {
params.SetupTestConfigCleanup(t)
ClearCache()
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
roots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := uint64(0); i < params.BeaconConfig().SlotsPerHistoricalRoot; i++ {
roots[i] = make([]byte, 32)
}
state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 0,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: roots,
StateRoots: roots,
})
require.NoError(t, err)
// Set a very high slot so that the retrieved root will be
// nonexistent in the proposer cache.
require.NoError(t, state.SetSlot(100))
_, err = BeaconProposerIndex(state)
require.NoError(t, err)
assert.Equal(t, 0, len(proposerIndicesCache.ProposerIndicesCache.ListKeys()))
}
func TestComputeProposerIndex_Compatibility(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
@@ -309,7 +342,7 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
seedWithSlotHash := hashutil.Hash(seedWithSlot)
index, err := ComputeProposerIndexWithValidators(state.Validators(), indices, seedWithSlotHash)
index, err := computeProposerIndexWithValidators(state.Validators(), indices, seedWithSlotHash)
require.NoError(t, err)
wantedProposerIndices = append(wantedProposerIndices, index)
}
@@ -743,3 +776,33 @@ func TestIsIsEligibleForActivation(t *testing.T) {
})
}
}
func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeIndices []uint64, seed [32]byte) (uint64, error) {
length := uint64(len(activeIndices))
if length == 0 {
return 0, errors.New("empty active indices list")
}
maxRandomByte := uint64(1<<8 - 1)
hashFunc := hashutil.CustomSHA256Hasher()
for i := uint64(0); ; i++ {
candidateIndex, err := ComputeShuffledIndex(i%length, length, seed, true /* shuffle */)
if err != nil {
return 0, err
}
candidateIndex = activeIndices[candidateIndex]
if candidateIndex >= uint64(len(validators)) {
return 0, errors.New("active index out of range")
}
b := append(seed[:], bytesutil.Bytes8(i/32)...)
randomByte := hashFunc(b)[i%32]
v := validators[candidateIndex]
var effectiveBal uint64
if v != nil {
effectiveBal = v.EffectiveBalance
}
if effectiveBal*maxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
return candidateIndex, nil
}
}
}

View File

@@ -32,6 +32,8 @@ go_library(
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/mathutil:go_default_library",
"//shared/params:go_default_library",
"//shared/traceutil:go_default_library",

View File

@@ -37,7 +37,6 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
block, err := benchutil.PreGenFullBlock()
require.NoError(b, err)
b.N = runAmount
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := state.ExecuteStateTransition(context.Background(), cleanStates[i], block)
@@ -64,7 +63,6 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
_, err = state.ExecuteStateTransition(context.Background(), beaconState, block)
require.NoError(b, err, "Failed to process block, benchmarks will fail")
b.N = runAmount
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := state.ExecuteStateTransition(context.Background(), cleanStates[i], block)
@@ -97,7 +95,6 @@ func BenchmarkHashTreeRoot_FullState(b *testing.B) {
beaconState, err := benchutil.PreGenState2FullEpochs()
require.NoError(b, err)
b.N = 50
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := beaconState.HashTreeRoot(context.Background())
@@ -115,7 +112,6 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
_, err = beaconState.HashTreeRoot(ctx)
require.NoError(b, err)
b.N = 50
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := beaconState.HashTreeRoot(ctx)
@@ -131,7 +127,6 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
b.Run("Proto_Marshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
_, err := proto.Marshal(natState)
require.NoError(b, err)
@@ -141,7 +136,6 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
b.Run("Fast_SSZ_Marshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
_, err := natState.MarshalSSZ()
require.NoError(b, err)
@@ -161,7 +155,6 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
b.Run("Proto_Unmarshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
require.NoError(b, proto.Unmarshal(protoObject, &pb.BeaconState{}))
}
@@ -170,7 +163,6 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
b.Run("Fast_SSZ_Unmarshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
sszState := &pb.BeaconState{}
require.NoError(b, sszState.UnmarshalSSZ(sszObject))

View File

@@ -1,7 +1,12 @@
package state
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil"
)
// SkipSlotCache exists for the unlikely scenario that is a large gap between the head state and
@@ -9,3 +14,14 @@ import (
// difficult or impossible to compute the appropriate beacon state for assignments within a
// reasonable amount of time.
var SkipSlotCache = cache.NewSkipSlotCache()
// The key for the skip slot cache mixes the state root with the state slot.
// The state root is in the mix so that different forks with the same skip slots
// do not hit the same cache entry; we don't want beacon states mixed up between different chains.
func cacheKey(ctx context.Context, state *beaconstate.BeaconState) ([32]byte, error) {
r, err := state.HashTreeRoot(ctx)
if err != nil {
return [32]byte{}, err
}
return hashutil.Hash(append(bytesutil.Bytes32(state.Slot()), r[:]...)), nil
}
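Because the key above mixes the state root into the hash, two forks that have skipped to the same slot resolve to different cache entries. A tiny sketch of the same idea, with sha256 and an 8-byte slot encoding standing in for hashutil.Hash and bytesutil.Bytes32:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// cacheKey mirrors the helper above in spirit: hash(slot_bytes || state_root).
func cacheKey(slot uint64, stateRoot [32]byte) [32]byte {
	b := make([]byte, 8, 8+32)
	binary.LittleEndian.PutUint64(b, slot)
	return sha256.Sum256(append(b, stateRoot[:]...))
}

func main() {
	rootA := [32]byte{1} // state root on fork A
	rootB := [32]byte{2} // state root on fork B
	// Same slot, different forks: the keys differ, so the entries cannot collide.
	fmt.Println(cacheKey(100, rootA) != cacheKey(100, rootB)) // true
}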

View File

@@ -1,7 +1,9 @@
package state_test
import (
"bytes"
"context"
"sync"
"testing"
"github.com/prysmaticlabs/go-ssz"
@@ -36,3 +38,123 @@ func TestSkipSlotCache_OK(t *testing.T) {
t.Fatal("Skipped slots cache leads to different states")
}
}
func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
bState, privs := testutil.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
originalState, err := beaconstate.InitializeFromProto(bState.CloneInnerState())
require.NoError(t, err)
blkCfg := testutil.DefaultBlockGenConfig()
blkCfg.NumAttestations = 1
state.SkipSlotCache.Disable()
// First transition will be with an empty cache, so the cache becomes populated
// with the state
blk, err := testutil.GenerateFullBlock(bState, privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
originalState, err = state.ExecuteStateTransition(context.Background(), originalState, blk)
require.NoError(t, err, "Could not run state transition")
// Create two shallow but different forks
var state1, state2 *beaconstate.BeaconState
{
blk, err := testutil.GenerateFullBlock(originalState.Copy(), privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
copy(blk.Block.Body.Graffiti, "block 1")
signature, err := testutil.BlockSignature(originalState, blk.Block, privs)
require.NoError(t, err)
blk.Signature = signature.Marshal()
state1, err = state.ExecuteStateTransition(context.Background(), originalState.Copy(), blk)
require.NoError(t, err, "Could not run state transition")
}
{
blk, err := testutil.GenerateFullBlock(originalState.Copy(), privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
copy(blk.Block.Body.Graffiti, "block 2")
signature, err := testutil.BlockSignature(originalState, blk.Block, privs)
require.NoError(t, err)
blk.Signature = signature.Marshal()
state2, err = state.ExecuteStateTransition(context.Background(), originalState.Copy(), blk)
require.NoError(t, err, "Could not run state transition")
}
r1, err := state1.HashTreeRoot(context.Background())
require.NoError(t, err)
r2, err := state2.HashTreeRoot(context.Background())
require.NoError(t, err)
if r1 == r2 {
t.Fatalf("need different starting states, got: %x", r1)
}
if state1.Slot() != state2.Slot() {
t.Fatalf("expecting different chains, but states at same slot")
}
// prepare copies for both states
var setups []*beaconstate.BeaconState
for i := uint64(0); i < 300; i++ {
var st *beaconstate.BeaconState
if i%2 == 0 {
st = state1
} else {
st = state2
}
setups = append(setups, st.Copy())
}
problemSlot := state1.Slot() + 2
expected1, err := state.ProcessSlots(context.Background(), state1.Copy(), problemSlot)
require.NoError(t, err)
expectedRoot1, err := expected1.HashTreeRoot(context.Background())
require.NoError(t, err)
t.Logf("chain 1 (even i) expected root %x at slot %d", expectedRoot1[:], problemSlot)
tmp1, err := state.ProcessSlots(context.Background(), expected1.Copy(), problemSlot+1)
require.NoError(t, err)
if gotRoot := tmp1.StateRoots()[problemSlot]; !bytes.Equal(gotRoot, expectedRoot1[:]) {
t.Fatalf("state roots for chain 1 are bad, expected root doesn't match: %x <> %x", gotRoot, expectedRoot1[:])
}
expected2, err := state.ProcessSlots(context.Background(), state2.Copy(), problemSlot)
require.NoError(t, err)
expectedRoot2, err := expected2.HashTreeRoot(context.Background())
require.NoError(t, err)
t.Logf("chain 2 (odd i) expected root %x at slot %d", expectedRoot2[:], problemSlot)
tmp2, err := state.ProcessSlots(context.Background(), expected2.Copy(), problemSlot+1)
require.NoError(t, err)
if gotRoot := tmp2.StateRoots()[problemSlot]; !bytes.Equal(gotRoot, expectedRoot2[:]) {
t.Fatalf("state roots for chain 2 are bad, expected root doesn't match %x <> %x", gotRoot, expectedRoot2[:])
}
var wg sync.WaitGroup
wg.Add(len(setups))
step := func(i int, setup *beaconstate.BeaconState) {
// go at least 1 past problemSlot, to ensure problem slot state root is available
outState, err := state.ProcessSlots(context.Background(), setup, problemSlot+1+uint64(i)) // keep increasing, to hit and extend the cache
require.NoError(t, err, "Could not process state transition")
roots := outState.StateRoots()
gotRoot := roots[problemSlot]
if i%2 == 0 {
if !bytes.Equal(gotRoot, expectedRoot1[:]) {
t.Errorf("unexpected root on chain 1, item %3d: %x", i, gotRoot)
}
} else {
if !bytes.Equal(gotRoot, expectedRoot2[:]) {
t.Errorf("unexpected root on chain 2, item %3d: %x", i, gotRoot)
}
}
wg.Done()
}
state.SkipSlotCache.Enable()
// now concurrently apply the blocks (alternating between states, and increasing skip slots)
for i, setup := range setups {
go step(i, setup)
}
// Wait for all transitions to finish
wg.Wait()
}

View File

@@ -64,7 +64,7 @@ func GenesisBeaconState(deposits []*ethpb.Deposit, genesisTime uint64, eth1Data
return nil, err
}
// Process initial deposits.
leaves := [][]byte{}
var leaves [][]byte
for _, deposit := range deposits {
if deposit == nil || deposit.Data == nil {
return nil, fmt.Errorf("nil deposit or deposit with nil data cannot be processed: %v", deposit)
@@ -77,12 +77,12 @@ func GenesisBeaconState(deposits []*ethpb.Deposit, genesisTime uint64, eth1Data
}
var trie *trieutil.SparseMerkleTrie
if len(leaves) > 0 {
trie, err = trieutil.GenerateTrieFromItems(leaves, int(params.BeaconConfig().DepositContractTreeDepth))
trie, err = trieutil.GenerateTrieFromItems(leaves, params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
} else {
trie, err = trieutil.NewTrie(int(params.BeaconConfig().DepositContractTreeDepth))
trie, err = trieutil.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
@@ -249,7 +249,7 @@ func EmptyGenesisState() (*stateTrie.BeaconState, error) {
// return True
// This method has been modified from the spec to allow whole states not to be saved
// but instead only cache the relevant information.
func IsValidGenesisState(chainStartDepositCount uint64, currentTime uint64) bool {
func IsValidGenesisState(chainStartDepositCount, currentTime uint64) bool {
if currentTime < params.BeaconConfig().MinGenesisTime {
return false
}

Some files were not shown because too many files have changed in this diff.