Compare commits

...

170 Commits

Author SHA1 Message Date
Raul Jordan
9b367b36fc add happy/sad tests (#8279) 2021-01-18 11:32:17 -08:00
pinglamb
09a792ded4 isCanonical for slot 0 should return true (#8269)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-01-16 16:14:14 +00:00
terence tsao
cf343be76a Add a helper to validate nil attestation (#8272)
* Add verify nil attestation function and apply all

* Remove invalid attestation debug log

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-01-15 22:19:17 +00:00
Raul Jordan
4c19e622cd Implement Migration Up/Down Logic for Validator DB (#8271)
* rollback logic

* implement up down logic

* begin down migration tests

* rollback works

* unset test

* remove iface

* gaz

* add comment

* fix ineff assign

* preston comment

* add progress
2021-01-15 15:35:21 -06:00
pinglamb
d7d2c6354b Blocks filtering should return genesis when startSlot=0 and endSlot=0 (#8270)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-01-15 18:36:17 +00:00
terence tsao
b6c4bc197f Add on_block finality test (#8266)
* Can generate at epoch boundary

* Remove extra space

* Add on block finality test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-15 13:42:49 +00:00
Nishant Das
ce397ce797 Prune Excess Peers Better (#8260)
* add method

* add changes

* formatting

* choose

* fix waitgroup

* add

* add debug logs

* gaz

* make it better

* fix

* godoc

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-14 21:28:20 +00:00
Radosław Kapka
2d75b12791 Implement GetIdentity in the node API (#8230)
* initial implementation

* register metadata provider

* final implementation

* tests

* fixed imports

* gazelle

* code review

* small cleanup

* change errors.Wrap to status.Errorf

* gazelle + goimports

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-14 20:48:40 +00:00
terence tsao
40155c9828 Testutil: Can generate attestation at epoch boundary (#8265) 2021-01-14 10:22:53 -08:00
Radosław Kapka
a2d4e3302c Implement GetPeer in the node API (#8264)
* tests

* capitalize error message
2021-01-14 10:26:16 -06:00
Nishant Das
612e6ebdc4 gogo protobuf (#8263) 2021-01-14 17:17:26 +08:00
Raul Jordan
fff6472a04 Allow Multiple Targets Per Source Epoch in Attester Protection (#8262) 2021-01-13 23:23:29 +00:00
terence tsao
daf6da5beb Add hydrate indexed att test helper (#8261)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-01-13 22:05:57 +00:00
pinglamb
9369bb6781 Copied over ssz.DeepEqual as sszutil.DeepEqual from go-ssz (#8258)
* Copied over ssz.DeepEqual as sszutil.DeepEqual from go-ssz

* Added test cases for DeepEqual

* Remove commented code
2021-01-13 20:40:56 +00:00
terence tsao
eeda9f18fe Disallow duties request where req.Epoch > current.Epoch+1 (#8252)
* Disallow request epoch to be out of bound

* Input Slot and epoch checks

* Tests

* Review feedbacks

* Unavailable error code

* Rename genesis time fetcher to time fetcher

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-12 23:27:37 +00:00
Preston Van Loon
e967a65b68 Update prometheus/client_golang (#8256) 2021-01-12 22:56:57 +00:00
Preston Van Loon
c87ef2f0e7 Coverage results upload fix for CI (#8255)
* Check in gocovmerge, add all-in-one coverage.sh script for CI

* annotate script

* Annotate tools/gocovmerge/main.go

* deepsource suggestions
2021-01-12 20:12:09 +00:00
pinglamb
1a9207ba46 Removed some simple go-ssz usage (#8250)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-01-12 19:31:34 +00:00
pinglamb
9f423617cb Added db.BlocksBySlot and db.BlockRootsBySlot (#8184)
* Added blockBySlot and blockRootBySlot

* Changed to BlocksBySlot and BlockRootsBySlot

* Updated to use BlocksBySlot and BlockRootsBySlot

* Added missing passthrough to kafka exporter

* Return hasBlocks/hasBlockRoots in the new getters

* Fixed CI lint

* Replace call to bytes.Compare with bytes.Equal

* Reordered the returns of the new getters

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-01-12 18:31:15 +00:00
Radosław Kapka
015102c2d5 Implement GetSyncStatus in the node API (#8241)
* Implement GetSyncStatus in the node API

* gazelle

* add HeadSlot function to fuzz tests' fakeChecker

* use HeadFetcher to get head slot

* remove useless code

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-12 17:17:20 +00:00
Radosław Kapka
aa69e5edcc Allow to create an empty imported wallet (#8251)
* reload keys into empty wallet

# Conflicts:
#	validator/accounts/accounts.go

* removed warning on wallet creation

* export AccountsKeystoreRepresentation type

* rename error message variable

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-12 16:52:01 +00:00
Radosław Kapka
5dda2ca328 Replace HTTP code with gRPC code in GetHealth API endpoint (#8249)
* Replace HTTP code with gRPC code in GetHealth API endpoint

* gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-12 16:20:20 +00:00
Raul Jordan
470d5aa491 Cleanup Attester DB Protection Files and Add Back Save Lowest Epochs Functionality (#8232)
* cleanup for att protection

* rename to deprecated

* rem old item

* imports
2021-01-12 09:32:13 -06:00
Raul Jordan
d2bd954a6c Remove Snappy from Validator Slashing Protection DB (#8248)
* no snappy

* Update validator/db/kv/migration_optimal_attester_protection.go

* fmt

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-12 01:10:11 +00:00
Ivan Martinez
e5556db49d Add DepositMessage in preparation to remove go-ssz (#8244)
* Add DepositSigningData

* gaz

* Add to ssz tests

* Rename to DepositMessage

* Remove deprecated comment

* Remove return

* Fixes from review

* Fixes

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-12 00:45:11 +00:00
Raul Jordan
d97596348e Add Back New Attester Protection DB Logic (#8242)
* Revert "Revert New Attester Protection DB Logic (#8237)"

This reverts commit 6738fa3493.

* Batch Attestation Records and Flush All at Once in Validator DB (#8243)

* begin flushing logic

* finalize logic before starting tests

* make code DRY

* better log fields

* gaz

* tweak parameter

* rename

* clarifying comment on error handling in event feed

* comprehensive tests

* more comments

* explain parameters in comments

* renamed consts

* Apply suggestions from code review

* gaz

* simplify

* typo

* comments
2021-01-11 23:59:17 +00:00
terence tsao
323eac6d6c Remove timeout debug log (#8247)
* Disallow request epoch to be out of bound

* Remove debug log

* Revert "Disallow request epoch to be out of bound"

This reverts commit d2dc7db594.
2021-01-11 22:23:13 +00:00
Radosław Kapka
5fd03f8fb0 Unify the pattern of using a package-level logger (#8245)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-01-11 20:03:28 +00:00
terence tsao
18bb86754a Hydrate signed block helper (#8246)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-01-11 19:27:30 +00:00
Preston Van Loon
97320a0a8e Remove snappy compression migration for attestation history (#8238)
* Remove snappy compression migration for attestation history

* gofmt

* remove more snappy stuff

* revert validator/db/kv/historical_attestations.go
2021-01-11 09:28:34 -08:00
terence tsao
9a1866b735 Hydrate header test helper (#8234)
* Hydrate headers and fix tests

* Gazelle

* Fix a slashing test

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-01-09 16:45:45 +00:00
Raul Jordan
6738fa3493 Revert New Attester Protection DB Logic (#8237)
* Revert "Optimize Migration for new Attester Protection DB (#8231)"

This reverts commit c4ab67832f.

* Revert "Integrate New Slashing Protection DB Methods at Runtime (#8219)"

This reverts commit 3858068201.

* Revert "DB Migration for Optimal Local Slashing Protection (#8212)"

This reverts commit dd3ac6c2ed.
2021-01-09 04:00:56 +00:00
terence tsao
35ed01e36c Add timely attest flag to fix #8185 (#8235) 2021-01-08 17:54:46 -08:00
Raul Jordan
c4ab67832f Optimize Migration for new Attester Protection DB (#8231)
* migrate using tx commit

* add improvement

* fixed up test

* rem fmt

* gaz

Co-authored-by: nisdas <nishdas93@gmail.com>
2021-01-08 17:31:54 +00:00
Nishant Das
0ff2a53b2f Add Peer Logger (#8226)
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2021-01-08 11:08:11 +00:00
Preston Van Loon
25bba9f43f Fuzzing: conditional BLS enabled via environment variable (#8229)
* Read whether or not to enable BLS via environment variable

* Read whether or not to enable BLS via environment variable
2021-01-08 05:31:24 +00:00
Raul Jordan
3858068201 Integrate New Slashing Protection DB Methods at Runtime (#8219)
* integrate at runtime and revamp tests

* historical att

* Update validator/db/iface/interface.go

* deepsource

* import

* log the slashing kind

* gaz

* create a slashutils

* integrate new slashutil

* imports
2021-01-07 23:30:25 +00:00
Raul Jordan
0d5e2cfb27 Prune Validator Attester Records Older than Weak Subjectivity Periods (#8221)
* pruning and begin test

* comprehensive pruning tests

* add pruning on startup

* also prune source epochs bucket

* more testing

* greatly simplify pruning function

* pruning logic and comprehensive tests in

* att protection test

* gaz

* fix sneaky change

* rev

* documented and tested helper func
2021-01-07 15:40:37 -06:00
terence tsao
bc650c82b4 Hydrate attestation for tests (#8228) 2021-01-07 21:00:21 +00:00
Nishant Das
a855f282c6 Fix to TLS v1.3 (#8222)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-07 19:56:51 +00:00
Raul Jordan
4253888a36 Update Bitfield Dependency (#8227)
* bitfield updates

* tidy

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-07 18:12:49 +00:00
Nishant Das
fb9f4e828d Update FastSSZ To Latest Commit (#8225)
* update fast-ssz

* fix

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-07 17:08:41 +00:00
terence tsao
9ff825a570 Update variable names that are same as imports (#8220)
* Fix names that are same as import name

* Review feedbacks
2021-01-07 10:42:03 -06:00
Victor Farazdagi
d20065218c Max-cover: remove redundant dedup routine (#8224)
* Attestation aggregration: remove redundant dedup routine

* fix func call

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-07 14:18:29 +00:00
Radosław Kapka
353c1f6387 Multiple discovery addresses (#8203)
* multiple discovery addresses

* ipv6 fix

* make len test more robust

* create enr node for testing

* use local node for test

* use mockListener

* remove unused type alias
2021-01-07 13:35:42 +00:00
pinglamb
1b6a0703e3 Show number of user pubkeys for attesting and proposing in "Next duty" log output (#8187)
* Show attesting and proposing counts in "Next duty" log output

* Use map to store all the counts instead of magic reset

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2021-01-07 10:22:38 +00:00
pinglamb
9135774720 Block Fetching with Graceful Retry (#8182)
* Added tooMuchDataRequestedError func

* Added multiplicative decrease and additive increase

* Code review changes

* Renaming the new constants

* Return a different error in graceful retry

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2021-01-07 06:21:44 +00:00
Potuz
e52c3d48cf Start from last valid root in loadStateBySlot (#8218)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-01-06 18:41:44 -06:00
terence tsao
ba9b563e6e Fix Attest early if valid block is received before 4 seconds (#8197)
* Better int -> string conversion

* First take

* More tests

* Gazelle

* Remove validator subpackage visibility

* Remove span

* Update validator/client/attest.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update validator/client/attest.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update stream blocks call to use verified only

* Rename to waitOneThirdOrValidBlock

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-06 21:10:26 +00:00
Raul Jordan
dd3ac6c2ed DB Migration for Optimal Local Slashing Protection (#8212)
* begin migration logic

* wrote migration logic

* begin test file

* test for migration working

* gaz

* progressutil

* migration works even if partial data was written
2021-01-06 20:41:31 +00:00
Radosław Kapka
9b3e1eb643 Implement GetHealth in the node API (#8217)
* Implement GetHealth in the node API

* repair fuzz mock

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-01-06 20:11:20 +00:00
Victor Farazdagi
da59fdd22b Attestation Aggregation: optimize attestation list validation (#8213)
* optimize candidate validation on att aggregation

* restore test

* fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-06 19:26:19 +00:00
Shay Zluf
f014374de2 Move attestation protection call after signing (#8216)
* Move validator protection after signing

* fix tests

* remove unused code
2021-01-06 11:41:00 -06:00
Radosław Kapka
392e61fbee Revert changes to sync service status check (#8215) 2021-01-06 11:19:53 +00:00
Radosław Kapka
7135a8542f Check if initial sync service has been initialized (#8214) 2021-01-06 10:45:22 +00:00
Raul Jordan
c354871762 Optimal Local Slashing Protection DB Schema (#8211)
* add new approach for slashing protection

* benches

* tests passing

* gaz

* all tests passing

* comment
2021-01-06 03:04:46 +00:00
Radosław Kapka
bc2cd29d4b Implement GetVersion in the node API (#8207)
* initial implementation

* implement GetVersion

* remove implementation of GetIdentity

* remove MetadataProvider from server

* gzl
2021-01-05 21:06:51 +00:00
terence tsao
023e258f6a Stream verified block (#8206) 2021-01-05 12:40:11 -08:00
Victor Farazdagi
9d737d60f4 Declare err in loop to limit its scope (#8200)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-05 13:55:23 +00:00
Victor Farazdagi
1abe92fd8b Remove redundant parentheses around nil values (#8199)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-05 13:29:46 +00:00
Victor Farazdagi
e5c69bd387 Add space at the start of comments (#8202)
* add space at the start of comments

* undo case change
2021-01-05 13:09:41 +00:00
Victor Farazdagi
318f83957a Fix data races in tests accessing beacon config concurrently (#8190)
* add mutex to params/config

* split config files into test/prod

* add tags checker

* add regression test

* remove debug info

* update bazel config

* go fmt

* make sure that conditional file is kept by gazelle

* update build tag: test -> develop

* gazelle

* remove redundant import

* fix data race in TestService_ReceiveBlock
2021-01-05 04:51:25 +00:00
Raul Jordan
f67f8dd6df Configurable Interval for Debouncing Keystore Reloading in Validator Client (#8149)
* configurable keystores debounce interval

* use a time duration flag instead

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-05 00:52:12 +00:00
Jeff Widmer
0e5da504f4 Fix for prysm.bat issue 'Validator' is not recognized as an internal or external command (#8097)
Fix for the issue where, if the beacon, validator, or slasher is already up to date, the following message appears:
"'Validator/Beacon/Slasher' is not recognized as an internal or external command, operable program or batch file."
The cause was that the message "Validator/Beacon/Slasher is up to date." was not echoed to the console but was instead executed as a command.

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-01-04 23:27:38 +00:00
Cipio
f6af79f415 Fix file permission checks for Windows (#8164)
* Does bitwise compare for MKDIR

* Implemented a more global solution to windows permission issues

* Remove unneeded line I added earlier.

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-01-04 22:48:42 +00:00
Radosław Kapka
af2c36ec40 Add names to certain return values in accounts (#8159)
* add names to certain return values

* remove redundant

* rename selected public keys

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-04 21:44:15 +00:00
terence tsao
5dc8eb45d3 Add slot in epoch field for "Attestation schedule..." log (#8166)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-04 21:10:32 +00:00
Victor Farazdagi
b54743edbf Add mutex to params/config (#8160)
* add mutex to params/config

* split config files into test/prod

* add tags checker

* add regression test

* remove debug info

* update bazel config

* go fmt

* make sure that conditional file is kept by gazelle

* update build tag: test -> develop

* gazelle

* remove redundant import

* update deps.md (per Nishant's suggestion)

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-04 20:48:39 +00:00
Victor Farazdagi
04b2e0776d Fix import shadowing of state package (#8191)
* update shadowed var name

* update var names

* remove unnecessary delta

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-04 20:07:12 +00:00
Shay Zluf
70da296a3b Bring back disable slashing broadcast flag (#8141)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-04 19:36:17 +00:00
Mohamed Mansour
2defff0886 Add new GetVersion API to Health Service (#8167)
* Add new GetVersion API to Health Service

This is to support showing version information in the web ui.
Since health.go is built through validator bazel, we can use
`shared.GetVersion` directly.

Backend for: prysmaticlabs/prysm-web-ui#107

* Run goimports and update-go-pbs

* go mod tidy

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-01-04 18:51:52 +00:00
Victor Farazdagi
ee8aacbbbf Remove redundant lambda around cleanup code (#8192)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-04 18:16:18 +00:00
terence tsao
4055841952 Update int -> string conversion to make go test happy (#8183)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-04 17:53:59 +00:00
terence tsao
bf673ecb12 Validator nil duty should not panic (#8171)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-01-04 17:28:00 +00:00
Victor Farazdagi
7c25d5c852 Compare strings using strings.EqualFold() (#8193) 2021-01-04 16:46:22 +00:00
Raul Jordan
4c6e0c5f46 Update Prysm Web UI to Beta.3 (#8163)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-18 23:25:48 +00:00
Raul Jordan
768994550c Fix Unknown Validator Edge Case in ListValidatorBalances (#8162)
* ensures unknown validators do not mess up rest of api response

* rem old test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-18 22:51:44 +00:00
Victor Farazdagi
f038d782c2 Fix issue with custom chain/network ID configuration (#8147)
* move chain/network id to beacon config

* go fmt

* improve tests

* validator params

* update deposit contract address

* complete tests

* re-arrange comments

* Less mis-leading comment, per Terence's review

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-18 22:22:48 +00:00
terence tsao
ff64fdcfb5 Flag to enable duty count down (#8161)
* Add a flag to enable count down

* Add tests
2020-12-18 21:12:57 +00:00
Victor Farazdagi
d8c31b79df Fix import shadowing of db package (#8158)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-18 19:12:30 +00:00
Nishant Das
ea88799585 Revert Inbound Peer Limit (#8155)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-18 18:33:31 +00:00
Raul Jordan
72dc43989f Stream Validator and Beacon Logs via gRPC Streams (#8150)
* implement validator logs stream

* fix test

* tidy

* proto regen

* add logs stream to the beacon node

* beacon logs working

* impl

* pass test

* gaz

* rem lock

* fix space
2020-12-18 18:03:24 +00:00
Victor Farazdagi
e772e8c8c2 Init sync: minor style fixes (#8156)
* simplify FSM calls

* improve calculateHeadAndTargetEpochs API

* simplify

* simplify nil assignment
2020-12-18 14:31:15 +00:00
terence tsao
df93affb4e Move hot state cache to stategen (#8153)
* Move hot state cache to stategen

* Fix build.bazel

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-17 20:40:47 +00:00
Raul Jordan
d19c57cdb6 Remove Logout from Validator RPC Authenticated Paths (#8151)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-17 19:46:27 +00:00
terence tsao
756ccbe5e4 Update Attestation schedule... log for clarity (#8148)
* Don't use fraction, use different fields

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Go fmt

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-17 19:12:41 +00:00
Nishant Das
46c67f1e9e Fallback To Historical Sync For Powchain (#8146)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-17 18:49:22 +00:00
Nishant Das
44c3adb367 Add Public Method To Retrieve Discovery Address (#8143)
* add method

* fix

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-17 18:03:18 +00:00
Radosław Kapka
25b151ab78 Make TLS mandatory by default when unmarshalling remote wallet options (#8133)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2020-12-17 16:59:27 +00:00
Raul Jordan
f75b8a3be1 Implement Unified Validator API Endpoints for Beacon Chain Information (#8139)
* define all endpoints

* wrapper

* implement required validator endpoints

* begin impl

* implement remaining endpoint

* imports

* add in list validators

* intercepter

* test added
2020-12-17 16:26:32 +00:00
Nishant Das
dfdf77cb95 Add Instead of Subtract in Epoch Boundary Check (#8145)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-12-17 14:31:05 +00:00
Radosław Kapka
d5bf8376c2 Clean up account logs and errors (#8142)
* small fixes in logs and errors

* fix error message in test

* found a period hiding in an error message

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-17 13:37:06 +00:00
Nishant Das
e2d7ec6f97 Refactor Method Signatures For Powchain (#8110)
* checkpoint

* fix tests

* fix visibility

* fix

* victor's review

* remove redundant LF

* remove redundant LF

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-12-17 12:33:34 +00:00
Raul Jordan
d650034734 Lock Only When Needed When Fetching Attesting History (#8140)
* lock when needed in attesting history

* rw
2020-12-17 00:18:38 +00:00
Preston Van Loon
a7cf77fc26 Update rules docker to include https://github.com/bazelbuild/rules_docker/pull/1666, which updates the base images (#8136)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-16 14:56:38 -06:00
terence tsao
0dcbf177aa Fix participation query returning balances of orphaned chain (#8137)
* Check if block is canonical

* Add tests and better comments

* Grammar

* Update test for nil state
2020-12-16 13:31:34 -06:00
Nishant Das
82bba593eb Do Not Verify Attestations When Packing Them (#8135)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-16 18:06:49 +00:00
Raul Jordan
148e7fcd59 Consolidate Required Web UI Endpoints Into Single Protobuf File (#8127)
* define required endpoints for unifying backends

* proto definitions

* impl

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-16 17:30:48 +00:00
terence tsao
20dede7532 Move state summary cache to DB (#8101) 2020-12-16 08:56:21 -08:00
Preston Van Loon
3fb49433a1 Limit prometheus requests in flight, set timeout to reasonable 30 seconds (#8130) 2020-12-16 04:57:02 +00:00
Preston Van Loon
4326cbbf08 Validator database: use snappy compression on encoded attestation history (#8129)
* Remove old buckets, rename new buckets to be canonical

* Add migration and test

* Do the migrations

* Gazelle

* Clarify code, code review

* gofmt
2020-12-16 03:33:04 +00:00
terence tsao
dc27cd7a1e Pass context to NewKVStore (#8125)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-15 22:07:01 +00:00
terence tsao
0449cd3450 Allow update cache at last slot of the epoch (#8094)
* Epoch boundary updates at the last slot of the epoch

* Report metric on first slot

* Remove comment

* Add locks to param config

* Remove lock for copy

* Revert "Add locks to param config"

This reverts commit 79d5130b58.

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-15 21:19:02 +00:00
terence tsao
6244163770 FC test coverage improvement (#8120)
* Test coverage improvements

* Gazelle

* Update namings

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-15 20:44:35 +00:00
Victor Farazdagi
f5c87075f2 Fix issue with roaming data dir on Windows (#8095)
* patch beacon node

* make sure that check is executed before anything else

* fix format

* make sure that check is reusable by other executables

* debug info

* fix typo

* more debug info

* use copydir

* cleanup

* better explanation

* gazelle

* go fmt

* debug tos permissions

* update copydir

* gazelle

* better check of tos acceptance

* expand path

* update validator

* move fixing func

* move fixing method

* make sure that updater works both on main and subcommands

* remove from startnode

* add copydir test

* add DirFiles method and tests

* fix test

* add and test HashDir

* update tests

* fix test

* add datadir removal

* update messages

* further update messages
2020-12-15 14:01:51 -06:00
Preston Van Loon
ad7d3c74cc Validator DB cleanup: remove obsolete buckets (#8122) 2020-12-15 18:24:56 +00:00
Nishant Das
508c5fcf2f More Efficient Validation of Proposer Index (#8107)
* metric

* make it better

* make it better

* gaz

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-15 16:27:05 +00:00
Victor Farazdagi
a0c475671c Add extra methods to shared/fileutil (#8117)
* add extra methods to fileutil

* Shay's suggestion

* Update shared/fileutil/fileutil.go

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* Adds reference to the original implementation

Co-authored-by: Shay Zluf <thezluf@gmail.com>
2020-12-15 14:18:15 +00:00
Preston Van Loon
72a92fe708 CI: Disable remote caching of GoStdLib (#8111)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-15 05:58:52 +00:00
terence tsao
6a5589f99e Skip proposer indices cache update if exists (#8096)
* Check if cache is empty before update

* Add tests

* Fix tests

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-15 00:09:30 +00:00
terence tsao
70c0bb106b Beacon node code health improvements (#8109)
* Apply code health fixes after code inspect

* Remove attestationRoot

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-14 23:41:24 +00:00
Victor Farazdagi
0f18867f08 Remove duplicate package imports (#8104)
* remove duplicate imports

* remove unused lookupLimit

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-14 22:22:55 +00:00
Cipio
630d57377a Fix Slasher Backup DB panic on call. (#8099) 2020-12-14 21:39:26 +00:00
terence tsao
3e9d721280 Validator code health improvements (#8106) 2020-12-14 21:10:02 +00:00
Steven Allen
2428880058 Update go-libp2p to 0.12.0 (#8015)
* Update go-libp2p to 0.12.0

go-libp2p 0.12.0 made some significant changes to the stream interfaces around
stream closing:

* Close now closes in both directions and frees the stream. However, unlike
FullClose did, it doesn't _wait_ for the remote peer to respond with an EOF.
* To close for writing, call CloseWrite (like one would on a TCP connection, etc.).

This patch:

* Replaces calls to FullClose with Close where appropriate.
* Replaces calls to Close with CloseWrite where appropriate.
* Removes redundant Close calls.
* Calls Reset to where appropriate to indicate that the request/response was
  aborted. Unlike Close, this will not flush and will not cause the remote peer
  to read an EOF. Instead, the remote peer will read an ErrReset error.
* Ensures we always either close or reset streams. Send wasn't closing the
  stream on some error paths.
* Now that stream closing is async, we explicitly wait for a response when
  "hanging up" on a peer (so we don't hang up before they receive our
  response/goodbye message).
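
For illustration, a minimal sketch of the request/response pattern these semantics imply (the helper name, package, and go-libp2p-core import path are assumptions for this example, not Prysm's actual code):

```go
package example

import (
	"io/ioutil"

	"github.com/libp2p/go-libp2p-core/network"
)

// sendRequest shows the go-libp2p 0.12.0 close semantics: CloseWrite half-closes
// our side once the request is written, the response is read explicitly because
// Close no longer waits for the remote EOF, and Reset aborts the stream on error.
func sendRequest(s network.Stream, req []byte) ([]byte, error) {
	if _, err := s.Write(req); err != nil {
		_ = s.Reset() // abort: the peer reads ErrReset instead of a clean EOF
		return nil, err
	}
	if err := s.CloseWrite(); err != nil { // done writing; the peer can still respond
		_ = s.Reset()
		return nil, err
	}
	resp, err := ioutil.ReadAll(s) // wait for the response/goodbye before hanging up
	if err != nil {
		_ = s.Reset()
		return nil, err
	}
	return resp, s.Close() // closes both directions and frees the stream
}
```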

* update bazel

* Gazelle

* revert unintentional bazel workspace change

* appease an overzealous linter

* update to latest

* Refactor encoder

* gazelle

* Gazelle

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-14 17:22:25 +00:00
Potuz
4d1f01aacc Change block_arrival_latency buckets to exponential format (#8065)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-12-14 16:00:07 +00:00
Victor Farazdagi
b9848dc94f Remove unused vars in tests (#8103)
* update process_block_test

* service_attester_test

* update service and rpc_status tests

* go fmt
2020-12-13 05:23:13 +00:00
Nishant Das
579335f81a Remove Saving Of Target And Source Epoch (#8102)
* remove

* skip
2020-12-13 04:26:18 +00:00
Nishant Das
11bbf06d03 Add Inbound Peer Limit (#7942)
* add changes

* fix up

* fix

* add test

* fix test

* fix again

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-12-12 03:46:55 +00:00
Nishant Das
29804fa572 Refactor Subnet Search (#8048)
* checkpoint progress

* checkpoint

* clean up

* do better

* fix test

* fix

* fix

* preston's review

* fix

* fix

* fix

* add iterator

* go doc

* make it a config parameter

* Apply suggestions from code review

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-12-11 18:40:56 -08:00
Radosław Kapka
4ec396c025 Refetch validating keys if no keys are fetched (#8000)
* refetch validating keys every 30 seconds

* deduplicate error/log messages

* remove redundant break statement

* comment about execution flow

* move code to wait_for_activation.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-12-11 20:55:52 +01:00
Raul Jordan
1fbfd52e52 Simpler and Safer Attester Slashing Protection (#8086)
* att sign validations

* eliminate old cached methods and use a simple approach in the db

* redefined db methods

* db package builds

* add multilock to attest and propose

* gaz

* removed concurrency tests that are no longer relevant

* add cache to db functions for attesting history checks

* passing

* add in feature flag --disable-attesting-history-db-cache

* remove lock

* Revert "remove lock"

This reverts commit b1a65020e4.

* comment

* gaz
2020-12-11 12:31:35 -06:00
Mohamed Mansour
2e18df642d Expand Validator Input Errors (#8090)
The reason why a validator input was failing was very generic;
this change allows the actual implementation to state why it failed
by passing the exception forward.

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-11 10:57:47 -06:00
Nishant Das
99b3835f19 Add Fallback Option for Eth1 Nodes (#8062)
* fix

* fix tests and change back

* gaz

* change

* ready again

* Update beacon-chain/flags/base.go

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* radek's review

* Update shared/cmd/helpers.go

* Update shared/cmd/helpers_test.go

* Update shared/cmd/helpers_test.go

* Endpoint/endpoint

Co-authored-by: Shay Zluf <thezluf@gmail.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2020-12-11 19:15:04 +08:00
Victor Farazdagi
46d99fdc00 Attestation aggregation: remove proper set duplicates (#8063)
* fix proper-set issue

* move test into dedicated test file

* move functionality from maxcover -> proposer

* adds dedup to proposer

* fix tests

* update tests

* remove redundant proper subset test

* fix bug with identical expression
2020-12-11 11:13:14 +03:00
Preston Van Loon
923e4d3a5e Attempt to reconnect when waiting for activation (#8057)
* Attempt to reconnect when waiting for activation

* rm fuzz/attestation_fuzz.go

* math.Min, not math.Max

* Gofmt

* resolve RVV-B0003
2020-12-10 19:26:31 -06:00
terence tsao
bb9e2ba12c Validator client logs time left to next duty (#8088) 2020-12-10 10:56:18 -08:00
Preston Van Loon
f44b2a35e4 Update waiting validator methods to be context aware (#8078)
* Update waiting validator methods to be context aware

* Enable debug logging for validator in e2e

* invert logic in deferred function, remove for loop

* return early if wait is 0 or less

* overwrite ctx

* t.Stop() chan closure is hanging. Doesn't make sense, so ignoring that cleanup task. It shouldn't happen at runtime anyway

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-12-10 10:00:48 -06:00
terence tsao
00dacbd00d Remove custom block body root and block root methods (#8069)
* Remove custom block body root and block root methods

* Add nil checks and fix tests

* Fmt

* Typo
2020-12-09 12:11:42 -06:00
Radosław Kapka
92736d0188 warn when creating an imported wallet (#8081) 2020-12-09 16:41:26 +01:00
Preston Van Loon
c96db1a122 Add spans to pre and post att signing updates (#8079)
* Add spans to pre and post att signing updates

* span on db method

* Add span to isNewAttSlashable
2020-12-08 23:47:02 -06:00
terence tsao
c5770a2e56 Add beacon block nil body checks (#8077)
* Add nil checks and fix tests

* Gaz

* Revert one minor typo

* Update test

* Remove extra space
2020-12-08 16:27:58 -06:00
terence tsao
ade3b2f2df Add state field count to config (#8068)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-07 21:49:28 -06:00
Radosław Kapka
9d7052796b Restore database CLI command (#8061)
* restore beacon node db

* revert image name

* move restore out of the kv folder

* remove files from kv folder

* go mod tidy

* Remove usage of prometheus testutil

* add yes/no to prompt text

* restore slasher db

* organize imports

* go mod tidy

* restore validator db

* close slasher db

* defer close backup db in tests

* simplify function literal
2020-12-07 21:36:43 +01:00
Shay Zluf
fbbdd94fea Resolve panic on shutdown with offline eth1 node (#8033)
* Resolve panic on shutdown with offline eth1 node

* Move fix tne right place nishant feedback

* fix log message

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2020-12-07 10:16:23 +00:00
Victor Farazdagi
b51aec6981 Init sync: minor fixes (#8060)
* fixes typo

* simplify code
2020-12-07 09:33:06 +00:00
terence tsao
b4437e6cec Load graffiti from file (#8041)
* Pass graffiti file and use it

* Visibility

* Parse test

* More proposal tests

* Gazelle

* Add sequential functionality

* fix length check

* Update priorities. Specified -> random -> default

* Log warn instead return err

* Comment

* E2e test

* Comment

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-12-04 23:15:12 +00:00
Arthur Burkart
8ad328d9b3 fix(grpcHeaders): accept values with "=" symbols (#8047)
* fix(grpcHeaders): accept values with equal signs

# What

Before this commit, it was not possible to pass in base64-encoded
content as a header value if it contained an equals sign. This commit
changes the behavior slightly. Rather than ignore key/value pairs where
the value happens to have an equals sign, we assume the first equals
sign delimits the key from the value and pass in the rest of the value
as-is.

Also, instead of printing the header name along with its value, we
print only the name, so there is less risk of leaking information into
logs that shouldn't be there.

# Testing

This has not been tested in the context of the full validator client.
Instead, a small example was made to demonstrate the feasibility. The
example is shown here:

```go
package main

import (
	"log"
	"os"
	"strings"

	"github.com/davecgh/go-spew/spew"
	"github.com/urfave/cli/v2"
)

type Config struct {
	GrpcHeadersFlag string
}

func main() {
	app := &cli.App{
		Action: func(c *cli.Context) error {
			for _, hdr := range strings.Split(c.String("grpc-headers"), ",") {
				if hdr != "" {
					ss := strings.Split(hdr, "=")
					spew.Dump(ss[0])
					spew.Dump(strings.Join(ss[1:], "="))
				}
			}
			return nil
		},
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name: "grpc-headers",
				Usage: "A comma-separated list of key value pairs to pass as gRPC headers for all gRPC " +
					"calls. Example: --grpc-headers=key=value",
			},
		},
	}

	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}
```

Example invocation:

```command
❯ go run main.go --grpc-headers=key=value,Authorization="Basic $(echo -n hello:world | base64)"
(string) (len=3) "key"
(string) (len=5) "value"
(string) (len=13) "Authorization"
(string) (len=22) "Basic aGVsbG86d29ybGQ="
```

* Adds tests to new gRPC header parsing code

* bazel run //:gazelle
2020-12-04 21:31:15 +00:00
Victor Farazdagi
21ede7634e Attestation aggregation: maxcover vs naive aggregation effectiveness test (#8043)
* maxcover performance test

* gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-04 17:24:57 +00:00
Victor Farazdagi
57b74283d3 Attestation aggregation: removes redundant code from max-cover benchmarks (#8044)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-04 16:51:31 +00:00
Victor Farazdagi
be078d6a16 Update TestMain(): do not call os.Exit() explicitly (#8046)
* update workspace

* update testmain
2020-12-04 16:10:07 +00:00
Victor Farazdagi
ccba8cfa5a Update rules_go to v0.24.9 and golang to v1.15.6 (#8045) 2020-12-04 15:28:03 +00:00
Victor Farazdagi
afbfaedea4 Unskip fixed init-sync service test (#8042) 2020-12-04 11:57:46 +00:00
terence tsao
3ce96701de Update attestation schedule log with total attester count (#8013)
* Log attesting total

* Use the right library

* Go fmt

* Use fmt

* No space is better

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-04 07:02:58 +00:00
simonatsn
d2ba45aad9 Update blst to v0.3.1 and incorporate subgroup changes (#7971)
* Update blst to v0.3.1 and incorporate subgroup changes

* go mod tidy

* gofmt

* Update bzl blst dependency

* Remove unnecessary check for nil

* Run bazel run //:gazelle -- fix

* Update blst to v0.3.2

* fix sha

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-04 06:46:08 +00:00
Raul Jordan
14e1f08208 Add Backup Webhooks to All Prysm Services With DBs (#8025)
* integrate backup webhooks

* pass slasher tests

* fix node

* cmd

* gaz

* read test passes

* radek feedback

* added comment

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-12-03 22:28:57 +00:00
Victor Farazdagi
647b4cf108 Round robin: half open interval in syncToFinalizedEpoch (#8039) 2020-12-03 21:55:42 +00:00
terence tsao
c090c6a1c5 Revert "Logging with PadLevelText to true" (#8036)
This reverts commit c51754fa8a.

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 20:38:00 +00:00
Victor Farazdagi
7dd0c24fea Skip bogus init-sync test (#8038) 2020-12-03 20:20:04 +00:00
Victor Farazdagi
e1755b6066 Invert enable-sync-backtracking (#8034)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 15:43:26 +00:00
Victor Farazdagi
3092f75ec2 Init sync: conditional syncing to finalized slot (#7999)
* extract sync methods

* add unit test

* gazelle

* Update beacon-chain/sync/initial-sync/round_robin.go

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* better set back step

Co-authored-by: Shay Zluf <thezluf@gmail.com>
2020-12-03 14:10:51 +00:00
Shay Zluf
821620c520 Add test for local protection genesis attestation (#7977)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 01:02:51 +00:00
Victor Farazdagi
5417e8cf31 Init-sync: enable peer scorer by default (#7974)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 00:45:20 +00:00
Radosław Kapka
323769bf1a Make TLS connections to a remote wallet non-mandatory (#7953)
* disable-remote-signer-tls flag

* use flag in edit-config

* send requests without TLS

* change warning message

* fix account list output test

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 00:18:15 +00:00
terence tsao
c51754fa8a Logging with PadLevelText to true (#8003)
* Use logrus formatter with pad

* Validator

* Fmt

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-02 23:49:13 +00:00
terence tsao
2153a2d7c3 Remove logging deposit inclusion slot (#8023)
* Remove logging deposit inclusion slot

* Remove old tests

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-02 21:17:47 +00:00
Nishant Das
20514cd97f Fix Interop Mode (#7978)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-12-02 20:29:36 +00:00
Victor Farazdagi
32f6bfd0a5 update deprecated multiaddr package (#8022) 2020-12-02 11:21:43 -08:00
Victor Farazdagi
c7f7a29d7e remove redundant start slot (#7991)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-02 20:47:39 +03:00
Nishant Das
387f7b28c1 Add Buffer For Inbound Peers (#8018)
* allow inbound peers

* comment

* gaz

* gaz

* Update deps.bzl

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* add test

* re-arrange imports

* fix up

* fix tests

* gaz

* Update beacon-chain/p2p/service_test.go

* fmt

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Shay Zluf <thezluf@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-02 10:45:28 -06:00
terence tsao
cf3181e2de Update README to include branching strategy (#8006)
* Update README.md

* Update README.md

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-12-02 09:48:52 -06:00
Mohamed Mansour
9d2fe80140 Fix missing space in error message in account list (#8009)
When validating the account list, there was a missing space. This fixes that nitpick.

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-12-02 16:02:59 +03:00
Shay Zluf
f9c696ed54 Pending block queue lock fixes (#8002)
* Unlock in case of error

* fix comment

* fix comment

* revert one defer

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2020-12-02 11:50:37 +02:00
Raul Jordan
3bd5e58a5c Merge branch 'master' into develop 2020-12-01 08:06:16 -06:00
Potuz
9a1423d62d Log with field error instead of err (#7998) 2020-12-01 05:38:42 -08:00
Nishant Das
a13de7da11 Remove Exclusion List (#7992)
* remove

* gaz

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-01 02:17:29 +00:00
Raul Jordan
01bf97293f Fix Miscellaneous Deep Source Issues (#8007)
* deep source fixes

* sh fixes

* fix import

* test err

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-11-30 19:55:30 -06:00
terence tsao
645931802f Improve genesis event log and load blocks to fork choice log (#7946)
* Update a few logs

* Go fmt

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-01 00:06:16 +00:00
terence tsao
ea10784a4a Validator logging: return early if no att included (#7979)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-30 23:28:12 +00:00
terence tsao
3af7809964 Save cached state summaries on Stop() (#7988)
* Save cached state summaries on Stop()

* Fixed test and added one more test
2020-11-30 17:08:23 -06:00
616 changed files with 18594 additions and 9037 deletions

View File

@@ -237,3 +237,6 @@ build:remote --remote_timeout=3600
build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'

.gitignore vendored
View File

@@ -33,3 +33,6 @@ dist
# libfuzzer
oom-*
crash-*
# deepsource cli
bin

View File

@@ -67,3 +67,14 @@ bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_
The deps.bzl file should have been updated with the dependency and any transitive dependencies.
Do NOT add new `go_repository` to the WORKSPACE file. All dependencies should live in deps.bzl.
## Running tests
To enable conditional compilation and custom configuration for tests (where compiled code has more
debug info, while not being completely optimized), we rely on Go's build tags/constraints mechanism
(see official docs on [build constraints](https://golang.org/pkg/go/build/#hdr-Build_Constraints)).
Therefore, whenever using `go test`, do not forget to pass in extra build tag, eg:
```bash
go test ./beacon-chain/sync/initial-sync -tags develop
```

View File

@@ -15,8 +15,15 @@ A detailed set of installation and usage instructions as well as breakdowns of e
To participate in staking, you can join the [official eth2 launchpad](https://launchpad.ethereum.org). The launchpad is the only recommended way to become a validator on mainnet. You can visualize the nodes in the network on [eth2stats.io](https://eth2stats.io), explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [beaconscan](https://beaconscan.com).
## Contributing
### Branches
Prysm maintains two permanent branches:
* master: This points to the latest stable release. It is ideal for most users.
* develop: This is used for development, it contains the latest PRs. Developers should base their PRs on this branch.
### Guide
Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/) to learn more!
## License

View File

@@ -76,9 +76,9 @@ http_archive(
http_archive(
name = "io_bazel_rules_docker",
sha256 = "1698624e878b0607052ae6131aa216d45ebb63871ec497f26c67455b34119c80",
strip_prefix = "rules_docker-0.15.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.15.0/rules_docker-v0.15.0.tar.gz"],
sha256 = "1286175a94c0b1335efe1d75d22ea06e89742557d3fac2a0366f242a6eac6f5a",
strip_prefix = "rules_docker-ba4310833230294fa69b7d6ea1787ac684631a7d",
urls = ["https://github.com/bazelbuild/rules_docker/archive/ba4310833230294fa69b7d6ea1787ac684631a7d.tar.gz"],
)
http_archive(
@@ -89,10 +89,10 @@ http_archive(
# nogo check fails for certain third_party dependencies.
"//third_party:io_bazel_rules_go.patch",
],
sha256 = "207fad3e6689135c5d8713e5a17ba9d1290238f47b9ba545b63d9303406209c6",
sha256 = "81eff5df9077783b18e93d0c7ff990d8ad7a3b8b3ca5b785e1c483aacdb342d7",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.7/rules_go-v0.24.7.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.24.7/rules_go-v0.24.7.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.9/rules_go-v0.24.9.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.24.9/rules_go-v0.24.9.tar.gz",
],
)
@@ -100,14 +100,14 @@ http_archive(
# https://github.com/gogo/protobuf/pull/582 is merged.
git_repository(
name = "com_github_gogo_protobuf",
commit = "5628607bb4c51c3157aacc3a50f0ab707582b805",
commit = "b03c65ea87cdc3521ede29f62fe3ce239267c1bc",
patch_args = ["-p1"],
patches = [
"@io_bazel_rules_go//third_party:com_github_gogo_protobuf-gazelle.patch",
"//third_party:com_github_gogo_protobuf-equal.patch",
],
remote = "https://github.com/gogo/protobuf",
shallow_since = "1571033717 +0200",
shallow_since = "1610265707 +0000",
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
@@ -156,7 +156,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.15.5",
go_version = "1.15.6",
nogo = "@//:nogo",
)
@@ -352,9 +352,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "117f5366af9cf009354ed1abe02f906168158473461d69c8056984b9b0292619",
sha256 = "edb80f3a695d84f6000f0e05abf7a4bbf207c03abb91219780ec97e7d6ad21c8",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.2/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.3/prysm-web-ui.tar.gz",
],
)

View File

@@ -8,17 +8,20 @@ load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"main.go",
"usage.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/db:go_default_library",
"//beacon-chain/flags:go_default_library",
"//beacon-chain/node:go_default_library",
"//shared/cmd:go_default_library",
"//shared/debug:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/fileutil:go_default_library",
"//shared/journald:go_default_library",
"//shared/logutil:go_default_library",
"//shared/maxprocs:go_default_library",

View File

@@ -79,6 +79,7 @@ go_test(
srcs = [
"blockchain_test.go",
"chain_info_test.go",
"checktags_test.go",
"head_test.go",
"info_test.go",
"metrics_test.go",
@@ -90,6 +91,7 @@ go_test(
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
gotags = ["develop"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
@@ -126,6 +128,7 @@ go_test(
name = "go_raceon_test",
srcs = [
"chain_info_norace_test.go",
"checktags_test.go",
"receive_block_test.go",
"service_norace_test.go",
],
@@ -137,6 +140,7 @@ go_test(
# See: https://github.com/etcd-io/bbolt/issues/187.
"-d=checkptr=0",
],
gotags = ["develop"],
race = "on",
tags = ["race_on"],
deps = [

View File

@@ -2,18 +2,14 @@ package blockchain
import (
"io/ioutil"
"os"
"testing"
"github.com/sirupsen/logrus"
)
func TestMain(m *testing.M) {
run := func() int {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
return m.Run()
}
os.Exit(run())
m.Run()
}

View File

@@ -11,9 +11,9 @@ import (
)
func TestHeadSlot_DataRace(t *testing.T) {
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
beaconDB: beaconDB,
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
@@ -22,11 +22,11 @@ func TestHeadSlot_DataRace(t *testing.T) {
}
func TestHeadRoot_DataRace(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
beaconDB: beaconDB,
head: &head{root: [32]byte{'A'}},
stateGen: stategen.New(db, sc),
stateGen: stategen.New(beaconDB),
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
@@ -36,11 +36,11 @@ func TestHeadRoot_DataRace(t *testing.T) {
}
func TestHeadBlock_DataRace(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
beaconDB: beaconDB,
head: &head{block: &ethpb.SignedBeaconBlock{}},
stateGen: stategen.New(db, sc),
stateGen: stategen.New(beaconDB),
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
@@ -50,10 +50,10 @@ func TestHeadBlock_DataRace(t *testing.T) {
}
func TestHeadState_DataRace(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
stateGen: stategen.New(db, sc),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB),
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))

View File

@@ -25,44 +25,44 @@ var _ TimeFetcher = (*Service)(nil)
var _ ForkFetcher = (*Service)(nil)
func TestFinalizedCheckpt_Nil(t *testing.T) {
db, sc := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], c.FinalizedCheckpt().Root, "Incorrect pre chain start value")
}
func TestHeadRoot_Nil(t *testing.T) {
db, sc := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
headRoot, err := c.HeadRoot(context.Background())
require.NoError(t, err)
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
}
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, db, sc)
c := setupBeaconChain(t, beaconDB)
c.finalizedCheckpt = cp
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
}
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, db, sc)
c := setupBeaconChain(t, beaconDB)
c.finalizedCheckpt = cp
c.genesisRoot = genesisRoot
assert.DeepEqual(t, c.genesisRoot[:], c.FinalizedCheckpt().Root)
}
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
c.justifiedCheckpt = cp
@@ -70,9 +70,9 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
}
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
c := setupBeaconChain(t, beaconDB)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c.justifiedCheckpt = cp
@@ -81,21 +81,21 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
}
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cp := &ethpb.Checkpoint{Epoch: 7, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, db, sc)
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
c.prevJustifiedCheckpt = cp
assert.Equal(t, cp.Epoch, c.PreviousJustifiedCheckpt().Epoch, "Unexpected previous justified epoch")
}
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
genesisRoot := [32]byte{'C'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, db, sc)
c := setupBeaconChain(t, beaconDB)
c.prevJustifiedCheckpt = cp
c.genesisRoot = genesisRoot
assert.DeepEqual(t, c.genesisRoot[:], c.PreviousJustifiedCheckpt().Root)
@@ -118,15 +118,15 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
}
func TestHeadRoot_UseDB(t *testing.T) {
db, _ := testDB.SetupDB(t)
c := &Service{beaconDB: db}
beaconDB := testDB.SetupDB(t)
c := &Service{beaconDB: beaconDB}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := testutil.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), b))
require.NoError(t, db.SaveStateSummary(context.Background(), &pb.StateSummary{Root: br[:]}))
require.NoError(t, db.SaveHeadBlockRoot(context.Background(), br))
require.NoError(t, beaconDB.SaveBlock(context.Background(), b))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, br, bytesutil.ToBytes32(r))
@@ -195,8 +195,8 @@ func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
}
func TestHeadETH1Data_Nil(t *testing.T) {
db, sc := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
assert.DeepEqual(t, &ethpb.Eth1Data{}, c.HeadETH1Data(), "Incorrect pre chain start value")
}
@@ -213,15 +213,15 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
func TestIsCanonical_Ok(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
c := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 0
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
require.NoError(t, beaconDB.SaveBlock(ctx, blk))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
can, err := c.IsCanonical(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, can)

@@ -0,0 +1,7 @@
// +build !develop
package blockchain
func init() {
log.Fatal("Tests in this package require extra build tag: re-run with `-tags develop`")
}
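
This new file, together with the checktags_test.go entry and gotags = ["develop"] in the BUILD diff near the top, acts as a tripwire: when the package's tests are compiled without the expected tag, the negated build constraint pulls this init into the test binary, which aborts with one actionable message instead of many confusing failures. A self-contained sketch of the guard pattern, using the standard library log package for illustration (the real file uses the package's own logger); the go test command in the comment is the usual way to supply the tag.

// checktags_test.go (sketch): compiled only when the "develop" tag is absent,
// e.g. when running plain `go test ./...` instead of `go test -tags develop ./...`.

// +build !develop

package blockchain

import "log"

func init() {
	// Abort the whole test binary up front with a clear remedy.
	log.Fatal("Tests in this package require extra build tag: re-run with `-tags develop`")
}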

@@ -93,7 +93,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// If the head state is not available, just return nil.
// There's nothing to cache
if !s.stateGen.StateSummaryExists(ctx, headRoot) {
if !s.beaconDB.HasStateSummary(ctx, headRoot) {
return nil
}

@@ -15,8 +15,8 @@ import (
)
func TestSaveHead_Same(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
r := [32]byte{'A'}
service.head = &head{slot: 0, root: r}
@@ -28,8 +28,8 @@ func TestSaveHead_Same(t *testing.T) {
func TestSaveHead_Different(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldRoot := [32]byte{'A'}
service.head = &head{slot: 0, root: oldRoot}
@@ -61,8 +61,8 @@ func TestSaveHead_Different(t *testing.T) {
func TestSaveHead_Different_Reorg(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldRoot := [32]byte{'A'}
service.head = &head{slot: 0, root: oldRoot}
@@ -95,8 +95,8 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
}
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
state, _ := testutil.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
@@ -107,8 +107,8 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
}
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
b := testutil.NewBeaconBlock()
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), b))

@@ -20,17 +20,17 @@ func TestService_TreeHandler(t *testing.T) {
require.NoError(t, err)
ctx := context.Background()
db, sCache := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
headState := testutil.NewBeaconState()
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
cfg := &Config{
BeaconDB: db,
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
),
StateGen: stategen.New(db, sCache),
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)

@@ -21,28 +21,28 @@ import (
func TestStore_OnAttestation(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(db, sc),
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
_, err = blockTree1(db, []byte{'g'})
_, err = blockTree1(beaconDB, []byte{'g'})
require.NoError(t, err)
BlkWithOutState := testutil.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, db.SaveBlock(ctx, BlkWithOutState))
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithOutState))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
require.NoError(t, err)
BlkWithStateBadAtt := testutil.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, db.SaveBlock(ctx, BlkWithStateBadAtt))
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithStateBadAtt))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -52,7 +52,7 @@ func TestStore_OnAttestation(t *testing.T) {
BlkWithValidState := testutil.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, db.SaveBlock(ctx, BlkWithValidState))
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithValidState))
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
@@ -130,11 +130,11 @@ func TestStore_OnAttestation(t *testing.T) {
func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, sc),
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -201,11 +201,11 @@ func TestStore_SaveCheckpointState(t *testing.T) {
func TestStore_UpdateCheckpointState(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, sc),
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -242,9 +242,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -254,9 +254,9 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -266,9 +266,9 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
func TestAttEpoch_NotMatch(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -279,25 +279,21 @@ func TestAttEpoch_NotMatch(t *testing.T) {
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
d := &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
}
d := testutil.HydrateAttestationData(&ethpb.AttestationData{})
assert.ErrorContains(t, "beacon block 0x000000000000 does not exist", service.verifyBeaconBlock(ctx, d))
}
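
The test above now builds its attestation data with testutil.HydrateAttestationData rather than zero-filling each root by hand. A minimal sketch of what such a hydrate-style helper does, limited to the fields visible in the removed lines (the real helper may fill more); the function name is illustrative and the ethpb types are those already imported by this package.

// hydrateAttestationData fills nil roots and checkpoints with zero values so
// tests can state only the fields they care about.
func hydrateAttestationData(d *ethpb.AttestationData) *ethpb.AttestationData {
	if d.BeaconBlockRoot == nil {
		d.BeaconBlockRoot = make([]byte, 32)
	}
	if d.Target == nil {
		d.Target = &ethpb.Checkpoint{}
	}
	if d.Target.Root == nil {
		d.Target.Root = make([]byte, 32)
	}
	if d.Source == nil {
		d.Source = &ethpb.Checkpoint{}
	}
	if d.Source.Root == nil {
		d.Source.Root = make([]byte, 32)
	}
	return d
}
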
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -313,9 +309,9 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
func TestVerifyBeaconBlock_OK(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -331,9 +327,9 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -355,9 +351,9 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -379,9 +375,9 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -406,9 +402,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -433,9 +429,9 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -303,7 +304,12 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
return err
}
s.stateGen.SaveStateSummary(ctx, signed, blockRoot)
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: signed.Block.Slot,
Root: blockRoot[:],
}); err != nil {
return err
}
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
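
In handleBlockAfterBatchVerify, the state summary is now written straight to the beacon DB as a pb.StateSummary keyed by slot and block root, instead of going through the state generator's summary cache. A small sketch of that persistence step, using only the call visible above; persistSummary is a hypothetical wrapper name.

// persistSummary records a lightweight (slot, root) summary for a processed
// block so later lookups do not need the full state.
func (s *Service) persistSummary(ctx context.Context, signed *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
	return s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
		Slot: signed.Block.Slot,
		Root: blockRoot[:],
	})
}
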
@@ -330,7 +336,15 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.BeaconState) error {
if postState.Slot() >= s.nextEpochBoundarySlot {
if postState.Slot()+1 == s.nextEpochBoundarySlot {
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(postState, helpers.NextEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.NextEpoch(postState)); err != nil {
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
return err
}
@@ -339,7 +353,9 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.
if err != nil {
return err
}
// Update committees cache at epoch boundary slot.
// Update caches at epoch boundary slot.
// The following cache updates short-circuit and return nil cheaply if they were already fulfilled at boundary slot - 1.
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return err
}
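
Taken together, the handleEpochBoundary hunks split the bookkeeping into two cases: one slot before the boundary, the committee and proposer-index caches are pre-warmed for the next epoch; at or past the boundary, metrics are reported, the next boundary slot is advanced, and the current-epoch cache updates run (cheaply, as no-ops, if the pre-warm already happened). A condensed sketch of that branch structure, assuming the helper names shown above; the boundary-slot computation is an illustrative stand-in for whatever helper the full file uses.

// handleEpochBoundarySketch condenses the boundary bookkeeping shown above.
func (s *Service) handleEpochBoundarySketch(ctx context.Context, postState *stateTrie.BeaconState) error {
	switch {
	case postState.Slot()+1 == s.nextEpochBoundarySlot:
		// One slot early: prime caches for the upcoming epoch.
		if err := helpers.UpdateCommitteeCache(postState, helpers.NextEpoch(postState)); err != nil {
			return err
		}
		return helpers.UpdateProposerIndicesInCache(postState, helpers.NextEpoch(postState))
	case postState.Slot() >= s.nextEpochBoundarySlot:
		// At the boundary: report, advance the boundary, refresh current-epoch caches.
		if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
			return err
		}
		s.nextEpochBoundarySlot = postState.Slot() + params.BeaconConfig().SlotsPerEpoch // illustrative
		if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
			return err
		}
		return helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState))
	}
	return nil
}
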
@@ -347,21 +363,22 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.
return err
}
}
return nil
}
// This feeds in the block and block's attestations to fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock, root [32]byte,
state *stateTrie.BeaconState) error {
fCheckpoint := state.FinalizedCheckpoint()
jCheckpoint := state.CurrentJustifiedCheckpoint()
st *stateTrie.BeaconState) error {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body.Attestations {
committee, err := helpers.BeaconCommitteeFromState(state, a.Data.Slot, a.Data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(st, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
}
@@ -388,7 +405,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.B
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState, initSync bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.SignedBeaconBlock, st *stateTrie.BeaconState, initSync bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
@@ -396,10 +413,10 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.Si
} else if err := s.beaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block.Slot)
}
if err := s.stateGen.SaveState(ctx, r, state); err != nil {
if err := s.stateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block, r, state); err != nil {
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block, r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block.Slot)
}
return nil

@@ -63,7 +63,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) e
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// subset of the block object.
if !s.stateGen.StateSummaryExists(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
if !s.beaconDB.HasStateSummary(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
return errors.New("could not reconstruct parent state")
}

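The comment above explains the relaxation: during initial sync, state summaries are written in batches, so a missing summary by itself does not mean the parent state is unrecoverable; having the parent block is enough. A small stand-in sketch of the check, with a hypothetical interface that captures just the two DB methods used in the diff.

// parentStore is a hypothetical stand-in for the beacon DB methods used above.
type parentStore interface {
	HasStateSummary(ctx context.Context, root [32]byte) bool
	HasBlock(ctx context.Context, root [32]byte) bool
}

// canReconstructParentState mirrors the loosened guard: either a state summary
// or the parent block itself is enough evidence that the pre-state is derivable.
func canReconstructParentState(ctx context.Context, db parentStore, parentRoot [32]byte) error {
	if !db.HasStateSummary(ctx, parentRoot) && !db.HasBlock(ctx, parentRoot) {
		return errors.New("could not reconstruct parent state")
	}
	return nil
}
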
@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
@@ -28,28 +28,28 @@ import (
func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, sc),
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, db.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(db, validGenesisRoot[:])
roots, err := blockTree1(beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := testutil.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
assert.NoError(t, db.SaveBlock(ctx, random))
assert.NoError(t, beaconDB.SaveBlock(ctx, random))
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
@@ -123,18 +123,18 @@ func TestStore_OnBlock(t *testing.T) {
func TestStore_OnBlockBatch(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, sc),
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, db.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
@@ -166,7 +166,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
}
blks[0].Block.ParentRoot = gRoot[:]
require.NoError(t, db.SaveBlock(context.Background(), blks[0]))
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.stateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
@@ -174,11 +174,11 @@ func TestStore_OnBlockBatch(t *testing.T) {
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.genesisTime = time.Now()
@@ -208,11 +208,11 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
lastJustifiedBlk := testutil.NewBeaconBlock()
@@ -237,11 +237,11 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, sc),
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -251,7 +251,7 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, db.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
@@ -270,18 +270,18 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
func TestCachedPreState_CanGetFromDB(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, sc),
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, db.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
@@ -307,20 +307,20 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, StateGen: stategen.New(db, cache.NewStateSummaryCache())}
cfg := &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
signedBlock := testutil.NewBeaconBlock()
require.NoError(t, db.SaveBlock(ctx, signedBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, signedBlock))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
st := testutil.NewBeaconState()
require.NoError(t, db.SaveState(ctx, st.Copy(), r))
require.NoError(t, beaconDB.SaveState(ctx, st.Copy(), r))
// Could update
s := testutil.NewBeaconState()
@@ -338,9 +338,9 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
@@ -348,13 +348,13 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, db.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(db, validGenesisRoot[:])
roots, err := blockTree1(beaconDB, validGenesisRoot[:])
require.NoError(t, err)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
@@ -375,9 +375,9 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
@@ -385,13 +385,13 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, db.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(db, validGenesisRoot[:])
roots, err := blockTree1(beaconDB, validGenesisRoot[:])
require.NoError(t, err)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
@@ -415,9 +415,9 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
@@ -426,7 +426,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, db.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st := testutil.NewBeaconState()
@@ -467,7 +467,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
//    /- B1
// B0           /- B5 - B7
//    \- B3 - B4 - B6 - B8
// (B1 and B3 are from the same slot)
func blockTree1(db db.Database, genesisRoot []byte) ([][]byte, error) {
func blockTree1(beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
genesisRoot = bytesutil.PadTo(genesisRoot, 32)
b0 := testutil.NewBeaconBlock()
b0.Block.Slot = 0
@@ -531,20 +531,20 @@ func blockTree1(db db.Database, genesisRoot []byte) ([][]byte, error) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
if err := db.SaveBlock(context.Background(), beaconBlock); err != nil {
if err := beaconDB.SaveBlock(context.Background(), beaconBlock); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
}
if err := db.SaveState(context.Background(), st.Copy(), r1); err != nil {
if err := beaconDB.SaveState(context.Background(), st.Copy(), r1); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), st.Copy(), r7); err != nil {
if err := beaconDB.SaveState(context.Background(), st.Copy(), r7); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), st.Copy(), r8); err != nil {
if err := beaconDB.SaveState(context.Background(), st.Copy(), r8); err != nil {
return nil, err
}
return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
@@ -568,9 +568,9 @@ func TestAncestorByDB_CtxErr(t *testing.T) {
func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -593,7 +593,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, db.SaveBlock(context.Background(), beaconBlock))
require.NoError(t, beaconDB.SaveBlock(context.Background(), beaconBlock))
}
// Slots 100 to 200 are skip slots. Requesting the root at slot 150 will yield the root at slot 100, the last physical block.
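
The comment above states the skip-slot rule for ancestor lookups: walking back from a descendant, the ancestor at a requested slot is the last block at or below that slot, because skipped slots have no block. A self-contained, standard-library-only sketch of that walk over a toy block index; the names are illustrative, not the service's actual helpers.

package main

import "fmt"

type toyBlock struct {
	slot   uint64
	parent string // parent root
}

// ancestorAtSlot walks parent links from the given root until it reaches a
// block whose slot is <= the requested slot. With blocks only at slots 100
// and 200, asking for slot 150 lands on the block at slot 100.
func ancestorAtSlot(index map[string]toyBlock, root string, slot uint64) string {
	for {
		blk, ok := index[root]
		if !ok || blk.slot <= slot {
			return root
		}
		root = blk.parent
	}
}

func main() {
	index := map[string]toyBlock{
		"r200": {slot: 200, parent: "r100"},
		"r100": {slot: 100, parent: "r0"},
		"r0":   {slot: 0, parent: ""},
	}
	fmt.Println(ancestorAtSlot(index, "r200", 150)) // prints r100
}
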
@@ -650,9 +650,9 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -675,7 +675,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, db.SaveBlock(context.Background(), beaconBlock)) // Saves blocks to DB.
require.NoError(t, beaconDB.SaveBlock(context.Background(), beaconBlock)) // Saves blocks to DB.
}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
@@ -702,7 +702,7 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
}
func TestFinalizedImpliesNewJustified(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
type args struct {
cachedCheckPoint *ethpb.Checkpoint
@@ -743,7 +743,7 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
for _, test := range tests {
beaconState := testutil.NewBeaconState()
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
service, err := NewService(ctx, &Config{BeaconDB: db, StateGen: stategen.New(db, sc), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
require.NoError(t, err)
service.justifiedCheckpt = test.args.cachedCheckPoint
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
@@ -777,26 +777,25 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
}
func TestVerifyBlkDescendant(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
b := testutil.NewBeaconBlock()
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, b))
require.NoError(t, beaconDB.SaveBlock(ctx, b))
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, b1))
require.NoError(t, beaconDB.SaveBlock(ctx, b1))
type args struct {
parentRoot [32]byte
finalizedRoot [32]byte
finalizedSlot uint64
}
tests := []struct {
name string
@@ -835,7 +834,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
},
}
for _, tt := range tests {
service, err := NewService(ctx, &Config{BeaconDB: db, StateGen: stategen.New(db, sc), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: tt.args.finalizedRoot[:],
@@ -850,9 +849,9 @@ func TestVerifyBlkDescendant(t *testing.T) {
}
func TestUpdateJustifiedInitSync(t *testing.T) {
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -889,3 +888,52 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
service.head = &head{}
require.ErrorContains(t, "failed to initialize precompute: nil inner state", service.handleEpochBoundary(ctx, s))
}
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
ctx := context.Background()
cfg := &Config{}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
s, _ := testutil.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.handleEpochBoundary(ctx, s))
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
}
func TestOnBlock_CanFinalize(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
gs, keys := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.beaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
testState := gs.Copy()
for i := uint64(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
blk, err := testutil.GenerateFullBlock(testState, keys, testutil.DefaultBlockGenConfig(), i)
require.NoError(t, err)
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, blk, r))
testState, err = service.stateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
require.Equal(t, uint64(3), service.CurrentJustifiedCheckpt().Epoch)
require.Equal(t, uint64(2), service.FinalizedCheckpt().Epoch)
}

@@ -125,7 +125,7 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
continue
}
hasState := s.stateGen.StateSummaryExists(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
hasState := s.beaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue

@@ -14,9 +14,9 @@ import (
func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
helpers.ClearCache()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db, sc)
chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()
e := helpers.MaxSlotBuffer/params.BeaconConfig().SlotsPerEpoch + 1

@@ -51,9 +51,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlo
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: blockRoot,
Verified: true,
Slot: blockCopy.Block.Slot,
BlockRoot: blockRoot,
SignedBlock: blockCopy,
Verified: true,
},
})
@@ -97,9 +98,10 @@ func (s *Service) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.Sign
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: blockRoot,
Verified: true,
Slot: blockCopy.Block.Slot,
BlockRoot: blockRoot,
SignedBlock: blockCopy,
Verified: true,
},
})
@@ -141,9 +143,10 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedB
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: blkRoots[i],
Verified: true,
Slot: blockCopy.Block.Slot,
BlockRoot: blkRoots[i],
SignedBlock: blockCopy,
Verified: true,
},
})

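All three receive paths above now attach the copied signed block to the BlockProcessed event, so feed subscribers get the block itself rather than only a slot and root to look up. A minimal consumer-side sketch over that payload, assuming the feed.Event and statefeed.BlockProcessedData types exactly as they appear in the hunks; the handler name is illustrative.

// onBlockProcessed shows how a subscriber can use the enriched payload.
func onBlockProcessed(ev *feed.Event) {
	if ev.Type != statefeed.BlockProcessed {
		return
	}
	data, ok := ev.Data.(*statefeed.BlockProcessedData)
	if !ok || !data.Verified {
		return
	}
	// The signed block travels with the event, so no DB lookup by BlockRoot
	// is needed to inspect its contents.
	_ = data.SignedBlock
	_ = data.Slot
}
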
@@ -117,12 +117,12 @@ func TestService_ReceiveBlock(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, db.SaveState(ctx, genesis, genesisBlockRoot))
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
cfg := &Config{
BeaconDB: db,
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
@@ -131,7 +131,7 @@ func TestService_ReceiveBlock(t *testing.T) {
AttPool: attestations.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(db, stateSummaryCache),
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -159,11 +159,11 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
genesis, keys := testutil.DeterministicGenesisState(t, 64)
b, err := testutil.GenerateFullBlock(genesis, keys, testutil.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
db, stateSummaryCache := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, db.SaveState(ctx, genesis, genesisBlockRoot))
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
cfg := &Config{
BeaconDB: db,
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
@@ -172,7 +172,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
AttPool: attestations.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(db, stateSummaryCache),
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -242,18 +242,18 @@ func TestService_ReceiveBlockInitialSync(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
cfg := &Config{
BeaconDB: db,
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(db, stateSummaryCache),
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -323,18 +323,18 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
require.NoError(t, err)
cfg := &Config{
BeaconDB: db,
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(db, stateSummaryCache),
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -375,9 +375,9 @@ func TestService_HasInitSyncBlock(t *testing.T) {
}
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(db, stateSummaryCache)})
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch * uint64(epochsSinceFinalitySaveHotStateDB)
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -388,9 +388,9 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
}
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(db, stateSummaryCache)})
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -401,9 +401,9 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
}
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
db, stateSummaryCache := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(db, stateSummaryCache)})
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 10000000}
s.genesisTime = time.Now()

@@ -175,7 +175,11 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not retrieve genesis state: %v", err)
}
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()))
gRoot, err := gState.HashTreeRoot(s.ctx)
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)
justifiedCheckpoint, err := s.beaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
@@ -202,12 +206,14 @@ func (s *Service) Start() {
log.Fatalf("Could not get start slot of finalized epoch: %v", err)
}
h := s.headBlock().Block
log.WithFields(logrus.Fields{
"startSlot": ss,
"endSlot": h.Slot,
}).Info("Loading blocks to fork choice store, this may take a while.")
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
if h.Slot > ss {
log.WithFields(logrus.Fields{
"startSlot": ss,
"endSlot": h.Slot,
}).Info("Loading blocks to fork choice store, this may take a while.")
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
}
}
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
@@ -269,7 +275,11 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
log.Fatalf("Could not initialize beacon chain: %v", err)
}
// We start a counter to genesis, if needed.
go slotutil.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()))
gRoot, err := initializedState.HashTreeRoot(s.ctx)
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slotutil.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
// We send out a state initialized event to the rest of the services
// running in the beacon node.

@@ -16,9 +16,9 @@ func init() {
}
func TestChainService_SaveHead_DataRace(t *testing.T) {
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
s := &Service{
beaconDB: db,
beaconDB: beaconDB,
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))

@@ -10,7 +10,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -63,7 +62,7 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummaryCache) *Service {
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
endpoint := "http://127.0.0.1"
ctx := context.Background()
var web3Service *powchain.Service
@@ -87,7 +86,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummary
require.NoError(t, err)
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
BeaconDB: beaconDB,
HTTPEndPoint: endpoint,
HTTPEndpoints: []string{endpoint},
DepositContract: common.Address{},
})
require.NoError(t, err, "Unable to set up web3 service")
@@ -106,7 +105,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummary
P2p: &mockBroadcaster{},
StateNotifier: &mockBeaconNode{},
AttPool: attestations.NewPool(),
StateGen: stategen.New(beaconDB, sc),
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
OpsService: opsService,
}
@@ -124,21 +123,21 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummary
func TestChainStartStop_Initialized(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db, sc)
chainService := setupBeaconChain(t, beaconDB)
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, genesisBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
require.NoError(t, s.SetSlot(1))
require.NoError(t, db.SaveState(ctx, s, blkRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
// Test the start function.
chainService.Start()
@@ -153,18 +152,18 @@ func TestChainStartStop_Initialized(t *testing.T) {
func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db, sc)
chainService := setupBeaconChain(t, beaconDB)
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, genesisBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
require.NoError(t, db.SaveState(ctx, s, blkRoot))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
// Test the start function.
chainService.Start()
@@ -178,10 +177,10 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
func TestChainService_InitializeBeaconChain(t *testing.T) {
helpers.ClearCache()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
bc := setupBeaconChain(t, db, sc)
bc := setupBeaconChain(t, beaconDB)
var err error
// Set up 10 deposits pre chain start for validators to register
@@ -221,20 +220,20 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
func TestChainService_CorrectGenesisRoots(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
chainService := setupBeaconChain(t, db, sc)
chainService := setupBeaconChain(t, beaconDB)
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, genesisBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
require.NoError(t, s.SetSlot(0))
require.NoError(t, db.SaveState(ctx, s, blkRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
// Test the start function.
chainService.Start()
@@ -247,14 +246,14 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
}
func TestChainService_InitializeChainInfo(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := testutil.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, db.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := testutil.NewBeaconBlock()
@@ -265,11 +264,11 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, headState, headRoot))
require.NoError(t, db.SaveState(ctx, headState, genesisRoot))
require.NoError(t, db.SaveBlock(ctx, headBlock))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: helpers.SlotToEpoch(finalizedSlot), Root: headRoot[:]}))
c := &Service{beaconDB: db, stateGen: stategen.New(db, sc)}
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: helpers.SlotToEpoch(finalizedSlot), Root: headRoot[:]}))
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
require.NoError(t, c.initializeChainInfo(ctx))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
@@ -287,14 +286,14 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
}
func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := testutil.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, db.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := testutil.NewBeaconBlock()
@@ -305,10 +304,10 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, headState, headRoot))
require.NoError(t, db.SaveState(ctx, headState, genesisRoot))
require.NoError(t, db.SaveBlock(ctx, headBlock))
c := &Service{beaconDB: db, stateGen: stategen.New(db, sc)}
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
require.NoError(t, err)
@@ -328,21 +327,21 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
hook := logTest.NewGlobal()
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesisBlock := testutil.NewBeaconBlock()
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, db.SaveBlock(ctx, genesisBlock))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlock))
finalizedBlock := testutil.NewBeaconBlock()
finalizedBlock.Block.Slot = finalizedSlot
finalizedBlock.Block.ParentRoot = genesisRoot[:]
finalizedRoot, err := finalizedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, finalizedBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, finalizedBlock))
// Set head slot close to the finalization point, so no head sync is triggered.
headBlock := testutil.NewBeaconBlock()
@@ -350,21 +349,21 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
headState := testutil.NewBeaconState()
require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, db.SaveState(ctx, headState, genesisRoot))
require.NoError(t, db.SaveState(ctx, headState, finalizedRoot))
require.NoError(t, db.SaveState(ctx, headState, headRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, headRoot))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, finalizedRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: helpers.SlotToEpoch(finalizedBlock.Block.Slot),
Root: finalizedRoot[:],
}))
c := &Service{beaconDB: db, stateGen: stategen.New(db, sc)}
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
@@ -382,9 +381,9 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err = headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, headBlock))
require.NoError(t, db.SaveState(ctx, headState, headRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, headRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
hook.Reset()
require.NoError(t, c.initializeChainInfo(ctx))
@@ -399,40 +398,40 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
}
func TestChainService_SaveHeadNoDB(t *testing.T) {
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
s := &Service{
beaconDB: db,
stateGen: stategen.New(db, sc),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB),
}
b := testutil.NewBeaconBlock()
b.Block.Slot = 1
r, err := b.HashTreeRoot()
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 1
r, err := blk.HashTreeRoot()
require.NoError(t, err)
newState := testutil.NewBeaconState()
require.NoError(t, s.stateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, b, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, blk, r, newState))
newB, err := s.beaconDB.HeadBlock(ctx)
require.NoError(t, err)
if reflect.DeepEqual(newB, b) {
if reflect.DeepEqual(newB, blk) {
t.Error("head block should not be equal")
}
}
func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
beaconDB: db,
beaconDB: beaconDB,
}
block := testutil.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
require.NoError(t, err)
state := testutil.NewBeaconState()
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, state))
beaconState := testutil.NewBeaconState()
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -440,12 +439,13 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
s := &Service{
ctx: ctx,
cancel: cancel,
beaconDB: db,
beaconDB: beaconDB,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
stateGen: stategen.New(beaconDB),
}
b := testutil.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
@@ -456,10 +456,10 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
}
func BenchmarkHasBlockDB(b *testing.B) {
db, _ := testDB.SetupDB(b)
beaconDB := testDB.SetupDB(b)
ctx := context.Background()
s := &Service{
beaconDB: db,
beaconDB: beaconDB,
}
block := testutil.NewBeaconBlock()
require.NoError(b, s.beaconDB.SaveBlock(ctx, block))
@@ -474,19 +474,19 @@ func BenchmarkHasBlockDB(b *testing.B) {
func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
ctx := context.Background()
db, _ := testDB.SetupDB(b)
beaconDB := testDB.SetupDB(b)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
beaconDB: db,
beaconDB: beaconDB,
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := block.Block.HashTreeRoot()
require.NoError(b, err)
bs := &pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
state, err := beaconstate.InitializeFromProto(bs)
beaconState, err := beaconstate.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, state))
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {


@@ -47,6 +47,7 @@ type ChainService struct {
ValidAttestation bool
ForkChoiceStore *protoarray.Store
VerifyBlkDescendantErr error
Slot *uint64 // Pointer because slot 0 is a valid value; a nil pointer distinguishes "unset" from an explicit slot 0.
}
// StateNotifier mocks the same method in the chain service.
@@ -323,6 +324,9 @@ func (ms *ChainService) GenesisValidatorRoot() [32]byte {
// CurrentSlot mocks the same method in the chain service.
func (ms *ChainService) CurrentSlot() uint64 {
if ms.Slot != nil {
return *ms.Slot
}
return uint64(time.Now().Unix()-ms.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot
}
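
For context on the new Slot field: the override only applies when the pointer is non-nil, so a test can pin the mock to slot 0 without that being read as "not set". A minimal standalone sketch of the same pattern (the constant is an assumed mainnet value; Prysm reads SecondsPerSlot from params.BeaconConfig()):

package mocksketch

import "time"

const secondsPerSlot = 12 // assumed mainnet value, for this sketch only

// fakeChain mirrors the pointer-override idea above: a nil slot means "derive
// the current slot from genesis time", while a non-nil slot (including 0)
// pins CurrentSlot to an exact value.
type fakeChain struct {
	genesis time.Time
	slot    *uint64
}

func (f *fakeChain) CurrentSlot() uint64 {
	if f.slot != nil {
		return *f.slot
	}
	return uint64(time.Now().Unix()-f.genesis.Unix()) / secondsPerSlot
}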


@@ -11,11 +11,11 @@ import (
)
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)
b := testutil.NewBeaconBlock()
b.Block.Slot = 32
require.NoError(t, db.SaveBlock(context.Background(), b))
require.NoError(t, beaconDB.SaveBlock(context.Background(), b))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
@@ -71,7 +71,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
beaconDB: db,
beaconDB: beaconDB,
wsRoot: tt.wsRoot[:],
wsEpoch: tt.wsEpoch,
wsVerified: tt.wsVerified,


@@ -11,9 +11,7 @@ go_library(
"committees.go",
"common.go",
"doc.go",
"hot_state_cache.go",
"skip_slot_cache.go",
"state_summary.go",
"subnet_ids.go",
"proposer_indices_type.go",
] + select({
@@ -58,7 +56,6 @@ go_test(
"committee_fuzz_test.go",
"committee_test.go",
"cache_test.go",
"hot_state_cache_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",
"proposer_indices_test.go"


@@ -1,17 +1,13 @@
package cache
import (
"os"
"testing"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
)
func TestMain(m *testing.M) {
run := func() int {
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
defer resetCfg()
return m.Run()
}
os.Exit(run())
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
defer resetCfg()
m.Run()
}
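
The simplified TestMain above leans on Go 1.15+ behavior, where a TestMain that returns normally exits with m.Run's status code. On older toolchains the explicit form below is the equivalent (imports as in this file; the featureconfig call is copied from it, not new):

func TestMain(m *testing.M) {
	resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
	code := m.Run()
	resetCfg() // run teardown before os.Exit, which skips deferred calls
	os.Exit(code)
}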


@@ -5,6 +5,7 @@ go_library(
name = "go_default_library",
srcs = [
"deposits_cache.go",
"log.go",
"pending_deposits.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",


@@ -19,7 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/trieutil"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -78,7 +78,7 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
defer span.End()
if d == nil {
log.WithFields(log.Fields{
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,


@@ -611,8 +611,8 @@ func TestPruneProofs_Ok(t *testing.T) {
require.NoError(t, dc.PruneProofs(context.Background(), 1))
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[1].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
assert.NotNil(t, dc.deposits[2].Deposit.Proof)
assert.NotNil(t, dc.deposits[3].Deposit.Proof)
}
@@ -654,7 +654,7 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
require.NoError(t, dc.PruneProofs(context.Background(), 2))
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[2].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
}
func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
@@ -694,10 +694,10 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
require.NoError(t, dc.PruneProofs(context.Background(), 99))
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[1].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[2].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[3].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}
func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
@@ -737,10 +737,10 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
require.NoError(t, dc.PruneProofs(context.Background(), 4))
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[1].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[2].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[3].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}
func makeDepositProof() [][]byte {


@@ -0,0 +1,5 @@
package depositcache
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "depositcache")
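
For readers skimming the related hunks: this new file replaces the aliased log import with one package-level, prefixed logger. A short sketch of how a call site reads with that setup (logNilDeposit is a hypothetical wrapper, only there to show a call site; the real usage is in the pending-deposits hunk below):

package depositcache

import "github.com/sirupsen/logrus"

// One prefixed entry for the whole package; every line logged through it
// carries prefix=depositcache.
var log = logrus.WithField("prefix", "depositcache")

func logNilDeposit(blockNum uint64) {
	// Fields now comes from the logrus package rather than a "log" import alias.
	log.WithFields(logrus.Fields{"block": blockNum}).Debug("Ignoring nil deposit insertion")
}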


@@ -10,7 +10,7 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/hashutil"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -33,7 +33,7 @@ func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Depos
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
defer span.End()
if d == nil {
log.WithFields(log.Fields{
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
}).Debug("Ignoring nil deposit insertion")


@@ -63,6 +63,17 @@ func (c *ProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error {
return nil
}
// HasProposerIndices returns whether the cache contains proposer indices for the given block root.
func (c *ProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error) {
c.lock.RLock()
defer c.lock.RUnlock()
_, exists, err := c.ProposerIndicesCache.GetByKey(key(r))
if err != nil {
return false, err
}
return exists, nil
}
// ProposerIndices returns the proposer indices of a block root seed.
func (c *ProposerIndicesCache) ProposerIndices(r [32]byte) ([]uint64, error) {
c.lock.RLock()


@@ -22,3 +22,8 @@ func (c *FakeProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error
func (c *FakeProposerIndicesCache) ProposerIndices(r [32]byte) ([]uint64, error) {
return nil, nil
}
// HasProposerIndices mocks the method of the same name in the real cache and always reports a miss.
func (c *FakeProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error) {
return false, nil
}
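
Taken together with the real cache above, the new Has method lets callers probe cheaply before recomputing the shuffling. A hypothetical caller, written as if it lived in the same cache package (the helper itself is not part of this change; only the three cache methods it calls are):

// proposerIndicesOrCompute returns cached indices for the block root when
// present, otherwise computes and caches them.
func proposerIndicesOrCompute(c *ProposerIndicesCache, root [32]byte, compute func() ([]uint64, error)) ([]uint64, error) {
	has, err := c.HasProposerIndices(root)
	if err != nil {
		return nil, err
	}
	if has {
		return c.ProposerIndices(root)
	}
	indices, err := compute()
	if err != nil {
		return nil, err
	}
	if err := c.AddProposerIndices(&ProposerIndices{BlockRoot: root, ProposerIndices: indices}); err != nil {
		return nil, err
	}
	return indices, nil
}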


@@ -33,6 +33,9 @@ func TestProposerCache_AddProposerIndicesList(t *testing.T) {
if indices != nil {
t.Error("Expected proposer indices not to exist in empty cache")
}
has, err := cache.HasProposerIndices(bRoot)
require.NoError(t, err)
assert.Equal(t, false, has)
require.NoError(t, cache.AddProposerIndices(&ProposerIndices{
ProposerIndices: indices,
BlockRoot: bRoot,
@@ -41,6 +44,9 @@ func TestProposerCache_AddProposerIndicesList(t *testing.T) {
received, err := cache.ProposerIndices(bRoot)
require.NoError(t, err)
assert.DeepEqual(t, received, indices)
has, err = cache.HasProposerIndices(bRoot)
require.NoError(t, err)
assert.Equal(t, true, has)
item := &ProposerIndices{BlockRoot: [32]byte{'B'}, ProposerIndices: []uint64{1, 2, 3, 4, 5, 6}}
require.NoError(t, cache.AddProposerIndices(item))
@@ -48,6 +54,10 @@ func TestProposerCache_AddProposerIndicesList(t *testing.T) {
received, err = cache.ProposerIndices(item.BlockRoot)
require.NoError(t, err)
assert.DeepEqual(t, item.ProposerIndices, received)
has, err = cache.HasProposerIndices(bRoot)
require.NoError(t, err)
assert.Equal(t, true, has)
}
func TestProposerCache_CanRotate(t *testing.T) {


@@ -35,7 +35,7 @@ func newSubnetIDs() *subnetIDs {
panic(err)
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
subLength := epochDuration * time.Duration(params.BeaconNetworkConfig().EpochsPerRandomSubnetSubscription)
subLength := epochDuration * time.Duration(params.BeaconConfig().EpochsPerRandomSubnetSubscription)
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
return &subnetIDs{attester: attesterCache, aggregator: aggregatorCache, persistentSubnets: persistentCache}
}


@@ -35,12 +35,12 @@ go_library(
"//shared/hashutil:go_default_library",
"//shared/mathutil:go_default_library",
"//shared/params:go_default_library",
"//shared/slashutil:go_default_library",
"//shared/sliceutil:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],


@@ -12,7 +12,6 @@ import (
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
@@ -110,10 +109,9 @@ func ProcessAttestationNoVerifySignature(
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerifySignature")
defer span.End()
if att == nil || att.Data == nil || att.Data.Target == nil {
return nil, errors.New("nil attestation data target")
if err := helpers.ValidateNilAttestation(att); err != nil {
return nil, err
}
currEpoch := helpers.SlotToEpoch(beaconState.Slot())
var prevEpoch uint64
if currEpoch == 0 {
@@ -275,8 +273,8 @@ func VerifyAttestationsSignatures(ctx context.Context, beaconState *stateTrie.Be
// VerifyAttestationSignature converts an attestation into an indexed attestation and verifies
// the signature in that attestation.
func VerifyAttestationSignature(ctx context.Context, beaconState *stateTrie.BeaconState, att *ethpb.Attestation) error {
if att == nil || att.Data == nil || att.AggregationBits.Count() == 0 {
return fmt.Errorf("nil or missing attestation data: %v", att)
if err := helpers.ValidateNilAttestation(att); err != nil {
return err
}
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
@@ -345,35 +343,3 @@ func verifyAttestationsSigWithDomain(ctx context.Context, beaconState *stateTrie
}
return nil
}
// VerifyAttSigUseCheckPt uses the checkpoint info object to verify attestation signature.
func VerifyAttSigUseCheckPt(ctx context.Context, c *pb.CheckPtInfo, att *ethpb.Attestation) error {
if att == nil || att.Data == nil || att.AggregationBits.Count() == 0 {
return fmt.Errorf("nil or missing attestation data: %v", att)
}
seed := bytesutil.ToBytes32(c.Seed)
committee, err := helpers.BeaconCommittee(c.ActiveIndices, seed, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return err
}
indexedAtt := attestationutil.ConvertToIndexed(ctx, att, committee)
if err := attestationutil.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
return err
}
domain, err := helpers.Domain(c.Fork, indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, c.GenesisRoot)
if err != nil {
return err
}
indices := indexedAtt.AttestingIndices
var pubkeys []bls.PublicKey
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := c.PubKeys[indices[i]]
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx)
if err != nil {
return errors.Wrap(err, "could not deserialize validator public key")
}
pubkeys = append(pubkeys, pk)
}
return attestationutil.VerifyIndexedAttestationSig(ctx, indexedAtt, pubkeys, domain)
}
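
Both call sites above now delegate their nil guards to helpers.ValidateNilAttestation. The standalone sketch below mirrors the inline checks being removed (attestation, data, and the checkpoints); the exact checks in the real helper may differ:

package sketch

import "errors"

type checkpoint struct{ Epoch uint64 }

type attestationData struct {
	Slot, CommitteeIndex uint64
	Source, Target       *checkpoint
}

type attestation struct {
	AggregationBits []byte
	Data            *attestationData
}

// validateNilAttestation rejects attestations with nil containers before any
// field access, mirroring the guards that the helper replaces above.
func validateNilAttestation(att *attestation) error {
	switch {
	case att == nil:
		return errors.New("attestation is nil")
	case att.Data == nil:
		return errors.New("attestation data is nil")
	case att.Data.Source == nil || att.Data.Target == nil:
		return errors.New("attestation source or target is nil")
	default:
		return nil
	}
}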


@@ -24,12 +24,12 @@ import (
func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
attestations := []*ethpb.Attestation{
{
testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Slot: 5,
},
},
}),
}
b := testutil.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
@@ -50,10 +50,10 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
}
func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
att := &ethpb.Attestation{
att := testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
Target: &ethpb.Checkpoint{Epoch: 0}}}
Target: &ethpb.Checkpoint{Epoch: 0}}})
b := testutil.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
@@ -211,15 +211,13 @@ func TestProcessAttestations_OK(t *testing.T) {
aggBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := &ethpb.Attestation{
att := testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Root: mockRoot[:]},
},
AggregationBits: aggBits,
Signature: make([]byte, 96),
}
})
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
@@ -251,11 +249,10 @@ func TestProcessAttestations_OK(t *testing.T) {
func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
data := &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
BeaconBlockRoot: make([]byte, 32),
}
data := testutil.HydrateAttestationData(&ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
})
aggBits1 := bitfield.NewBitlist(4)
aggBits1.SetBitAt(0, true)
aggBits1.SetBitAt(1, true)
@@ -316,11 +313,10 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
data := &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
BeaconBlockRoot: make([]byte, 32),
}
data := testutil.HydrateAttestationData(&ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
})
aggBits1 := bitfield.NewBitlist(9)
aggBits1.SetBitAt(0, true)
aggBits1.SetBitAt(1, true)
@@ -387,12 +383,12 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
func TestProcessAttestationsNoVerify_IncorrectSlotTargetEpoch(t *testing.T) {
beaconState, _ := testutil.DeterministicGenesisState(t, 1)
att := &ethpb.Attestation{
att := testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
}
})
wanted := fmt.Sprintf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(att.Data.Slot), att.Data.Target.Epoch)
_, err := blocks.ProcessAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, wanted, err)
@@ -489,13 +485,9 @@ func TestConvertToIndexed_OK(t *testing.T) {
var sig [96]byte
copy(sig[:], "signed")
attestation := &ethpb.Attestation{
attestation := testutil.HydrateAttestation(&ethpb.Attestation{
Signature: sig[:],
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
}
})
for _, tt := range tests {
attestation.AggregationBits = tt.aggregationBitfield
wanted := &ethpb.IndexedAttestation{
@@ -539,58 +531,39 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
attestation *ethpb.IndexedAttestation
}{
{attestation: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Data: testutil.HydrateAttestationData(&ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Epoch: 2,
Root: make([]byte, 32),
},
Source: &ethpb.Checkpoint{
Root: make([]byte, 32),
},
BeaconBlockRoot: make([]byte, 32),
},
Source: &ethpb.Checkpoint{},
}),
AttestingIndices: []uint64{1},
Signature: make([]byte, 96),
}},
{attestation: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Data: testutil.HydrateAttestationData(&ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: make([]byte, 32),
},
Source: &ethpb.Checkpoint{
Root: make([]byte, 32),
},
BeaconBlockRoot: make([]byte, 32),
},
}),
AttestingIndices: []uint64{47, 99, 101},
Signature: make([]byte, 96),
}},
{attestation: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Data: testutil.HydrateAttestationData(&ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Epoch: 4,
Root: make([]byte, 32),
},
Source: &ethpb.Checkpoint{
Root: make([]byte, 32),
},
BeaconBlockRoot: make([]byte, 32),
},
}),
AttestingIndices: []uint64{21, 72},
Signature: make([]byte, 96),
}},
{attestation: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Data: testutil.HydrateAttestationData(&ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Epoch: 7,
Root: make([]byte, 32),
},
Source: &ethpb.Checkpoint{
Root: make([]byte, 32),
},
BeaconBlockRoot: make([]byte, 32),
},
}),
AttestingIndices: []uint64{100, 121, 122},
Signature: make([]byte, 96),
}},
@@ -696,17 +669,12 @@ func TestVerifyAttestations_VerifiesMultipleAttestations(t *testing.T) {
comm1, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 0 /*committeeIndex*/)
require.NoError(t, err)
att1 := &ethpb.Attestation{
att1 := testutil.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: make([]byte, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Slot: 1,
},
Signature: make([]byte, 96),
}
})
domain, err := helpers.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
require.NoError(t, err)
root, err := helpers.ComputeSigningRoot(att1.Data, domain)
@@ -720,17 +688,13 @@ func TestVerifyAttestations_VerifiesMultipleAttestations(t *testing.T) {
comm2, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 1 /*committeeIndex*/)
require.NoError(t, err)
att2 := &ethpb.Attestation{
att2 := testutil.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: make([]byte, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Slot: 1,
CommitteeIndex: 1,
},
Signature: make([]byte, 96),
}
})
root, err = helpers.ComputeSigningRoot(att2.Data, domain)
require.NoError(t, err)
sigs = nil
@@ -771,17 +735,12 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
comm1, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 0 /*committeeIndex*/)
require.NoError(t, err)
att1 := &ethpb.Attestation{
att1 := testutil.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Slot: 1,
},
Signature: make([]byte, 96),
}
})
prevDomain, err := helpers.Domain(st.Fork(), st.Fork().Epoch-1, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
require.NoError(t, err)
root, err := helpers.ComputeSigningRoot(att1.Data, prevDomain)
@@ -795,17 +754,13 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
comm2, err := helpers.BeaconCommitteeFromState(st, 1*params.BeaconConfig().SlotsPerEpoch+1 /*slot*/, 1 /*committeeIndex*/)
require.NoError(t, err)
att2 := &ethpb.Attestation{
att2 := testutil.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1*params.BeaconConfig().SlotsPerEpoch + 1,
CommitteeIndex: 1,
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Slot: 1*params.BeaconConfig().SlotsPerEpoch + 1,
CommitteeIndex: 1,
},
Signature: make([]byte, 96),
}
})
currDomain, err := helpers.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
require.NoError(t, err)
root, err = helpers.ComputeSigningRoot(att2.Data, currDomain)
@@ -842,17 +797,12 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
comm1, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 0 /*committeeIndex*/)
require.NoError(t, err)
att1 := &ethpb.Attestation{
att1 := testutil.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: make([]byte, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Slot: 1,
},
Signature: make([]byte, 96),
}
})
domain, err := helpers.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
require.NoError(t, err)
root, err := helpers.ComputeSigningRoot(att1.Data, domain)
@@ -866,17 +816,13 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
comm2, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 1 /*committeeIndex*/)
require.NoError(t, err)
att2 := &ethpb.Attestation{
att2 := testutil.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: make([]byte, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Slot: 1,
CommitteeIndex: 1,
},
Signature: make([]byte, 96),
}
})
root, err = helpers.ComputeSigningRoot(att2.Data, domain)
require.NoError(t, err)
sigs = nil
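
Most edits in this test file replace hand-built ethpb structs with the testutil.Hydrate* helpers. A minimal standalone sketch of that pattern, using stand-in types (the real helpers operate on the generated ethpb types and may fill additional fields):

type sketchCheckpoint struct {
	Epoch uint64
	Root  []byte
}

type sketchAttestationData struct {
	Slot, CommitteeIndex uint64
	BeaconBlockRoot      []byte
	Source, Target       *sketchCheckpoint
}

type sketchAttestation struct {
	AggregationBits []byte
	Data            *sketchAttestationData
	Signature       []byte
}

// hydrateAttestation fills nil sub-objects and empty roots/signatures with
// correctly sized zero values, so a test only spells out the fields it cares
// about; this is exactly what the removed make([]byte, 32)/make([]byte, 96)
// lines did by hand.
func hydrateAttestation(a *sketchAttestation) *sketchAttestation {
	if a.Data == nil {
		a.Data = &sketchAttestationData{}
	}
	if a.Data.Source == nil {
		a.Data.Source = &sketchCheckpoint{}
	}
	if a.Data.Target == nil {
		a.Data.Target = &sketchCheckpoint{}
	}
	if len(a.Data.BeaconBlockRoot) == 0 {
		a.Data.BeaconBlockRoot = make([]byte, 32)
	}
	if len(a.Data.Source.Root) == 0 {
		a.Data.Source.Root = make([]byte, 32)
	}
	if len(a.Data.Target.Root) == 0 {
		a.Data.Target.Root = make([]byte, 32)
	}
	if len(a.Signature) == 0 {
		a.Signature = make([]byte, 96)
	}
	return a
}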


@@ -10,6 +10,7 @@ import (
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/slashutil"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
@@ -120,7 +121,10 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
return false
}
isDoubleVote := !attestationutil.AttDataIsEqual(data1, data2) && data1.Target.Epoch == data2.Target.Epoch
isSurroundVote := data1.Source.Epoch < data2.Source.Epoch && data2.Target.Epoch < data1.Target.Epoch
att1 := &ethpb.IndexedAttestation{Data: data1}
att2 := &ethpb.IndexedAttestation{Data: data2}
// Check if att1 is surrounding att2.
isSurroundVote := slashutil.IsSurround(att1, att2)
return isDoubleVote || isSurroundVote
}
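
The inline expression removed above now comes from shared/slashutil. A self-contained sketch of the surround predicate it is assumed to implement, reduced to epochs only so it matches the removed line exactly:

type checkpointEpochs struct {
	SourceEpoch uint64
	TargetEpoch uint64
}

// isSurround reports whether att1 surrounds att2: att1 votes from an older
// source to a newer target, enveloping att2's source-to-target span.
func isSurround(att1, att2 checkpointEpochs) bool {
	return att1.SourceEpoch < att2.SourceEpoch && att2.TargetEpoch < att1.TargetEpoch
}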


@@ -18,16 +18,14 @@ import (
)
func TestSlashableAttestationData_CanSlash(t *testing.T) {
att1 := &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)},
BeaconBlockRoot: make([]byte, 32),
}
att2 := &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'B'}, 32)},
BeaconBlockRoot: make([]byte, 32),
}
att1 := testutil.HydrateAttestationData(&ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)},
})
att2 := testutil.HydrateAttestationData(&ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'B'}, 32)},
})
assert.Equal(t, true, blocks.IsSlashableAttestationData(att1, att2), "Atts should have been slashable")
att1.Target.Epoch = 4
att1.Source.Epoch = 2
@@ -36,26 +34,14 @@ func TestSlashableAttestationData_CanSlash(t *testing.T) {
}
func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
slashings := []*ethpb.AttesterSlashing{
{
Attestation_1: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
Attestation_2: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
},
}
slashings := []*ethpb.AttesterSlashing{{
Attestation_1: testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{}),
Attestation_2: testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
Target: &ethpb.Checkpoint{Epoch: 1}},
})}}
var registry []*ethpb.Validator
currentSlot := uint64(0)
@@ -86,24 +72,15 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
slashings := []*ethpb.AttesterSlashing{
{
Attestation_1: &ethpb.IndexedAttestation{
Attestation_1: testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
Signature: make([]byte, 96),
},
Attestation_2: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
},
}),
Attestation_2: testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
Signature: make([]byte, 96),
},
}),
},
}
@@ -124,14 +101,12 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
vv.WithdrawableEpoch = 1 * params.BeaconConfig().SlotsPerEpoch
}
att1 := &ethpb.IndexedAttestation{
att1 := testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
}
})
domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
require.NoError(t, err)
signingRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
@@ -141,14 +116,9 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att1.Signature = aggregateSig.Marshal()
att2 := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
},
att2 := testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
}
})
signingRoot, err = helpers.ComputeSigningRoot(att2.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = privKeys[0].Sign(signingRoot[:])


@@ -38,11 +38,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
root1 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '1'}
att1 := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: root1[:]},
BeaconBlockRoot: make([]byte, 32),
},
Data: testutil.HydrateAttestationData(&ethpb.AttestationData{Target: &ethpb.Checkpoint{Epoch: 0, Root: root1[:]}}),
AttestingIndices: setA,
Signature: make([]byte, 96),
}
@@ -60,11 +56,9 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
root2 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '2'}
att2 := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: root2[:]},
BeaconBlockRoot: make([]byte, 32),
},
Data: testutil.HydrateAttestationData(&ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: root2[:]},
}),
AttestingIndices: setB,
Signature: make([]byte, 96),
}


@@ -6,7 +6,6 @@ import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
@@ -223,7 +222,6 @@ func verifyDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit) e
return nil
}
// Deprecated: This method uses deprecated ssz.SigningRoot.
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, domain []byte) error {
return depositutil.VerifyDepositSignature(obj, domain)
}
@@ -248,7 +246,12 @@ func verifyDepositDataWithDomain(ctx context.Context, deps []*ethpb.Deposit, dom
}
pks[i] = dpk
sigs[i] = dep.Data.Signature
root, err := ssz.SigningRoot(dep.Data)
depositMessage := &pb.DepositMessage{
PublicKey: dep.Data.PublicKey,
WithdrawalCredentials: dep.Data.WithdrawalCredentials,
Amount: dep.Data.Amount,
}
root, err := depositMessage.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not get signing root")
}
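
The hunk above stops going through a generic signing-root helper and hashes an explicit DepositMessage instead. The point is the field selection: the deposit signature covers only the public key, withdrawal credentials, and amount, never the Signature field of DepositData. A self-contained sketch of that selection, with sha256 standing in for SSZ hash-tree-root (the real code uses the generated HashTreeRoot and combines the result with the deposit domain):

package sketch

import (
	"crypto/sha256"
	"encoding/binary"
)

// depositMessage is a stand-in for the proto DepositMessage: exactly the
// fields covered by the deposit signature.
type depositMessage struct {
	PublicKey             []byte
	WithdrawalCredentials []byte
	Amount                uint64
}

// messageRoot is a placeholder for SSZ hash-tree-root; only the choice of
// inputs matters for this sketch.
func messageRoot(m depositMessage) [32]byte {
	amount := make([]byte, 8)
	binary.LittleEndian.PutUint64(amount, m.Amount)
	h := sha256.New()
	h.Write(m.PublicKey)
	h.Write(m.WithdrawalCredentials)
	h.Write(amount)
	var root [32]byte
	copy(root[:], h.Sum(nil))
	return root
}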


@@ -188,7 +188,7 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
}
func TestProcessDeposit_AddsNewValidatorDeposit(t *testing.T) {
//Similar to TestProcessDeposits_AddsNewValidatorDeposit except that this test directly calls ProcessDeposit
// Similar to TestProcessDeposits_AddsNewValidatorDeposit except that this test directly calls ProcessDeposit
dep, _, err := testutil.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
eth1Data, err := testutil.DeterministicEth1Data(len(dep))
@@ -277,9 +277,7 @@ func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
testutil.ResetCache()
dep, _, err := testutil.DeterministicDepositsAndKeys(100)
require.NoError(t, err)
defer func() {
testutil.ResetCache()
}()
defer testutil.ResetCache()
dep[0].Data.Signature = make([]byte, 96)
trie, _, err := testutil.DepositTrieFromDeposits(dep)
require.NoError(t, err)


@@ -37,12 +37,9 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
state := testutil.NewBeaconState()
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: 10, // Must be less than block.Slot
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
require.NoError(t, state.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 10, // Must be less than block.Slot
})))
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -74,12 +71,9 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
testutil.ResetCache()
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
require.NoError(t, beaconState.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
require.NoError(t, beaconState.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
})))
require.NoError(t, beaconState.SetSlot(10))
lbhdr, err := beaconState.LatestBlockHeader().HashTreeRoot()
@@ -115,13 +109,9 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
state := testutil.NewBeaconState()
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
require.NoError(t, state.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
})))
lbhsr, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -239,13 +229,9 @@ func TestProcessBlockHeader_OK(t *testing.T) {
state := testutil.NewBeaconState()
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
require.NoError(t, state.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
})))
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -300,13 +286,10 @@ func TestBlockSignatureSet_OK(t *testing.T) {
state := testutil.NewBeaconState()
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
require.NoError(t, state.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
})))
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)


@@ -141,27 +141,21 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
proposerIdx := uint64(1)
header1 := &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Header: testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
ProposerIndex: proposerIdx,
Slot: 0,
ParentRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
StateRoot: bytesutil.PadTo([]byte("A"), 32),
},
}),
}
var err error
header1.Signature, err = helpers.ComputeDomainAndSign(beaconState, 0, header1.Header, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(t, err)
header2 := &ethpb.SignedBeaconBlockHeader{
header2 := testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: proposerIdx,
Slot: 0,
ParentRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
StateRoot: bytesutil.PadTo([]byte("B"), 32),
},
}
})
header2.Signature, err = helpers.ComputeDomainAndSign(beaconState, 0, header2.Header, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(t, err)
@@ -211,24 +205,18 @@ func TestVerifyProposerSlashing(t *testing.T) {
name: "same header, same slot as state",
args: args{
slashing: &ethpb.ProposerSlashing{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header_1: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: currentSlot,
StateRoot: bytesutil.PadTo([]byte{}, 32),
BodyRoot: bytesutil.PadTo([]byte{}, 32),
ParentRoot: bytesutil.PadTo([]byte{}, 32),
},
},
Header_2: &ethpb.SignedBeaconBlockHeader{
}),
Header_2: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: currentSlot,
StateRoot: bytesutil.PadTo([]byte{}, 32),
BodyRoot: bytesutil.PadTo([]byte{}, 32),
ParentRoot: bytesutil.PadTo([]byte{}, 32),
},
},
}),
},
beaconState: beaconState,
},
@@ -238,26 +226,18 @@ func TestVerifyProposerSlashing(t *testing.T) {
name: "same header, different signatures",
args: args{
slashing: &ethpb.ProposerSlashing{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header_1: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 0,
StateRoot: bytesutil.PadTo([]byte{}, 32),
BodyRoot: bytesutil.PadTo([]byte{}, 32),
ParentRoot: bytesutil.PadTo([]byte{}, 32),
},
Signature: sig1,
},
Header_2: &ethpb.SignedBeaconBlockHeader{
}),
Header_2: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 0,
StateRoot: bytesutil.PadTo([]byte{}, 32),
BodyRoot: bytesutil.PadTo([]byte{}, 32),
ParentRoot: bytesutil.PadTo([]byte{}, 32),
},
Signature: sig2,
},
}),
},
beaconState: beaconState,
},


@@ -1,6 +1,7 @@
package epoch_test
import (
"fmt"
"testing"
"github.com/gogo/protobuf/proto"
@@ -40,10 +41,10 @@ func TestUnslashedAttestingIndices_CanSortAndFilter(t *testing.T) {
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
indices, err := epoch.UnslashedAttestingIndices(state, atts)
indices, err := epoch.UnslashedAttestingIndices(beaconState, atts)
require.NoError(t, err)
for i := 0; i < len(indices)-1; i++ {
if indices[i] >= indices[i+1] {
@@ -53,10 +54,10 @@ func TestUnslashedAttestingIndices_CanSortAndFilter(t *testing.T) {
// Verify the slashed validator is filtered.
slashedValidator := indices[0]
validators = state.Validators()
validators = beaconState.Validators()
validators[slashedValidator].Slashed = true
require.NoError(t, state.SetValidators(validators))
indices, err = epoch.UnslashedAttestingIndices(state, atts)
require.NoError(t, beaconState.SetValidators(validators))
indices, err = epoch.UnslashedAttestingIndices(beaconState, atts)
require.NoError(t, err)
for i := 0; i < len(indices); i++ {
assert.NotEqual(t, slashedValidator, indices[i], "Slashed validator %d is not filtered", slashedValidator)
@@ -86,10 +87,10 @@ func TestUnslashedAttestingIndices_DuplicatedAttestations(t *testing.T) {
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
indices, err := epoch.UnslashedAttestingIndices(state, atts)
indices, err := epoch.UnslashedAttestingIndices(beaconState, atts)
require.NoError(t, err)
for i := 0; i < len(indices)-1; i++ {
@@ -132,10 +133,10 @@ func TestAttestingBalance_CorrectBalance(t *testing.T) {
Validators: validators,
Balances: balances,
}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
balance, err := epoch.AttestingBalance(state, atts)
balance, err := epoch.AttestingBalance(beaconState, atts)
require.NoError(t, err)
wanted := 256 * params.BeaconConfig().MaxEffectiveBalance
assert.Equal(t, wanted, balance)
@@ -158,9 +159,9 @@ func TestBaseReward_AccurateRewards(t *testing.T) {
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: tt.b}},
Balances: []uint64{tt.a},
}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
c, err := epoch.BaseReward(state, 0)
c, err := epoch.BaseReward(beaconState, 0)
require.NoError(t, err)
assert.Equal(t, tt.c, c, "epoch.BaseReward(%d)", tt.a)
}
@@ -249,7 +250,7 @@ func TestProcessSlashings_SlashedLess(t *testing.T) {
}
for i, tt := range tests {
t.Run(string(i), func(t *testing.T) {
t.Run(fmt.Sprint(i), func(t *testing.T) {
original := proto.Clone(tt.state)
s, err := state.InitializeFromProto(tt.state)
require.NoError(t, err)
@@ -309,9 +310,9 @@ func TestProcessRegistryUpdates_NoRotation(t *testing.T) {
},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
newState, err := epoch.ProcessRegistryUpdates(state)
newState, err := epoch.ProcessRegistryUpdates(beaconState)
require.NoError(t, err)
for i, validator := range newState.Validators() {
assert.Equal(t, params.BeaconConfig().MaxSeedLookahead, validator.ExitEpoch, "Could not update registry %d", i)
@@ -332,10 +333,10 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
})
}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
currentEpoch := helpers.CurrentEpoch(state)
newState, err := epoch.ProcessRegistryUpdates(state)
currentEpoch := helpers.CurrentEpoch(beaconState)
newState, err := epoch.ProcessRegistryUpdates(beaconState)
require.NoError(t, err)
for i, validator := range newState.Validators() {
assert.Equal(t, currentEpoch+1, validator.ActivationEligibilityEpoch, "Could not update registry %d, unexpected activation eligibility epoch", i)
@@ -361,9 +362,9 @@ func TestProcessRegistryUpdates_ActivationCompletes(t *testing.T) {
},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
newState, err := epoch.ProcessRegistryUpdates(state)
newState, err := epoch.ProcessRegistryUpdates(beaconState)
require.NoError(t, err)
for i, validator := range newState.Validators() {
assert.Equal(t, params.BeaconConfig().MaxSeedLookahead, validator.ExitEpoch, "Could not update registry %d, unexpected exit slot", i)
@@ -385,9 +386,9 @@ func TestProcessRegistryUpdates_ValidatorsEjected(t *testing.T) {
},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
newState, err := epoch.ProcessRegistryUpdates(state)
newState, err := epoch.ProcessRegistryUpdates(beaconState)
require.NoError(t, err)
for i, validator := range newState.Validators() {
assert.Equal(t, params.BeaconConfig().MaxSeedLookahead+1, validator.ExitEpoch, "Could not update registry %d, unexpected exit slot", i)
@@ -410,9 +411,9 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) {
},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
newState, err := epoch.ProcessRegistryUpdates(state)
newState, err := epoch.ProcessRegistryUpdates(beaconState)
require.NoError(t, err)
for i, validator := range newState.Validators() {
assert.Equal(t, exitEpoch, validator.ExitEpoch, "Could not update registry %d, unexpected exit slot", i)
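
The renames in this file (state to beaconState) read as mechanical, but the likely motivation is that the local variable shadowed the imported state package, which the same functions keep calling via state.InitializeFromProto. A tiny, unrelated illustration of that shadowing pitfall using a standard-library package:

package sketch

import "strings"

func shadowed() string {
	upper := strings.ToUpper("ok")                    // refers to the package here
	strings := "now a local variable, not the package" // shadows the import from this point on
	// strings.ToUpper(upper) // would no longer compile: strings is a plain string now
	return strings + upper
}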


@@ -33,24 +33,24 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
}
base.PreviousEpochAttestations = atts
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
vp, bp, err := New(context.Background(), state)
vp, bp, err := New(context.Background(), beaconState)
require.NoError(t, err)
vp, bp, err = ProcessAttestations(context.Background(), state, vp, bp)
vp, bp, err = ProcessAttestations(context.Background(), beaconState, vp, bp)
require.NoError(t, err)
state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
beaconState, err = ProcessRewardsAndPenaltiesPrecompute(beaconState, bp, vp)
require.NoError(t, err)
// Indices that voted on everything except head lost a bit of money
wanted := uint64(31999810265)
assert.Equal(t, wanted, state.Balances()[4], "Unexpected balance")
assert.Equal(t, wanted, beaconState.Balances()[4], "Unexpected balance")
// Indices that did not vote lost more money
wanted = uint64(31999873505)
assert.Equal(t, wanted, state.Balances()[0], "Unexpected balance")
assert.Equal(t, wanted, beaconState.Balances()[0], "Unexpected balance")
}
func TestAttestationDeltaPrecompute(t *testing.T) {
@@ -75,34 +75,34 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
}
}
base.PreviousEpochAttestations = atts
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
slashedAttestedIndices := []uint64{1413}
for _, i := range slashedAttestedIndices {
vs := state.Validators()
vs := beaconState.Validators()
vs[i].Slashed = true
require.Equal(t, nil, state.SetValidators(vs))
require.Equal(t, nil, beaconState.SetValidators(vs))
}
vp, bp, err := New(context.Background(), state)
vp, bp, err := New(context.Background(), beaconState)
require.NoError(t, err)
vp, bp, err = ProcessAttestations(context.Background(), state, vp, bp)
vp, bp, err = ProcessAttestations(context.Background(), beaconState, vp, bp)
require.NoError(t, err)
// Add some variance to the target and head balances.
// See: https://github.com/prysmaticlabs/prysm/issues/5593
bp.PrevEpochTargetAttested = bp.PrevEpochTargetAttested / 2
bp.PrevEpochHeadAttested = bp.PrevEpochHeadAttested * 2 / 3
rewards, penalties, err := AttestationsDelta(state, bp, vp)
rewards, penalties, err := AttestationsDelta(beaconState, bp, vp)
require.NoError(t, err)
attestedBalance, err := epoch.AttestingBalance(state, atts)
attestedBalance, err := epoch.AttestingBalance(beaconState, atts)
require.NoError(t, err)
totalBalance, err := helpers.TotalActiveBalance(state)
totalBalance, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
attestedIndices := []uint64{55, 1339, 1746, 1811, 1569}
for _, i := range attestedIndices {
base, err := epoch.BaseReward(state, i)
base, err := epoch.BaseReward(beaconState, i)
require.NoError(t, err, "Could not get base reward")
// Base rewards for getting source right
@@ -119,7 +119,7 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
}
for _, i := range slashedAttestedIndices {
base, err := epoch.BaseReward(state, i)
base, err := epoch.BaseReward(beaconState, i)
assert.NoError(t, err, "Could not get base reward")
assert.Equal(t, uint64(0), rewards[i], "Unexpected slashed indices reward balance")
assert.Equal(t, 3*base, penalties[i], "Unexpected slashed indices penalty balance")
@@ -127,7 +127,7 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
nonAttestedIndices := []uint64{434, 677, 872, 791}
for _, i := range nonAttestedIndices {
base, err := epoch.BaseReward(state, i)
base, err := epoch.BaseReward(beaconState, i)
assert.NoError(t, err, "Could not get base reward")
wanted := 3 * base
// Since all these validators did not attest, they shouldn't get rewarded.
@@ -159,17 +159,17 @@ func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
}
}
base.PreviousEpochAttestations = atts
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
pVals, pBal, err := New(context.Background(), state)
pVals, pBal, err := New(context.Background(), beaconState)
assert.NoError(t, err)
pVals, pBal, err = ProcessAttestations(context.Background(), state, pVals, pBal)
pVals, pBal, err = ProcessAttestations(context.Background(), beaconState, pVals, pBal)
require.NoError(t, err)
pBal.ActiveCurrentEpoch = 0 // Could cause a divide by zero panic.
_, _, err = AttestationsDelta(state, pBal, pVals)
_, _, err = AttestationsDelta(beaconState, pBal, pVals)
require.NoError(t, err)
}
@@ -197,12 +197,12 @@ func TestAttestationDeltas_ZeroInclusionDelay(t *testing.T) {
}
}
base.PreviousEpochAttestations = atts
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
pVals, pBal, err := New(context.Background(), state)
pVals, pBal, err := New(context.Background(), beaconState)
require.NoError(t, err)
_, _, err = ProcessAttestations(context.Background(), state, pVals, pBal)
_, _, err = ProcessAttestations(context.Background(), beaconState, pVals, pBal)
require.ErrorContains(t, "attestation with inclusion delay of 0", err)
}
@@ -223,27 +223,27 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
}
base.PreviousEpochAttestations = atts
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch*10))
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch*10))
slashedAttestedIndices := []uint64{14, 37, 68, 77, 139}
for _, i := range slashedAttestedIndices {
vs := state.Validators()
vs := beaconState.Validators()
vs[i].Slashed = true
require.NoError(t, state.SetValidators(vs))
require.NoError(t, beaconState.SetValidators(vs))
}
vp, bp, err := New(context.Background(), state)
vp, bp, err := New(context.Background(), beaconState)
require.NoError(t, err)
vp, bp, err = ProcessAttestations(context.Background(), state, vp, bp)
vp, bp, err = ProcessAttestations(context.Background(), beaconState, vp, bp)
require.NoError(t, err)
rewards, penalties, err := AttestationsDelta(state, bp, vp)
rewards, penalties, err := AttestationsDelta(beaconState, bp, vp)
require.NoError(t, err)
finalityDelay := helpers.PrevEpoch(state) - state.FinalizedCheckpointEpoch()
finalityDelay := helpers.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
for _, i := range slashedAttestedIndices {
base, err := epoch.BaseReward(state, i)
base, err := epoch.BaseReward(beaconState, i)
require.NoError(t, err, "Could not get base reward")
penalty := 3 * base
proposerReward := base / params.BeaconConfig().ProposerRewardQuotient
@@ -297,7 +297,7 @@ func TestProposerDeltaPrecompute_HappyCase(t *testing.T) {
e := params.BeaconConfig().SlotsPerEpoch
validatorCount := uint64(10)
base := buildState(e, validatorCount)
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
proposerIndex := uint64(1)
@@ -305,7 +305,7 @@ func TestProposerDeltaPrecompute_HappyCase(t *testing.T) {
v := []*Validator{
{IsPrevEpochAttester: true, CurrentEpochEffectiveBalance: 32, ProposerIndex: proposerIndex},
}
r, err := ProposersDelta(state, b, v)
r, err := ProposersDelta(beaconState, b, v)
require.NoError(t, err)
baseReward := v[0].CurrentEpochEffectiveBalance * params.BeaconConfig().BaseRewardFactor /
@@ -319,7 +319,7 @@ func TestProposerDeltaPrecompute_ValidatorIndexOutOfRange(t *testing.T) {
e := params.BeaconConfig().SlotsPerEpoch
validatorCount := uint64(10)
base := buildState(e, validatorCount)
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
proposerIndex := validatorCount
@@ -327,7 +327,7 @@ func TestProposerDeltaPrecompute_ValidatorIndexOutOfRange(t *testing.T) {
v := []*Validator{
{IsPrevEpochAttester: true, CurrentEpochEffectiveBalance: 32, ProposerIndex: proposerIndex},
}
_, err = ProposersDelta(state, b, v)
_, err = ProposersDelta(beaconState, b, v)
assert.ErrorContains(t, "proposer index out of range", err)
}
@@ -335,7 +335,7 @@ func TestProposerDeltaPrecompute_SlashedCase(t *testing.T) {
e := params.BeaconConfig().SlotsPerEpoch
validatorCount := uint64(10)
base := buildState(e, validatorCount)
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
proposerIndex := uint64(1)
@@ -343,7 +343,7 @@ func TestProposerDeltaPrecompute_SlashedCase(t *testing.T) {
v := []*Validator{
{IsPrevEpochAttester: true, CurrentEpochEffectiveBalance: 32, ProposerIndex: proposerIndex, IsSlashed: true},
}
r, err := ProposersDelta(state, b, v)
r, err := ProposersDelta(beaconState, b, v)
require.NoError(t, err)
assert.Equal(t, uint64(0), r[proposerIndex], "Unexpected proposer reward for slashed")
}
@@ -351,51 +351,51 @@ func TestProposerDeltaPrecompute_SlashedCase(t *testing.T) {
func TestFinalityDelay(t *testing.T) {
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
prevEpoch := uint64(0)
finalizedEpoch := uint64(0)
// Set values for each test case
setVal := func() {
prevEpoch = helpers.PrevEpoch(state)
finalizedEpoch = state.FinalizedCheckpointEpoch()
prevEpoch = helpers.PrevEpoch(beaconState)
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
}
setVal()
d := finalityDelay(prevEpoch, finalizedEpoch)
w := helpers.PrevEpoch(state) - state.FinalizedCheckpointEpoch()
w := helpers.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
require.NoError(t, state.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
setVal()
d = finalityDelay(prevEpoch, finalizedEpoch)
w = helpers.PrevEpoch(state) - state.FinalizedCheckpointEpoch()
w = helpers.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
require.NoError(t, state.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
setVal()
d = finalityDelay(prevEpoch, finalizedEpoch)
w = helpers.PrevEpoch(state) - state.FinalizedCheckpointEpoch()
w = helpers.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
}
func TestIsInInactivityLeak(t *testing.T) {
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
state, err := state.InitializeFromProto(base)
beaconState, err := state.InitializeFromProto(base)
require.NoError(t, err)
prevEpoch := uint64(0)
finalizedEpoch := uint64(0)
// Set values for each test case
setVal := func() {
prevEpoch = helpers.PrevEpoch(state)
finalizedEpoch = state.FinalizedCheckpointEpoch()
prevEpoch = helpers.PrevEpoch(beaconState)
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
}
setVal()
assert.Equal(t, true, isInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
require.NoError(t, state.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
setVal()
assert.Equal(t, true, isInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
require.NoError(t, state.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
setVal()
assert.Equal(t, false, isInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
}
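For reference, the two helpers exercised above reduce to a subtraction and a comparison against the spec constant MIN_EPOCHS_TO_INACTIVITY_PENALTY (4 on mainnet). A minimal sketch of that logic, not the exact Prysm source:

// Sketch of the helpers under test (assumes MinEpochsToInactivityPenalty = 4).
func finalityDelaySketch(prevEpoch, finalizedEpoch uint64) uint64 {
    // Epochs elapsed since the last finalized checkpoint, seen from the previous epoch.
    return prevEpoch - finalizedEpoch
}

func isInInactivityLeakSketch(prevEpoch, finalizedEpoch uint64) bool {
    // The leak starts once finality lags by more than 4 epochs.
    return finalityDelaySketch(prevEpoch, finalizedEpoch) > 4
}

In the test above the state sits in epoch 10, so the previous epoch is 9: finalized epochs 3 and 4 give delays of 6 and 5 (leak), while epoch 5 gives exactly 4 (no leak), matching the assertions.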

View File

@@ -1,6 +1,7 @@
package precompute_test
import (
"fmt"
"testing"
"github.com/gogo/protobuf/proto"
@@ -111,7 +112,7 @@ func TestProcessSlashingsPrecompute_SlashedLess(t *testing.T) {
}
for i, tt := range tests {
t.Run(string(i), func(t *testing.T) {
t.Run(fmt.Sprint(i), func(t *testing.T) {
ab := uint64(0)
for i, b := range tt.state.Balances {
// Skip validator 0 since it's slashed

View File

@@ -7,7 +7,6 @@ import (
"testing"
"github.com/ghodss/yaml"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -44,7 +43,7 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
preBeaconStateFile, err := testutil.BazelFileBytes(path.Join(testFolderPath, "pre.ssz"))
require.NoError(t, err)
preBeaconStateBase := &pb.BeaconState{}
require.NoError(t, ssz.Unmarshal(preBeaconStateFile, preBeaconStateBase), "Failed to unmarshal")
require.NoError(t, preBeaconStateBase.UnmarshalSSZ(preBeaconStateFile), "Failed to unmarshal")
preBeaconState, err := beaconstate.InitializeFromProto(preBeaconStateBase)
require.NoError(t, err)

View File

@@ -25,15 +25,15 @@ func runJustificationAndFinalizationTests(t *testing.T, config string) {
}
}
func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, state *state.BeaconState) (*state.BeaconState, error) {
func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, st *state.BeaconState) (*state.BeaconState, error) {
ctx := context.Background()
vp, bp, err := precompute.New(ctx, state)
vp, bp, err := precompute.New(ctx, st)
require.NoError(t, err)
_, bp, err = precompute.ProcessAttestations(ctx, state, vp, bp)
_, bp, err = precompute.ProcessAttestations(ctx, st, vp, bp)
require.NoError(t, err)
state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
st, err = precompute.ProcessJustificationAndFinalizationPreCompute(st, bp)
require.NoError(t, err, "Could not process justification")
return state, nil
return st, nil
}

View File

@@ -27,15 +27,15 @@ func runRewardsAndPenaltiesTests(t *testing.T, config string) {
}
}
func processRewardsAndPenaltiesPrecomputeWrapper(t *testing.T, state *state.BeaconState) (*state.BeaconState, error) {
func processRewardsAndPenaltiesPrecomputeWrapper(t *testing.T, st *state.BeaconState) (*state.BeaconState, error) {
ctx := context.Background()
vp, bp, err := precompute.New(ctx, state)
vp, bp, err := precompute.New(ctx, st)
require.NoError(t, err)
vp, bp, err = precompute.ProcessAttestations(ctx, state, vp, bp)
vp, bp, err = precompute.ProcessAttestations(ctx, st, vp, bp)
require.NoError(t, err)
state, err = precompute.ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
st, err = precompute.ProcessRewardsAndPenaltiesPrecompute(st, bp, vp)
require.NoError(t, err, "Could not process reward")
return state, nil
return st, nil
}

View File

@@ -1,21 +1,17 @@
package spectest
import (
"os"
"testing"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestMain(m *testing.M) {
run := func() int {
prevConfig := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(prevConfig)
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
prevConfig := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(prevConfig)
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
return m.Run()
}
os.Exit(run())
m.Run()
}
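As of Go 1.15 a TestMain no longer needs to wrap m.Run in os.Exit: if TestMain returns normally, the test binary exits with m.Run's recorded status on its own, which is what the simplified version above relies on. The resulting shape, shown here purely for illustration:

func TestMain(m *testing.M) {
    // Override the config for the whole package and restore it afterwards.
    prevConfig := params.BeaconConfig().Copy()
    defer params.OverrideBeaconConfig(prevConfig)
    c := params.BeaconConfig()
    c.MinGenesisActiveValidatorCount = 16384
    params.OverrideBeaconConfig(c)
    m.Run() // the testing package applies the exit status itself since Go 1.15
}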

View File

@@ -8,5 +8,8 @@ go_library(
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state",
visibility = ["//beacon-chain:__subpackages__"],
deps = ["//shared/event:go_default_library"],
deps = [
"//shared/event:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
],
)

View File

@@ -3,7 +3,11 @@
// and chain start.
package state
import "time"
import (
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
)
const (
// BlockProcessed is sent after a block has been processed and updated the state database.
@@ -25,6 +29,8 @@ type BlockProcessedData struct {
Slot uint64
// BlockRoot of the processed block.
BlockRoot [32]byte
// SignedBlock is the physical processed block.
SignedBlock *ethpb.SignedBeaconBlock
// Verified is true if the block's BLS contents have been verified.
Verified bool
}
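With the new SignedBlock field, feed consumers can read the full block straight off the event payload instead of re-fetching it. A hypothetical handler (the statefeed import alias and the surrounding event plumbing are assumptions, not part of this changeset):

// Hypothetical consumer; assumes this package is imported as statefeed.
func onBlockProcessed(data interface{}) {
    d, ok := data.(*statefeed.BlockProcessedData)
    if !ok || d.SignedBlock == nil {
        return
    }
    // The full block now travels with the event, so no extra DB lookup is needed.
    _ = d.SignedBlock.Block.Slot
    _ = d.Verified
}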

View File

@@ -2,6 +2,7 @@ package helpers
import (
"encoding/binary"
"errors"
"fmt"
"time"
@@ -12,6 +13,28 @@ import (
"github.com/prysmaticlabs/prysm/shared/timeutils"
)
// ValidateNilAttestation checks if any composite field of input attestation is nil.
// Access to these nil fields will result in a runtime panic;
// it is recommended to run these checks as a first line of defense.
func ValidateNilAttestation(attestation *ethpb.Attestation) error {
if attestation == nil {
return errors.New("attestation can't be nil")
}
if attestation.Data == nil {
return errors.New("attestation's data can't be nil")
}
if attestation.Data.Source == nil {
return errors.New("attestation's source can't be nil")
}
if attestation.Data.Target == nil {
return errors.New("attestation's target can't be nil")
}
if attestation.AggregationBits == nil {
return errors.New("attestation's bitfield can't be nil")
}
return nil
}
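A typical call site runs the check once before dereferencing any nested field, for example (sketch):

// Guard against nil composite fields before touching them.
if err := helpers.ValidateNilAttestation(att); err != nil {
    return err
}
// Safe to access att.Data.Source, att.Data.Target and att.AggregationBits here.
targetEpoch := att.Data.Target.Epoch
_ = targetEpoch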
// IsAggregator returns true if the signature is from the input validator. The committee
// count is provided as an argument rather than imported implementation from spec. Having
// committee count as an argument allows cheaper computation at run time.

View File

@@ -225,3 +225,72 @@ func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
assert.Equal(t, false, helpers.VerifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 4}, genesis))
assert.Equal(t, false, helpers.VerifyCheckpointEpoch(&ethpb.Checkpoint{Epoch: 2}, genesis))
}
func TestValidateNilAttestation(t *testing.T) {
tests := []struct {
name string
attestation *ethpb.Attestation
errString string
}{
{
name: "nil attestation",
attestation: nil,
errString: "attestation can't be nil",
},
{
name: "nil attestation data",
attestation: &ethpb.Attestation{},
errString: "attestation's data can't be nil",
},
{
name: "nil attestation source",
attestation: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: nil,
Target: &ethpb.Checkpoint{},
},
},
errString: "attestation's source can't be nil",
},
{
name: "nil attestation target",
attestation: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Target: nil,
Source: &ethpb.Checkpoint{},
},
},
errString: "attestation's target can't be nil",
},
{
name: "nil attestation bitfield",
attestation: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{},
Source: &ethpb.Checkpoint{},
},
},
errString: "attestation's bitfield can't be nil",
},
{
name: "good attestation",
attestation: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{},
Source: &ethpb.Checkpoint{},
},
AggregationBits: []byte{},
},
errString: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.errString != "" {
require.ErrorContains(t, tt.errString, helpers.ValidateNilAttestation(tt.attestation))
} else {
require.NoError(t, helpers.ValidateNilAttestation(tt.attestation))
}
})
}
}

View File

@@ -337,14 +337,6 @@ func UpdateProposerIndicesInCache(state *stateTrie.BeaconState, epoch uint64) er
return nil
}
indices, err := ActiveValidatorIndices(state, epoch)
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices)
if err != nil {
return err
}
// Use state root from (current_epoch - 1 - lookahead)
wantedEpoch := PrevEpoch(state)
if wantedEpoch >= params.BeaconConfig().MinSeedLookahead {
@@ -358,10 +350,27 @@ func UpdateProposerIndicesInCache(state *stateTrie.BeaconState, epoch uint64) er
if err != nil {
return err
}
// Skip Cache if we have an invalid key
// Skip cache update if we have an invalid key
if r == nil || bytes.Equal(r, params.BeaconConfig().ZeroHash[:]) {
return nil
}
// Skip cache update if the key already exists
exists, err := proposerIndicesCache.HasProposerIndices(bytesutil.ToBytes32(r))
if err != nil {
return err
}
if exists {
return nil
}
indices, err := ActiveValidatorIndices(state, epoch)
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices)
if err != nil {
return err
}
return proposerIndicesCache.AddProposerIndices(&cache.ProposerIndices{
BlockRoot: bytesutil.ToBytes32(r),
ProposerIndices: proposerIndices,

View File

@@ -659,8 +659,8 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
assert.DeepEqual(t, wantedProposerIndices, proposerIndices, "Did not precompute proposer indices correctly")
}
func TestUpdateProposerIndicesInCache_CouldNotGetActiveIndices(t *testing.T) {
func TestUpdateProposerIndicesInCache_SlotOutOfBound(t *testing.T) {
err := UpdateProposerIndicesInCache(&beaconstate.BeaconState{}, 2)
want := "nil inner state"
want := "out of bound"
require.ErrorContains(t, want, err)
}
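The reordering of UpdateProposerIndicesInCache shown above follows a common shape: resolve the seed block root and consult the cache first, and only compute active validator indices and proposer indices on a miss. Schematically, with hypothetical helper names rather than Prysm APIs:

// Check the cheap conditions before paying for the expensive precompute (sketch).
key, err := seedRootForEpoch(state, epoch) // cheap lookup, hypothetical helper
if err != nil {
    return err
}
exists, err := cacheHasProposerIndices(key) // hypothetical helper
if err != nil || exists {
    return err // skip the work on a hit or lookup error
}
indices, err := precomputeIndices(state, epoch) // expensive, reached only on a miss
if err != nil {
    return err
}
return cacheAddProposerIndices(key, indices)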

View File

@@ -23,8 +23,8 @@ const DomainByteLength = 4
var ErrSigFailedToVerify = errors.New("signature did not verify")
// ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key.
func ComputeDomainAndSign(state *state.BeaconState, epoch uint64, obj interface{}, domain [4]byte, key bls.SecretKey) ([]byte, error) {
d, err := Domain(state.Fork(), epoch, domain, state.GenesisValidatorRoot())
func ComputeDomainAndSign(st *state.BeaconState, epoch uint64, obj interface{}, domain [4]byte, key bls.SecretKey) ([]byte, error) {
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorRoot())
if err != nil {
return nil, err
}
@@ -73,12 +73,12 @@ func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, er
}
// ComputeDomainVerifySigningRoot computes domain and verifies signing root of an object given the beacon state, validator index and signature.
func ComputeDomainVerifySigningRoot(state *state.BeaconState, index, epoch uint64, obj interface{}, domain [4]byte, sig []byte) error {
v, err := state.ValidatorAtIndex(index)
func ComputeDomainVerifySigningRoot(st *state.BeaconState, index, epoch uint64, obj interface{}, domain [4]byte, sig []byte) error {
v, err := st.ValidatorAtIndex(index)
if err != nil {
return err
}
d, err := Domain(state.Fork(), epoch, domain, state.GenesisValidatorRoot())
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorRoot())
if err != nil {
return err
}

View File

@@ -6,6 +6,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"skip_slot_cache.go",
"state.go",
"transition.go",
@@ -73,6 +74,7 @@ go_test(
"//shared/bytesutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/sszutil:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
@@ -81,7 +83,6 @@ go_test(
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,5 @@
package state
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "state")

View File

@@ -6,10 +6,10 @@ import (
"sync"
"testing"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sszutil"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
@@ -34,7 +34,7 @@ func TestSkipSlotCache_OK(t *testing.T) {
bState, err = state.ExecuteStateTransition(context.Background(), bState, blk)
require.NoError(t, err, "Could not process state transition")
if !ssz.DeepEqual(originalState.CloneInnerState(), bState.CloneInnerState()) {
if !sszutil.DeepEqual(originalState.CloneInnerState(), bState.CloneInnerState()) {
t.Fatal("Skipped slots cache leads to different states")
}
}

View File

@@ -21,7 +21,6 @@ import (
"github.com/prysmaticlabs/prysm/shared/mathutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -312,7 +311,7 @@ func ProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64
defer func() {
if err := SkipSlotCache.MarkNotInProgress(key); err != nil {
traceutil.AnnotateError(span, err)
logrus.WithError(err).Error("Failed to mark skip slot no longer in progress")
log.WithError(err).Error("Failed to mark skip slot no longer in progress")
}
}()
@@ -322,7 +321,7 @@ func ProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64
// Cache last best value.
if highestSlot < state.Slot() {
if err := SkipSlotCache.Put(ctx, key, state); err != nil {
logrus.WithError(err).Error("Failed to put skip slot cache value")
log.WithError(err).Error("Failed to put skip slot cache value")
}
}
return nil, ctx.Err()
@@ -347,7 +346,7 @@ func ProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64
if highestSlot < state.Slot() {
if err := SkipSlotCache.Put(ctx, key, state); err != nil {
logrus.WithError(err).Error("Failed to put skip slot cache value")
log.WithError(err).Error("Failed to put skip slot cache value")
traceutil.AnnotateError(span, err)
}
}

View File

@@ -164,24 +164,16 @@ func TestProcessBlock_IncorrectProposerSlashing(t *testing.T) {
block, err := testutil.GenerateFullBlock(beaconState, privKeys, nil, 1)
require.NoError(t, err)
slashing := &ethpb.ProposerSlashing{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header_1: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
Slot: params.BeaconConfig().SlotsPerEpoch,
},
Signature: make([]byte, 96),
},
Header_2: &ethpb.SignedBeaconBlockHeader{
}),
Header_2: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch * 2,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
Slot: params.BeaconConfig().SlotsPerEpoch * 2,
},
Signature: make([]byte, 96),
},
}),
}
block.Block.Body.ProposerSlashings = []*ethpb.ProposerSlashing{slashing}
@@ -203,15 +195,10 @@ func TestProcessBlock_IncorrectProcessBlockAttestations(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
priv, err := bls.RandKey()
require.NoError(t, err)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
},
att := testutil.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(3),
Signature: priv.Sign([]byte("foo")).Marshal(),
}
})
block, err := testutil.GenerateFullBlock(beaconState, privKeys, nil, 1)
require.NoError(t, err)
@@ -236,43 +223,31 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
proposerSlashings := []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header_1: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 3,
Slot: 1,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: bytesutil.PadTo([]byte("A"), 96),
},
Header_2: &ethpb.SignedBeaconBlockHeader{
}),
Header_2: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 3,
Slot: 1,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: bytesutil.PadTo([]byte("B"), 96),
},
}),
},
}
attesterSlashings := []*ethpb.AttesterSlashing{
{
Attestation_1: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
Data: testutil.HydrateAttestationData(&ethpb.AttestationData{}),
AttestingIndices: []uint64{0, 1},
Signature: make([]byte, 96),
},
Attestation_2: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
Data: testutil.HydrateAttestationData(&ethpb.AttestationData{}),
AttestingIndices: []uint64{0, 1},
Signature: make([]byte, 96),
},
@@ -283,15 +258,12 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
blockRoots = append(blockRoots, []byte{byte(i)})
}
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
blockAtt := &ethpb.Attestation{
blockAtt := testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
BeaconBlockRoot: make([]byte, 32),
Target: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte("hello-world"), 32)},
},
AggregationBits: bitfield.Bitlist{0xC0, 0xC0, 0xC0, 0xC0, 0x01},
Signature: make([]byte, 96),
}
})
attestations := []*ethpb.Attestation{blockAtt}
var exits []*ethpb.SignedVoluntaryExit
for i := uint64(0); i < params.BeaconConfig().MaxVoluntaryExits+1; i++ {
@@ -300,12 +272,11 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
genesisBlock := blocks.NewGenesisBlock([]byte{})
bodyRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
err = beaconState.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
err = beaconState.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: genesisBlock.Block.Slot,
ParentRoot: genesisBlock.Block.ParentRoot,
BodyRoot: bodyRoot[:],
StateRoot: make([]byte, 32),
})
}))
require.NoError(t, err)
parentRoot, err := beaconState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -357,27 +328,23 @@ func createFullBlockWithOperations(t *testing.T) (*beaconstate.BeaconState,
require.NoError(t, err)
currentEpoch := helpers.CurrentEpoch(beaconState)
header1 := &ethpb.SignedBeaconBlockHeader{
header1 := testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: proposerSlashIdx,
Slot: 1,
StateRoot: bytesutil.PadTo([]byte("A"), 32),
ParentRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
}
})
header1.Signature, err = helpers.ComputeDomainAndSign(beaconState, currentEpoch, header1.Header, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerSlashIdx])
require.NoError(t, err)
header2 := &ethpb.SignedBeaconBlockHeader{
header2 := testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: proposerSlashIdx,
Slot: 1,
StateRoot: bytesutil.PadTo([]byte("B"), 32),
ParentRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
}
})
header2.Signature, err = helpers.ComputeDomainAndSign(beaconState, helpers.CurrentEpoch(beaconState), header2.Header, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerSlashIdx])
require.NoError(t, err)
@@ -392,15 +359,12 @@ func createFullBlockWithOperations(t *testing.T) (*beaconstate.BeaconState,
require.NoError(t, beaconState.SetValidators(validators))
mockRoot2 := [32]byte{'A'}
att1 := &ethpb.IndexedAttestation{
att1 := testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot2[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot2[:]},
},
AttestingIndices: []uint64{0, 1},
Signature: make([]byte, 96),
}
})
domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
require.NoError(t, err)
hashTreeRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
@@ -411,15 +375,13 @@ func createFullBlockWithOperations(t *testing.T) (*beaconstate.BeaconState,
att1.Signature = aggregateSig.Marshal()
mockRoot3 := [32]byte{'B'}
att2 := &ethpb.IndexedAttestation{
att2 := testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot3[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot3[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AttestingIndices: []uint64{0, 1},
Signature: make([]byte, 96),
}
})
hashTreeRoot, err = helpers.ComputeSigningRoot(att2.Data, domain)
require.NoError(t, err)
@@ -443,18 +405,13 @@ func createFullBlockWithOperations(t *testing.T) (*beaconstate.BeaconState,
aggBits := bitfield.NewBitlist(1)
aggBits.SetBitAt(0, true)
blockAtt := &ethpb.Attestation{
blockAtt := testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: beaconState.Slot(),
BeaconBlockRoot: make([]byte, 32),
Target: &ethpb.Checkpoint{Epoch: helpers.CurrentEpoch(beaconState), Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: mockRoot[:],
}},
Slot: beaconState.Slot(),
Target: &ethpb.Checkpoint{Epoch: helpers.CurrentEpoch(beaconState)},
Source: &ethpb.Checkpoint{Root: mockRoot[:]}},
AggregationBits: aggBits,
Signature: make([]byte, 96),
}
})
committee, err := helpers.BeaconCommitteeFromState(beaconState, blockAtt.Data.Slot, blockAtt.Data.CommitteeIndex)
assert.NoError(t, err)
@@ -491,25 +448,20 @@ func createFullBlockWithOperations(t *testing.T) (*beaconstate.BeaconState,
require.NoError(t, err)
proposerIndex, err := helpers.BeaconProposerIndex(copied)
require.NoError(t, err)
block := &ethpb.SignedBeaconBlock{
block := testutil.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ParentRoot: parentRoot[:],
Slot: beaconState.Slot() + 1,
ProposerIndex: proposerIndex,
Body: &ethpb.BeaconBlockBody{
Graffiti: make([]byte, 32),
RandaoReveal: randaoReveal,
ProposerSlashings: proposerSlashings,
AttesterSlashings: attesterSlashings,
Attestations: []*ethpb.Attestation{blockAtt},
VoluntaryExits: []*ethpb.SignedVoluntaryExit{exit},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: bytesutil.PadTo([]byte{2}, 32),
BlockHash: bytesutil.PadTo([]byte{3}, 32),
},
},
},
}
})
sig, err := testutil.BlockSignature(beaconState, block.Block, privKeys)
require.NoError(t, err)
@@ -638,22 +590,12 @@ func BenchmarkProcessBlk_65536Validators_FullBlock(b *testing.B) {
// Set up attester slashing object for block
attesterSlashings := []*ethpb.AttesterSlashing{
{
Attestation_1: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
Attestation_1: testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{2, 3},
},
Attestation_2: &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
}),
Attestation_2: testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{2, 3},
},
}),
},
}
@@ -763,16 +705,10 @@ func TestProcessBlk_AttsBasedOnValidatorCount(t *testing.T) {
atts := make([]*ethpb.Attestation, 1)
for i := 0; i < len(atts); i++ {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
Source: &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
BeaconBlockRoot: make([]byte, 32),
},
att := testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 1},
AggregationBits: aggBits,
Signature: make([]byte, 96),
}
})
committee, err := helpers.BeaconCommitteeFromState(s, att.Data.Slot, att.Data.CommitteeIndex)
assert.NoError(t, err)
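The testutil.Hydrate* helpers that replace the hand-rolled literals in these tests fill any zero-valued composite fields (roots, checkpoints, signatures) with correctly sized zero values, so each test only spells out the fields it cares about. Conceptually, the attestation variant looks roughly like this (a simplified sketch, not the actual testutil implementation):

// Simplified idea behind testutil.HydrateAttestation (sketch).
func hydrateAttestationSketch(a *ethpb.Attestation) *ethpb.Attestation {
    if a.Data == nil {
        a.Data = &ethpb.AttestationData{}
    }
    if a.Data.BeaconBlockRoot == nil {
        a.Data.BeaconBlockRoot = make([]byte, 32)
    }
    if a.Data.Source == nil {
        a.Data.Source = &ethpb.Checkpoint{Root: make([]byte, 32)}
    }
    if a.Data.Target == nil {
        a.Data.Target = &ethpb.Checkpoint{Root: make([]byte, 32)}
    }
    if a.Signature == nil {
        a.Signature = make([]byte, 96)
    }
    return a
}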

View File

@@ -77,7 +77,7 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
{ExitEpoch: exitedEpoch + 2},
{ExitEpoch: exitedEpoch + 2},
{ExitEpoch: exitedEpoch + 2},
{ExitEpoch: exitedEpoch + 2}, //over flow here
{ExitEpoch: exitedEpoch + 2}, // overflow here
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
}}
state, err := beaconstate.InitializeFromProto(base)

View File

@@ -11,7 +11,9 @@ go_library(
name = "go_default_library",
srcs = [
"alias.go",
"http_backup_handler.go",
"cmd.go",
"log.go",
"restore.go",
] + select({
":kafka_disabled": [
"db.go",
@@ -30,7 +32,13 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//shared/cmd:go_default_library",
"//shared/fileutil:go_default_library",
"//shared/promptutil:go_default_library",
"//shared/tos:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
] + select({
"//conditions:default": [
"//beacon-chain/db/kafka:go_default_library",

beacon-chain/db/cmd.go Normal file
View File

@@ -0,0 +1,31 @@
package db
import (
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/tos"
"github.com/urfave/cli/v2"
)
// DatabaseCommands for Prysm beacon node.
var DatabaseCommands = &cli.Command{
Name: "db",
Category: "db",
Usage: "defines commands for interacting with eth2 beacon node database",
Subcommands: []*cli.Command{
{
Name: "restore",
Description: `restores a database from a backup file`,
Flags: cmd.WrapFlags([]cli.Flag{
cmd.RestoreSourceFileFlag,
cmd.RestoreTargetDirFlag,
}),
Before: tos.VerifyTosAcceptedOrPrompt,
Action: func(cliCtx *cli.Context) error {
if err := restore(cliCtx); err != nil {
log.Fatalf("Could not restore database: %v", err)
}
return nil
},
},
},
}
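DatabaseCommands is meant to be mounted on the beacon node's root urfave/cli v2 app; a minimal sketch of that wiring (the app setup here is illustrative, not taken from this changeset):

// Illustrative registration of the db command group on a cli/v2 app.
app := &cli.App{
    Name:     "beacon-chain",
    Commands: []*cli.Command{DatabaseCommands},
}
if err := app.Run(os.Args); err != nil {
    log.Fatal(err)
}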

View File

@@ -3,11 +3,12 @@
package db
import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
)
// NewDB initializes a new DB.
func NewDB(dirPath string, stateSummaryCache *cache.StateSummaryCache) (Database, error) {
return kv.NewKVStore(dirPath, stateSummaryCache)
func NewDB(ctx context.Context, dirPath string) (Database, error) {
return kv.NewKVStore(ctx, dirPath)
}
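Callers of NewDB now pass a context and a directory instead of a pre-built state summary cache. A hypothetical call site:

// Hypothetical caller; dataDir stands in for the node's data directory.
beaconDB, err := db.NewDB(ctx, filepath.Join(dataDir, kv.BeaconNodeDbDirName))
if err != nil {
    return err
}
defer func() {
    if err := beaconDB.Close(); err != nil {
        log.WithError(err).Error("Failed to close database")
    }
}()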

View File

@@ -3,13 +3,12 @@
package db
import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kafka"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
)
// NewDB initializes a new DB with kafka wrapper.
func NewDB(dirPath string, stateSummaryCache *cache.StateSummaryCache) (Database, error) {
func NewDB(ctx context.Context, dirPath string) (Database, error) {
db, err := kv.NewKVStore(dirPath, stateSummaryCache)
if err != nil {
return nil, err

View File

@@ -11,6 +11,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//proto/beacon/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/backuputil:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
],

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/proto/beacon/db"
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/backuputil"
)
// ReadOnlyDatabase defines a struct which only has read access to database methods.
@@ -21,6 +22,8 @@ type ReadOnlyDatabase interface {
Block(ctx context.Context, blockRoot [32]byte) (*eth.SignedBeaconBlock, error)
Blocks(ctx context.Context, f *filters.QueryFilter) ([]*eth.SignedBeaconBlock, [][32]byte, error)
BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error)
BlocksBySlot(ctx context.Context, slot uint64) (bool, []*eth.SignedBeaconBlock, error)
BlockRootsBySlot(ctx context.Context, slot uint64) (bool, [][32]byte, error)
HasBlock(ctx context.Context, blockRoot [32]byte) bool
GenesisBlock(ctx context.Context) (*eth.SignedBeaconBlock, error)
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
@@ -100,11 +103,9 @@ type HeadAccessDatabase interface {
// Database interface with full access.
type Database interface {
io.Closer
backuputil.BackupExporter
HeadAccessDatabase
DatabasePath() string
ClearDB() error
// Backup and restore methods
Backup(ctx context.Context, outputDir string) error
}
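The inline Backup method moves behind the embedded backuputil.BackupExporter. The interface itself is not shown in this changeset, but presumably it carries the same signature:

// Assumed shape of backuputil.BackupExporter (the package is not shown here).
type BackupExporter interface {
    Backup(ctx context.Context, outputDir string) error
}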

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"export_wrapper.go",
"log.go",
"passthrough.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/kafka",

View File

@@ -14,14 +14,12 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"gopkg.in/confluentinc/confluent-kafka-go.v1/kafka"
_ "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka/librdkafka" // Required for c++ kafka library.
)
var _ iface.Database = (*Exporter)(nil)
var log = logrus.WithField("prefix", "exporter")
var marshaler = &jsonpb.Marshaler{}
// Exporter wraps a database interface and exports certain objects to kafka topics.

View File

@@ -0,0 +1,5 @@
package kafka
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "exporter")

View File

@@ -46,6 +46,16 @@ func (e Exporter) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32
return e.db.BlockRoots(ctx, f)
}
// BlocksBySlot -- passthrough.
func (e Exporter) BlocksBySlot(ctx context.Context, slot uint64) (bool, []*eth.SignedBeaconBlock, error) {
return e.db.BlocksBySlot(ctx, slot)
}
// BlockRootsBySlot -- passthrough.
func (e Exporter) BlockRootsBySlot(ctx context.Context, slot uint64) (bool, [][32]byte, error) {
return e.db.BlockRootsBySlot(ctx, slot)
}
// HasBlock -- passthrough.
func (e Exporter) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
return e.db.HasBlock(ctx, blockRoot)
@@ -127,8 +137,8 @@ func (e Exporter) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte)
}
// SaveState -- passthrough.
func (e Exporter) SaveState(ctx context.Context, state *state.BeaconState, blockRoot [32]byte) error {
return e.db.SaveState(ctx, state, blockRoot)
func (e Exporter) SaveState(ctx context.Context, st *state.BeaconState, blockRoot [32]byte) error {
return e.db.SaveState(ctx, st, blockRoot)
}
// SaveStateSummary -- passthrough.

View File

@@ -12,6 +12,7 @@ go_library(
"encoding.go",
"finalized_block_roots.go",
"kv.go",
"log.go",
"migration.go",
"migration_archived_index.go",
"migration_block_slot_index.go",
@@ -21,12 +22,12 @@ go_library(
"slashings.go",
"state.go",
"state_summary.go",
"state_summary_cache.go",
"utils.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/kv",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
@@ -75,7 +76,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/db:go_default_library",

View File

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/shared/fileutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
@@ -43,7 +42,7 @@ func (s *Store) Backup(ctx context.Context, outputDir string) error {
return err
}
backupPath := path.Join(backupsDir, fmt.Sprintf("prysm_beacondb_at_slot_%07d.backup", head.Block.Slot))
logrus.WithField("prefix", "db").WithField("backup", backupPath).Info("Writing backup database.")
log.WithField("backup", backupPath).Info("Writing backup database.")
copyDB, err := bolt.Open(
backupPath,
@@ -51,17 +50,17 @@ func (s *Store) Backup(ctx context.Context, outputDir string) error {
&bolt.Options{Timeout: params.BeaconIoConfig().BoltTimeout},
)
if err != nil {
panic(err)
return err
}
defer func() {
if err := copyDB.Close(); err != nil {
logrus.WithError(err).Error("Failed to close destination database")
log.WithError(err).Error("Failed to close backup database")
}
}()
return s.db.View(func(tx *bolt.Tx) error {
return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
logrus.Debugf("Copying bucket %s\n", name)
log.Debugf("Copying bucket %s\n", name)
return copyDB.Update(func(tx2 *bolt.Tx) error {
b2, err := tx2.CreateBucketIfNotExists(name)
if err != nil {

View File

@@ -3,7 +3,8 @@ package kv
import (
"context"
"io/ioutil"
"path"
"os"
"path/filepath"
"testing"
"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -11,7 +12,8 @@ import (
)
func TestStore_Backup(t *testing.T) {
db := setupDB(t)
db, err := NewKVStore(context.Background(), t.TempDir())
require.NoError(t, err, "Failed to instantiate DB")
ctx := context.Background()
head := testutil.NewBeaconBlock()
@@ -26,7 +28,22 @@ func TestStore_Backup(t *testing.T) {
require.NoError(t, db.Backup(ctx, ""))
files, err := ioutil.ReadDir(path.Join(db.databasePath, backupsDirectoryName))
backupsPath := filepath.Join(db.databasePath, backupsDirectoryName)
files, err := ioutil.ReadDir(backupsPath)
require.NoError(t, err)
require.NotEqual(t, 0, len(files), "No backups created")
require.NoError(t, db.Close(), "Failed to close database")
oldFilePath := filepath.Join(backupsPath, files[0].Name())
newFilePath := filepath.Join(backupsPath, DatabaseFileName)
// We rename the file to match the database file name
// our NewKVStore function expects when opening a database.
require.NoError(t, os.Rename(oldFilePath, newFilePath))
backedDB, err := NewKVStore(ctx, backupsPath)
require.NoError(t, err, "Failed to instantiate DB")
t.Cleanup(func() {
require.NoError(t, backedDB.Close(), "Failed to close database")
})
require.Equal(t, true, backedDB.HasState(ctx, root))
}

View File

@@ -134,6 +134,55 @@ func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
return exists
}
// BlocksBySlot retrieves the beacon blocks stored at the input slot.
func (s *Store) BlocksBySlot(ctx context.Context, slot uint64) (bool, []*ethpb.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlocksBySlot")
defer span.End()
blocks := make([]*ethpb.SignedBeaconBlock, 0)
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
keys, err := getBlockRootsBySlot(ctx, tx, slot)
if err != nil {
return err
}
for i := 0; i < len(keys); i++ {
encoded := bkt.Get(keys[i])
block := &ethpb.SignedBeaconBlock{}
if err := decode(ctx, encoded, block); err != nil {
return err
}
blocks = append(blocks, block)
}
return nil
})
return len(blocks) > 0, blocks, err
}
// BlockRootsBySlot retrieves the beacon block roots stored at the input slot.
func (s *Store) BlockRootsBySlot(ctx context.Context, slot uint64) (bool, [][32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlockRootsBySlot")
defer span.End()
blockRoots := make([][32]byte, 0)
err := s.db.View(func(tx *bolt.Tx) error {
keys, err := getBlockRootsBySlot(ctx, tx, slot)
if err != nil {
return err
}
for i := 0; i < len(keys); i++ {
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
}
return nil
})
if err != nil {
return false, nil, errors.Wrap(err, "could not retrieve block roots by slot")
}
return len(blockRoots) > 0, blockRoots, nil
}
// deleteBlock by block root.
func (s *Store) deleteBlock(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteBlock")
@@ -240,10 +289,9 @@ func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
hasStateSummaryInCache := s.stateSummaryCache.Has(blockRoot)
hasStateSummaryInDB := tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) != nil
hasStateSummaryInDB := s.HasStateSummary(ctx, blockRoot)
hasStateInDB := tx.Bucket(stateBucket).Get(blockRoot[:]) != nil
if !(hasStateInDB || hasStateSummaryInDB || hasStateSummaryInCache) {
if !(hasStateInDB || hasStateSummaryInDB) {
return errors.New("no state or state summary found with head block root")
}
@@ -394,6 +442,11 @@ func fetchBlockRootsBySlotRange(
ctx, span := trace.StartSpan(ctx, "BeaconDB.fetchBlockRootsBySlotRange")
defer span.End()
// Return nothing when all slot parameters are missing
if startSlotEncoded == nil && endSlotEncoded == nil && startEpochEncoded == nil && endEpochEncoded == nil {
return [][]byte{}, nil
}
var startSlot, endSlot, step uint64
var ok bool
if startSlot, ok = startSlotEncoded.(uint64); !ok {
@@ -428,10 +481,6 @@ func fetchBlockRootsBySlotRange(
if endSlot < startSlot {
return nil, errInvalidSlotRange
}
// Return nothing with an end slot of 0.
if endSlot == 0 {
return [][]byte{}, nil
}
rootsRange := (endSlot - startSlot) / step
roots := make([][]byte, 0, rootsRange)
c := bkt.Cursor()
@@ -452,6 +501,24 @@ func fetchBlockRootsBySlotRange(
return roots, nil
}
// getBlockRootsBySlot retrieves the block roots stored under the input slot's index entry.
func getBlockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot uint64) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.getBlockRootsBySlot")
defer span.End()
roots := make([][]byte, 0)
bkt := tx.Bucket(blockSlotIndicesBucket)
key := bytesutil.Uint64ToBytesBigEndian(slot)
c := bkt.Cursor()
k, v := c.Seek(key)
if k != nil && bytes.Equal(k, key) {
for i := 0; i < len(v); i += 32 {
roots = append(roots, v[i:i+32])
}
}
return roots, nil
}
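The slot index bucket stores every root for a slot as a single value, 32 bytes per root, which is why the loop above walks the value in 32-byte strides. Splitting such a value is plain fixed-width chunking (sketch with made-up roots):

// Two hypothetical 32-byte roots packed the way the slot index stores them.
rootA := bytesutil.PadTo([]byte("a"), 32)
rootB := bytesutil.PadTo([]byte("b"), 32)
packed := append(append([]byte{}, rootA...), rootB...)
var roots [][]byte
for i := 0; i+32 <= len(packed); i += 32 {
    roots = append(roots, packed[i:i+32])
}
// roots now holds rootA and rootB in insertion order.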
// createBlockIndicesFromBlock takes in a beacon block and returns
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).

View File

@@ -118,7 +118,7 @@ func TestStore_BlocksHandleZeroCase(t *testing.T) {
zeroFilter := filters.NewFilter().SetStartSlot(0).SetEndSlot(0)
retrieved, _, err := db.Blocks(ctx, zeroFilter)
require.NoError(t, err)
assert.Equal(t, 0, len(retrieved), "Unexpected number of blocks received, expected none")
assert.Equal(t, 1, len(retrieved), "Unexpected number of blocks received, expected one")
}
func TestStore_BlocksHandleInvalidEndSlot(t *testing.T) {
@@ -450,3 +450,54 @@ func TestStore_SaveBlocks_HasRootsMatched(t *testing.T) {
assert.Equal(t, roots[i], rt, "mismatch of block roots")
}
}
func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 20
require.NoError(t, db.SaveBlock(ctx, b1))
b2 := testutil.NewBeaconBlock()
b2.Block.Slot = 100
b2.Block.ParentRoot = bytesutil.PadTo([]byte("parent1"), 32)
require.NoError(t, db.SaveBlock(ctx, b2))
b3 := testutil.NewBeaconBlock()
b3.Block.Slot = 100
b3.Block.ParentRoot = bytesutil.PadTo([]byte("parent2"), 32)
require.NoError(t, db.SaveBlock(ctx, b3))
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
r2, err := b2.Block.HashTreeRoot()
require.NoError(t, err)
r3, err := b3.Block.HashTreeRoot()
require.NoError(t, err)
hasBlocks, retrievedBlocks, err := db.BlocksBySlot(ctx, 1)
require.NoError(t, err)
assert.Equal(t, 0, len(retrievedBlocks), "Unexpected number of blocks received, expected none")
assert.Equal(t, false, hasBlocks, "Expected no blocks")
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(b1, retrievedBlocks[0]), "Wanted: %v, received: %v", b1, retrievedBlocks[0])
assert.Equal(t, true, hasBlocks, "Expected to have blocks")
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(b2, retrievedBlocks[0]), "Wanted: %v, received: %v", b2, retrievedBlocks[0])
assert.Equal(t, true, proto.Equal(b3, retrievedBlocks[1]), "Wanted: %v, received: %v", b3, retrievedBlocks[1])
assert.Equal(t, true, hasBlocks, "Expected to have blocks")
hasBlockRoots, retrievedBlockRoots, err := db.BlockRootsBySlot(ctx, 1)
require.NoError(t, err)
assert.DeepEqual(t, [][32]byte{}, retrievedBlockRoots)
assert.Equal(t, false, hasBlockRoots, "Expected no block roots")
hasBlockRoots, retrievedBlockRoots, err = db.BlockRootsBySlot(ctx, 20)
require.NoError(t, err)
assert.DeepEqual(t, [][32]byte{r1}, retrievedBlockRoots)
assert.Equal(t, true, hasBlockRoots, "Expected block roots")
hasBlockRoots, retrievedBlockRoots, err = db.BlockRootsBySlot(ctx, 100)
require.NoError(t, err)
assert.DeepEqual(t, [][32]byte{r2, r3}, retrievedBlockRoots)
assert.Equal(t, true, hasBlockRoots, "Expected block roots")
}

View File

@@ -60,10 +60,9 @@ func (s *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.C
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummaryInDB := tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) != nil
hasStateSummaryInCache := s.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root))
hasStateSummaryInDB := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummaryInDB || hasStateSummaryInCache) {
if !(hasStateInDB || hasStateSummaryInDB) {
return errMissingStateForCheckpoint
}
return bucket.Put(justifiedCheckpointKey, enc)
@@ -81,10 +80,9 @@ func (s *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummaryInDB := tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) != nil
hasStateSummaryInCache := s.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root))
hasStateSummaryInDB := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummaryInDB || hasStateSummaryInCache) {
if !(hasStateInDB || hasStateSummaryInDB) {
return errMissingStateForCheckpoint
}
if err := bucket.Put(finalizedCheckpointKey, enc); err != nil {

View File

@@ -15,7 +15,7 @@ func TestStore_DepositContract(t *testing.T) {
contractAddress := common.Address{1, 2, 3}
retrieved, err := db.DepositContractAddress(ctx)
require.NoError(t, err)
assert.DeepEqual(t, ([]uint8)(nil), retrieved, "Expected nil contract address")
assert.DeepEqual(t, []uint8(nil), retrieved, "Expected nil contract address")
require.NoError(t, db.SaveDepositContractAddress(ctx, contractAddress))
retrieved, err = db.DepositContractAddress(ctx)
require.NoError(t, err)

View File

@@ -3,6 +3,7 @@
package kv
import (
"context"
"os"
"path"
"time"
@@ -11,7 +12,6 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
prombolt "github.com/prysmaticlabs/prombbolt"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/shared/fileutil"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -24,9 +24,13 @@ const (
// VotesCacheSize with 1M validators will be 8MB.
VotesCacheSize = 1 << 23
// NumOfVotes specifies the vote cache size.
NumOfVotes = 1 << 20
databaseFileName = "beaconchain.db"
boltAllocSize = 8 * 1024 * 1024
NumOfVotes = 1 << 20
// BeaconNodeDbDirName is the name of the directory containing the beacon node database.
BeaconNodeDbDirName = "beaconchaindata"
// DatabaseFileName is the name of the beacon node database.
DatabaseFileName = "beaconchain.db"
boltAllocSize = 8 * 1024 * 1024
)
// BlockCacheSize specifies 1000 slots worth of blocks cached, which
@@ -40,13 +44,14 @@ type Store struct {
databasePath string
blockCache *ristretto.Cache
validatorIndexCache *ristretto.Cache
stateSummaryCache *cache.StateSummaryCache
stateSummaryCache *stateSummaryCache
ctx context.Context
}
// NewKVStore initializes a new boltDB key-value store at the directory
// path specified, creates the kv-buckets based on the schema, and stores
// an open connection db object as a property of the Store struct.
func NewKVStore(dirPath string, stateSummaryCache *cache.StateSummaryCache) (*Store, error) {
func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
hasDir, err := fileutil.HasDir(dirPath)
if err != nil {
return nil, err
@@ -56,7 +61,7 @@ func NewKVStore(dirPath string, stateSummaryCache *cache.StateSummaryCache) (*St
return nil, err
}
}
datafile := path.Join(dirPath, databaseFileName)
datafile := path.Join(dirPath, DatabaseFileName)
boltDB, err := bolt.Open(datafile, params.BeaconIoConfig().ReadWritePermissions, &bolt.Options{Timeout: 1 * time.Second, InitialMmapSize: 10e6})
if err != nil {
if errors.Is(err, bolt.ErrTimeout) {
@@ -88,7 +93,8 @@ func NewKVStore(dirPath string, stateSummaryCache *cache.StateSummaryCache) (*St
databasePath: dirPath,
blockCache: blockCache,
validatorIndexCache: validatorCache,
stateSummaryCache: stateSummaryCache,
stateSummaryCache: newStateSummaryCache(),
ctx: ctx,
}
if err := kv.db.Update(func(tx *bolt.Tx) error {
@@ -134,7 +140,7 @@ func (s *Store) ClearDB() error {
return nil
}
prometheus.Unregister(createBoltCollector(s.db))
if err := os.Remove(path.Join(s.databasePath, databaseFileName)); err != nil {
if err := os.Remove(path.Join(s.databasePath, DatabaseFileName)); err != nil {
return errors.Wrap(err, "could not remove database file")
}
return nil
@@ -143,6 +149,12 @@ func (s *Store) ClearDB() error {
// Close closes the underlying BoltDB database.
func (s *Store) Close() error {
prometheus.Unregister(createBoltCollector(s.db))
// Before DB closes, we should dump the cached state summary objects to DB.
if err := s.saveCachedStateSummariesDB(s.ctx); err != nil {
return err
}
return s.db.Close()
}

View File

@@ -1,15 +1,15 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
// setupDB instantiates and returns a Store instance.
func setupDB(t testing.TB) *Store {
db, err := NewKVStore(t.TempDir(), cache.NewStateSummaryCache())
db, err := NewKVStore(context.Background(), t.TempDir())
require.NoError(t, err, "Failed to instantiate DB")
t.Cleanup(func() {
require.NoError(t, db.Close(), "Failed to close database")

View File

@@ -0,0 +1,5 @@
package kv
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "db")

View File

@@ -6,6 +6,7 @@ import (
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
@@ -14,24 +15,16 @@ func TestStore_ProposerSlashing_CRUD(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
prop := &ethpb.ProposerSlashing{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header_1: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 5,
BodyRoot: make([]byte, 32),
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
Header_2: &ethpb.SignedBeaconBlockHeader{
}),
Header_2: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 5,
BodyRoot: make([]byte, 32),
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
}),
}
slashingRoot, err := prop.HashTreeRoot()
require.NoError(t, err)
@@ -51,37 +44,14 @@ func TestStore_AttesterSlashing_CRUD(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
att := &ethpb.AttesterSlashing{
Attestation_1: &ethpb.IndexedAttestation{
Attestation_1: testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 5,
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
Target: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
},
Signature: make([]byte, 96),
},
Attestation_2: &ethpb.IndexedAttestation{
Slot: 5,
}}),
Attestation_2: testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 7,
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
Target: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
},
Signature: make([]byte, 96),
},
}
Slot: 7,
}})}
slashingRoot, err := att.HashTreeRoot()
require.NoError(t, err)
retrieved, err := db.AttesterSlashing(ctx, slashingRoot)

View File

@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
log "github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

View File

@@ -4,6 +4,7 @@ import (
"context"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
@@ -21,25 +22,30 @@ func (s *Store) SaveStateSummaries(ctx context.Context, summaries []*pb.StateSum
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveStateSummaries")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSummaryBucket)
for _, summary := range summaries {
enc, err := encode(ctx, summary)
if err != nil {
return err
}
if err := bucket.Put(summary.Root, enc); err != nil {
return err
}
// When we reach the state summary cache prune count,
// dump the cached state summaries to the DB.
if s.stateSummaryCache.len() >= stateSummaryCachePruneCount {
if err := s.saveCachedStateSummariesDB(ctx); err != nil {
return err
}
return nil
})
}
for _, ss := range summaries {
s.stateSummaryCache.put(bytesutil.ToBytes32(ss.Root), ss)
}
return nil
}
// StateSummary returns the state summary object from the db using input block root.
func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*pb.StateSummary, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.StateSummary")
defer span.End()
if s.stateSummaryCache.has(blockRoot) {
return s.stateSummaryCache.get(blockRoot), nil
}
enc, err := s.stateSummaryBytes(ctx, blockRoot)
if err != nil {
return nil, err
@@ -58,6 +64,11 @@ func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*pb.State
func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
defer span.End()
if s.stateSummaryCache.has(blockRoot) {
return true
}
enc, err := s.stateSummaryBytes(ctx, blockRoot)
if err != nil {
panic(err)
@@ -78,3 +89,29 @@ func (s *Store) stateSummaryBytes(ctx context.Context, blockRoot [32]byte) ([]by
return enc, err
}
// This saves all cached state summary objects to the DB and clears the cache.
func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
summaries := s.stateSummaryCache.getAll()
encs := make([][]byte, len(summaries))
for i, s := range summaries {
enc, err := encode(ctx, s)
if err != nil {
return err
}
encs[i] = enc
}
if err := s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSummaryBucket)
for i, s := range summaries {
if err := bucket.Put(s.Root, encs[i]); err != nil {
return err
}
}
return nil
}); err != nil {
return err
}
s.stateSummaryCache.clear()
return nil
}
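SaveStateSummaries therefore behaves as a write-behind cache: summaries accumulate in memory, are readable immediately through the cache-first paths above, and are flushed in a single bolt transaction once stateSummaryCachePruneCount is reached or when the store is closed. A small usage sketch:

// Summaries are visible immediately even while they still live in the cache.
root := bytesutil.PadTo([]byte("r"), 32)
require.NoError(t, db.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: root}))
require.Equal(t, true, db.HasStateSummary(ctx, bytesutil.ToBytes32(root)))
// Close flushes any cached summaries into bolt before closing the database.
require.NoError(t, db.Close())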

View File

@@ -1,4 +1,4 @@
package cache
package kv
import (
"sync"
@@ -6,47 +6,56 @@ import (
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
// StateSummaryCache caches state summary object.
type StateSummaryCache struct {
const stateSummaryCachePruneCount = 128
// stateSummaryCache caches state summary objects.
type stateSummaryCache struct {
initSyncStateSummaries map[[32]byte]*pb.StateSummary
initSyncStateSummariesLock sync.RWMutex
}
// NewStateSummaryCache creates a new state summary cache.
func NewStateSummaryCache() *StateSummaryCache {
return &StateSummaryCache{
// newStateSummaryCache creates a new state summary cache.
func newStateSummaryCache() *stateSummaryCache {
return &stateSummaryCache{
initSyncStateSummaries: make(map[[32]byte]*pb.StateSummary),
}
}
// Put saves a state summary to the initial sync state summaries cache.
func (s *StateSummaryCache) Put(r [32]byte, b *pb.StateSummary) {
// put saves a state summary to the initial sync state summaries cache.
func (s *stateSummaryCache) put(r [32]byte, b *pb.StateSummary) {
s.initSyncStateSummariesLock.Lock()
defer s.initSyncStateSummariesLock.Unlock()
s.initSyncStateSummaries[r] = b
}
// Has checks if a state summary exists in the initial sync state summaries cache using the root
// has checks if a state summary exists in the initial sync state summaries cache using the root
// of the block.
func (s *StateSummaryCache) Has(r [32]byte) bool {
func (s *stateSummaryCache) has(r [32]byte) bool {
s.initSyncStateSummariesLock.RLock()
defer s.initSyncStateSummariesLock.RUnlock()
_, ok := s.initSyncStateSummaries[r]
return ok
}
// Get retrieves a state summary from the initial sync state summaries cache using the root of
// get retrieves a state summary from the initial sync state summaries cache using the root of
// the block.
func (s *StateSummaryCache) Get(r [32]byte) *pb.StateSummary {
func (s *stateSummaryCache) get(r [32]byte) *pb.StateSummary {
s.initSyncStateSummariesLock.RLock()
defer s.initSyncStateSummariesLock.RUnlock()
b := s.initSyncStateSummaries[r]
return b
}
// len retrieves the state summary count from the state summaries cache.
func (s *stateSummaryCache) len() int {
s.initSyncStateSummariesLock.RLock()
defer s.initSyncStateSummariesLock.RUnlock()
return len(s.initSyncStateSummaries)
}
// GetAll retrieves all the beacon state summaries from the initial sync state summaries cache; the returned
// state summaries are unordered.
func (s *StateSummaryCache) GetAll() []*pb.StateSummary {
func (s *stateSummaryCache) getAll() []*pb.StateSummary {
s.initSyncStateSummariesLock.RLock()
defer s.initSyncStateSummariesLock.RUnlock()
@@ -58,7 +67,7 @@ func (s *StateSummaryCache) GetAll() []*pb.StateSummary {
}
// Clear clears out the initial sync state summaries cache.
func (s *StateSummaryCache) Clear() {
func (s *stateSummaryCache) clear() {
s.initSyncStateSummariesLock.Lock()
defer s.initSyncStateSummariesLock.Unlock()
s.initSyncStateSummaries = make(map[[32]byte]*pb.StateSummary)

View File

@@ -38,3 +38,26 @@ func TestStateSummary_CanSaveRretrieve(t *testing.T) {
require.NoError(t, err)
assert.DeepEqual(t, s2, saved, "State summary does not equal")
}
func TestStateSummary_CacheToDB(t *testing.T) {
db := setupDB(t)
summaries := make([]*pb.StateSummary, stateSummaryCachePruneCount-1)
for i := range summaries {
summaries[i] = &pb.StateSummary{Slot: uint64(i), Root: bytesutil.PadTo(bytesutil.Uint64ToBytesLittleEndian(uint64(i)), 32)}
}
require.NoError(t, db.SaveStateSummaries(context.Background(), summaries))
require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount-1)
require.NoError(t, db.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1000, Root: []byte{'a', 'b'}}))
require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount)
require.NoError(t, db.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1001, Root: []byte{'c', 'd'}}))
require.Equal(t, db.stateSummaryCache.len(), 1)
for i := range summaries {
r := bytesutil.Uint64ToBytesLittleEndian(uint64(i))
require.Equal(t, true, db.HasStateSummary(context.Background(), bytesutil.ToBytes32(r)))
}
}
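The length assertions above (127, then 128, then 1) imply that saving a state summary buffers it in the cache until the cache reaches stateSummaryCachePruneCount, at which point the next save flushes the buffered entries to the database in one batch and resets the cache. A rough sketch of that behavior follows; the receiver and the batch-write helper name are assumptions, not necessarily what the kv store actually uses.
// Rough sketch only: saveCachedStateSummariesToDB is an assumed helper name.
func (s *Store) saveStateSummarySketch(ctx context.Context, summary *pb.StateSummary) error {
	if s.stateSummaryCache.len() >= stateSummaryCachePruneCount {
		// Flush everything buffered so far to the database, then reset the
		// cache before admitting the new entry (hence len drops back to 1).
		if err := s.saveCachedStateSummariesToDB(ctx); err != nil {
			return err
		}
		s.stateSummaryCache.clear()
	}
	s.stateSummaryCache.put(bytesutil.ToBytes32(summary.Root), summary)
	return nil
}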


@@ -299,7 +299,7 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
unfinalizedRoots := [][32]byte{}
var unfinalizedRoots [][32]byte
for i := uint64(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
b := testutil.NewBeaconBlock()
b.Block.Slot = i
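The only change in this hunk swaps an empty slice literal for a var declaration. Both forms behave identically under append; the var form simply starts out as a nil slice, as this self-contained sketch (standard library only) illustrates.
package main

import "fmt"

func main() {
	a := [][32]byte{} // empty but non-nil slice
	var b [][32]byte  // nil slice until the first append
	a = append(a, [32]byte{1})
	b = append(b, [32]byte{1})
	fmt.Println(len(a) == len(b), a[0] == b[0]) // prints: true true
}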

beacon-chain/db/log.go

@@ -0,0 +1,5 @@
package db
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "db")


@@ -0,0 +1,45 @@
package db
import (
"os"
"path"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/fileutil"
"github.com/prysmaticlabs/prysm/shared/promptutil"
"github.com/urfave/cli/v2"
)
const dbExistsYesNoPrompt = "A database file already exists in the target directory. " +
"Are you sure that you want to overwrite it? [y/n]"
func restore(cliCtx *cli.Context) error {
sourceFile := cliCtx.String(cmd.RestoreSourceFileFlag.Name)
targetDir := cliCtx.String(cmd.RestoreTargetDirFlag.Name)
restoreDir := path.Join(targetDir, kv.BeaconNodeDbDirName)
if fileutil.FileExists(path.Join(restoreDir, kv.DatabaseFileName)) {
resp, err := promptutil.ValidatePrompt(
os.Stdin, dbExistsYesNoPrompt, promptutil.ValidateYesOrNo,
)
if err != nil {
return errors.Wrap(err, "could not validate choice")
}
if strings.EqualFold(resp, "n") {
log.Info("Restore aborted")
return nil
}
}
if err := fileutil.MkdirAll(restoreDir); err != nil {
return err
}
if err := fileutil.CopyFile(sourceFile, path.Join(restoreDir, kv.DatabaseFileName)); err != nil {
return err
}
log.Info("Restore completed successfully")
return nil
}
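A subcommand like this is typically registered with urfave/cli roughly as follows; the command name and usage text are assumptions, while the two flags and the restore action come from the code above.
// Hypothetical wiring of the restore action into a urfave/cli command.
var restoreCommand = &cli.Command{
	Name:  "restore",
	Usage: "Restore a beacon chain database from a backup file",
	Flags: []cli.Flag{
		cmd.RestoreSourceFileFlag,
		cmd.RestoreTargetDirFlag,
	},
	Action: restore,
}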


@@ -0,0 +1,70 @@
package db
import (
"context"
"flag"
"io/ioutil"
"os"
"path"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)
func TestRestore(t *testing.T) {
logHook := logTest.NewGlobal()
ctx := context.Background()
backupDb, err := kv.NewKVStore(context.Background(), t.TempDir())
defer func() {
require.NoError(t, backupDb.Close())
}()
require.NoError(t, err)
head := testutil.NewBeaconBlock()
head.Block.Slot = 5000
require.NoError(t, backupDb.SaveBlock(ctx, head))
root, err := head.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
require.NoError(t, backupDb.SaveState(ctx, st, root))
require.NoError(t, backupDb.SaveHeadBlockRoot(ctx, root))
require.NoError(t, err)
require.NoError(t, backupDb.Close())
// We rename the backup file so that we can later verify
// whether the restored db has been renamed correctly.
require.NoError(t, os.Rename(
path.Join(backupDb.DatabasePath(), kv.DatabaseFileName),
path.Join(backupDb.DatabasePath(), "backup.db")))
restoreDir := t.TempDir()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.String(cmd.RestoreSourceFileFlag.Name, "", "")
set.String(cmd.RestoreTargetDirFlag.Name, "", "")
require.NoError(t, set.Set(cmd.RestoreSourceFileFlag.Name, path.Join(backupDb.DatabasePath(), "backup.db")))
require.NoError(t, set.Set(cmd.RestoreTargetDirFlag.Name, restoreDir))
cliCtx := cli.NewContext(&app, set, nil)
assert.NoError(t, restore(cliCtx))
files, err := ioutil.ReadDir(path.Join(restoreDir, kv.BeaconNodeDbDirName))
require.NoError(t, err)
assert.Equal(t, 1, len(files))
assert.Equal(t, kv.DatabaseFileName, files[0].Name())
restoredDb, err := kv.NewKVStore(context.Background(), path.Join(restoreDir, kv.BeaconNodeDbDirName))
defer func() {
require.NoError(t, restoredDb.Close())
}()
require.NoError(t, err)
headBlock, err := restoredDb.HeadBlock(ctx)
require.NoError(t, err)
assert.Equal(t, uint64(5000), headBlock.Block.Slot, "Restored database has incorrect data")
assert.LogsContain(t, logHook, "Restore completed successfully")
}


@@ -7,7 +7,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/testing",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
],


@@ -3,17 +3,16 @@
package testing
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
)
// SetupDB instantiates and returns database backed by key value store.
func SetupDB(t testing.TB) (db.Database, *cache.StateSummaryCache) {
sc := cache.NewStateSummaryCache()
s, err := kv.NewKVStore(t.TempDir(), sc)
func SetupDB(t testing.TB) db.Database {
s, err := kv.NewKVStore(context.Background(), t.TempDir())
if err != nil {
t.Fatal(err)
}
@@ -22,5 +21,5 @@ func SetupDB(t testing.TB) (db.Database, *cache.StateSummaryCache) {
t.Fatalf("failed to close database: %v", err)
}
})
return s, sc
return s
}
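Callers drop the second return value accordingly; a hypothetical test updated to the new single-return signature, with the dbtest import alias assumed, looks like this.
// Hypothetical caller of the updated helper.
func TestExample_UsesDatabase(t *testing.T) {
	beaconDB := dbtest.SetupDB(t) // previously: beaconDB, sc := dbtest.SetupDB(t)
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), [32]byte{1}))
}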


@@ -6,6 +6,7 @@ go_library(
"base.go",
"config.go",
"interop.go",
"log.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/flags",
visibility = ["//beacon-chain:__subpackages__"],

Some files were not shown because too many files have changed in this diff.