Compare commits

...

42 Commits

Author SHA1 Message Date
terence tsao
0bcdf60a6a Fix tests with new service 2022-05-10 09:57:59 -07:00
terence tsao
b5dc7adbef First take of unit test 2022-05-10 09:13:22 -07:00
terence tsao
cfea22a946 Save block/state before insert block to FC 2022-05-10 08:29:32 -07:00
Nishant Das
208ae6bbf0 Fix Doppelganger Check (#10582)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
(cherry picked from commit ad03938964)
2022-04-29 15:04:30 -05:00
Nishant Das
cb494c2215 Cleanup Discarded Connections Correctly (#10574)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
(cherry picked from commit 58ad800553)
2022-04-29 15:04:15 -05:00
terencechain
844d9623b6 Can retrieve cached initial sync block and db block (#10568)
* Save cached initial sync blocks before getting head block

* Add better abstraction to get block

* Move unlock read to a better location

* Feedbacks

* Add head changed logging

* Harder hasBlock requirement

* Update beacon-chain/blockchain/service.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update receive_attestation.go

* Don't process head if the block is unknown

* Use a helper method

* Fix test

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
(cherry picked from commit b7a82d0fd1)
2022-04-29 15:04:01 -05:00
Nishant Das
72562dcf3a Fix Another Off By 1 In Our Finalized Trie (#10524)
* fix everything

* fix more

* fix test

* Update beacon-chain/powchain/service.go

* Update beacon-chain/powchain/service.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-04-15 12:51:56 +00:00
Preston Van Loon
cc23b8311a Static analysis: gocognit (#10527)
* Add gocognit to static analyzers with a very high threshold

* edit readme and sort analyzers
2022-04-15 06:29:07 +00:00
terence tsao
cbe54fe3f9 Handle nil execution block response when logging TTD (#10502) 2022-04-13 19:47:04 -07:00
Nishant Das
1b6adca3ca Handle Finalized Deposit Insertion Better (#10517) 2022-04-13 10:08:19 +02:00
Nishant Das
1651649e5a Update to 1.17.9 (#10518) 2022-04-13 05:25:13 +00:00
Potuz
56187edb98 Use forkchoice first when checking canonical status (#10516) 2022-04-12 21:34:13 -03:00
Preston Van Loon
ecad5bbffc e2e: Provide e2e config yaml to web3signer (#10123)
* e2e: Provide e2e config yaml to web3signer

* fix build for //testing/endtoend:go_default_test

* Update with web3signer with teku fixes

* buildifier

* Add slasher case for web3signer

* Update testing/endtoend/minimal_e2e_test.go

* Update web3signer to 21.10.6

* Revert "Update web3signer to 21.10.6"

This reverts commit bdf3c408f2.

* Remove slasher part of web3signer e2e tests

* Revert "Remove slasher part of web3signer e2e tests"

This reverts commit 24249802ae.

* fix slasher web3signer test

* fixing build

* updating yaml to match testnet_e2e_config.go

* trying a different order to the e2e test and adding a log

* trying different way to kill process

* handling unhandled error

* testing changes to config WIP

* fixing bazel WIP

* fixing build

* ignoring test for now to test

* fixing bazel

* Test only web3signer e2e

* rolling back some commits to test

* fixing bazel

* trying an updated web3signer version

* changing flag to match version

* trying current version of develop for web3signer

* testing not using the --network property

* addressing build error

* testing config change

* reverting to go back to using the network file

* testing adding epochs per sync committee period

* rolling back configs

* removing check to test

* adding log to get sync committee duties and changing bellatrix fork epoch to something large and altair to epoch 1

* fixing bazel

* updating epoch in config file

* fixing more discrepancies between the configurations

* removing unused yaml

* removing goland added duplicates

* reverting using network minimal

* fixing bug

* rolling back some changes

* rolling back some changes

* rolling back changes

* making sure web3signer test doesn't make it to bellatrix fork yet

* reverting changes I did not touch

* undo comment

* Update testing/endtoend/deps.bzl

* Apply suggestions from code review

* rm nl

* fix //testing/endtoend:go_mainnet_test

* Remove unnecessary dep

* fix //testing/endtoend:go_mainnet_test

* addressing review comments

* fixing build and internal conflict

* removing web3signer slasher test as it's unneeded due to the interface nature of key signing; the regular slashing test is enough

* fix: the validator we fetch from the binary can only run before Altair; if you add that and the web3signer, these can never run together, as the web3signer sets it to before Bellatrix

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: James He <james@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-04-12 16:57:46 +00:00
Nishant Das
407182387b fix it all (#10511) 2022-04-12 19:50:29 +08:00
terence tsao
ad0b0b503d Move GetTerminalBlockHash to powchain engine (#10500)
* Move GetTerminalBlockHash to powchain engine

* Update service.go

* Update service.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-12 10:19:07 +00:00
terence tsao
58f4ba758c Metrics tracking EE VALID/SYNCING/INVALID response counter (#10504)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-04-12 09:51:13 +00:00
james-prysm
64f64f06bf Remote Key Manager API(web3signer) (#10302)
* removing flag requirement, can run web3signer without predefined public keys

* placeholders for remote-keymanager-api

* adding proto and accountschangedfeed

* updating generated code

* fix imports

* fixing interface

* adding work in progress apimiddleware code

* started implementing functions for remote keymanager api

* fixing generated code from proto

* fixing protos

* fixing import format

* fixing proto generation again, didn't fix the first time

* fixing imports again

* continuing on implementing functions

* implementing add function

* implementing delete API function

* handling errors for API

* removing unusedcode and fixing format

* fixing bazel

* wip enable --web when running web3signer

* fixing wallet check for web3signer

* fixing apis

* adding list remote keys unit test

* import remote keys test

* delete pubkeys tests

* moving location of tests

* adding unit tests

* adding placeholder functions

* adding more unit tests

* fixing bazel

* fixing build

* fixing already slice issue with unit test

* fixing linting

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/keymanager/types.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/node/node.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/keymanager/types.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* adding comment on proto based on review

* Update validator/keymanager/remote-web3signer/keymanager.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/keymanager/remote-web3signer/keymanager.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* adding generated code based on review

* updating based on feedback

* fixing imports

* fixing formatting

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fixing event call

* fixing dependency

* updating bazel

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing comment from review

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-04-11 16:05:40 -04:00
terence tsao
e70055733f Save state to DB after proposer boost (#10509) 2022-04-11 16:51:49 +00:00
Radosław Kapka
36e4f49af0 Bellatrix evaluators (#10506)
* defensive nil check

* separate ExecutionPayload/Header from codegen

* tell bazel about this new file

* Merge: support terminal difficulty override (#9769)

* Fix finding terminal block hash calculation

* Update mainnet_config.go

* Update beacon_block.pb.go

* Various fixes to pass all spec tests for Merge (#9777)

* Proper upgrade altair to merge state

* Use uint64 for ttd

* Correctly upgrade to merge state + object mapping fixes

* Use proper receive block path for initial syncing

* Disable contract lookback

* Disable deposit contract lookback

* Go fmt

* Merge: switch from go bindings to raw rpc calls (#9803)

* Disable genesis ETH1.0 chain header logging

* Update htrutils.go

* all gossip tests passing

* Remove gas validations

* Update penalty params for Merge

* Fix gossip and tx size limits for the merge part 1

* Remove extraneous p2p condition

* Add and use

* Add and use TBH_ACTIVATION_EPOCH

* Update WORKSPACE

* Update Kintsugi engine API (#9865)

* Kintsugi ssz (#9867)

* All spec tests pass

* Update spec test shas

* Update Kintsugi consensus implementations (#9872)

* Remove secp256k1

* Remove unused merge genesis state gen tool

* Manually override nil transaction field. M2 works

* Fix bad hex conversion

* Change Gossip message size and Chunk Size from 1 MB to 10 MB (#9860)

* change gossip size and chunk size after merge

* change ssz to accommodate both changes

* gofmt config file

* add testcase for merge MsgId

* Update beacon-chain/p2p/message_id.go

Change MB to Mib in comment

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* change function name from altairMsgID to postAltairMsgID

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* Sync with develop

* Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi

* Update state_trie.go

* Clean up conflicts

* Fix build

* Update config to devnet1

* Fix state merge

* Handle merge test case for update balance

* Fix build

* State pkg cleanup

* Fix a bug with loading mainnet state

* Fix transactions root

* Add v2 endpoint for merge blocks (#9802)

* Add V2 blocks endpoint for merge blocks

* Update beacon-chain/rpc/apimiddleware/structs.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* go mod

* fix transactions

* Terence's comments

* add missing file

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Sync

* Go mod tidy

* change EP field names

* latest kintsugi execution api

* fix conflicts

* converting base fee to big endian format (#10018)

* ReverseByteOrder function does not mess the input

* sync with develop

* use merge gossip sizes

* correct gossip sizes this time

* visibility

* clean ups

* Sync with develop, fix payload nil check bug

* Speed up syncing, hide cosmetic errors

* Sync with develop

* Clean up after sync

* Update generate_keys.go

* sync with develop

* Update mainnet_config.go

* Clean ups

* Sync optimistically candidate blocks (#10193)

* Revert "Sync optimistically candidate blocks (#10193)"

This reverts commit f99a0419ef.

* Sync optimistically candidate blocks (#10193)

* allow optimistic sync

* Fix merge transition block validation

* Update proposer.go

* Sync with develop

* delete deprecated client, update testnet flag

* Change optimistic logic (#10194)

* Logs and err handling

* Fix build

* Clean ups

* Add back get payload

* c

* Done

* Rm uncommented

* Optimistic sync: prysm validator rpcs (#10200)

* Logs to reproduce

* Use pointers

* Use pointers

* Use pointers

* Update json_marshal_unmarshal.go

* Fix marshal

* Update json_marshal_unmarshal.go

* Log

* string total diff

* str

* marshal un

* set string

* json

* gaz

* Comment out optimistic status

* remove kiln flag here (#10269)

* Sync with develop

* Sync with develop

* clean ups

* refactor engine calls

* Update process_block.go

* Fix deadlock, uncomment duty opt sync

* Update proposer_execution_payload.go

* Sync with develop

* Rm post state check

* Bypass eth1 data checks

* Update proposer_execution_payload.go

* Return early if ttd is not reached

* Sync with develop

* Update process_block.go

* Update receive_block.go

* Update bzl

* Revert "Update receive_block.go"

This reverts commit 5b4a87c512.

* Fix run time

* add in all the fixes

* fix evaluator bugs

* latest fixes

* sum

* fix to be configurable

* Update go.mod

* Fix AltairCompatible to account for future state version

* Update proposer_execution_payload.go

* fix broken conditional checks

* fix all issues

* Handle pre state Altair with valid payload

* Handle pre state Altair with valid payload

* Log bellatrix fields

* Update log.go

* Revert "fix broken conditional checks"

This reverts commit e118db6c20.

* LH multiclient working

* Friendly fee recipient log

* Remove extra SetOptimisticToValid

* fix race

* fix test

* Fix base fee per gas

* Fix notifypayload headroot

* tx fuzzer

* clean up with develop branch

* save progress

* 200tx/block

* add LH flags

* Sync with develop

* cleanup

* cleanup

* hash

* fix build

* fix test

* fix go check

* fmt

* gosec

* Blocked stream

(cherry picked from commit f362af9862db680b6352692217ad5c08d44a1e86)

# Conflicts:
#	proto/prysm/v1alpha1/validator.pb.go

* remove duplicate param

* test

* revert some test changes

* Initial version of EE tx count

* evaluate all txs in epoch

* remove logs

* uncomment tests

* remove unwanted change

* parameterize ExpectedExecEngineTxsThreshold

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@zahoor.in>
Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-11 13:45:22 +00:00
terence tsao
d98428dec4 Can prune nodes from canonical and payload maps (#10496)
* Can prune nodes from canonical and payload maps

* Update store_test.go

* prune payload hashes, canonical nodes and better testing

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-04-11 11:08:50 +00:00
Nishant Das
00b92e01d3 Fetch Non Finalized Deposits Better (#10505) 2022-04-11 09:59:22 +02:00
Potuz
ca5adbf7e4 Fix optimistic logging (#10503) 2022-04-09 23:09:04 +00:00
Nishant Das
a083b7a0a5 Set Auth Differently In Our Powchain Constructor (#10501) 2022-04-09 11:03:05 +02:00
Raul Jordan
dd5995b665 Proper Connection Management for ETH JSON-RPC Client for Startup and Runtime (#10498)
* begin connection management revamp

* kiln runs

* retry

* reconnect

* add

* rpc connect fix

* remove logging

* logs

* retry

* default value for web3flag

* test pass

* comments

* ensure auth works
2022-04-09 09:28:40 +08:00
Taranpreet26311
6903d52dde Added horusec (#10499)
* Added horusec

* Improving aesthetics

* Restrict branch
2022-04-08 10:52:04 -04:00
Potuz
ac8d27bcf1 Deal with node balance underflow (#10492)
* deal with balance underflow

* fix log format

* preston's review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-04-07 17:38:48 +00:00
Nishant Das
8d6afb3afd Fix Issues From Multiclient E2E (#10486)
* fix it

* fix index addition

* fix sync issues

* make it nicer
2022-04-07 01:52:24 +00:00
james-prysm
d51f716675 web3signer: fixes json altair block (#10490)
* fix

* fixing naming
2022-04-07 08:24:30 +08:00
terence tsao
0411b7eceb Payload ID caching (#10481) 2022-04-06 16:36:52 -07:00
Radosław Kapka
8dfc80187d Fix receipts root field name in API Middleware (#10488)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-06 21:56:11 +00:00
terence tsao
3833f78803 Remove invalid nodes (#10399) 2022-04-06 14:24:00 -07:00
Potuz
83a83279d4 Remove synced tips and use last valid hash (#10439)
* Remove synced tips

use last valid hash in removing invalid nodes.

* add test

* Remove unused code

* More unused parameters

* Fix proposer boost

* terence's review #1

* Fix conflicts

* terence's review 2

* rename argument

* terence's review #3

* rename optimistic -> status

* Minor clean up

* revert loop variable change

* do not mark lvh as valid

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-04-06 14:18:30 +00:00
terence tsao
bdab34fd01 Use correct head for notifyForkchoiceUpdate (#10485)
* Use correct head for notifyForkchoiceUpdate

* Fix existing tests

* Update receive_attestation_test.go
2022-04-06 19:05:53 +08:00
kasey
de0143e036 save origin block root before finalize (#10463)
* save origin block root before finalize

* add test for SaveOrigin

* goimports :(

* signature to LoadGenesis changed in a diff PR

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-04-05 17:13:53 +00:00
Nishant Das
2a7a09b112 Add Lock Analyzer (#10430)
* add lock analyzer

* fix locks

* progress

* fix failures

* fix error log

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-04-05 16:39:48 +00:00
terence tsao
984575ed57 Optimistic check: handle zeros check point root (#10464)
* Handle zero roots

* Update chain_info_test.go

* Update chain_info_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-05 14:37:25 +00:00
Nishant Das
927e338f9e Enable Bellatrix E2E (#10437)
* Fix finding terminal block hash calculation

* Update mainnet_config.go

* Update beacon_block.pb.go

* Various fixes to pass all spec tests for Merge (#9777)

* Proper upgrade altair to merge state

* Use uint64 for ttd

* Correctly upgrade to merge state + object mapping fixes

* Use proper receive block path for initial syncing

* Disable contract lookback

* Disable deposit contract lookback

* Go fmt

* Merge: switch from go bindings to raw rpc calls (#9803)

* Disable genesis ETH1.0 chain header logging

* Update htrutils.go

* all gossip tests passing

* Remove gas validations

* Update penalty params for Merge

* Fix gossip and tx size limits for the merge part 1

* Remove extraneous p2p condition

* Add and use

* Add and use TBH_ACTIVATION_EPOCH

* Update WORKSPACE

* Update Kintsugi engine API (#9865)

* Kintsugi ssz (#9867)

* All spec tests pass

* Update spec test shas

* Update Kintsugi consensus implementations (#9872)

* Remove secp256k1

* Remove unused merge genesis state gen tool

* Manually override nil transaction field. M2 works

* Fix bad hex conversion

* Change Gossip message size and Chunk Size from 1 MB to 10 MB (#9860)

* change gossip size and chunk size after merge

* change ssz to accommodate both changes

* gofmt config file

* add testcase for merge MsgId

* Update beacon-chain/p2p/message_id.go

Change MB to Mib in comment

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* change function name from altairMsgID to postAltairMsgID

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* Sync with develop

* Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi

* Update state_trie.go

* Clean up conflicts

* Fix build

* Update config to devnet1

* Fix state merge

* Handle merge test case for update balance

* Fix build

* State pkg cleanup

* Fix a bug with loading mainnet state

* Fix transactions root

* Add v2 endpoint for merge blocks (#9802)

* Add V2 blocks endpoint for merge blocks

* Update beacon-chain/rpc/apimiddleware/structs.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* go mod

* fix transactions

* Terence's comments

* add missing file

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Sync

* Go mod tidy

* change EP field names

* latest kintsugi execution api

* fix conflicts

* converting base fee to big endian format (#10018)

* ReverseByteOrder function does not mess the input

* sync with develop

* use merge gossip sizes

* correct gossip sizes this time

* visibility

* clean ups

* Sync with develop, fix payload nil check bug

* Speed up syncing, hide cosmetic errors

* Sync with develop

* Clean up after sync

* Update generate_keys.go

* sync with develop

* Update mainnet_config.go

* Clean ups

* Sync optimistically candidate blocks (#10193)

* Revert "Sync optimistically candidate blocks (#10193)"

This reverts commit f99a0419ef.

* Sync optimistically candidate blocks (#10193)

* allow optimistic sync

* Fix merge transition block validation

* Update proposer.go

* Sync with develop

* delete deprecated client, update testnet flag

* Change optimistic logic (#10194)

* Logs and err handling

* Fix build

* Clean ups

* Add back get payload

* c

* Done

* Rm uncommented

* Optimistic sync: prysm validator rpcs (#10200)

* Logs to reproduce

* Use pointers

* Use pointers

* Use pointers

* Update json_marshal_unmarshal.go

* Fix marshal

* Update json_marshal_unmarshal.go

* Log

* string total diff

* str

* marshal un

* set string

* json

* gaz

* Comment out optimistic status

* remove kiln flag here (#10269)

* Sync with develop

* Sync with develop

* clean ups

* refactor engine calls

* Update process_block.go

* Fix deadlock, uncomment duty opt sync

* Update proposer_execution_payload.go

* Sync with develop

* Rm post state check

* Bypass eth1 data checks

* Update proposer_execution_payload.go

* Return early if ttd is not reached

* Sync with develop

* Update process_block.go

* Update receive_block.go

* Update bzl

* Revert "Update receive_block.go"

This reverts commit 5b4a87c512.

* Fix run time

* add in all the fixes

* fix evaluator bugs

* latest fixes

* sum

* fix to be configurable

* Update go.mod

* Fix AltairCompatible to account for future state version

* Update proposer_execution_payload.go

* fix broken conditional checks

* fix all issues

* Handle pre state Altair with valid payload

* Handle pre state Altair with valid payload

* Log bellatrix fields

* Update log.go

* Revert "fix broken conditional checks"

This reverts commit e118db6c20.

* LH multiclient working

* Friendly fee recipient log

* Remove extra SetOptimisticToValid

* fix race

* fix test

* Fix base fee per gas

* Fix notifypayload headroot

* tx fuzzer

* clean up with develop branch

* save progress

* 200tx/block

* add LH flags

* Sync with develop

* cleanup

* cleanup

* hash

* fix build

* fix test

* fix go check

* fmt

* gosec

* add deps

* cleanup

* fix up

* change gas price

* remove flag

* last fix

* use new LH version

* fix up

* fix finalized block panic

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@zahoor.in>
Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Zahoor Mohamed <zahoor@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-05 14:02:46 +00:00
terence tsao
f44c99d92a Add blinded beacon block protobufs and wrappers (#10473)
Co-authored-by: rkapka <rkapka@wp.pl>
2022-04-05 14:57:19 +02:00
terence tsao
7d669f23ab Sync: process pending block with optimistic parent (#10480) 2022-04-04 18:46:05 -07:00
terence tsao
9b64c33bd1 RPC: GetValidatorPerformance for Bellatrix (#10482) 2022-04-04 19:50:57 -04:00
Potuz
defa602e50 Adapt Doppelganger to Altair (#9969)
Co-authored-by: rkapka <rkapka@wp.pl>
2022-04-04 15:55:55 +02:00
Radosław Kapka
67c8776f3c Fix execution payload field names in API Middleware (#10479) 2022-04-04 13:19:31 +02:00
210 changed files with 10887 additions and 4180 deletions

.github/workflows/horusec.yaml (new file)

@@ -0,0 +1,22 @@
name: Horusec Security Scan
on:
  schedule:
    # Runs at 00:00 UTC on Sunday
    - cron: '0 0 * * SUN'
jobs:
  Horusec_Scan:
    name: horusec-Scan
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/develop'
    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with: # Required when commit authors is enabled
          fetch-depth: 0
      - name: Running Security Scan
        run: |
          curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"


@@ -115,18 +115,19 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/comparesame:go_default_library",
"//tools/analyzers/cryptorand:go_default_library",
"//tools/analyzers/errcheck:go_default_library",
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/comparesame:go_default_library",
"//tools/analyzers/shadowpredecl:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/slicedirect:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
"//tools/analyzers/recursivelock:go_default_library",
"//tools/analyzers/shadowpredecl:go_default_library",
"//tools/analyzers/slicedirect:go_default_library",
"//tools/analyzers/uintcast:go_default_library",
] + select({
# nogo checks that fail with coverage enabled.


@@ -176,7 +176,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.17.6",
go_version = "1.17.9",
nogo = "@//:nogo",
)


@@ -48,6 +48,7 @@ go_library(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
@@ -65,6 +66,7 @@ go_library(
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",


@@ -273,13 +273,13 @@ func (s *Service) CurrentFork() *ethpb.Fork {
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
// If the block has been finalized, the block will always be part of the canonical chain.
if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
return true, nil
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
}
// If the block has not been finalized, check fork choice store to see if the block is canonical
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
// If the block has been finalized, the block will always be part of the canonical chain.
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
}
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
@@ -331,7 +331,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
if err == nil {
return optimistic, nil
}
@@ -358,10 +358,14 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, nil
}
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, bytesutil.ToBytes32(validatedCheckpoint.Root))
// checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
if err != nil {
return false, err
}
if lastValidated == nil {
return false, errInvalidNilSummary
}
if ss.Slot > lastValidated.Slot {
return true, nil
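
Editorial note: the IsCanonical hunk above interleaves the removed and added lines without diff markers. As a hedged reconstruction only — assembled from the lines shown in that hunk, not copied verbatim from the PR — the post-#10516 ordering consults fork choice first and falls back to the finalized-block index:

// Sketch reconstructed from the IsCanonical hunk above (names as shown there).
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
	// If fork choice still tracks the block, it is the authority on canonical status.
	if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
		return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
	}
	// Otherwise the block has been pruned from fork choice; a finalized block is
	// always part of the canonical chain.
	return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
}

Checking fork choice before the DB matches the commit title ("Use forkchoice first when checking canonical status") and avoids a DB read for blocks still tracked in memory.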


@@ -9,6 +9,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestHeadSlot_DataRace(t *testing.T) {
@@ -16,10 +17,13 @@ func TestHeadSlot_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
s.HeadSlot()
<-wait
@@ -31,12 +35,16 @@ func TestHeadRoot_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
_, err := s.HeadRoot(context.Background())
_, err = s.HeadRoot(context.Background())
require.NoError(t, err)
<-wait
}
@@ -49,10 +57,14 @@ func TestHeadBlock_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wsb},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
_, err = s.HeadBlock(context.Background())
require.NoError(t, err)
@@ -64,12 +76,16 @@ func TestHeadState_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
_, err := s.HeadState(context.Background())
_, err = s.HeadState(context.Background())
require.NoError(t, err)
<-wait
}


@@ -477,6 +477,9 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
@@ -493,6 +496,17 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
@@ -528,6 +542,9 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
@@ -543,6 +560,17 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {


@@ -40,7 +40,15 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not update head")
}
return s.saveHead(ctx, headRoot)
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
return err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
return s.saveHead(ctx, headRoot, headBlock, headState)
}
// This defines the current chain service's view of head.
@@ -97,7 +105,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
// This saves head info to the local service cache, it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock block.SignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
@@ -109,6 +117,12 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
if headRoot == bytesutil.ToBytes32(r) {
return nil
}
if err := helpers.BeaconBlockIsNil(headBlock); err != nil {
return err
}
if headState == nil || headState.IsNil() {
return errors.New("cannot save nil head state")
}
// If the head state is not available, just return nil.
// There's nothing to cache
@@ -116,31 +130,13 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
return nil
}
// Get the new head block from DB.
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
return err
}
if err := helpers.BeaconBlockIsNil(newHeadBlock); err != nil {
return err
}
// Get the new head state from cached state or DB.
newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil || newHeadState.IsNil() {
return errors.New("cannot save nil head state")
}
// A chain re-org occurred, so we fire an event notifying the rest of the services.
headSlot := s.HeadSlot()
newHeadSlot := newHeadBlock.Block().Slot()
newHeadSlot := headBlock.Block().Slot()
oldHeadRoot := s.headRoot()
oldStateRoot := s.headBlock().Block().StateRoot()
newStateRoot := newHeadBlock.Block().StateRoot()
if bytesutil.ToBytes32(newHeadBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
newStateRoot := headBlock.Block().StateRoot()
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
@@ -172,7 +168,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
}
// Cache the new head info.
s.setHead(headRoot, newHeadBlock, newHeadState)
s.setHead(headRoot, headBlock, headState)
// Save the new head root to DB.
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
@@ -182,7 +178,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()


@@ -29,8 +29,10 @@ func TestSaveHead_Same(t *testing.T) {
r := [32]byte{'A'}
service.head = &head{slot: 0, root: r}
require.NoError(t, service.saveHead(context.Background(), r))
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), r, b, st))
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}
@@ -68,7 +70,7 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -114,7 +116,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -158,7 +160,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
headRoot, err := service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
require.NoError(t, service.saveHead(context.Background(), headRoot))
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
}
func Test_notifyNewHeadEvent(t *testing.T) {


@@ -1,9 +1,15 @@
package blockchain
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
)
var errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
// This saves a beacon block to the initial sync blocks cache.
func (s *Service) saveInitSyncBlock(r [32]byte, b block.SignedBeaconBlock) {
s.initSyncBlocksLock.Lock()
@@ -20,13 +26,33 @@ func (s *Service) hasInitSyncBlock(r [32]byte) bool {
return ok
}
// This retrieves a beacon block from the initial sync blocks cache using the root of
// the block.
func (s *Service) getInitSyncBlock(r [32]byte) block.SignedBeaconBlock {
// Returns true if a block for root `r` exists in the initial sync blocks cache or the DB.
func (s *Service) hasBlockInInitSyncOrDB(ctx context.Context, r [32]byte) bool {
if s.hasInitSyncBlock(r) {
return true
}
return s.cfg.BeaconDB.HasBlock(ctx, r)
}
// Returns block for a given root `r` from either the initial sync blocks cache or the DB.
// Error is returned if the block is not found in either cache or DB.
func (s *Service) getBlock(ctx context.Context, r [32]byte) (block.SignedBeaconBlock, error) {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
b := s.initSyncBlocks[r]
return b
// Check cache first because it's faster.
b, ok := s.initSyncBlocks[r]
s.initSyncBlocksLock.RUnlock()
var err error
if !ok {
b, err = s.cfg.BeaconDB.Block(ctx, r)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve block from db")
}
}
if err := helpers.BeaconBlockIsNil(b); err != nil {
return nil, errBlockNotFoundInCacheOrDB
}
return b, nil
}
// This retrieves all the beacon blocks from the initial sync blocks cache, the returned


@@ -0,0 +1,72 @@
package blockchain

import (
	"context"
	"testing"

	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
)

func TestService_getBlock(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	s := setupBeaconChain(t, beaconDB)
	b1 := util.NewBeaconBlock()
	r1, err := b1.Block.HashTreeRoot()
	require.NoError(t, err)
	b2 := util.NewBeaconBlock()
	b2.Block.Slot = 100
	r2, err := b2.Block.HashTreeRoot()
	require.NoError(t, err)

	// block not found
	_, err = s.getBlock(ctx, [32]byte{})
	require.ErrorIs(t, err, errBlockNotFoundInCacheOrDB)

	// block in cache
	b, err := wrapper.WrappedSignedBeaconBlock(b1)
	require.NoError(t, err)
	s.saveInitSyncBlock(r1, b)
	got, err := s.getBlock(ctx, r1)
	require.NoError(t, err)
	require.DeepEqual(t, b, got)

	// block in db
	b, err = wrapper.WrappedSignedBeaconBlock(b2)
	require.NoError(t, err)
	require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, b))
	got, err = s.getBlock(ctx, r2)
	require.NoError(t, err)
	require.DeepEqual(t, b, got)
}

func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	s := setupBeaconChain(t, beaconDB)
	b1 := util.NewBeaconBlock()
	r1, err := b1.Block.HashTreeRoot()
	require.NoError(t, err)
	b2 := util.NewBeaconBlock()
	b2.Block.Slot = 100
	r2, err := b2.Block.HashTreeRoot()
	require.NoError(t, err)

	// block not found
	require.Equal(t, false, s.hasBlockInInitSyncOrDB(ctx, [32]byte{}))

	// block in cache
	b, err := wrapper.WrappedSignedBeaconBlock(b1)
	require.NoError(t, err)
	s.saveInitSyncBlock(r1, b)
	require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r1))

	// block in db
	b, err = wrapper.WrappedSignedBeaconBlock(b2)
	require.NoError(t, err)
	require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, b))
	require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r2))
}
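
Editorial note: the #10568 commit bullet "Don't process head if the block is unknown" describes how these helpers are meant to be used. The following is a minimal, hypothetical sketch only — the function name processHeadIfKnown is invented for illustration and is not part of the diff shown; it assumes the hasBlockInInitSyncOrDB and getBlock helpers above and the updated saveHead signature from the earlier hunk.

// Hedged sketch: hypothetical caller guarding head processing with the new helpers.
func (s *Service) processHeadIfKnown(ctx context.Context, newHeadRoot [32]byte) error {
	// Skip head processing entirely when the block is in neither the
	// initial sync cache nor the DB.
	if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
		return nil
	}
	headBlock, err := s.getBlock(ctx, newHeadRoot)
	if err != nil {
		return err
	}
	headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
	if err != nil {
		return err
	}
	// saveHead now takes the head block and state explicitly (see the earlier hunk).
	return s.saveHead(ctx, newHeadRoot, headBlock, headState)
}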


@@ -138,6 +138,26 @@ var (
Name: "state_balance_cache_miss",
Help: "Count the number of state balance cache hits.",
})
newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_valid_node_count",
Help: "Count the number of valid nodes after newPayload EE call",
})
newPayloadOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_optimistic_node_count",
Help: "Count the number of optimistic nodes after newPayload EE call",
})
newPayloadInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_invalid_node_count",
Help: "Count the number of invalid nodes after newPayload EE call",
})
forkchoiceUpdatedValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_valid_node_count",
Help: "Count the number of valid nodes after forkchoiceUpdated EE call",
})
forkchoiceUpdatedOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_optimistic_node_count",
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
})
)
// reportSlotMetrics reports slot related metrics.


@@ -5,14 +5,21 @@ import (
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -20,7 +27,7 @@ import (
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.BeaconState, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
@@ -39,9 +46,9 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
finalizedBlock, err := s.getBlock(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block")
return nil, err
}
var finalizedHash []byte
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
@@ -60,24 +67,36 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
FinalizedBlockHash: finalizedHash,
}
// payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, headState, nextSlot)
if err != nil {
return nil, errors.Wrap(err, "could not get payload attribute")
}
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
default:
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
}
}
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
}
if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
}
return payloadID, nil
}
@@ -108,20 +127,35 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
if err != nil {
return false, errors.Wrap(err, "could not get execution payload")
}
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return false, nil
case powchain.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
return false, errors.New("could not validate an INVALID payload from execution engine")
default:
return false, errors.Wrap(err, "could not validate execution payload from execution engine")
}
}
newPayloadValidNodeCount.Inc()
// During the transition event, the transition block should be verified for sanity.
if blocks.IsPreBellatrixVersion(preStateVersion) {
// Handle case where pre-state is Altair but block contains payload.
@@ -168,3 +202,69 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.Beacon
}
return parentIsExecutionBlock, nil
}
// getPayloadAttribute returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
if !ok { // There's no need to build attribute if there is no proposer for slot.
return false, nil, 0, nil
}
// Get previous randao.
st = st.Copy()
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
if err != nil {
return false, nil, 0, err
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return false, nil, 0, nil
}
// Get fee recipient.
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
if feeRecipient.String() == fieldparams.EthBurnAddressHex {
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": fieldparams.EthBurnAddressHex,
}).Error("Fee recipient not set. Using burn address")
}
case err != nil:
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
default:
feeRecipient = recipient
}
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
return false, nil, 0, err
}
attr := &enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
}
return true, attr, proposerID, nil
}
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
for _, root := range blkRoots {
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
return err
}
// Delete block also deletes the state as well.
if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
// TODO(10487): If a caller requests to delete a root that's justified and finalized. We should gracefully shutdown.
// This is an irreparable condition; it would mean a justified or finalized block has become invalid.
return err
}
}
return nil
}


@@ -5,6 +5,9 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
@@ -24,6 +27,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func Test_NotifyForkchoiceUpdate(t *testing.T) {
@@ -44,8 +48,13 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
state: st,
}
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
@@ -157,7 +166,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, service.headRoot(), tt.finalizedRoot)
st, _ := util.DeterministicGenesisState(t, 1)
_, err := service.notifyForkchoiceUpdate(ctx, st, tt.blk, service.headRoot(), tt.finalizedRoot)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -183,13 +193,6 @@ func Test_NotifyNewPayload(t *testing.T) {
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
},
}
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
@@ -197,10 +200,32 @@ func Test_NotifyNewPayload(t *testing.T) {
}
altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
require.NoError(t, err)
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
BlockNumber: 1,
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
},
},
},
}
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
r, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
tests := []struct {
name string
@@ -244,7 +269,7 @@ func Test_NotifyNewPayload(t *testing.T) {
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: powchain.ErrInvalidPayloadStatus,
errString: "could not validate execution payload from execution engine: payload status is INVALID",
errString: "could not validate an INVALID payload from execution engine",
isValidPayload: false,
},
{
@@ -573,6 +598,47 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
require.Equal(t, true, candidate)
}
func Test_GetPayloadAttribute(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
hasPayload, _, vId, err := service.getPayloadAttribute(ctx, nil, 0)
require.NoError(t, err)
require.Equal(t, false, hasPayload)
require.Equal(t, types.ValidatorIndex(0), vId)
// Cache hit, advance state, no fee recipient
suggestedVid := types.ValidatorIndex(1)
slot := types.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
st, _ := util.DeterministicGenesisState(t, 1)
hook := logTest.NewGlobal()
hasPayload, attr, vId, err := service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
hasPayload, attr, vId, err = service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient))
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
@@ -588,7 +654,6 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
wr, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
@@ -671,3 +736,59 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
require.DeepEqual(t, validCheckpoint.Root, cp.Root)
require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
}
func TestService_removeInvalidBlockAndState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Deleting unknown block should not error.
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))
// Happy case
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
blk1, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
r1, err := blk1.Block().HashTreeRoot()
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: 1,
Root: r1[:],
}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r1))
b2 := util.NewBeaconBlock()
b2.Block.Slot = 2
blk2, err := wrapper.WrappedSignedBeaconBlock(b2)
require.NoError(t, err)
r2, err := blk2.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk2))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: 2,
Root: r2[:],
}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r2))
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{r1, r2}))
require.Equal(t, false, service.hasBlock(ctx, r1))
require.Equal(t, false, service.hasBlock(ctx, r2))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r1))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r2))
has, err := service.cfg.StateGen.HasState(ctx, r1)
require.NoError(t, err)
require.Equal(t, false, has)
has, err = service.cfg.StateGen.HasState(ctx, r2)
require.NoError(t, err)
require.Equal(t, false, has)
}


@@ -2,6 +2,7 @@ package blockchain
import (
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
@@ -66,6 +67,14 @@ func WithDepositCache(c *depositcache.DepositCache) Option {
}
}
// WithProposerIdsCache for proposer id cache.
func WithProposerIdsCache(c *cache.ProposerPayloadIDsCache) Option {
return func(s *Service) error {
s.cfg.ProposerSlotIndexCache = c
return nil
}
}
// WithAttestationPool for attestation lifecycle after chain inclusion.
func WithAttestationPool(p attestations.Pool) Option {
return func(s *Service) error {


@@ -76,15 +76,10 @@ func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *eth
// verifyBeaconBlock verifies beacon head block is known and not from the future.
func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
r := bytesutil.ToBytes32(data.BeaconBlockRoot)
b, err := s.cfg.BeaconDB.Block(ctx, r)
b, err := s.getBlock(ctx, r)
if err != nil {
return err
}
// If the block does not exist in db, check again if block exists in initial sync block cache.
// This could happen as the node first syncs to head.
if (b == nil || b.IsNil()) && s.hasInitSyncBlock(r) {
b = s.getInitSyncBlock(r)
}
if err := helpers.BeaconBlockIsNil(b); err != nil {
return err
}
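
The getBlock helper itself is not part of this excerpt. A hedged reconstruction of what it presumably does, based on its call sites here and the errBlockNotFoundInCacheOrDB error asserted in the test below (a sketch, not the literal implementation):
// Hypothetical sketch of the helper this change set switches to.
func (s *Service) getBlock(ctx context.Context, r [32]byte) (block.SignedBeaconBlock, error) {
	// Blocks received during initial sync may not have been flushed to the DB yet.
	if s.hasInitSyncBlock(r) {
		return s.getInitSyncBlock(r), nil
	}
	b, err := s.cfg.BeaconDB.Block(ctx, r)
	if err != nil {
		return nil, err
	}
	if b == nil || b.IsNil() {
		return nil, errBlockNotFoundInCacheOrDB
	}
	return b, nil
}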


@@ -438,7 +438,7 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
require.NoError(t, err)
d := util.HydrateAttestationData(&ethpb.AttestationData{})
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
require.Equal(t, errBlockNotFoundInCacheOrDB, service.verifyBeaconBlock(ctx, d))
}
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {


@@ -126,10 +126,12 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return errNotOptimisticCandidate
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not set optimistic block to valid")
@@ -208,10 +210,18 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err != nil {
log.WithError(err).Warn("Could not update head")
}
if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), s.headRoot(), bytesutil.ToBytes32(finalized.Root)); err != nil {
headBlock, err := s.getBlock(ctx, headRoot)
if err != nil {
return err
}
if err := s.saveHead(ctx, headRoot); err != nil {
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return err
}
if _, err := s.notifyForkchoiceUpdate(ctx, headState, headBlock.Block(), headRoot, bytesutil.ToBytes32(finalized.Root)); err != nil {
return err
}
if err := s.saveHead(ctx, headRoot, headBlock, headState); err != nil {
return errors.Wrap(err, "could not save head")
}
@@ -258,7 +268,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
@@ -424,7 +434,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
}
}
if _, err := s.notifyForkchoiceUpdate(ctx, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
if _, err := s.notifyForkchoiceUpdate(ctx, preState, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
return nil, nil, err
}
}
@@ -601,9 +611,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
}


@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/math"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
@@ -247,7 +248,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
}
fRoot := bytesutil.ToBytes32(cp.Root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil {
return err
}
@@ -316,17 +317,9 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot)
return nil, ctx.Err()
}
signed, err := s.cfg.BeaconDB.Block(ctx, r)
signed, err := s.getBlock(ctx, r)
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor block")
}
if s.hasInitSyncBlock(r) {
signed = s.getInitSyncBlock(r)
}
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
return nil, errors.New("nil block")
return nil, err
}
b := signed.Block()
if b.Slot() == slot || b.Slot() < slot {
@@ -402,10 +395,17 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
return errors.Wrap(err, "could not cast eth1 deposit index")
}
// The deposit index in the state is always the index of the next deposit
// to be included (rather than the last one to be processed). This was most likely
// done because the state cannot represent signed integers.
eth1DepositIndex -= 1
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
return nil
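
As a worked illustration of the off-by-one handled above (example values, mirroring the updated test further down that sets the deposit index to 8 and expects a finalized trie index of 7):
// Illustration only: Eth1DepositIndex points at the next deposit to be included.
nextToInclude := finalizedState.Eth1DepositIndex() // e.g. 8, meaning deposits 0..7 were processed
lastProcessed := int64(nextToInclude) - 1          // 7, the index handed to InsertFinalizedDeposits and PruneProofs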


@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"strconv"
"sync"
"testing"
"time"
@@ -1454,6 +1455,83 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.Equal(t, f.Epoch, service.FinalizedCheckpt().Epoch)
}
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
blk1, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
wsb1, err := wrapper.WrappedSignedBeaconBlock(blk1)
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
wsb2, err := wrapper.WrappedSignedBeaconBlock(blk2)
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
wsb3, err := wrapper.WrappedSignedBeaconBlock(blk3)
require.NoError(t, err)
blk4, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 4)
require.NoError(t, err)
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
wsb4, err := wrapper.WrappedSignedBeaconBlock(blk4)
require.NoError(t, err)
for i := 0; i < 10; i++ {
var wg sync.WaitGroup
wg.Add(4)
go func() {
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r1))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'a'})
}
}
func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
@@ -1516,6 +1594,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
@@ -1529,8 +1608,64 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 9, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(109))
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(6))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
gs2 := gs.Copy()
assert.NoError(t, gs2.SetEth1Data(&ethpb.Eth1Data{DepositCount: 15}))
assert.NoError(t, gs2.SetEth1DepositIndex(13))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))
assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
// Insert 3 deposits beforehand.
depositCache.InsertFinalizedDeposits(ctx, 2)
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(105))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
// Insert New Finalized State with higher deposit count.
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
fDeposits = depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}


@@ -83,7 +83,8 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root has an ancestor that aligns with the finalized checkpoint.
// In this case, we could exit early to save on additional computation.
if s.cfg.ForkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
blockRoot := bytesutil.ToBytes32(root)
if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
return nil
}
@@ -176,20 +177,41 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
if s.headRoot() == newHeadRoot {
return
}
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
return // We don't have the block, don't notify the engine and update head.
}
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
_, err := s.notifyForkchoiceUpdate(s.ctx,
s.headBlock().Block(),
s.headRoot(),
newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get new head block")
return
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get state from db")
return
}
_, err = s.notifyForkchoiceUpdate(s.ctx,
headState,
newHeadBlock.Block(),
newHeadRoot,
bytesutil.ToBytes32(finalized.Root),
)
if err != nil {
log.WithError(err).Error("could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot); err != nil {
if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}
}


@@ -6,6 +6,7 @@ import (
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -133,31 +134,69 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.notifyEngineIfChangedHead(ctx, service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
service.saveInitSyncBlock([32]byte{'a'}, gb)
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.LogsContain(t, hook, finalizedErr)
hook.Reset()
service.head = &head{
root: [32]byte{'a'},
block: nil, /* should not panic if notify head uses correct head */
}
// Block in Cache
b := util.NewBeaconBlock()
b.Block.Slot = 1
b.Block.Slot = 2
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r, err := b.Block.HashTreeRoot()
r1, err := b.Block.HashTreeRoot()
require.NoError(t, err)
finalized := &ethpb.Checkpoint{Root: r[:], Epoch: 0}
service.saveInitSyncBlock(r1, wsb)
finalized := &ethpb.Checkpoint{Root: r1[:], Epoch: 0}
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
slot: 1,
root: r,
root: r1,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.notifyEngineIfChangedHead(ctx, [32]byte{'b'})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
// Block in DB
b = util.NewBeaconBlock()
b.Block.Slot = 3
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r1, err = b.Block.HashTreeRoot()
require.NoError(t, err)
finalized = &ethpb.Checkpoint{Root: r1[:], Epoch: 0}
st, _ = util.DeterministicGenesisState(t, 1)
service.head = &head{
slot: 1,
root: r1,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
}


@@ -74,6 +74,7 @@ type config struct {
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager


@@ -6,7 +6,9 @@ import (
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/sirupsen/logrus"
)
@@ -20,8 +22,11 @@ func TestChainService_SaveHead_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, err)
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}


@@ -13,6 +13,7 @@ go_library(
"common.go",
"doc.go",
"error.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
"proposer_indices_type.go",
@@ -26,6 +27,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
"//tools:__subpackages__",
],
deps = [
@@ -61,6 +63,7 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"payload_id_test.go",
"proposer_indices_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",


@@ -36,7 +36,7 @@ type DepositFetcher interface {
DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
FinalizedDeposits(ctx context.Context) *FinalizedDeposits
NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
}
// FinalizedDeposits stores the trie of deposits that have been included
@@ -137,6 +137,22 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
depositTrie := dc.finalizedDeposits.Deposits
insertIndex := int(dc.finalizedDeposits.MerkleTrieIndex + 1)
// Don't insert into finalized trie if there is no deposit to
// insert.
if len(dc.deposits) == 0 {
return
}
// In the event we have fewer deposits than we are asked to
// finalize, we finalize up to the last deposit index we do have.
if len(dc.deposits) <= int(eth1DepositIndex) {
eth1DepositIndex = int64(len(dc.deposits)) - 1
}
// If we finalize to some lower deposit index, we
// ignore it.
if int(eth1DepositIndex) < insertIndex {
return
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
continue
@@ -246,7 +262,7 @@ func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposit
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
@@ -256,10 +272,9 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
return dc.allDeposits(untilBlk)
}
lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
var deposits []*ethpb.Deposit
for _, d := range dc.deposits {
if (d.Index > lastFinalizedDepositIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
deposits = append(deposits, d.Deposit)
}
}
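
A hedged sketch of the call pattern implied by the new signature: the caller reads the finalized trie index once and threads it through, instead of the cache re-reading its own index (the trie test added further down follows this pattern; depositCache, ctx and untilBlk are assumed from the caller's context):
// Usage sketch under the assumptions stated above.
fd := depositCache.FinalizedDeposits(ctx)
nonFinalized := depositCache.NonFinalizedDeposits(ctx, fd.MerkleTrieIndex, untilBlk)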


@@ -459,7 +459,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
Index: 1,
},
}
newFinalizedDeposit := ethpb.DepositContainer{
newFinalizedDeposit := &ethpb.DepositContainer{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
@@ -471,17 +471,17 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
}
dc.deposits = oldFinalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 1)
// Artificially exclude old deposits so that they can only be retrieved from previously finalized deposits.
dc.deposits = []*ethpb.DepositContainer{&newFinalizedDeposit}
dc.InsertFinalizedDeposits(context.Background(), 2)
dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
assert.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex)
var deps [][]byte
for _, d := range append(oldFinalizedDeposits, &newFinalizedDeposit) {
for _, d := range oldFinalizedDeposits {
hash, err := d.Deposit.Data.HashTreeRoot()
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
@@ -491,6 +491,140 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}
func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)
finalizedDeposits := []*ethpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{0}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 1,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 2,
},
}
dc.deposits = finalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 5)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
dc, err := New()
require.NoError(t, err)
finalizedDeposits := []*ethpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{0}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 1,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 2,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{3}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 3,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{4}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 4,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{5}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 5,
},
}
dc.deposits = finalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 5)
// Reinsert finalized deposits with a lower index.
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
dc, err := New()
require.NoError(t, err)
@@ -554,7 +688,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
})
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), nil)
deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
assert.Equal(t, 2, len(deps))
}
@@ -611,10 +745,89 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
})
dc.InsertFinalizedDeposits(context.Background(), 1)
deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
assert.Equal(t, 1, len(deps))
}
func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
dc, err := New()
require.NoError(t, err)
generateCtr := func(height uint64, index int64) *ethpb.DepositContainer {
return &ethpb.DepositContainer{
Eth1BlockHeight: height,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{uint8(index)}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: index,
}
}
finalizedDeposits := []*ethpb.DepositContainer{
generateCtr(10, 0),
generateCtr(11, 1),
generateCtr(12, 2),
generateCtr(12, 3),
generateCtr(13, 4),
generateCtr(13, 5),
generateCtr(13, 6),
generateCtr(14, 7),
}
dc.deposits = append(finalizedDeposits,
generateCtr(15, 8),
generateCtr(15, 9),
generateCtr(30, 10))
trieItems := make([][]byte, 0, len(dc.deposits))
for _, dep := range dc.allDeposits(big.NewInt(30)) {
depHash, err := dep.Data.HashTreeRoot()
assert.NoError(t, err)
trieItems = append(trieItems, depHash[:])
}
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a nonsensical ordering
dc.InsertFinalizedDeposits(context.Background(), 10)
dc.InsertFinalizedDeposits(context.Background(), 2)
dc.InsertFinalizedDeposits(context.Background(), 3)
dc.InsertFinalizedDeposits(context.Background(), 4)
// Mimic a finalized deposit trie fetch.
fd := dc.FinalizedDeposits(context.Background())
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
insertIndex := fd.MerkleTrieIndex + 1
for _, dep := range deps {
depHash, err := dep.Data.HashTreeRoot()
assert.NoError(t, err)
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
assert.NoError(t, err)
}
insertIndex++
}
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 14)
fd = dc.FinalizedDeposits(context.Background())
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(30))
insertIndex = fd.MerkleTrieIndex + 1
for _, dep := range deps {
depHash, err := dep.Data.HashTreeRoot()
assert.NoError(t, err)
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
assert.NoError(t, err)
}
insertIndex++
}
assert.Equal(t, fd.Deposits.NumOfItems(), depositTrie.NumOfItems())
}
func TestPruneProofs_Ok(t *testing.T) {
dc, err := New()
require.NoError(t, err)

beacon-chain/cache/payload_id.go (new file)

@@ -0,0 +1,73 @@
package cache
import (
"sync"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
type ProposerPayloadIDsCache struct {
slotToProposerAndPayloadIDs map[types.Slot][vpIdsLength]byte
sync.RWMutex
}
// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
return &ProposerPayloadIDsCache{
slotToProposerAndPayloadIDs: make(map[types.Slot][vpIdsLength]byte),
}
}
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot types.Slot) (types.ValidatorIndex, [8]byte, bool) {
f.RLock()
defer f.RUnlock()
ids, ok := f.slotToProposerAndPayloadIDs[slot]
if !ok {
return 0, [8]byte{}, false
}
vId := ids[:vIdLength]
b := ids[vIdLength:]
var pId [pIdLength]byte
copy(pId[:], b)
return types.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot types.Slot, vId types.ValidatorIndex, pId [8]byte) {
f.Lock()
defer f.Unlock()
var vIdBytes [vIdLength]byte
copy(vIdBytes[:], bytesutil.Uint64ToBytesBigEndian(uint64(vId)))
var bytes [vpIdsLength]byte
copy(bytes[:], append(vIdBytes[:], pId[:]...))
_, ok := f.slotToProposerAndPayloadIDs[slot]
// OK to overwrite if the slot is already set but the payload ID is not set.
// This combats the re-org case, where the payload assignment for a slot could change.
if !ok || (ok && pId != [pIdLength]byte{}) {
f.slotToProposerAndPayloadIDs[slot] = bytes
}
}
// PrunePayloadIDs removes payload ID entries older than the input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot types.Slot) {
f.Lock()
defer f.Unlock()
for s := range f.slotToProposerAndPayloadIDs {
if slot > s {
delete(f.slotToProposerAndPayloadIDs, s)
}
}
}

beacon-chain/cache/payload_id_test.go (new file)

@@ -0,0 +1,60 @@
package cache
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestValidatorPayloadIDsCache_GetAndSaveValidatorPayloadIDs(t *testing.T) {
cache := NewProposerPayloadIDsCache()
i, p, ok := cache.GetProposerPayloadIDs(0)
require.Equal(t, false, ok)
require.Equal(t, types.ValidatorIndex(0), i)
require.Equal(t, [pIdLength]byte{}, p)
slot := types.Slot(1234)
vid := types.ValidatorIndex(34234324)
pid := [8]byte{1, 2, 3, 3, 7, 8, 7, 8}
cache.SetProposerAndPayloadIDs(slot, vid, pid)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, pid, p)
slot = types.Slot(9456456)
vid = types.ValidatorIndex(6786745)
cache.SetProposerAndPayloadIDs(slot, vid, [pIdLength]byte{})
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, [pIdLength]byte{}, p)
// reset cache without pid
slot = types.Slot(9456456)
vid = types.ValidatorIndex(11111)
pid = [8]byte{3, 2, 3, 33, 72, 8, 7, 8}
cache.SetProposerAndPayloadIDs(slot, vid, pid)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, pid, p)
// reset cache with existing pid
slot = types.Slot(9456456)
vid = types.ValidatorIndex(11111)
newPid := [8]byte{1, 2, 3, 33, 72, 8, 7, 1}
cache.SetProposerAndPayloadIDs(slot, vid, newPid)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, newPid, p)
// remove cache entry
cache.PrunePayloadIDs(slot + 1)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, false, ok)
require.Equal(t, types.ValidatorIndex(0), i)
require.Equal(t, [pIdLength]byte{}, p)
}


@@ -175,9 +175,7 @@ func CommitteeAssignments(
return nil, nil, err
}
proposerIndexToSlots := make(map[types.ValidatorIndex][]types.Slot, params.BeaconConfig().SlotsPerEpoch)
// Proposal epochs do not have a look ahead, so we skip them over here.
validProposalEpoch := epoch < nextEpoch
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch && validProposalEpoch; slot++ {
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
@@ -192,6 +190,15 @@ func CommitteeAssignments(
proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
}
// If the previous proposer indices computation is outside of the current proposal epoch range,
// we need to reset the state slot back to the start slot so that we can compute the correct committees.
currentProposalEpoch := epoch < nextEpoch
if !currentProposalEpoch {
if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
return nil, nil, err
}
}
activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return nil, nil, err


@@ -232,7 +232,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
require.NoError(t, err)
require.Equal(t, 0, len(proposerIndxs), "wanted empty proposer index set")
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {


@@ -94,6 +94,7 @@ go_test(
"state_test.go",
"utils_test.go",
"validated_checkpoint_test.go",
"wss_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
@@ -101,6 +102,7 @@ go_test(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/features:go_default_library",


@@ -233,13 +233,17 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
defer span.End()
if err := s.DeleteState(ctx, root); err != nil {
return errDeleteFinalized
return err
}
if err := s.deleteStateSummary(root); err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if b := bkt.Get(root[:]); b != nil {
return errDeleteFinalized
return ErrDeleteJustifiedAndFinalized
}
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {


@@ -191,6 +191,16 @@ func TestStore_DeleteBlock(t *testing.T) {
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
ss := make([]*ethpb.StateSummary, len(blks))
for i, blk := range blks {
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
ss[i] = &ethpb.StateSummary{
Slot: blk.Block().Slot(),
Root: r[:],
}
}
require.NoError(t, db.SaveStateSummaries(ctx, ss))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
@@ -216,11 +226,50 @@ func TestStore_DeleteBlock(t *testing.T) {
b, err = db.Block(ctx, root2)
require.NoError(t, err)
require.Equal(t, b, nil)
require.Equal(t, false, db.HasStateSummary(ctx, root2))
require.ErrorIs(t, db.DeleteBlock(ctx, root), errDeleteFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_DeleteJustifiedBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b := util.NewBeaconBlock()
b.Block.Slot = 1
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_DeleteFinalizedBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b := util.NewBeaconBlock()
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_GenesisBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()


@@ -2,8 +2,8 @@ package kv
import "github.com/pkg/errors"
// errDeleteFinalized is raised when we attempt to delete a finalized block/state
var errDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a finalized block/state
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.


@@ -346,19 +346,31 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(checkpointBucket)
enc := bkt.Get(finalizedCheckpointKey)
checkpoint := &ethpb.Checkpoint{}
finalized := &ethpb.Checkpoint{}
if enc == nil {
checkpoint = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, checkpoint); err != nil {
finalized = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, finalized); err != nil {
return err
}
enc = bkt.Get(justifiedCheckpointKey)
justified := &ethpb.Checkpoint{}
if enc == nil {
justified = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, justified); err != nil {
return err
}
blockBkt := tx.Bucket(blocksBucket)
headBlkRoot := blockBkt.Get(headBlockRootKey)
bkt = tx.Bucket(stateBucket)
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
// Safeguard against deleting the genesis, justified, or finalized state.
if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
return ErrDeleteJustifiedAndFinalized
}
// Nothing to delete if state doesn't exist.
enc = bkt.Get(blockRoot[:])
if enc == nil {
return nil
}
slot, err := s.slotByBlockRoot(ctx, tx, blockRoot[:])


@@ -110,3 +110,12 @@ func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
s.stateSummaryCache.clear()
return nil
}
// deleteStateSummary deletes a state summary object from the db using input block root.
func (s *Store) deleteStateSummary(blockRoot [32]byte) error {
s.stateSummaryCache.delete(blockRoot)
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSummaryBucket)
return bucket.Delete(blockRoot[:])
})
}


@@ -37,6 +37,13 @@ func (c *stateSummaryCache) has(r [32]byte) bool {
return ok
}
// delete state summary in cache.
func (c *stateSummaryCache) delete(r [32]byte) {
c.initSyncStateSummariesLock.Lock()
defer c.initSyncStateSummariesLock.Unlock()
delete(c.initSyncStateSummaries, r)
}
// get retrieves a state summary from the initial sync state summaries cache using the root of
// the block.
func (c *stateSummaryCache) get(r [32]byte) *ethpb.StateSummary {


@@ -62,3 +62,17 @@ func TestStateSummary_CacheToDB(t *testing.T) {
require.Equal(t, true, db.HasStateSummary(context.Background(), bytesutil.ToBytes32(r)))
}
}
func TestStateSummary_CanDelete(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
r1 := bytesutil.ToBytes32([]byte{'A'})
s1 := &ethpb.StateSummary{Slot: 1, Root: r1[:]}
require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
require.NoError(t, db.SaveStateSummary(ctx, s1))
require.Equal(t, true, db.HasStateSummary(ctx, r1), "State summary should be saved")
require.NoError(t, db.deleteStateSummary(r1))
require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
}


@@ -412,7 +412,7 @@ func TestStore_DeleteGenesisState(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(ctx, st, genesisBlockRoot))
wantedErr := "cannot delete genesis, finalized, or head state"
wantedErr := "cannot delete finalized block or state"
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, genesisBlockRoot))
}
@@ -440,7 +440,7 @@ func TestStore_DeleteFinalizedState(t *testing.T) {
require.NoError(t, db.SaveState(ctx, finalizedState, finalizedBlockRoot))
finalizedCheckpoint := &ethpb.Checkpoint{Root: finalizedBlockRoot[:]}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, finalizedCheckpoint))
wantedErr := "cannot delete genesis, finalized, or head state"
wantedErr := "cannot delete finalized block or state"
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, finalizedBlockRoot))
}
@@ -465,8 +465,7 @@ func TestStore_DeleteHeadState(t *testing.T) {
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(ctx, st, headBlockRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
wantedErr := "cannot delete genesis, finalized, or head state"
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, headBlockRoot))
require.NoError(t, db.DeleteState(ctx, headBlockRoot)) // Ok to delete head state if it's optimistic.
}
func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {


@@ -77,6 +77,12 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return errors.Wrap(err, "could not save head block root")
}
// save origin block root in a special key, to be used when the canonical
// origin (start of chain, ie alternative to genesis) block or state is needed
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save origin block root")
}
// rebuild the checkpoint from the block
// use it to mark the block as justified and finalized
slotEpoch, err := wblk.Block().Slot().SafeDivSlot(params.BeaconConfig().SlotsPerEpoch)
@@ -94,11 +100,5 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return errors.Wrap(err, "could not mark checkpoint sync block as finalized")
}
// save origin block root in a special key, to be used when the canonical
// origin (start of chain, ie alternative to genesis) block or state is needed
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save origin block root")
}
return nil
}


@@ -0,0 +1,45 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/genesis"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestSaveOrigin(t *testing.T) {
// Embedded Genesis works with Mainnet config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.ConfigName = params.ConfigNames[params.Mainnet]
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
db := setupDB(t)
st, err := genesis.State(params.Mainnet.String())
require.NoError(t, err)
sb, err := st.MarshalSSZ()
require.NoError(t, err)
require.NoError(t, db.LoadGenesis(ctx, sb))
// this is necessary for mainnet, because LoadGenesis is short-circuited by the embedded state,
// so the genesis root key is never written to the db.
require.NoError(t, db.EnsureEmbeddedGenesis(ctx))
cst, err := util.NewBeaconState()
require.NoError(t, err)
csb, err := cst.MarshalSSZ()
require.NoError(t, err)
cb := util.NewBeaconBlock()
scb, err := wrapper.WrappedSignedBeaconBlock(cb)
require.NoError(t, err)
cbb, err := scb.MarshalSSZ()
require.NoError(t, err)
require.NoError(t, db.SaveOrigin(ctx, csb, cbb))
}


@@ -155,7 +155,7 @@ func (_ *Service) FinalizedDeposits(_ context.Context) *depositcache.FinalizedDe
}
// NonFinalizedDeposits mocks out the deposit cache functionality for interop.
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ int64, _ *big.Int) []*ethpb.Deposit {
return []*ethpb.Deposit{}
}


@@ -22,11 +22,13 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)


@@ -3,8 +3,8 @@ package doublylinkedtree
import "errors"
var ErrNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
var errUnknownPayloadHash = errors.New("unknown payload hash")


@@ -2,12 +2,15 @@ package doublylinkedtree
import (
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -18,6 +21,7 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
finalizedEpoch: finalizedEpoch,
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
pruneThreshold: defaultPruneThreshold,
}
@@ -168,7 +172,7 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
}
// IsOptimistic returns true if the given root has been optimistically synced.
func (f *ForkChoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error) {
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
@@ -249,9 +253,21 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
return ErrNilNode
}
if currentNode.balance < oldBalance {
return errInvalidBalance
f.store.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
"oldBalance": oldBalance,
"nodeBalance": currentNode.balance,
"nodeWeight": currentNode.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
"previousProposerBoostScore": f.store.previousProposerBoostScore,
}).Warning("node with invalid balance, setting it to zero")
f.store.proposerBoostLock.RUnlock()
currentNode.balance = 0
} else {
currentNode.balance -= oldBalance
}
currentNode.balance -= oldBalance
}
}
@@ -302,6 +318,6 @@ func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
}
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.removeNode(ctx, root)
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.setOptimisticToInvalid(ctx, root, payloadHash)
}


@@ -58,6 +58,30 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{125, 125, 125}
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0},
{indexToHash(2), indexToHash(2), 0},
{indexToHash(3), indexToHash(3), 0},
}
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_IsCanonical(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()

View File

@@ -3,9 +3,12 @@ package doublylinkedtree
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
var (
log = logrus.WithField("prefix", "forkchoice-doublylinkedtree")
headSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_head_slot",
@@ -48,10 +51,4 @@ var (
Help: "The number of times pruning happened.",
},
)
validatedCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_validated_count",
Help: "The number of blocks that have been fully validated.",
},
)
)

View File

@@ -109,7 +109,7 @@ func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) boo
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
}
// setNodeAndParentValidated sets the current node and the parent as validated (i.e. non-optimistic).
// setNodeAndParentValidated sets the current node and all of its ancestors as validated (i.e. non-optimistic).
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
if ctx.Err() != nil {
return ctx.Err()
@@ -120,7 +120,6 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
}
n.optimistic = false
validatedCount.Inc()
return n.parent.setNodeAndParentValidated(ctx)
}
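The recursion above now climbs past the immediate parent and marks every optimistic ancestor as validated. A simplified, self-contained sketch of that walk; the node type and the stopping condition at the first non-optimistic ancestor are assumptions, since the base case sits outside this hunk:

```go
package main

import (
	"context"
	"fmt"
)

type node struct {
	root       string
	parent     *node
	optimistic bool
}

// setValidatedUpToRoot flips the node and all of its ancestors to validated,
// stopping early if the context is cancelled or a non-optimistic ancestor is hit.
func setValidatedUpToRoot(ctx context.Context, n *node) error {
	for ; n != nil && n.optimistic; n = n.parent {
		if err := ctx.Err(); err != nil {
			return err
		}
		n.optimistic = false
	}
	return nil
}

func main() {
	a := &node{root: "a", optimistic: true}
	b := &node{root: "b", parent: a, optimistic: true}
	c := &node{root: "c", parent: b, optimistic: true}
	if err := setValidatedUpToRoot(context.Background(), c); err != nil {
		panic(err)
	}
	fmt.Println(a.optimistic, b.optimistic, c.optimistic) // false false false
}
```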

View File

@@ -180,27 +180,27 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
opt, err := f.IsOptimistic(ctx, indexToHash(5))
opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
opt, err = f.IsOptimistic(ctx, indexToHash(4))
opt, err = f.IsOptimistic(indexToHash(4))
require.NoError(t, err)
require.Equal(t, true, opt)
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
// block 5 should still be optimistic
opt, err = f.IsOptimistic(ctx, indexToHash(5))
opt, err = f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
// block 4 and 3 should now be valid
opt, err = f.IsOptimistic(ctx, indexToHash(4))
opt, err = f.IsOptimistic(indexToHash(4))
require.NoError(t, err)
require.Equal(t, false, opt)
opt, err = f.IsOptimistic(ctx, indexToHash(3))
opt, err = f.IsOptimistic(indexToHash(3))
require.NoError(t, err)
require.Equal(t, false, opt)
}

View File

@@ -2,22 +2,54 @@ package doublylinkedtree
import (
"context"
"github.com/prysmaticlabs/prysm/config/params"
)
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, payloadHash [32]byte) ([][32]byte, error) {
s.nodesLock.Lock()
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
s.nodesLock.Unlock()
return invalidRoots, ErrNilNode
}
// Check if last valid hash is an ancestor of the passed node.
lastValid, ok := s.nodeByPayload[payloadHash]
if !ok || lastValid == nil {
s.nodesLock.Unlock()
return invalidRoots, errUnknownPayloadHash
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
s.nodesLock.Unlock()
return invalidRoots, ctx.Err()
}
}
// If the last valid payload is in a different fork, we remove only the
// passed node.
if firstInvalid.parent == nil {
firstInvalid = node
}
s.nodesLock.Unlock()
return s.removeNode(ctx, firstInvalid)
}
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, root [32]byte) ([][32]byte, error) {
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
if node == nil {
return invalidRoots, ErrNilNode
}
if !node.optimistic || node.parent == nil {
return invalidRoots, errInvalidOptimisticStatus
}
children := node.parent.children
if len(children) == 1 {
node.parent.children = []*Node{}
@@ -47,6 +79,16 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
}
}
invalidRoots = append(invalidRoots, node.root)
s.proposerBoostLock.Lock()
if node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
s.proposerBoostLock.Unlock()
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return invalidRoots, nil
}
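Taken together, the new setOptimisticToInvalid walks from the reported INVALID block toward the last valid payload hash, removes the first invalid ancestor together with its whole subtree, and clears the proposer boost if it pointed at a removed block (the 10445 regression). A condensed, hypothetical sketch of that flow with toy types — string roots instead of [32]byte, no locking and no optimistic-status checks:

```go
package main

import (
	"errors"
	"fmt"
)

type node struct {
	root        string
	payloadHash string
	parent      *node
	children    []*node
}

type store struct {
	nodeByRoot    map[string]*node
	nodeByPayload map[string]*node
	boostRoot     string
}

var errUnknownPayload = errors.New("unknown payload hash")

// setOptimisticToInvalid finds the first invalid ancestor of root whose parent
// carries lastValidPayload, then drops that node and its subtree, clearing the
// proposer boost if it pointed at a removed block.
func (s *store) setOptimisticToInvalid(root, lastValidPayload string) ([]string, error) {
	n, ok := s.nodeByRoot[root]
	if !ok {
		return nil, errors.New("unknown root")
	}
	if _, ok := s.nodeByPayload[lastValidPayload]; !ok {
		return nil, errUnknownPayload
	}
	firstInvalid := n
	for firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidPayload {
		firstInvalid = firstInvalid.parent
	}
	// The last valid payload is on another fork: only the passed node is removed.
	if firstInvalid.parent == nil {
		firstInvalid = n
	}
	var invalid []string
	var remove func(*node)
	remove = func(x *node) {
		for _, c := range x.children {
			remove(c)
		}
		invalid = append(invalid, x.root)
		if s.boostRoot == x.root {
			s.boostRoot = "" // never keep a boost on an invalidated block
		}
		delete(s.nodeByRoot, x.root)
		delete(s.nodeByPayload, x.payloadHash)
	}
	remove(firstInvalid)
	return invalid, nil
}

func main() {
	a := &node{root: "a", payloadHash: "A"}
	b := &node{root: "b", payloadHash: "B", parent: a}
	c := &node{root: "c", payloadHash: "C", parent: b}
	a.children = []*node{b}
	b.children = []*node{c}
	s := &store{
		nodeByRoot:    map[string]*node{"a": a, "b": b, "c": c},
		nodeByPayload: map[string]*node{"A": a, "B": b, "C": c},
		boostRoot:     "c",
	}
	roots, err := s.setOptimisticToInvalid("c", "A")
	fmt.Println(roots, err, s.boostRoot) // [c b] <nil> "" (boost cleared)
}
```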

View File

@@ -24,32 +24,71 @@ import (
func TestPruneInvalid(t *testing.T) {
tests := []struct {
root [32]byte // the root of the new INVALID block
payload [32]byte // the last valid hash
wantedNodeNumber int
wantedRoots [][32]byte
}{
{
[32]byte{'j'},
[32]byte{'B'},
12,
[][32]byte{[32]byte{'j'}},
},
{
[32]byte{'c'},
[32]byte{'B'},
4,
[][32]byte{[32]byte{'f'}, [32]byte{'e'}, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'},
[32]byte{'k'}, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'c'}},
},
{
[32]byte{'i'},
[32]byte{'H'},
12,
[][32]byte{[32]byte{'i'}},
},
{
[32]byte{'h'},
[32]byte{'G'},
11,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}},
},
{
[32]byte{'g'},
[32]byte{'D'},
8,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
},
{
[32]byte{'i'},
[32]byte{'D'},
8,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
},
{
[32]byte{'f'},
[32]byte{'D'},
11,
[][32]byte{[32]byte{'f'}, [32]byte{'e'}},
},
{
[32]byte{'h'},
[32]byte{'C'},
5,
[][32]byte{
[32]byte{'f'},
[32]byte{'e'},
[32]byte{'i'},
[32]byte{'h'},
[32]byte{'l'},
[32]byte{'k'},
[32]byte{'g'},
[32]byte{'d'},
},
},
{
[32]byte{'g'},
[32]byte{'E'},
8,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
},
@@ -58,22 +97,45 @@ func TestPruneInvalid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
roots, err := f.store.removeNode(context.Background(), tc.root)
roots, err := f.store.setOptimisticToInvalid(context.Background(), tc.root, tc.payload)
require.NoError(t, err)
require.DeepEqual(t, tc.wantedRoots, roots)
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
}
}
// This is a regression test (10445)
func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = [32]byte{'c'}
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
f.store.proposerBoostLock.Unlock()
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'A'})
require.NoError(t, err)
f.store.proposerBoostLock.RLock()
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}

View File

@@ -121,6 +121,7 @@ func (s *Store) insert(ctx context.Context,
payloadHash: payloadHash,
}
s.nodeByPayload[payloadHash] = n
s.nodeByRoot[root] = n
if parent != nil {
parent.children = append(parent.children, n)

View File

@@ -107,7 +107,8 @@ func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload}
payloadHash := [32]byte{'a'}
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1))
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")

View File

@@ -26,6 +26,7 @@ type Store struct {
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
}
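The second map gives the store a payload-hash index alongside the root index, which is what lets INVALID notifications from the engine API (which carry payload hashes, not block roots) be resolved to fork choice nodes. A minimal sketch of keeping the two indices in step on insert; the names are illustrative:

```go
package main

import "fmt"

type node struct {
	root        [32]byte
	payloadHash [32]byte
}

// store keeps the same node reachable by beacon block root and by execution
// payload hash; both maps must be updated together on insert and delete.
type store struct {
	nodeByRoot    map[[32]byte]*node
	nodeByPayload map[[32]byte]*node
}

func (s *store) insert(root, payloadHash [32]byte) *node {
	n := &node{root: root, payloadHash: payloadHash}
	s.nodeByRoot[root] = n
	s.nodeByPayload[payloadHash] = n
	return n
}

func main() {
	s := &store{
		nodeByRoot:    make(map[[32]byte]*node),
		nodeByPayload: make(map[[32]byte]*node),
	}
	s.insert([32]byte{'a'}, [32]byte{'A'})
	// An engine API INVALID response carries a payload hash, not a block root,
	// so lookups start from nodeByPayload.
	n := s.nodeByPayload[[32]byte{'A'}]
	fmt.Printf("%c\n", n.root[0]) // a
}
```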

View File

@@ -24,7 +24,7 @@ type ForkChoicer interface {
type HeadRetriever interface {
Head(context.Context, types.Epoch, [32]byte, []uint64, types.Epoch) ([32]byte, error)
Tips() ([][32]byte, []types.Slot)
IsOptimistic(ctx context.Context, root [32]byte) (bool, error)
IsOptimistic(root [32]byte) (bool, error)
}
// BlockProcessor processes the block that's used for accounting fork choice.
@@ -71,5 +71,5 @@ type Getter interface {
// Setter allows to set forkchoice information
type Setter interface {
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte) ([][32]byte, error)
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
}
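Callers of the Setter interface now have to supply both the invalid block root and the last valid payload hash. A small sketch of the new call shape, using a fake implementation and plain [32]byte in place of fieldparams.RootLength:

```go
package main

import (
	"context"
	"fmt"
)

// setter mirrors the updated interface shape: SetOptimisticToInvalid now needs
// the root of the INVALID block and the payload hash of the last valid block.
type setter interface {
	SetOptimisticToValid(ctx context.Context, root [32]byte) error
	SetOptimisticToInvalid(ctx context.Context, root, lastValidPayload [32]byte) ([][32]byte, error)
}

// fakeSetter is a stand-in used only to show the call sites.
type fakeSetter struct{}

func (fakeSetter) SetOptimisticToValid(context.Context, [32]byte) error { return nil }
func (fakeSetter) SetOptimisticToInvalid(_ context.Context, root, _ [32]byte) ([][32]byte, error) {
	return [][32]byte{root}, nil
}

func main() {
	var s setter = fakeSetter{}
	invalid, err := s.SetOptimisticToInvalid(context.Background(), [32]byte{'c'}, [32]byte{'A'})
	fmt.Println(len(invalid), err) // 1 <nil>
}
```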

View File

@@ -22,12 +22,14 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -5,11 +5,11 @@ import "errors"
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidNodeIndex = errors.New("node index is invalid")
var errInvalidFinalizedNode = errors.New("invalid finalized block on chain")
var ErrUnknownNodeRoot = errors.New("unknown block root")
var errInvalidJustifiedIndex = errors.New("justified index is invalid")
var errInvalidBestChildIndex = errors.New("best child index is invalid")
var errInvalidBestDescendantIndex = errors.New("best descendant index is invalid")
var errInvalidParentDelta = errors.New("parent delta is invalid")
var errInvalidNodeDelta = errors.New("node delta is invalid")
var errInvalidDeltaLength = errors.New("delta length is invalid")
var errInvalidSyncedTips = errors.New("invalid synced tips")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")

View File

@@ -85,12 +85,9 @@ func copyNode(node *Node) *Node {
return &Node{}
}
copiedRoot := [32]byte{}
copy(copiedRoot[:], node.root[:])
return &Node{
slot: node.slot,
root: copiedRoot,
root: node.root,
parent: node.parent,
payloadHash: node.payloadHash,
justifiedEpoch: node.justifiedEpoch,
@@ -98,5 +95,6 @@ func copyNode(node *Node) *Node {
weight: node.weight,
bestChild: node.bestChild,
bestDescendant: node.bestDescendant,
status: node.status,
}
}

View File

@@ -3,9 +3,12 @@ package protoarray
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
var (
log = logrus.WithField("prefix", "forkchoice-protoarray")
headSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "proto_array_head_slot",
@@ -48,16 +51,4 @@ var (
Help: "The number of times pruning happened.",
},
)
lastSyncedTipSlot = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "proto_array_last_synced_tip_slot",
Help: "The slot of the last fully validated block added to the proto array.",
},
)
syncedTipsCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "proto_array_synced_tips_count",
Help: "The number of elements in the syncedTips structure.",
},
)
)

View File

@@ -43,8 +43,3 @@ func (n *Node) BestChild() uint64 {
func (n *Node) BestDescendant() uint64 {
return n.bestDescendant
}
// Graffiti of the fork choice node.
func (n *Node) Graffiti() [32]byte {
return n.graffiti
}

View File

@@ -16,7 +16,6 @@ func TestNode_Getters(t *testing.T) {
weight := uint64(10000)
bestChild := uint64(5)
bestDescendant := uint64(4)
graffiti := [32]byte{'b'}
n := &Node{
slot: slot,
root: root,
@@ -26,7 +25,6 @@ func TestNode_Getters(t *testing.T) {
weight: weight,
bestChild: bestChild,
bestDescendant: bestDescendant,
graffiti: graffiti,
}
require.Equal(t, slot, n.Slot())
@@ -37,5 +35,4 @@ func TestNode_Getters(t *testing.T) {
require.Equal(t, weight, n.Weight())
require.Equal(t, bestChild, n.BestChild())
require.Equal(t, bestDescendant, n.BestDescendant())
require.Equal(t, graffiti, n.Graffiti())
}

View File

@@ -3,314 +3,159 @@ package protoarray
import (
"context"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
)
// This returns the minimum and maximum slot of the synced_tips tree
func (f *ForkChoice) boundarySyncedTips() (types.Slot, types.Slot) {
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
min := params.BeaconConfig().FarFutureSlot
max := types.Slot(0)
for _, slot := range f.syncedTips.validatedTips {
if slot > max {
max = slot
}
if slot < min {
min = slot
}
}
return min, max
}
// IsOptimistic returns true if this node is optimistically synced
// An optimistically synced block is synced as usual, but its
// execution payload is not validated, while the EL is still syncing.
// This function returns an error if the block is not found in the fork choice
// store
func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, error) {
if ctx.Err() != nil {
return false, ctx.Err()
}
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
index, ok := f.store.nodesIndices[root]
if !ok {
f.store.nodesLock.RUnlock()
return false, ErrUnknownNodeRoot
}
node := f.store.nodes[index]
slot := node.slot
// If the node is a synced tip, then it's fully validated
f.syncedTips.RLock()
_, ok = f.syncedTips.validatedTips[root]
if ok {
f.syncedTips.RUnlock()
f.store.nodesLock.RUnlock()
return false, nil
}
f.syncedTips.RUnlock()
// If the slot is higher than the max synced tip, it's optimistic
min, max := f.boundarySyncedTips()
if slot > max {
f.store.nodesLock.RUnlock()
return true, nil
}
// If the slot is lower than the min synced tip, it's fully validated
if slot <= min {
f.store.nodesLock.RUnlock()
return false, nil
}
// if the node is a leaf of the Fork Choice tree, then it's
// optimistic
childIndex := node.BestChild()
if childIndex == NonExistentNode {
f.store.nodesLock.RUnlock()
return true, nil
}
// recurse to the child
child := f.store.nodes[childIndex]
root = child.root
f.store.nodesLock.RUnlock()
return f.IsOptimistic(ctx, root)
}
// This function returns the index of sync tip node that's ancestor to the input node.
// In the event of none, `NonExistentNode` is returned.
// This internal method assumes the caller holds a lock on syncedTips and s.nodesLock
func (s *Store) findSyncedTip(ctx context.Context, node *Node, syncedTips *optimisticStore) (uint64, error) {
for {
if ctx.Err() != nil {
return 0, ctx.Err()
}
if _, ok := syncedTips.validatedTips[node.root]; ok {
return s.nodesIndices[node.root], nil
}
if node.parent == NonExistentNode {
return NonExistentNode, nil
}
node = s.nodes[node.parent]
}
return node.status == syncing, nil
}
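With a status field on every node, the proto-array IsOptimistic collapses to an index lookup plus a status comparison, replacing the synced-tips traversal deleted above. A reduced sketch; the three status values are assumptions inferred from the surrounding hunks:

```go
package main

import (
	"errors"
	"fmt"
)

type status int

const (
	syncing status = iota // execution payload not yet validated
	valid                 // payload validated by the EL
	invalid               // payload rejected by the EL
)

type node struct{ status status }

type forkchoice struct {
	nodesIndices map[[32]byte]uint64
	nodes        []*node
}

var errUnknownRoot = errors.New("unknown block root")

// isOptimistic is the O(1) form of the check: a block is optimistic exactly
// when its node is still in the syncing state.
func (f *forkchoice) isOptimistic(root [32]byte) (bool, error) {
	i, ok := f.nodesIndices[root]
	if !ok {
		return false, errUnknownRoot
	}
	return f.nodes[i].status == syncing, nil
}

func main() {
	f := &forkchoice{
		nodesIndices: map[[32]byte]uint64{{'a'}: 0, {'b'}: 1},
		nodes:        []*node{{status: valid}, {status: syncing}},
	}
	fmt.Println(f.isOptimistic([32]byte{'a'})) // false <nil>
	fmt.Println(f.isOptimistic([32]byte{'b'})) // true <nil>
}
```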
// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL. This routine recomputes and updates the synced_tips map to
// account for this new tip.
// WARNING: This method returns an error if the root is not found in forkchoice or
// if the root is not a leaf of the fork choice tree.
// VALID by the EL.
// WARNING: This method returns an error if the root is not found in forkchoice.
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.RLock()
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
// We can only update if given root is in Fork Choice
index, ok := f.store.nodesIndices[root]
if !ok {
return errInvalidNodeIndex
return ErrUnknownNodeRoot
}
node := f.store.nodes[index]
f.store.nodesLock.RUnlock()
// Stop early if the node is Valid
optimistic, err := f.IsOptimistic(ctx, root)
if err != nil {
return err
}
if !optimistic {
return nil
}
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
// Cache root and slot to validated tips
newTips := make(map[[32]byte]types.Slot)
newValidSlot := node.slot
newTips[root] = newValidSlot
// Compute the full valid path from the given node to its previous synced tip
// This path will now consist of fully validated blocks. Notice that
// the previous tip may have been outside the Fork Choice store.
// In this case, only one block can be in syncedTips as the whole
// Fork Choice would be a descendant of this block.
validPath := make(map[uint64]bool)
validPath[index] = true
for {
for node := f.store.nodes[index]; node.status == syncing; node = f.store.nodes[index] {
if ctx.Err() != nil {
return ctx.Err()
}
parentIndex := node.parent
if parentIndex == NonExistentNode {
node.status = valid
index = node.parent
if index == NonExistentNode {
break
}
if parentIndex >= uint64(len(f.store.nodes)) {
return errInvalidNodeIndex
}
node = f.store.nodes[parentIndex]
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
break
}
validPath[parentIndex] = true
}
// Retrieve the list of leaves in the Fork Choice
// These are all the nodes that have NonExistentNode as best child.
leaves, err := f.store.leaves()
if err != nil {
return err
}
// For each leaf, recompute the new tip.
for _, i := range leaves {
node = f.store.nodes[i]
j := i
for {
if ctx.Err() != nil {
return ctx.Err()
}
// Stop if we reached the previous tip
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
newTips[node.root] = node.slot
break
}
// Stop if we reach valid path
_, ok = validPath[j]
if ok {
newTips[node.root] = node.slot
break
}
j = node.parent
if j == NonExistentNode {
break
}
if j >= uint64(len(f.store.nodes)) {
return errInvalidNodeIndex
}
node = f.store.nodes[j]
}
}
f.syncedTips.validatedTips = newTips
lastSyncedTipSlot.Set(float64(newValidSlot))
syncedTipsCount.Set(float64(len(newTips)))
return nil
}
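The rewritten SetOptimisticToValid simply walks parent indices from the given block, flipping each still-syncing ancestor to valid and stopping at the first already-valid node or at the tree-root sentinel. A self-contained sketch of that loop with simplified types (the sentinel and status values are assumptions):

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

const nonExistentNode = uint64(math.MaxUint64)

type status int

const (
	syncing status = iota
	valid
)

type node struct {
	parent uint64
	status status
}

type store struct {
	nodesIndices map[[32]byte]uint64
	nodes        []*node
}

// setOptimisticToValid walks the parent indices from the given root, marking
// every syncing ancestor valid and stopping at the first already-valid node or
// at the tree root (sentinel index).
func (s *store) setOptimisticToValid(root [32]byte) error {
	index, ok := s.nodesIndices[root]
	if !ok {
		return errors.New("unknown block root")
	}
	for n := s.nodes[index]; n.status == syncing; n = s.nodes[index] {
		n.status = valid
		index = n.parent
		if index == nonExistentNode {
			break
		}
	}
	return nil
}

func main() {
	s := &store{
		nodesIndices: map[[32]byte]uint64{{'a'}: 0, {'b'}: 1, {'c'}: 2},
		nodes: []*node{
			{parent: nonExistentNode, status: syncing},
			{parent: 0, status: syncing},
			{parent: 1, status: syncing},
		},
	}
	if err := s.setOptimisticToValid([32]byte{'c'}); err != nil {
		panic(err)
	}
	fmt.Println(s.nodes[0].status, s.nodes[1].status, s.nodes[2].status) // 1 1 1
}
```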
// SetOptimisticToInvalid updates the synced_tips map when the block with the given root becomes INVALID.
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [32]byte) ([][32]byte, error) {
// SetOptimisticToInvalid updates the fork choice store when the block with the given root becomes INVALID.
// It takes two parameters: the root of the INVALID block and the payload hash
// of the last valid block.
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, payloadHash [32]byte) ([][32]byte, error) {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
invalidRoots := make([][32]byte, 0)
idx, ok := f.store.nodesIndices[root]
// We only support setting invalid a node existing in Forkchoice
invalidIndex, ok := f.store.nodesIndices[root]
if !ok {
return invalidRoots, errInvalidNodeIndex
return invalidRoots, ErrUnknownNodeRoot
}
node := f.store.nodes[idx]
// We only support changing status for the tips in Fork Choice store.
if node.bestChild != NonExistentNode {
return invalidRoots, errInvalidNodeIndex
node := f.store.nodes[invalidIndex]
lastValidIndex, ok := f.store.payloadIndices[payloadHash]
if !ok || lastValidIndex == NonExistentNode {
return invalidRoots, errInvalidFinalizedNode
}
parentIndex := node.parent
// This should not happen
if parentIndex == NonExistentNode {
return invalidRoots, errInvalidNodeIndex
// Check if last valid hash is an ancestor of the passed node
firstInvalidIndex := node.parent
for ; firstInvalidIndex != NonExistentNode && firstInvalidIndex != lastValidIndex; firstInvalidIndex = node.parent {
node = f.store.nodes[firstInvalidIndex]
}
// Update the weights of the nodes subtracting the INVALID node's weight
// If the last valid hash is not an ancestor of the invalid block, we
// just remove the invalid block.
if node.parent != lastValidIndex {
node = f.store.nodes[invalidIndex]
firstInvalidIndex = invalidIndex
lastValidIndex = node.parent
if lastValidIndex == NonExistentNode {
return invalidRoots, errInvalidFinalizedNode
}
} else {
firstInvalidIndex = f.store.nodesIndices[node.root]
}
// Update the weights of the nodes subtracting the first INVALID node's weight
weight := node.weight
node = f.store.nodes[parentIndex]
for {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
node.weight -= weight
if node.parent == NonExistentNode {
break
}
node = f.store.nodes[node.parent]
}
parent := copyNode(f.store.nodes[parentIndex])
// delete the invalid node, order is important
f.store.nodes = append(f.store.nodes[:idx], f.store.nodes[idx+1:]...)
delete(f.store.nodesIndices, root)
invalidRoots = append(invalidRoots, root)
// Fix parent and best child for each node
for _, node := range f.store.nodes {
if node.parent == NonExistentNode {
node.parent = NonExistentNode
} else if node.parent > idx {
node.parent -= 1
}
if node.bestChild == NonExistentNode || node.bestChild == idx {
node.bestChild = NonExistentNode
} else if node.bestChild > idx {
node.bestChild -= 1
}
if node.bestDescendant == NonExistentNode || node.bestDescendant == idx {
node.bestDescendant = NonExistentNode
} else if node.bestDescendant > idx {
node.bestDescendant -= 1
}
var validNode *Node
for index := lastValidIndex; index != NonExistentNode; index = validNode.parent {
validNode = f.store.nodes[index]
validNode.weight -= weight
}
// Update the parent's best child and best descendant if necessary.
if parent.bestChild == idx || parent.bestDescendant == idx {
for childIndex, child := range f.store.nodes {
if child.parent == parentIndex {
err := f.store.updateBestChildAndDescendant(
parentIndex, uint64(childIndex))
if err != nil {
return invalidRoots, err
}
break
// Find the current proposer boost (it should be set to zero if an
// INVALID block was boosted)
f.store.proposerBoostLock.RLock()
boostRoot := f.store.proposerBoostRoot
previousBoostRoot := f.store.previousProposerBoostRoot
f.store.proposerBoostLock.RUnlock()
// Remove the invalid roots from our store maps and adjust their weight
// to zero
boosted := node.root == boostRoot
previouslyBoosted := node.root == previousBoostRoot
invalidIndices := map[uint64]bool{firstInvalidIndex: true}
node.status = invalid
node.weight = 0
delete(f.store.nodesIndices, node.root)
delete(f.store.canonicalNodes, node.root)
delete(f.store.payloadIndices, node.payloadHash)
for index := firstInvalidIndex + 1; index < uint64(len(f.store.nodes)); index++ {
invalidNode := f.store.nodes[index]
if _, ok := invalidIndices[invalidNode.parent]; !ok {
continue
}
if invalidNode.status == valid {
return invalidRoots, errInvalidOptimisticStatus
}
if !boosted && invalidNode.root == boostRoot {
boosted = true
}
if !previouslyBoosted && invalidNode.root == previousBoostRoot {
previouslyBoosted = true
}
invalidNode.status = invalid
invalidIndices[index] = true
invalidNode.weight = 0
delete(f.store.nodesIndices, invalidNode.root)
delete(f.store.canonicalNodes, invalidNode.root)
delete(f.store.payloadIndices, invalidNode.payloadHash)
}
if boosted {
if err := f.ResetBoostedProposerRoot(ctx); err != nil {
return invalidRoots, err
}
}
if previouslyBoosted {
f.store.proposerBoostLock.Lock()
f.store.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
f.store.previousProposerBoostScore = 0
f.store.proposerBoostLock.Unlock()
}
for index := range invalidIndices {
invalidRoots = append(invalidRoots, f.store.nodes[index].root)
}
// Update the best child and descendant
for i := len(f.store.nodes) - 1; i >= 0; i-- {
n := f.store.nodes[i]
if n.parent != NonExistentNode {
if err := f.store.updateBestChildAndDescendant(n.parent, uint64(i)); err != nil {
return invalidRoots, err
}
}
}
// Return early if the parent is not a synced_tip.
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
parentRoot := parent.root
_, ok = f.syncedTips.validatedTips[parentRoot]
if !ok {
return invalidRoots, nil
}
leaves, err := f.store.leaves()
if err != nil {
return invalidRoots, err
}
for _, i := range leaves {
node = f.store.nodes[i]
for {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
// Return early if the parent is still a synced tip
if node.root == parentRoot {
return invalidRoots, nil
}
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
break
}
if node.parent == NonExistentNode {
break
}
node = f.store.nodes[node.parent]
}
}
delete(f.syncedTips.validatedTips, parentRoot)
syncedTipsCount.Set(float64(len(f.syncedTips.validatedTips)))
return invalidRoots, nil
}
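The proto-array variant performs the same invalidation over flat indices: resolve the last valid block through payloadIndices, find the first invalid ancestor, zero the weights of the invalid subtree, and subtract that weight from the surviving ancestors before recomputing best child and descendant. The sketch below isolates only the weight bookkeeping, which is the easiest part to get wrong; all names are illustrative and indices are assumed to be ordered parent-before-child:

```go
package main

import "fmt"

const nonExistentNode = ^uint64(0)

type node struct {
	parent uint64
	weight uint64
}

// discountInvalidSubtree subtracts the weight of the first invalid node from
// every surviving ancestor, starting at the last valid index, and zeroes the
// weights inside the invalid subtree.
func discountInvalidSubtree(nodes []*node, firstInvalid, lastValid uint64) {
	removed := nodes[firstInvalid].weight
	// Walk up from the last valid node to the tree root.
	for i := lastValid; i != nonExistentNode; i = nodes[i].parent {
		nodes[i].weight -= removed
	}
	// Zero everything rooted at the first invalid node.
	invalid := map[uint64]bool{firstInvalid: true}
	nodes[firstInvalid].weight = 0
	for i := firstInvalid + 1; i < uint64(len(nodes)); i++ {
		if invalid[nodes[i].parent] {
			invalid[i] = true
			nodes[i].weight = 0
		}
	}
}

func main() {
	// a(10) <- b(7) <- c(3); c is invalidated, b is the last valid block.
	nodes := []*node{
		{parent: nonExistentNode, weight: 10},
		{parent: 0, weight: 7},
		{parent: 1, weight: 3},
	}
	discountInvalidSubtree(nodes, 2, 1)
	fmt.Println(nodes[0].weight, nodes[1].weight, nodes[2].weight) // 7 4 0
}
```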

View File

@@ -10,116 +10,38 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
)
// We test the algorithm to check the optimistic status of a node. The
// status for this test is the following branching diagram
//
// -- E -- F
// /
// -- C -- D
// /
// 0 -- 1 -- A -- B -- J -- K
// \ /
// -- G -- H -- I
//
// Here nodes 0, 1, A, B, C, D are fully validated and nodes
// E, F, G, H, J, K are optimistic.
// Synced Tips are nodes B, C, D
// nodes 0 and 1 are outside the Fork Choice Store.
func slicesEqual(a, b [][32]byte) bool {
if len(a) != len(b) {
return false
}
func TestOptimistic(t *testing.T) {
mapA := make(map[[32]byte]bool, len(a))
for _, root := range a {
mapA[root] = true
}
for _, root := range b {
_, ok := mapA[root]
if !ok {
return false
}
}
return true
}
func TestOptimistic_Outside_ForkChoice(t *testing.T) {
root0 := bytesutil.ToBytes32([]byte("hello0"))
root1 := bytesutil.ToBytes32([]byte("hello1"))
nodeA := &Node{
slot: types.Slot(100),
root: bytesutil.ToBytes32([]byte("helloA")),
bestChild: 1,
}
nodeB := &Node{
slot: types.Slot(101),
root: bytesutil.ToBytes32([]byte("helloB")),
bestChild: 2,
parent: 0,
}
nodeC := &Node{
slot: types.Slot(102),
root: bytesutil.ToBytes32([]byte("helloC")),
bestChild: 3,
parent: 1,
}
nodeD := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloD")),
bestChild: NonExistentNode,
parent: 2,
}
nodeE := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloE")),
bestChild: 5,
parent: 2,
}
nodeF := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloF")),
bestChild: NonExistentNode,
parent: 4,
}
nodeG := &Node{
slot: types.Slot(102),
root: bytesutil.ToBytes32([]byte("helloG")),
bestChild: 7,
parent: 1,
}
nodeH := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloH")),
bestChild: 8,
parent: 6,
}
nodeI := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloI")),
bestChild: NonExistentNode,
parent: 7,
}
nodeJ := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloJ")),
bestChild: 10,
parent: 6,
}
nodeK := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloK")),
bestChild: NonExistentNode,
parent: 9,
status: valid,
}
nodes := []*Node{
nodeA,
nodeB,
nodeC,
nodeD,
nodeE,
nodeF,
nodeG,
nodeH,
nodeI,
nodeJ,
nodeK,
}
ni := map[[32]byte]uint64{
nodeA.root: 0,
nodeB.root: 1,
nodeC.root: 2,
nodeD.root: 3,
nodeE.root: 4,
nodeF.root: 5,
nodeG.root: 6,
nodeH.root: 7,
nodeI.root: 8,
nodeJ.root: 9,
nodeK.root: 10,
}
s := &Store{
@@ -127,82 +49,14 @@ func TestOptimistic(t *testing.T) {
nodesIndices: ni,
}
tips := map[[32]byte]types.Slot{
nodeB.root: nodeB.slot,
nodeC.root: nodeC.slot,
nodeD.root: nodeD.slot,
}
st := &optimisticStore{
validatedTips: tips,
}
f := &ForkChoice{
store: s,
syncedTips: st,
store: s,
}
ctx := context.Background()
// We test the implementation of boundarySyncedTips
min, max := f.boundarySyncedTips()
require.Equal(t, min, types.Slot(101), "minimum tip slot is different")
require.Equal(t, max, types.Slot(103), "maximum tip slot is different")
// We test first nodes outside the Fork Choice store
_, err := f.IsOptimistic(ctx, root0)
_, err := f.IsOptimistic(root0)
require.ErrorIs(t, ErrUnknownNodeRoot, err)
_, err = f.IsOptimistic(ctx, root1)
require.ErrorIs(t, ErrUnknownNodeRoot, err)
// We check all nodes in the Fork Choice store.
op, err := f.IsOptimistic(ctx, nodeA.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeB.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeC.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeD.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeE.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeF.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeG.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeH.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeI.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeJ.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeK.root)
require.NoError(t, err)
require.Equal(t, op, true)
// request a write Lock to synced Tips regression #10289
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
}
// This tests the algorithm to update syncedTips
// This tests the algorithm to update the optimistic status
// We start with the following diagram
//
// E -- F
@@ -213,165 +67,105 @@ func TestOptimistic(t *testing.T) {
// \ \
// J -- K -- L
//
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice
// The Chain A -- B -- C -- D -- E is VALID.
//
func TestSetOptimisticToValid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
tests := []struct {
root [32]byte // the root of the new VALID block
tips map[[32]byte]types.Slot // the old synced tips
newTips map[[32]byte]types.Slot // the updated synced tips
wantedErr error
root [32]byte // the root of the new VALID block
testRoot [32]byte // root of the node we will test optimistic status
wantedOptimistic bool // wanted optimistic status for tested node
wantedErr error // wanted error message
}{
{
[32]byte{'i'},
map[[32]byte]types.Slot{[32]byte{'z'}: 90},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'i'},
false,
nil,
},
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'f'},
true,
nil,
},
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 103,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'e'}: 104,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
[32]byte{'j'}: 102,
},
nil,
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'b'},
false,
nil,
},
{
[32]byte{'i'},
[32]byte{'h'},
map[[32]byte]types.Slot{
[32]byte{'z'}: 90,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'h'}: 105,
},
false,
nil,
},
{
[32]byte{'b'},
[32]byte{'b'},
false,
nil,
},
{
[32]byte{'b'},
[32]byte{'h'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
true,
nil,
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 104,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'e'}: 104,
[32]byte{'g'}: 104,
},
[32]byte{'b'},
[32]byte{'a'},
false,
nil,
},
{
[32]byte{'k'},
[32]byte{'k'},
false,
nil,
},
{
[32]byte{'k'},
[32]byte{'l'},
true,
nil,
},
{
[32]byte{'p'},
map[[32]byte]types.Slot{},
map[[32]byte]types.Slot{},
errInvalidNodeIndex,
[32]byte{},
false,
ErrUnknownNodeRoot,
},
}
for _, tc := range tests {
f.syncedTips.Lock()
f.syncedTips.validatedTips = tc.tips
f.syncedTips.Unlock()
err := f.SetOptimisticToValid(context.Background(), tc.root)
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
require.NoError(t, f.SetOptimisticToValid(context.Background(), [32]byte{'e'}))
optimistic, err := f.IsOptimistic([32]byte{'b'})
require.NoError(t, err)
require.Equal(t, false, optimistic)
err = f.SetOptimisticToValid(context.Background(), tc.root)
if tc.wantedErr != nil {
require.ErrorIs(t, err, tc.wantedErr)
} else {
require.NoError(t, err)
f.syncedTips.RLock()
require.DeepEqual(t, f.syncedTips.validatedTips, tc.newTips)
f.syncedTips.RUnlock()
optimistic, err := f.IsOptimistic(tc.testRoot)
require.NoError(t, err)
require.Equal(t, tc.wantedOptimistic, optimistic)
}
}
}
@@ -387,70 +181,81 @@ func TestSetOptimisticToValid(t *testing.T) {
// \ \
// J(1) -- K(1) -- L(0)
//
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice. The numbers in parentheses are
// the weights of the nodes before removal
// And the chain A -- B -- C -- D -- E has been fully validated. The numbers in parentheses are
// the weights of the nodes.
//
func TestSetOptimisticToInvalid(t *testing.T) {
tests := []struct {
root [32]byte // the root of the new INVALID block
tips map[[32]byte]types.Slot // the old synced tips
wantedParentTip bool
name string // test description
root [32]byte // the root of the new INVALID block
payload [32]byte // the payload hash of the last valid block
newBestChild uint64
newBestDescendant uint64
newParentWeight uint64
returnedRoots [][32]byte
}{
{
"Remove tip, parent was valid",
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
false,
[32]byte{'B'},
3,
4,
8,
[][32]byte{[32]byte{'j'}},
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
},
true,
3,
4,
12,
8,
[][32]byte{[32]byte{'j'}},
},
{
"Remove tip, parent was optimistic",
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'h'}: 105,
},
true,
[32]byte{'H'},
NonExistentNode,
NonExistentNode,
1,
[][32]byte{[32]byte{'i'}},
},
{
"Remove tip, lvh is inner and valid",
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
false,
[32]byte{'D'},
6,
8,
3,
[][32]byte{[32]byte{'g'}, [32]byte{'h'}, [32]byte{'k'}, [32]byte{'i'}, [32]byte{'l'}},
},
{
"Remove inner, lvh is inner and optimistic",
[32]byte{'h'},
[32]byte{'G'},
10,
12,
2,
[][32]byte{[32]byte{'h'}, [32]byte{'i'}},
},
{
"Remove tip, lvh is inner and optimistic",
[32]byte{'l'},
[32]byte{'G'},
9,
11,
2,
[][32]byte{[32]byte{'k'}, [32]byte{'l'}},
},
{
"Remove tip, lvh is not an ancestor",
[32]byte{'j'},
[32]byte{'C'},
5,
12,
7,
[][32]byte{[32]byte{'j'}},
},
{
"Remove inner, lvh is not an ancestor",
[32]byte{'g'},
[32]byte{'J'},
NonExistentNode,
NonExistentNode,
1,
[][32]byte{[32]byte{'i'}},
[][32]byte{[32]byte{'g'}, [32]byte{'h'}, [32]byte{'k'}, [32]byte{'i'}, [32]byte{'l'}},
},
}
for _, tc := range tests {
@@ -458,184 +263,71 @@ func TestSetOptimisticToInvalid(t *testing.T) {
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
f.syncedTips.Lock()
f.syncedTips.validatedTips = tc.tips
f.syncedTips.Unlock()
f.store.nodesLock.Lock()
for i, node := range f.store.nodes {
node.weight = weights[i]
}
// Make j be the best child and descendant of b
nodeB := f.store.nodes[2]
nodeB.bestChild = 4
nodeB.bestDescendant = 4
idx := f.store.nodesIndices[tc.root]
node := f.store.nodes[idx]
parentIndex := node.parent
require.NotEqual(t, NonExistentNode, parentIndex)
parent := f.store.nodes[parentIndex]
f.store.nodesLock.Unlock()
roots, err := f.SetOptimisticToInvalid(context.Background(), tc.root)
require.NoError(t, f.SetOptimisticToValid(ctx, [32]byte{'e'}))
roots, err := f.SetOptimisticToInvalid(ctx, tc.root, tc.payload)
require.NoError(t, err)
require.DeepEqual(t, tc.returnedRoots, roots)
f.syncedTips.RLock()
_, parentSyncedTip := f.syncedTips.validatedTips[parent.root]
f.syncedTips.RUnlock()
require.Equal(t, tc.wantedParentTip, parentSyncedTip)
require.Equal(t, tc.newBestChild, parent.bestChild)
require.Equal(t, tc.newBestDescendant, parent.bestDescendant)
require.Equal(t, tc.newParentWeight, parent.weight)
}
}
// This tests the algorithm to find the tip of a given node
// We start with the following diagram
//
// E -- F
// /
// C -- D
// / \
// A -- B G -- H -- I
// \ \
// J -- K -- L
//
//
func TestFindSyncedTip(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
tests := []struct {
root [32]byte // the root of the block
tips map[[32]byte]types.Slot // the synced tips
wanted [32]byte // the root of expected tip
}{
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
[32]byte{'g'},
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'h'}: 104,
[32]byte{'k'}: 106,
},
[32]byte{'d'},
},
{
[32]byte{'e'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 103,
},
[32]byte{'d'},
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'b'},
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'g'},
},
}
for _, tc := range tests {
f.store.nodesLock.RLock()
node := f.store.nodes[f.store.nodesIndices[tc.root]]
syncedTips := &optimisticStore{
validatedTips: tc.tips,
}
syncedTips.RLock()
idx, err := f.store.findSyncedTip(ctx, node, syncedTips)
require.NoError(t, err)
require.Equal(t, tc.wanted, f.store.nodes[idx].root)
_, ok := f.store.nodesIndices[tc.root]
require.Equal(t, false, ok)
lvh := f.store.nodes[f.store.payloadIndices[tc.payload]]
require.Equal(t, true, slicesEqual(tc.returnedRoots, roots))
require.Equal(t, tc.newBestChild, lvh.bestChild)
require.Equal(t, tc.newBestDescendant, lvh.bestDescendant)
require.Equal(t, tc.newParentWeight, lvh.weight)
require.Equal(t, syncing, f.store.nodes[8].status /* F */)
require.Equal(t, valid, f.store.nodes[5].status /* E */)
f.store.nodesLock.RUnlock()
syncedTips.RUnlock()
}
}
// This is a regression test (10341)
func TestIsOptimistic_DeadLock(t *testing.T) {
func TestSetOptimisticToInvalid_InvalidRoots(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 90, [32]byte{'b'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'d'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'e'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
tips := map[[32]byte]types.Slot{
[32]byte{'a'}: 100,
[32]byte{'d'}: 102,
}
f.syncedTips.validatedTips = tips
_, err := f.IsOptimistic(ctx, [32]byte{'a'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'e'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'b'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'c'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'p'}, [32]byte{'B'})
require.ErrorIs(t, ErrUnknownNodeRoot, err)
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'a'}, [32]byte{'p'})
require.ErrorIs(t, errInvalidFinalizedNode, err)
}
// This is a regression test (10445)
func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = [32]byte{'c'}
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
f.store.proposerBoostLock.Unlock()
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'A'})
require.NoError(t, err)
f.store.proposerBoostLock.RLock()
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}

View File

@@ -9,8 +9,10 @@ import (
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pmath "github.com/prysmaticlabs/prysm/math"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -30,43 +32,14 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch, finalizedRoot [32]byte) *Fo
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
payloadIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
pruneThreshold: defaultPruneThreshold,
}
b := make([]uint64, 0)
v := make([]Vote, 0)
st := &optimisticStore{
validatedTips: make(map[[32]byte]types.Slot),
}
return &ForkChoice{store: s, balances: b, votes: v, syncedTips: st}
}
// SetSyncedTips sets the synced and validated tips from the passed map
func (f *ForkChoice) SetSyncedTips(tips map[[32]byte]types.Slot) error {
if len(tips) == 0 {
return errInvalidSyncedTips
}
newTips := make(map[[32]byte]types.Slot, len(tips))
for k, v := range tips {
newTips[k] = v
}
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
f.syncedTips.validatedTips = newTips
return nil
}
// SyncedTips returns the synced and validated tips from the fork choice store.
func (f *ForkChoice) SyncedTips() map[[32]byte]types.Slot {
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
m := make(map[[32]byte]types.Slot)
for k, v := range f.syncedTips.validatedTips {
m[k] = v
}
return m
return &ForkChoice{store: s, balances: b, votes: v}
}
// Head returns the head root from fork choice store.
@@ -159,7 +132,7 @@ func (f *ForkChoice) InsertOptimisticBlock(
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different than the current store finalized root, and the number of nodes in the store has met the prune threshold.
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
return f.store.prune(ctx, finalizedRoot, f.syncedTips)
return f.store.prune(ctx, finalizedRoot)
}
// HasNode returns true if the node exists in fork choice store,
@@ -375,6 +348,7 @@ func (s *Store) insert(ctx context.Context,
}
s.nodesIndices[root] = index
s.payloadIndices[payloadHash] = index
s.nodes = append(s.nodes, n)
// Update parent with the best child and descendant only if it's available.
@@ -443,6 +417,7 @@ func (s *Store) applyWeightChanges(
}
iProposerScore, err := pmath.Int(proposerScore)
if err != nil {
s.proposerBoostLock.Unlock()
return err
}
nodeDelta = nodeDelta + iProposerScore
@@ -453,6 +428,16 @@ func (s *Store) applyWeightChanges(
if nodeDelta < 0 {
d := uint64(-nodeDelta)
if n.weight < d {
s.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeDelta": d,
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(n.root[:])),
"nodeWeight": n.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(s.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(s.previousProposerBoostRoot[:])),
"previousProposerBoostScore": s.previousProposerBoostScore,
}).Warning("node with invalid weight, setting it to zero")
s.proposerBoostLock.RUnlock()
n.weight = 0
} else {
n.weight -= d
@@ -598,7 +583,7 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
// prune prunes the store with the new finalized root. The tree is only
// pruned if the input finalized root is different than the stored one and
// the number of nodes in the store has met the prune threshold.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *optimisticStore) error {
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
defer span.End()
@@ -618,18 +603,9 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
return nil
}
// Traverse through the node list starting from the finalized node at index 0.
// Nodes that are not branching off from the finalized node will be removed.
syncedTips.Lock()
defer syncedTips.Unlock()
canonicalNodesMap := make(map[uint64]uint64, uint64(len(s.nodes))-finalizedIndex)
canonicalNodes := make([]*Node, 1, uint64(len(s.nodes))-finalizedIndex)
finalizedNode := s.nodes[finalizedIndex]
finalizedTipIndex, err := s.findSyncedTip(ctx, finalizedNode, syncedTips)
if err != nil {
return err
}
finalizedNode.parent = NonExistentNode
canonicalNodes[0] = finalizedNode
canonicalNodesMap[finalizedIndex] = uint64(0)
@@ -638,20 +614,22 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
node := copyNode(s.nodes[idx])
parentIdx, ok := canonicalNodesMap[node.parent]
if ok {
s.nodesIndices[node.root] = uint64(len(canonicalNodes))
canonicalNodesMap[idx] = uint64(len(canonicalNodes))
currentIndex := uint64(len(canonicalNodes))
s.nodesIndices[node.root] = currentIndex
s.payloadIndices[node.payloadHash] = currentIndex
canonicalNodesMap[idx] = currentIndex
node.parent = parentIdx
canonicalNodes = append(canonicalNodes, node)
} else {
// Remove node and synced tip that is not part of finalized branch.
// Remove node that is not part of finalized branch.
delete(s.nodesIndices, node.root)
_, ok := syncedTips.validatedTips[node.root]
if ok && idx != finalizedTipIndex {
delete(syncedTips.validatedTips, node.root)
}
delete(s.canonicalNodes, node.root)
delete(s.payloadIndices, node.payloadHash)
}
}
s.nodesIndices[finalizedRoot] = uint64(0)
s.canonicalNodes[finalizedRoot] = true
s.payloadIndices[finalizedNode.payloadHash] = uint64(0)
// Recompute the best child and descendant for each canonical nodes.
for _, node := range canonicalNodes {
@@ -665,7 +643,6 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
s.nodes = canonicalNodes
prunedCount.Inc()
syncedTipsCount.Set(float64(len(syncedTips.validatedTips)))
return nil
}
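The index bookkeeping in prune can be hard to follow inside the diff, so here is a minimal, self-contained sketch of the same compaction idea: the finalized node becomes index 0, every surviving node's parent pointer is rewritten to its new index, and anything not descending from the finalized node is dropped. The miniNode type is invented for illustration and is not the Prysm Node type.
type miniNode struct {
	root   [32]byte
	parent uint64
}
// compactFromFinalized keeps the finalized node and its descendants,
// rewriting parent indices to positions in the compacted slice.
func compactFromFinalized(nodes []miniNode, finalizedIdx uint64) []miniNode {
	newIdx := map[uint64]uint64{finalizedIdx: 0}
	out := []miniNode{nodes[finalizedIdx]}
	for i := finalizedIdx + 1; i < uint64(len(nodes)); i++ {
		n := nodes[i]
		p, ok := newIdx[n.parent]
		if !ok {
			continue // not on the finalized branch, drop it
		}
		newIdx[i] = uint64(len(out))
		n.parent = p
		out = append(out, n)
	}
	return out
}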
@@ -673,6 +650,10 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
// Any node whose finalized or justified epoch differs from the ones in the fork choice store
// should not be viable for head.
func (s *Store) leadsToViableHead(node *Node) (bool, error) {
if node.status == invalid {
return false, nil
}
var bestDescendantViable bool
bestDescendantIndex := node.bestDescendant
@@ -704,20 +685,6 @@ func (s *Store) viableForHead(node *Node) bool {
return justified && finalized
}
// Returns the list of leaves in the Fork Choice store.
// These are all the nodes that have NonExistentNode as best child.
// This internal method assumes that the caller holds a lock in s.nodesLock.
func (s *Store) leaves() ([]uint64, error) {
var leaves []uint64
for i := uint64(0); i < uint64(len(s.nodes)); i++ {
node := s.nodes[i]
if node.bestChild == NonExistentNode {
leaves = append(leaves, i)
}
}
return leaves, nil
}
// Tips returns all possible chain heads (leaves of fork choice tree).
// Head roots and head slots are returned.
func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {

View File

@@ -100,7 +100,7 @@ func TestStore_Head_ContextCancelled(t *testing.T) {
func TestStore_Insert_UnknownParent(t *testing.T) {
// The new node does not have a parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
s := &Store{nodesIndices: make(map[[32]byte]uint64), payloadIndices: make(map[[32]byte]uint64)}
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{'B'}, params.BeaconConfig().ZeroHash, 1, 1))
assert.Equal(t, 1, len(s.nodes), "Did not insert block")
assert.Equal(t, 1, len(s.nodesIndices), "Did not insert block")
@@ -113,7 +113,7 @@ func TestStore_Insert_UnknownParent(t *testing.T) {
func TestStore_Insert_KnownParent(t *testing.T) {
// Similar to UnknownParent test, but this time the new node has a valid parent already in store.
// The new node builds on top of the parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
s := &Store{nodesIndices: make(map[[32]byte]uint64), payloadIndices: make(map[[32]byte]uint64)}
s.nodes = []*Node{{}}
p := [32]byte{'B'}
s.nodesIndices[p] = 0
@@ -336,11 +336,10 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
})
s := &Store{nodes: nodes, nodesIndices: indices, pruneThreshold: 100}
syncedTips := &optimisticStore{}
// Finalized root is at index 99 so everything before 99 should be pruned,
// but PruneThreshold is at 100 so nothing will be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 100, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 100, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -376,11 +375,10 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
parent: uint64(numOfNodes - 2),
})
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 1, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 1, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -415,16 +413,15 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
parent: uint64(numOfNodes - 2),
})
s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
// Finalized root is at index 10 so everything before index 10 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10), syncedTips))
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
assert.Equal(t, 90, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 90, len(s.nodesIndices), "Incorrect node indices count")
// One more time.
require.NoError(t, s.prune(context.Background(), indexToHash(20), syncedTips))
require.NoError(t, s.prune(context.Background(), indexToHash(20)))
assert.Equal(t, 80, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 80, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -444,6 +441,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
bestDescendant: 1,
root: indexToHash(uint64(0)),
parent: NonExistentNode,
payloadHash: [32]byte{'A'},
},
{
slot: 101,
@@ -451,6 +449,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
parent: 0,
payloadHash: [32]byte{'B'},
},
{
slot: 101,
@@ -458,9 +457,9 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
parent: 0,
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
payloadHash: [32]byte{'C'},
},
}
syncedTips := &optimisticStore{}
s := &Store{
pruneThreshold: 0,
nodes: nodes,
@@ -469,9 +468,22 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
indexToHash(uint64(1)): 1,
indexToHash(uint64(2)): 2,
},
canonicalNodes: map[[32]byte]bool{
indexToHash(uint64(0)): true,
indexToHash(uint64(1)): true,
indexToHash(uint64(2)): true,
},
payloadIndices: map[[32]byte]uint64{
[32]byte{'A'}: 0,
[32]byte{'B'}: 1,
[32]byte{'C'}: 2,
},
}
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1)), syncedTips))
require.Equal(t, len(s.nodes), 1)
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
require.Equal(t, 1, len(s.nodes))
require.Equal(t, 1, len(s.nodesIndices))
require.Equal(t, 1, len(s.canonicalNodes))
require.Equal(t, 1, len(s.payloadIndices))
}
// This test starts with the following branching diagram
@@ -486,38 +498,74 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
// J -- K -- L
//
//
// Synced tips are B, D and E. And we finalize F. All that is left in fork
// choice is F, and the only synced tip left is E which is now away from Fork
// Choice.
func TestStore_PruneSyncedTips(t *testing.T) {
func TestStore_PruneBranched(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
syncedTips := &optimisticStore{
validatedTips: map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 104,
tests := []struct {
finalizedRoot [32]byte
wantedCanonical [32]byte
wantedNonCanonical [32]byte
canonicalCount int
payloadHash [32]byte
payloadIndex uint64
nonExistentPayload [32]byte
}{
{
[32]byte{'f'},
[32]byte{'f'},
[32]byte{'a'},
1,
[32]byte{'F'},
0,
[32]byte{'H'},
},
{
[32]byte{'d'},
[32]byte{'e'},
[32]byte{'i'},
3,
[32]byte{'E'},
1,
[32]byte{'C'},
},
{
[32]byte{'b'},
[32]byte{'f'},
[32]byte{'h'},
5,
[32]byte{'D'},
3,
[32]byte{'A'},
},
}
f.syncedTips = syncedTips
f.store.pruneThreshold = 0
require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
require.Equal(t, 1, len(f.syncedTips.validatedTips))
_, ok := f.syncedTips.validatedTips[[32]byte{'e'}]
require.Equal(t, true, ok)
for _, tc := range tests {
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
f.store.pruneThreshold = 0
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'f'}))
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
require.Equal(t, true, f.IsCanonical([32]byte{'f'}))
require.NoError(t, f.Prune(ctx, tc.finalizedRoot))
require.Equal(t, tc.canonicalCount, len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical(tc.wantedCanonical))
require.Equal(t, false, f.IsCanonical(tc.wantedNonCanonical))
require.Equal(t, tc.payloadIndex, f.store.payloadIndices[tc.payloadHash])
_, ok := f.store.payloadIndices[tc.nonExistentPayload]
require.Equal(t, false, ok)
}
}
func TestStore_LeadsToViableHead(t *testing.T) {
@@ -546,20 +594,6 @@ func TestStore_LeadsToViableHead(t *testing.T) {
}
}
func TestStore_SetSyncedTips(t *testing.T) {
f := setup(1, 1)
tips := make(map[[32]byte]types.Slot)
require.ErrorIs(t, errInvalidSyncedTips, f.SetSyncedTips(tips))
tips[bytesutil.ToBytes32([]byte{'a'})] = 1
require.NoError(t, f.SetSyncedTips(tips))
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
require.Equal(t, 1, len(f.syncedTips.validatedTips))
slot, ok := f.syncedTips.validatedTips[bytesutil.ToBytes32([]byte{'a'})]
require.Equal(t, true, ok)
require.Equal(t, types.Slot(1), slot)
}
func TestStore_ViableForHead(t *testing.T) {
tests := []struct {
n *Node

View File

@@ -9,11 +9,10 @@ import (
// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
syncedTips *optimisticStore
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
}
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
@@ -28,6 +27,7 @@ type Store struct {
nodes []*Node // list of block nodes, each node is a representation of one block.
nodesIndices map[[fieldparams.RootLength]byte]uint64 // the root of block node and the nodes index in the list.
canonicalNodes map[[fieldparams.RootLength]byte]bool // the canonical block nodes.
payloadIndices map[[fieldparams.RootLength]byte]uint64 // the payload hash of the block node and the node's index in the list.
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
}
@@ -44,15 +44,17 @@ type Node struct {
weight uint64 // weight of this node.
bestChild uint64 // bestChild index of this node.
bestDescendant uint64 // bestDescendant of this node.
graffiti [fieldparams.RootLength]byte // graffiti of the block node.
status status // optimistic status of this node
}
// optimisticStore defines a structure that tracks the tips of the fully
// validated blocks tree.
type optimisticStore struct {
validatedTips map[[32]byte]types.Slot
sync.RWMutex
}
// enum used as optimistic status of a node
type status uint8
const (
syncing status = iota // the node is optimistic
valid // fully validated node
invalid // invalid execution payload
)
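A brief, hypothetical example of how this enum is meant to flow; the markValid helper below is not part of the actual API and only illustrates the intended transitions from syncing to valid or invalid.
// markValid flips an optimistic (syncing) node once its execution payload
// has been verified, or marks it invalid when verification fails.
func markValid(n *Node, payloadOK bool) {
	if n.status != syncing {
		return // status already resolved
	}
	if payloadOK {
		n.status = valid
	} else {
		n.status = invalid
	}
}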
// Vote defines an individual validator's vote.
type Vote struct {

View File

@@ -12,7 +12,6 @@ import (
func TestVotes_CanFindHead(t *testing.T) {
balances := []uint64{1, 1}
f := setup(1, 1)
syncedTips := &optimisticStore{}
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
@@ -249,7 +248,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// Verify pruning below the prune threshold does not affect head.
f.store.pruneThreshold = 1000
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 11, len(f.store.nodes), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
@@ -273,7 +272,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 9 10
f.store.pruneThreshold = 1
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)

View File

@@ -18,6 +18,7 @@ go_library(
"//api/gateway:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",

View File

@@ -21,6 +21,7 @@ import (
apigateway "github.com/prysmaticlabs/prysm/api/gateway"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
@@ -94,6 +95,7 @@ type BeaconNode struct {
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
depositCache *depositcache.DepositCache
proposerIdsCache *cache.ProposerPayloadIDsCache
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
@@ -152,6 +154,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
slasherBlockHeadersFeed: new(event.Feed),
slasherAttestationsFeed: new(event.Feed),
serviceFlagOpts: &serviceFlagOpts{},
proposerIdsCache: cache.NewProposerPayloadIDsCache(),
}
for _, opt := range opts {
@@ -585,6 +588,7 @@ func (b *BeaconNode) registerBlockchainService() error {
blockchain.WithStateGen(b.stateGen),
blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
blockchain.WithProposerIdsCache(b.proposerIdsCache),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
if err != nil {
@@ -801,6 +805,7 @@ func (b *BeaconNode) registerRPCService() error {
StateGen: b.stateGen,
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
MaxMsgSize: maxMsgSize,
ProposerIdsCache: b.proposerIdsCache,
ExecutionEngineCaller: web3Service,
})

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"auth.go",
"block_cache.go",
"block_reader.go",
"check_transition_config.go",
@@ -16,6 +15,7 @@ go_library(
"options.go",
"prometheus.go",
"provider.go",
"rpc_connection.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain",
@@ -59,7 +59,6 @@ go_library(
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -74,7 +73,6 @@ go_test(
name = "go_default_test",
size = "medium",
srcs = [
"auth_test.go",
"block_cache_test.go",
"block_reader_test.go",
"check_transition_config_test.go",
@@ -125,7 +123,6 @@ go_test(
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_ethereum_go_ethereum//trie:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/network"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/sirupsen/logrus"
)
@@ -81,7 +82,7 @@ func (s *Service) checkTransitionConfiguration(
return
}
case tm := <-ticker.C:
ctx, cancel := context.WithDeadline(ctx, tm.Add(DefaultRPCHTTPTimeout))
ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
err = s.ExchangeTransitionConfiguration(ctx, cfg)
s.handleExchangeConfigurationError(err)
if !hasTtdReached {
@@ -119,11 +120,16 @@ func (s *Service) handleExchangeConfigurationError(err error) {
// Logs the terminal total difficulty status.
func (s *Service) logTtdStatus(ctx context.Context, ttd *uint256.Int) (bool, error) {
latest, err := s.LatestExecutionBlock(ctx)
if err != nil {
switch {
case errors.Is(err, hexutil.ErrEmptyString):
return false, nil
case err != nil:
return false, err
}
if latest == nil {
case latest == nil:
return false, errors.New("latest block is nil")
case latest.TotalDifficulty == "":
return false, nil
default:
}
latestTtd, err := hexutil.DecodeBig(latest.TotalDifficulty)
if err != nil {

View File

@@ -190,6 +190,38 @@ func TestService_logTtdStatus(t *testing.T) {
require.Equal(t, false, reached)
}
func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
resp := (*pb.ExecutionBlock)(nil) // Nil response when a client is not synced
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
service := &Service{
cfg: &config{},
}
service.rpcClient = rpcClient
ttd := new(uint256.Int)
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
require.NoError(t, err)
require.Equal(t, false, reached)
}
func emptyPayload() *pb.ExecutionPayload {
return &pb.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),

View File

@@ -10,9 +10,12 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -49,8 +52,8 @@ type EngineCaller interface {
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error)
}
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
@@ -174,6 +177,78 @@ func (s *Service) ExchangeTransitionConfiguration(
return nil
}
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
//
// Spec code:
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
// # `pow_chain` abstractly represents all blocks in the PoW chain
// for block in pow_chain:
// parent = pow_chain[block.parent_hash]
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// if block_reached_ttd and not parent_reached_ttd:
// return block
//
// return None
func (s *Service) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := s.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
if ctx.Err() != nil {
return nil, false, ctx.Err()
}
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if parentBlk == nil {
return nil, false, errors.New("parent execution block is nil")
}
if blockReachedTTD {
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
log.WithFields(logrus.Fields{
"number": blk.Number,
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
"td": blk.TotalDifficulty,
"parentTd": parentBlk.TotalDifficulty,
"ttd": terminalTotalDifficulty,
}).Info("Retrieved terminal block hash")
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
}
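For context, a hedged sketch of how a caller might consume this method through the EngineCaller interface above; the wrapper function is hypothetical and not part of this change.
// terminalHashIfReady returns the terminal block hash once a PoW block has
// crossed the terminal total difficulty, or nil if none has yet.
func terminalHashIfReady(ctx context.Context, caller EngineCaller) ([]byte, error) {
	hash, exists, err := caller.GetTerminalBlockHash(ctx)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, nil
	}
	return hash, nil
}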
// LatestExecutionBlock fetches the latest execution engine block by calling
// eth_blockByNumber via JSON-RPC.
func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
@@ -251,3 +326,15 @@ func isTimeout(e error) bool {
t, ok := e.(httpTimeoutError)
return ok && t.Timeout()
}
func tDStringToUint256(td string) (*uint256.Int, error) {
b, err := hexutil.DecodeBig(td)
if err != nil {
return nil, err
}
i, overflows := uint256.FromBig(b)
if overflows {
return nil, errors.New("total difficulty overflowed")
}
return i, nil
}
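A quick worked example of the conversion this helper performs, consistent with the unit test added further below; the wrapper function is illustrative only.
func exampleTdConversion() uint64 {
	td, err := tDStringToUint256("0x10000") // hex-encoded total difficulty
	if err != nil {
		return 0
	}
	return td.Uint64() // 65536
}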

View File

@@ -418,6 +418,155 @@ func TestClient_HTTP(t *testing.T) {
})
}
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
tests := []struct {
name string
paramsTd string
currentPowBlock *pb.ExecutionBlock
parentPowBlock *pb.ExecutionBlock
errLatestExecutionBlk error
wantTerminalBlockHash []byte
wantExists bool
errString string
}{
{
name: "config td overflows",
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
errString: "could not convert terminal total difficulty to uint256",
},
{
name: "could not get latest execution block",
paramsTd: "1",
errLatestExecutionBlk: errors.New("blah"),
errString: "could not get latest execution block",
},
{
name: "nil latest execution block",
paramsTd: "1",
errString: "latest execution block is nil",
},
{
name: "current execution block invalid TD",
paramsTd: "1",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "current execution block has zero hash parent",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: params.BeaconConfig().ZeroHash[:],
TotalDifficulty: "0x3",
},
},
{
name: "could not get parent block",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
errString: "could not get parent execution block",
},
{
name: "parent execution block invalid TD",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "1",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "happy case",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
wantExists: true,
wantTerminalBlockHash: []byte{'a'},
},
{
name: "ttd not reached",
paramsTd: "3",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x2",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = tt.paramsTd
params.OverrideBeaconConfig(cfg)
var m map[[32]byte]*pb.ExecutionBlock
if tt.parentPowBlock != nil {
m = map[[32]byte]*pb.ExecutionBlock{
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
}
}
client := mocks.EngineClient{
ErrLatestExecBlock: tt.errLatestExecutionBlk,
ExecutionBlock: tt.currentPowBlock,
BlockByHashMap: m,
}
b, e, err := client.GetTerminalBlockHash(context.Background())
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tt.wantExists, e)
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
}
})
}
}
func Test_tDStringToUint256(t *testing.T) {
i, err := tDStringToUint256("0x0")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(0), i)
i, err = tDStringToUint256("0x10000")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(65536), i)
_, err = tDStringToUint256("100")
require.ErrorContains(t, "hex string without 0x prefix", err)
_, err = tDStringToUint256("0xzzzzzz")
require.ErrorContains(t, "invalid hex string", err)
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
require.ErrorContains(t, "hex number > 256 bits", err)
}
func TestExchangeTransitionConfiguration(t *testing.T) {
fix := fixtures()
ctx := context.Background()

View File

@@ -1,9 +1,6 @@
package powchain
import (
"net/http"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
@@ -11,11 +8,9 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
)
// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
const DefaultRPCHTTPTimeout = time.Second * 6
type Option func(s *Service) error
// WithHttpEndpoints deduplicates and parses http endpoints for the powchain service to use,
@@ -38,20 +33,29 @@ func WithHttpEndpoints(endpointStrings []string) Option {
}
}
// WithJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithJWTSecret(secret []byte) Option {
return func(c *Service) error {
// WithHttpEndpointsAndJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithHttpEndpointsAndJWTSecret(endpointStrings []string, secret []byte) Option {
return func(s *Service) error {
if len(secret) == 0 {
return nil
}
authTransport := &jwtTransport{
underlyingTransport: http.DefaultTransport,
jwtSecret: secret,
stringEndpoints := dedupEndpoints(endpointStrings)
endpoints := make([]network.Endpoint, len(stringEndpoints))
// Overwrite authorization type for all endpoints to be of a bearer
// type.
for i, e := range stringEndpoints {
hEndpoint := HttpEndpoint(e)
hEndpoint.Auth.Method = authorization.Bearer
hEndpoint.Auth.Value = string(secret)
endpoints[i] = hEndpoint
}
c.cfg.httpRPCClient = &http.Client{
Timeout: DefaultRPCHTTPTimeout,
Transport: authTransport,
// Select first http endpoint in the provided list.
var currEndpoint network.Endpoint
if len(endpointStrings) > 0 {
currEndpoint = endpoints[0]
}
s.cfg.httpEndpoints = endpoints
s.cfg.currHttpEndpoint = currEndpoint
return nil
}
}
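From a caller's perspective, the option might be wired roughly as follows; the endpoint URL, secret handling, and the startPowchain wrapper are assumptions for illustration, while NewService, Start, and the option itself come from this change.
func startPowchain(ctx context.Context, jwtSecret []byte) (*Service, error) {
	// The endpoint URL here is a placeholder for a real execution client.
	svc, err := NewService(ctx,
		WithHttpEndpointsAndJWTSecret([]string{"http://localhost:8551"}, jwtSecret),
	)
	if err != nil {
		return nil, err
	}
	go svc.Start()
	return svc, nil
}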

View File

@@ -0,0 +1,188 @@
package powchain
import (
"context"
"fmt"
"net/url"
"time"
"github.com/ethereum/go-ethereum/ethclient"
gethRPC "github.com/ethereum/go-ethereum/rpc"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
"github.com/prysmaticlabs/prysm/io/logs"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
)
func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
client, err := s.newRPCClientWithAuth(ctx, currEndpoint)
if err != nil {
return errors.Wrap(err, "could not dial execution node")
}
// Attach the clients to the service struct.
fetcher := ethclient.NewClient(client)
s.rpcClient = client
s.httpLogger = fetcher
s.eth1DataFetcher = fetcher
depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, fetcher)
if err != nil {
client.Close()
return errors.Wrap(err, "could not initialize deposit contract caller")
}
s.depositContractCaller = depositContractCaller
// Ensure we have the correct chain and deposit IDs.
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
client.Close()
return errors.Wrap(err, "could not make initial request to verify execution chain ID")
}
s.updateConnectedETH1(true)
s.runError = nil
return nil
}
// Every N seconds, defined as backOffPeriod, attempts to re-establish an execution client
// connection, and if this does not work, we fall back to the next endpoint if one is defined.
func (s *Service) pollConnectionStatus(ctx context.Context) {
// Use a custom logger to only log errors
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
ticker := time.NewTicker(backOffPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
currClient := s.rpcClient
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
errorLogger(err, "Could not connect to execution client endpoint")
s.runError = err
s.fallbackToNextEndpoint()
continue
}
// Close previous client, if connection was successful.
currClient.Close()
log.Infof("Connected to new endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
return
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")
return
}
}
}
// Forces a retry of the execution client connection.
func (s *Service) retryExecutionClientConnection(ctx context.Context, err error) {
s.runError = err
s.updateConnectedETH1(false)
// Back off for a while before redialing.
time.Sleep(backOffPeriod)
currClient := s.rpcClient
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
s.runError = err
return
}
// Close previous client, if connection was successful.
currClient.Close()
// Reset run error in the event of a successful connection.
s.runError = nil
}
// This performs a health check on our primary endpoint, and if it
// is ready to serve we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint(ctx context.Context) {
primaryEndpoint := s.cfg.httpEndpoints[0]
// Return early if we are running on our primary
// endpoint.
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
return
}
currClient := s.rpcClient
if err := s.setupExecutionClientConnections(ctx, primaryEndpoint); err != nil {
log.Debugf("Primary endpoint not ready: %v", err)
return
}
// Close previous client, if connection was successful.
currClient.Close()
s.updateCurrHttpEndpoint(primaryEndpoint)
}
// This is an inefficient way to search for the next endpoint, but given N is
// expected to be small, it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
currEndpoint := s.cfg.currHttpEndpoint
currIndex := 0
totalEndpoints := len(s.cfg.httpEndpoints)
for i, endpoint := range s.cfg.httpEndpoints {
if endpoint.Equals(currEndpoint) {
currIndex = i
break
}
}
nextIndex := currIndex + 1
if nextIndex >= totalEndpoints {
nextIndex = 0
}
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
if nextIndex != currIndex {
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
}
}
// Initializes an RPC connection with authentication headers.
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
// Need to handle ipc and http
var client *gethRPC.Client
u, err := url.Parse(endpoint.Url)
if err != nil {
return nil, err
}
switch u.Scheme {
case "http", "https":
client, err = gethRPC.DialHTTPWithClient(endpoint.Url, endpoint.HttpClient())
if err != nil {
return nil, err
}
case "":
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {
return nil, err
}
client.SetHeader("Authorization", header)
}
return client, nil
}
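A hedged example of preparing a bearer-authenticated endpoint for this dialer, using only names that appear in this change (HttpEndpoint, the Auth fields, and authorization.Bearer); the helper and its literal values are placeholders.
func authedEndpoint(url string, secret []byte) network.Endpoint {
	e := HttpEndpoint(url)
	// Overwrite the authorization type so the dialer attaches a bearer header.
	e.Auth.Method = authorization.Bearer
	e.Auth.Value = string(secret)
	return e
}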
// Checks the chain ID of the execution client to ensure
// it matches the chain ID that Prysm expects from its local parameters.
func ensureCorrectExecutionChain(ctx context.Context, client *ethclient.Client) error {
cID, err := client.ChainID(ctx)
if err != nil {
return err
}
wantChainID := params.BeaconConfig().DepositChainID
if cID.Uint64() != wantChainID {
return fmt.Errorf("wanted chain ID %d, got %d", wantChainID, cID.Uint64())
}
return nil
}

View File

@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"math/big"
"net/http"
"reflect"
"runtime/debug"
"sort"
@@ -39,10 +38,8 @@ import (
"github.com/prysmaticlabs/prysm/container/trie"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/io/logs"
"github.com/prysmaticlabs/prysm/monitoring/clientstats"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
@@ -114,6 +111,7 @@ type Chain interface {
// RPCDataFetcher defines a subset of methods conformed to by ETH1.0 RPC clients for
// fetching eth1 data from the clients.
type RPCDataFetcher interface {
Close()
HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
@@ -121,6 +119,7 @@ type RPCDataFetcher interface {
// RPCClient defines the rpc methods required to interact with the eth1 node.
type RPCClient interface {
Close()
BatchCall(b []gethRPC.BatchElem) error
CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
}
@@ -135,7 +134,6 @@ type config struct {
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
httpEndpoints []network.Endpoint
httpRPCClient *http.Client
currHttpEndpoint network.Endpoint
finalizedStateAtStartup state.BeaconState
}
@@ -228,14 +226,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Start a web3 service's main event loop.
func (s *Service) Start() {
if err := s.connectToPowChain(); err != nil {
log.WithError(err).Fatal("Could not connect to execution endpoint")
if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
log.WithError(err).Error("Could not connect to execution endpoint")
}
log.WithFields(logrus.Fields{
"endpoint": logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url),
}).Info("Connected to Ethereum execution client RPC")
// If the chain has not started already and we don't have access to eth1 nodes, we will not be
// able to generate the genesis state.
if !s.chainStartData.Chainstarted && s.cfg.currHttpEndpoint.Url == "" {
@@ -253,7 +246,7 @@ func (s *Service) Start() {
s.isRunning = true
// Poll the execution client connection and fallback if errors occur.
go s.pollConnectionStatus()
s.pollConnectionStatus(s.ctx)
// Check transition configuration for the engine API client in the background.
go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))
@@ -266,7 +259,12 @@ func (s *Service) Stop() error {
if s.cancel != nil {
defer s.cancel()
}
s.closeClients()
if s.rpcClient != nil {
s.rpcClient.Close()
}
if s.eth1DataFetcher != nil {
s.eth1DataFetcher.Close()
}
return nil
}
@@ -338,10 +336,7 @@ func (s *Service) CurrentETH1Endpoint() string {
// CurrentETH1ConnectionError returns the error (if any) of the current connection.
func (s *Service) CurrentETH1ConnectionError() error {
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
httpClient.Close()
rpcClient.Close()
return err
return s.runError
}
// ETH1Endpoints returns the slice of HTTP endpoint URLs (default is 0th element).
@@ -358,10 +353,18 @@ func (s *Service) ETH1Endpoints() []string {
func (s *Service) ETH1ConnectionErrors() []error {
var errs []error
for _, ep := range s.cfg.httpEndpoints {
httpClient, rpcClient, err := s.dialETH1Nodes(ep)
httpClient.Close()
rpcClient.Close()
errs = append(errs, err)
client, err := s.newRPCClientWithAuth(s.ctx, ep)
if err != nil {
client.Close()
errs = append(errs, err)
continue
}
if err := ensureCorrectExecutionChain(s.ctx, ethclient.NewClient(client)); err != nil {
client.Close()
errs = append(errs, err)
continue
}
client.Close()
}
return errs
}
@@ -376,146 +379,6 @@ func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
return latestValidBlock, nil
}
func (s *Service) connectToPowChain() error {
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
if err != nil {
return errors.Wrap(err, "could not dial execution node")
}
depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, httpClient)
if err != nil {
return errors.Wrap(err, "could not initialize deposit contract caller")
}
if httpClient == nil || rpcClient == nil || depositContractCaller == nil {
return errors.New("execution client RPC is nil")
}
s.httpLogger = httpClient
s.eth1DataFetcher = httpClient
s.depositContractCaller = depositContractCaller
s.rpcClient = rpcClient
s.updateConnectedETH1(true)
s.runError = nil
return nil
}
func (s *Service) dialETH1Nodes(endpoint network.Endpoint) (*ethclient.Client, *gethRPC.Client, error) {
httpRPCClient, err := gethRPC.Dial(endpoint.Url)
if err != nil {
return nil, nil, err
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {
return nil, nil, err
}
httpRPCClient.SetHeader("Authorization", header)
}
httpClient := ethclient.NewClient(httpRPCClient)
// Add a method to clean-up and close clients in the event
// of any connection failure.
closeClients := func() {
httpRPCClient.Close()
httpClient.Close()
}
// Make a simple call to ensure we are actually connected to a working node.
cID, err := httpClient.ChainID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
nID, err := httpClient.NetworkID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
if cID.Uint64() != params.BeaconConfig().DepositChainID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect chain id, %d != %d", cID.Uint64(), params.BeaconConfig().DepositChainID)
}
if nID.Uint64() != params.BeaconConfig().DepositNetworkID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect network id, %d != %d", nID.Uint64(), params.BeaconConfig().DepositNetworkID)
}
return httpClient, httpRPCClient, nil
}
// closes down our active eth1 clients.
func (s *Service) closeClients() {
gethClient, ok := s.rpcClient.(*gethRPC.Client)
if ok {
gethClient.Close()
}
httpClient, ok := s.eth1DataFetcher.(*ethclient.Client)
if ok {
httpClient.Close()
}
}
func (s *Service) pollConnectionStatus() {
// Use a custom logger to only log errors
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
ticker := time.NewTicker(backOffPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
errConnect := s.connectToPowChain()
if errConnect != nil {
errorLogger(errConnect, "Could not connect to powchain endpoint")
s.runError = errConnect
s.fallbackToNextEndpoint()
continue
}
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")
return
}
}
}
// checks if the eth1 node is healthy and ready to serve before
// fetching data from it.
func (s *Service) isEth1NodeSynced() (bool, error) {
syncProg, err := s.eth1DataFetcher.SyncProgress(s.ctx)
if err != nil {
return false, err
}
if syncProg != nil {
return false, nil
}
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
return false, err
}
return !eth1HeadIsBehind(head.Time), nil
}
// Reconnect to eth1 node in case of any failure.
func (s *Service) retryETH1Node(err error) {
s.runError = err
s.updateConnectedETH1(false)
// Back off for a while before
// resuming dialing the eth1 node.
time.Sleep(backOffPeriod)
if err := s.connectToPowChain(); err != nil {
s.runError = err
return
}
// Reset run error in the event of a successful connection.
s.runError = nil
}
func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositContainer) error {
if len(ctrs) == 0 {
return nil
@@ -551,11 +414,14 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
// accumulates. We finalize them here before we are ready to receive a block.
// Otherwise, the first few blocks will be slower to compute as we will
// hold the lock and be busy finalizing the deposits.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex)) // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
// The deposit index in the state is always the index of the next deposit
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
actualIndex := int64(currIndex) - 1 // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex)
// Deposit proofs are only used during state transition and can be safely removed to save space.
// lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
if err = s.cfg.depositCache.PruneProofs(ctx, int64(currIndex)); err != nil {
if err = s.cfg.depositCache.PruneProofs(ctx, actualIndex); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
}
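To make the off-by-one concrete (and matching the test update below that moves the state's Eth1DepositIndex from 2 to 3), here is a tiny illustrative calculation; the helper name is invented.
func exampleFinalizedDepositIndex() int64 {
	currIndex := uint64(3)      // state deposit index: the next deposit to be included
	return int64(currIndex) - 1 // 2: the last deposit actually processed and finalized
}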
@@ -581,6 +447,7 @@ func (s *Service) processBlockHeader(header *gethTypes.Header) {
log.WithFields(logrus.Fields{
"blockNumber": s.latestEth1Data.BlockHeight,
"blockHash": hexutil.Encode(s.latestEth1Data.BlockHash),
"difficulty": header.Difficulty.String(),
}).Debug("Latest eth1 chain event")
}
@@ -649,7 +516,7 @@ func (s *Service) handleETH1FollowDistance() {
fiveMinutesTimeout := prysmTime.Now().Add(-5 * time.Minute)
// check that web3 client is syncing
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) {
log.Warn("eth1 client is not syncing")
log.Warn("Execution client is not syncing")
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
@@ -679,6 +546,15 @@ func (s *Service) handleETH1FollowDistance() {
}
func (s *Service) initPOWService() {
// Use a custom logger to only log errors
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
// Run in a select loop to retry in the event of any failures.
for {
@@ -689,8 +565,8 @@ func (s *Service) initPOWService() {
ctx := s.ctx
header, err := s.eth1DataFetcher.HeaderByNumber(ctx, nil)
if err != nil {
log.Errorf("Unable to retrieve latest ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to retrieve latest execution client header")
continue
}
@@ -699,14 +575,14 @@ func (s *Service) initPOWService() {
s.latestEth1Data.BlockTime = header.Time
if err := s.processPastLogs(ctx); err != nil {
log.Errorf("Unable to process past logs %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to process past deposit contract logs")
continue
}
// Cache eth1 headers from our voting period.
if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
log.Errorf("Unable to process past headers %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to cache headers for execution client votes")
continue
}
// Handle edge case with embedded genesis state by fetching genesis header to determine
@@ -719,15 +595,17 @@ func (s *Service) initPOWService() {
if genHash != [32]byte{} {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
if err != nil {
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to retrieve proof-of-stake genesis block data")
continue
}
genBlock = genHeader.Number.Uint64()
}
s.chainStartData.GenesisBlock = genBlock
if err := s.savePowchainData(ctx); err != nil {
log.Errorf("Unable to save powchain data: %v", err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to save execution client data")
continue
}
}
return
@@ -749,24 +627,25 @@ func (s *Service) run(done <-chan struct{}) {
case <-done:
s.isRunning = false
s.runError = nil
s.rpcClient.Close()
s.updateConnectedETH1(false)
log.Debug("Context closed, exiting goroutine")
return
case <-s.headTicker.C:
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
s.pollConnectionStatus(s.ctx)
log.WithError(err).Debug("Could not fetch latest eth1 header")
s.retryETH1Node(err)
continue
}
if eth1HeadIsBehind(head.Time) {
s.pollConnectionStatus(s.ctx)
log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
s.retryETH1Node(errFarBehind)
continue
}
s.processBlockHeader(head)
s.handleETH1FollowDistance()
s.checkDefaultEndpoint()
s.checkDefaultEndpoint(s.ctx)
case <-chainstartTicker.C:
if s.chainStartData.Chainstarted {
chainstartTicker.Stop()
@@ -853,59 +732,6 @@ func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock
return hdr.Number.Uint64(), nil
}
// This performs a health check on our primary endpoint, and if it
// is ready to serve we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint() {
primaryEndpoint := s.cfg.httpEndpoints[0]
// Return early if we are running on our primary
// endpoint.
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
return
}
httpClient, rpcClient, err := s.dialETH1Nodes(primaryEndpoint)
if err != nil {
log.Debugf("Primary endpoint not ready: %v", err)
return
}
log.Info("Primary endpoint ready again, switching back to it")
// Close the clients and let our main connection routine
// properly connect with it.
httpClient.Close()
rpcClient.Close()
// Close current active clients.
s.closeClients()
// Switch back to primary endpoint and try connecting
// to it again.
s.updateCurrHttpEndpoint(primaryEndpoint)
s.retryETH1Node(nil)
}
// This is an inefficient way to search for the next endpoint, but given N is expected to be
// small ( < 25), it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
currEndpoint := s.cfg.currHttpEndpoint
currIndex := 0
totalEndpoints := len(s.cfg.httpEndpoints)
for i, endpoint := range s.cfg.httpEndpoints {
if endpoint.Equals(currEndpoint) {
currIndex = i
break
}
}
nextIndex := currIndex + 1
if nextIndex >= totalEndpoints {
nextIndex = 0
}
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
if nextIndex != currIndex {
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
}
}
// initializes our service from the provided eth1data object by setting all the relevant
// fields and data.
func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ETH1ChainData) error {

View File

@@ -42,6 +42,8 @@ type goodLogger struct {
backend *backends.SimulatedBackend
}
func (_ *goodLogger) Close() {}
func (g *goodLogger) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
if g.backend == nil {
return new(event.Feed).Subscribe(ch), nil
@@ -80,6 +82,8 @@ type goodFetcher struct {
backend *backends.SimulatedBackend
}
func (_ *goodFetcher) Close() {}
func (g *goodFetcher) HeaderByHash(_ context.Context, hash common.Hash) (*gethTypes.Header, error) {
if bytes.Equal(hash.Bytes(), common.BytesToHash([]byte{0}).Bytes()) {
return nil, fmt.Errorf("expected block hash to be nonzero %v", hash)
@@ -225,10 +229,6 @@ func TestService_Eth1Synced(t *testing.T) {
now := time.Now()
assert.NoError(t, testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0))))
testAcc.Backend.Commit()
synced, err := web3Service.isEth1NodeSynced()
require.NoError(t, err)
assert.Equal(t, true, synced, "Expected eth1 nodes to be synced")
}
func TestFollowBlock_OK(t *testing.T) {
@@ -472,7 +472,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
require.NoError(t, s.cfg.beaconDB.SaveState(context.Background(), emptyState, headRoot))
require.NoError(t, stateGen.SaveState(context.Background(), headRoot, emptyState))
s.cfg.stateGen = stateGen
require.NoError(t, emptyState.SetEth1DepositIndex(2))
require.NoError(t, emptyState.SetEth1DepositIndex(3))
ctx := context.Background()
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(0), Root: headRoot[:]}))
@@ -480,8 +480,8 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
s.chainStartData.Chainstarted = true
require.NoError(t, s.initDepositCaches(context.Background(), ctrs))
deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), nil)
fDeposits := s.cfg.depositCache.FinalizedDeposits(ctx)
deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), fDeposits.MerkleTrieIndex, nil)
assert.Equal(t, 0, len(deps))
}

View File

@@ -26,6 +26,7 @@ go_library(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -2,25 +2,32 @@ package testing
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
// EngineClient --
type EngineClient struct {
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
ErrExecBlockByHash error
ErrForkchoiceUpdated error
ErrNewPayload error
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
ErrExecBlockByHash error
ErrForkchoiceUpdated error
ErrNewPayload error
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
TerminalBlockHash []byte
TerminalBlockHashExists bool
}
// NewPayload --
@@ -58,3 +65,52 @@ func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash) (*
}
return b, e.ErrExecBlockByHash
}
// GetTerminalBlockHash --
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := e.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
b, err := hexutil.DecodeBig(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
currentTotalDifficulty, _ := uint256.FromBig(b)
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if blockReachedTTD {
b, err := hexutil.DecodeBig(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentTotalDifficulty, _ := uint256.FromBig(b)
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
}

View File

@@ -144,6 +144,8 @@ type RPCClient struct {
Backend *backends.SimulatedBackend
}
func (_ *RPCClient) Close() {}
func (*RPCClient) CallContext(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
return nil
}

View File

@@ -424,11 +424,11 @@ type beaconBlockBodyBellatrixJson struct {
type executionPayloadJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
CoinBase string `json:"coinbase" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptRoot string `json:"receipt_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
Random string `json:"random" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
@@ -441,11 +441,11 @@ type executionPayloadJson struct {
type executionPayloadHeaderJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
CoinBase string `json:"coinbase" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptRoot string `json:"receipt_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
Random string `json:"random" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
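As a quick illustration of the rename (a standalone sketch, not the middleware's actual type), the new JSON keys serialize as fee_recipient, receipts_root and prev_randao, where the old encoding used coinbase, receipt_root and random. The struct and values below are placeholders.

package main

import (
	"encoding/json"
	"fmt"
)

// payloadSketch keeps only the renamed fields; all other payload fields are omitted.
type payloadSketch struct {
	FeeRecipient string `json:"fee_recipient"`
	ReceiptsRoot string `json:"receipts_root"`
	PrevRandao   string `json:"prev_randao"`
}

func main() {
	out, _ := json.Marshal(payloadSketch{
		FeeRecipient: "0xaa",
		ReceiptsRoot: "0xbb",
		PrevRandao:   "0xcc",
	})
	fmt.Println(string(out)) // {"fee_recipient":"0xaa","receipts_root":"0xbb","prev_randao":"0xcc"}
}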

View File

@@ -1024,19 +1024,20 @@ func TestProduceBlockV2(t *testing.T) {
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)

View File

@@ -693,7 +693,7 @@ func (bs *Server) GetValidatorPerformance(
return nil, err
}
validatorSummary = vp
case version.Altair:
case version.Altair, version.Bellatrix:
vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return nil, err
@@ -871,7 +871,7 @@ func (bs *Server) GetIndividualVotes(
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
}
case version.Altair:
case version.Altair, version.Bellatrix:
v, bal, err = altair.InitializePrecomputeValidators(ctx, st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up altair pre compute instance: %v", err)

View File

@@ -2100,6 +2100,76 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) {
}
}
func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
ctx := context.Background()
epoch := types.Epoch(1)
headState, _ := util.DeterministicGenesisStateBellatrix(t, 32)
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
defaultBal := params.BeaconConfig().MaxEffectiveBalance
extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth
balances := []uint64{defaultBal, extraBal, extraBal + params.BeaconConfig().GweiPerEth}
require.NoError(t, headState.SetBalances(balances))
publicKey1 := bytesutil.ToBytes48([]byte{1})
publicKey2 := bytesutil.ToBytes48([]byte{2})
publicKey3 := bytesutil.ToBytes48([]byte{3})
validators := []*ethpb.Validator{
{
PublicKey: publicKey1[:],
ActivationEpoch: 5,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: publicKey2[:],
EffectiveBalance: defaultBal,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: publicKey3[:],
EffectiveBalance: defaultBal,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
require.NoError(t, headState.SetValidators(validators))
require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
InclusionSlots: nil,
InclusionDistances: nil,
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
BalancesBeforeEpochTransition: []uint64{101, 102},
BalancesAfterEpochTransition: []uint64{0, 0},
MissingValidators: [][]byte{publicKey1[:]},
InactivityScores: []uint64{0, 0},
}
res, err := bs.GetValidatorPerformance(ctx, &ethpb.ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKey1[:], publicKey3[:], publicKey2[:]},
})
require.NoError(t, err)
if !proto.Equal(want, res) {
t.Errorf("Wanted %v\nReceived %v", want, res)
}
}
func BenchmarkListValidatorBalances(b *testing.B) {
b.StopTimer()
beaconDB := dbTest.SetupDB(b)
@@ -2475,6 +2545,98 @@ func TestServer_GetIndividualVotes_AltairEndOfEpoch(t *testing.T) {
assert.DeepEqual(t, wanted, res, "Unexpected response")
}
func TestServer_GetIndividualVotes_BellatrixEndOfEpoch(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
validators := uint64(32)
beaconState, _ := util.DeterministicGenesisStateBellatrix(t, validators)
startSlot, err := slots.EpochStart(1)
assert.NoError(t, err)
require.NoError(t, beaconState.SetSlot(startSlot))
b := util.NewBeaconBlock()
b.Block.Slot = startSlot
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
gen := stategen.New(beaconDB)
require.NoError(t, gen.SaveState(ctx, gRoot, beaconState))
require.NoError(t, beaconDB.SaveState(ctx, beaconState, gRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
// Save State at the end of the epoch:
endSlot, err := slots.EpochEnd(1)
assert.NoError(t, err)
beaconState, _ = util.DeterministicGenesisStateBellatrix(t, validators)
require.NoError(t, beaconState.SetSlot(endSlot))
pb, err := beaconState.CurrentEpochParticipation()
require.NoError(t, err)
for i := range pb {
pb[i] = 0xff
}
require.NoError(t, beaconState.SetCurrentParticipationBits(pb))
require.NoError(t, beaconState.SetPreviousParticipationBits(pb))
b.Block.Slot = endSlot
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, gen.SaveState(ctx, gRoot, beaconState))
require.NoError(t, beaconDB.SaveState(ctx, beaconState, gRoot))
bs := &Server{
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetIndividualVotes(ctx, &ethpb.IndividualVotesRequest{
Indices: []types.ValidatorIndex{0, 1},
Epoch: 1,
})
require.NoError(t, err)
wanted := &ethpb.IndividualVotesRespond{
IndividualVotes: []*ethpb.IndividualVotesRespond_IndividualVote{
{
ValidatorIndex: 0,
PublicKey: beaconState.Validators()[0].PublicKey,
IsActiveInCurrentEpoch: true,
IsActiveInPreviousEpoch: true,
IsCurrentEpochTargetAttester: true,
IsCurrentEpochAttester: true,
IsPreviousEpochAttester: true,
IsPreviousEpochHeadAttester: true,
IsPreviousEpochTargetAttester: true,
CurrentEpochEffectiveBalanceGwei: params.BeaconConfig().MaxEffectiveBalance,
Epoch: 1,
},
{
ValidatorIndex: 1,
PublicKey: beaconState.Validators()[1].PublicKey,
IsActiveInCurrentEpoch: true,
IsActiveInPreviousEpoch: true,
IsCurrentEpochTargetAttester: true,
IsCurrentEpochAttester: true,
IsPreviousEpochAttester: true,
IsPreviousEpochHeadAttester: true,
IsPreviousEpochTargetAttester: true,
CurrentEpochEffectiveBalanceGwei: params.BeaconConfig().MaxEffectiveBalance,
Epoch: 1,
},
},
}
assert.DeepEqual(t, wanted, res, "Unexpected response")
}
func Test_validatorStatus(t *testing.T) {
tests := []struct {
name string

View File

@@ -77,8 +77,9 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -160,7 +161,6 @@ go_test(
"@com_github_d4l3k_messagediff//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -138,7 +138,7 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
}
// Query the next epoch assignments for committee subnet subscriptions.
nextCommitteeAssignments, _, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1)
nextCommitteeAssignments, nextProposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not compute next committee assignments: %v", err)
}
@@ -180,6 +180,16 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
nextAssignment.AttesterSlot = ca.AttesterSlot
nextAssignment.CommitteeIndex = ca.CommitteeIndex
}
// Cache proposer assignment for the current epoch.
for _, slot := range proposerIndexToSlots[idx] {
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, idx, [8]byte{} /* payloadID */)
}
// Cache proposer assignment for the next epoch.
for _, slot := range nextProposerIndexToSlots[idx] {
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, idx, [8]byte{} /* payloadID */)
}
// Prune payload ID cache for any slots before request slot.
vs.ProposerSlotIndexCache.PrunePayloadIDs(epochStartSlot)
} else {
// If the validator isn't in the beacon state, try finding their deposit to determine their status.
vStatus, _ := vs.validatorStatus(ctx, s, pubKey)
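Outside of the diff, a minimal sketch of the cache lifecycle being wired up here: duties pre-populate slot-to-proposer entries with an empty payload ID, a later step is assumed to overwrite the entry with the engine's real payload ID (that step is not shown in this changeset), and stale slots are pruned. The slot and index values are illustrative; the methods used are the ones appearing in this diff.

package main

import (
	"fmt"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

func main() {
	c := cache.NewProposerPayloadIDsCache()

	// Duties: record that validator 7 is the proposer for slot 96; no payload ID yet.
	c.SetProposerAndPayloadIDs(types.Slot(96), types.ValidatorIndex(7), [8]byte{})

	// Assumption: a later forkchoice update overwrites the entry with the engine's payload ID.
	c.SetProposerAndPayloadIDs(types.Slot(96), types.ValidatorIndex(7), [8]byte{0x1})

	// Proposal time: a hit with a non-zero payload ID lets getExecutionPayload skip preparation.
	idx, pid, ok := c.GetProposerPayloadIDs(types.Slot(96))
	fmt.Println(idx, pid, ok)

	// Entries older than the request's epoch start slot are pruned.
	c.PrunePayloadIDs(types.Slot(64))
}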

View File

@@ -60,9 +60,10 @@ func TestGetDuties_OK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now(),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
// Test the first validator in registry.
@@ -144,10 +145,11 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
// Test the first validator in registry.
@@ -181,12 +183,12 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
require.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
}
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
require.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
// Current epoch and next epoch duties should be equal before the sync period epoch boundary.
assert.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
require.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
// Current epoch and next epoch duties should not be equal at the sync period epoch boundary.
@@ -197,7 +199,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
}
@@ -249,10 +251,11 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
// Test the first validator in registry.
@@ -302,7 +305,7 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
}
@@ -340,11 +343,12 @@ func TestGetAltairDuties_UnknownPubkey(t *testing.T) {
require.NoError(t, err)
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
DepositFetcher: depositCache,
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
DepositFetcher: depositCache,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
unknownPubkey := bytesutil.PadTo([]byte{'u'}, 48)
@@ -399,9 +403,10 @@ func TestGetDuties_CurrentEpoch_ShouldNotFail(t *testing.T) {
State: bState, Root: genesisRoot[:], Genesis: time.Now(),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
// Test the first validator in registry.
@@ -437,9 +442,10 @@ func TestGetDuties_MultipleKeys_OK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now(),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
pubkey0 := deposits[0].Data.PublicKey
@@ -503,11 +509,12 @@ func TestStreamDuties_OK(t *testing.T) {
Genesis: time.Now(),
}
vs := &Server{
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
// Test the first validator in registry.
@@ -560,11 +567,12 @@ func TestStreamDuties_OK_ChainReorg(t *testing.T) {
Genesis: time.Now(),
}
vs := &Server{
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
// Test the first validator in registry.

View File

@@ -37,17 +37,24 @@ func (vs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethp
case version.Phase0:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlock")
continue
}
b.Block = &ethpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
case version.Altair:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *v2.SignedBeaconBlockAltair")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockAltair")
continue
}
b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
case version.Bellatrix:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockBellatrix)
if !ok {
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockBellatrix")
continue
}
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: phBlk}
}
if err := stream.Send(b); err != nil {

View File

@@ -161,7 +161,7 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1
finalizedDeposits := vs.DepositFetcher.FinalizedDeposits(ctx)
depositTrie = finalizedDeposits.Deposits
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, canonicalEth1DataHeight)
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, finalizedDeposits.MerkleTrieIndex, canonicalEth1DataHeight)
insertIndex := finalizedDeposits.MerkleTrieIndex + 1
for _, dep := range upToEth1DataDeposits {
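A self-contained sketch of the indexing contract relied on here (all names below are stand-ins; only the relationship mirrors the diff): non-finalized deposits are now requested starting from the finalized trie index, and insertion into the deposit trie resumes at MerkleTrieIndex + 1.

package main

import "fmt"

// finalizedDepositsSketch stands in for the FinalizedDeposits container used above.
type finalizedDepositsSketch struct {
	MerkleTrieIndex int64 // index of the last deposit already in the finalized trie
}

// nonFinalizedAfter stands in for DepositFetcher.NonFinalizedDeposits(ctx, untilIndex, height).
func nonFinalizedAfter(all []string, untilIndex int64) []string {
	return all[untilIndex+1:]
}

func main() {
	all := []string{"d0", "d1", "d2", "d3", "d4"}
	fd := finalizedDepositsSketch{MerkleTrieIndex: 2} // d0..d2 are already finalized
	insertIndex := fd.MerkleTrieIndex + 1
	for i, dep := range nonFinalizedAfter(all, fd.MerkleTrieIndex) {
		fmt.Printf("insert %s at trie index %d\n", dep, insertIndex+int64(i))
	}
}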

View File

@@ -3,12 +3,10 @@ package validator
import (
"bytes"
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -24,9 +22,31 @@ import (
"github.com/sirupsen/logrus"
)
var (
// payloadIDCacheMiss tracks the number of payload ID requests that aren't present in the cache.
payloadIDCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_id_cache_miss",
Help: "The number of payload id get requests that aren't present in the cache.",
})
// payloadIDCacheHit tracks the number of payload ID requests that are present in the cache.
payloadIDCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_id_cache_hit",
Help: "The number of payload id get requests that are present in the cache.",
})
)
// This returns the execution payload of a given slot. The function has full awareness of pre and post merge.
// The payload is computed given the respected time of merge.
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex) (*enginev1.ExecutionPayload, error) {
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
if ok && proposerID == vIdx && payloadId != [8]byte{} { // Payload ID is cache hit. Return the cached payload ID.
var pid [8]byte
copy(pid[:], payloadId[:])
payloadIDCacheHit.Inc()
return vs.ExecutionEngineCaller.GetPayload(ctx, pid)
}
payloadIDCacheMiss.Inc()
st, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, err
@@ -157,79 +177,7 @@ func (vs *Server) getTerminalBlockHashIfExists(ctx context.Context) ([]byte, boo
return terminalBlockHash.Bytes(), true, nil
}
return vs.getPowBlockHashAtTerminalTotalDifficulty(ctx)
}
// This returns the valid terminal block hash based on total difficulty.
//
// Spec code:
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
// # `pow_chain` abstractly represents all blocks in the PoW chain
// for block in pow_chain:
// parent = pow_chain[block.parent_hash]
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// if block_reached_ttd and not parent_reached_ttd:
// return block
//
// return None
func (vs *Server) getPowBlockHashAtTerminalTotalDifficulty(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := vs.ExecutionEngineCaller.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
if ctx.Err() != nil {
return nil, false, ctx.Err()
}
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := vs.ExecutionEngineCaller.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if parentBlk == nil {
return nil, false, errors.New("parent execution block is nil")
}
if blockReachedTTD {
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
log.WithFields(logrus.Fields{
"number": blk.Number,
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
"td": blk.TotalDifficulty,
"parentTd": parentBlk.TotalDifficulty,
"ttd": terminalTotalDifficulty,
}).Info("Retrieved terminal block hash")
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
return vs.ExecutionEngineCaller.GetTerminalBlockHash(ctx)
}
// activationEpochNotReached returns true if the activation epoch has not been reached.
@@ -246,18 +194,6 @@ func activationEpochNotReached(slot types.Slot) bool {
return false
}
func tDStringToUint256(td string) (*uint256.Int, error) {
b, err := hexutil.DecodeBig(td)
if err != nil {
return nil, err
}
i, overflows := uint256.FromBig(b)
if overflows {
return nil, errors.New("total difficulty overflowed")
}
return i, nil
}
func emptyPayload() *enginev1.ExecutionPayload {
return &enginev1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),

View File

@@ -6,9 +6,9 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
types "github.com/prysmaticlabs/eth2-types"
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
powtesting "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -21,26 +21,6 @@ import (
"github.com/prysmaticlabs/prysm/testing/util"
)
func Test_tDStringToUint256(t *testing.T) {
i, err := tDStringToUint256("0x0")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(0), i)
i, err = tDStringToUint256("0x10000")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(65536), i)
_, err = tDStringToUint256("100")
require.ErrorContains(t, "hex string without 0x prefix", err)
_, err = tDStringToUint256("0xzzzzzz")
require.ErrorContains(t, "invalid hex string", err)
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
require.ErrorContains(t, "hex number > 256 bits", err)
}
func TestServer_activationEpochNotReached(t *testing.T) {
require.Equal(t, false, activationEpochNotReached(0))
@@ -106,6 +86,11 @@ func TestServer_getExecutionPayload(t *testing.T) {
payloadID: &pb.PayloadIDBytes{0x1},
validatorIndx: 1,
},
{
name: "transition completed, happy case, payload ID cached",
st: transitionSt,
validatorIndx: 100,
},
{
name: "transition completed, could not prepare payload",
st: transitionSt,
@@ -132,10 +117,12 @@ func TestServer_getExecutionPayload(t *testing.T) {
params.OverrideBeaconConfig(cfg)
vs := &Server{
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr},
HeadFetcher: &chainMock.ChainService{State: tt.st},
BeaconDB: beaconDB,
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr},
HeadFetcher: &chainMock.ChainService{State: tt.st},
BeaconDB: beaconDB,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100})
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
@@ -146,137 +133,6 @@ func TestServer_getExecutionPayload(t *testing.T) {
}
}
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
tests := []struct {
name string
paramsTd string
currentPowBlock *pb.ExecutionBlock
parentPowBlock *pb.ExecutionBlock
errLatestExecutionBlk error
wantTerminalBlockHash []byte
wantExists bool
errString string
}{
{
name: "config td overflows",
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
errString: "could not convert terminal total difficulty to uint256",
},
{
name: "could not get latest execution block",
paramsTd: "1",
errLatestExecutionBlk: errors.New("blah"),
errString: "could not get latest execution block",
},
{
name: "nil latest execution block",
paramsTd: "1",
errString: "latest execution block is nil",
},
{
name: "current execution block invalid TD",
paramsTd: "1",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "current execution block has zero hash parent",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: params.BeaconConfig().ZeroHash[:],
TotalDifficulty: "0x3",
},
},
{
name: "could not get parent block",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
errString: "could not get parent execution block",
},
{
name: "parent execution block invalid TD",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "1",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "happy case",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
wantExists: true,
wantTerminalBlockHash: []byte{'a'},
},
{
name: "ttd not reached",
paramsTd: "3",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x2",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = tt.paramsTd
params.OverrideBeaconConfig(cfg)
var m map[[32]byte]*pb.ExecutionBlock
if tt.parentPowBlock != nil {
m = map[[32]byte]*pb.ExecutionBlock{
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
}
}
vs := &Server{
ExecutionEngineCaller: &powtesting.EngineClient{
ErrLatestExecBlock: tt.errLatestExecutionBlk,
ExecutionBlock: tt.currentPowBlock,
BlockByHashMap: m,
},
}
b, e, err := vs.getPowBlockHashAtTerminalTotalDifficulty(context.Background())
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tt.wantExists, e)
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
}
})
}
}
func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
tests := []struct {
name string

View File

@@ -10,6 +10,7 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -2344,7 +2345,8 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
ExecutionPayload: payload,
},
BeaconDB: db,
BeaconDB: db,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)

View File

@@ -41,6 +41,7 @@ import (
type Server struct {
Ctx context.Context
AttestationCache *cache.AttestationCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
FinalizationFetcher blockchain.FinalizationFetcher

View File

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
@@ -25,7 +26,7 @@ var errPubkeyDoesNotExist = errors.New("pubkey does not exist")
var errOptimisticMode = errors.New("the node is currently optimistic and cannot serve validators")
var nonExistentIndex = types.ValidatorIndex(^uint64(0))
const numStatesToCheck = 2
var errParticipation = status.Errorf(codes.Internal, "Failed to obtain epoch participation")
// ValidatorStatus returns the validator status of the current epoch.
// The status response can be one of the following:
@@ -110,44 +111,72 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
return nil, status.Error(codes.Internal, "Could not get head state")
}
currEpoch := slots.ToEpoch(headState.Slot())
isRecent, resp := checkValidatorsAreRecent(currEpoch, req)
// Return early if we are in phase0.
if headState.Version() == version.Phase0 {
log.Info("Skipping doppelganger check for Phase 0")
resp := &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
}
for _, v := range req.ValidatorRequests {
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
DuplicateExists: false,
})
}
return resp, nil
}
headSlot := headState.Slot()
currEpoch := slots.ToEpoch(headSlot)
// If all provided keys are recent we skip this check
// as we are unable to effectively determine if a doppelganger
// is active.
isRecent, resp := checkValidatorsAreRecent(currEpoch, req)
if isRecent {
return resp, nil
}
// We walk back from the current head state to the state at the beginning of the previous 2 epochs.
// Where S_i , i := 0,1,2. i = 0 would signify the current head state in this epoch.
previousEpoch, err := currEpoch.SafeSub(1)
// We request a state 32 slots ago. We are guaranteed to have
// currentSlot > 32 since we assume that we are in Altair's fork.
prevStateSlot := headSlot - params.BeaconConfig().SlotsPerEpoch
prevEpochEnd, err := slots.EpochEnd(slots.ToEpoch(prevStateSlot))
if err != nil {
previousEpoch = currEpoch
return nil, status.Error(codes.Internal, "Could not get previous epoch's end")
}
olderEpoch, err := previousEpoch.SafeSub(1)
if err != nil {
olderEpoch = previousEpoch
}
prevState, err := vs.retrieveAfterEpochTransition(ctx, previousEpoch)
prevState, err := vs.ReplayerBuilder.ReplayerForSlot(prevEpochEnd).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get previous state")
}
olderState, err := vs.retrieveAfterEpochTransition(ctx, olderEpoch)
headCurrentParticipation, err := headState.CurrentEpochParticipation()
if err != nil {
return nil, status.Error(codes.Internal, "Could not get older state")
return nil, errParticipation
}
headPreviousParticipation, err := headState.PreviousEpochParticipation()
if err != nil {
return nil, errParticipation
}
prevCurrentParticipation, err := prevState.CurrentEpochParticipation()
if err != nil {
return nil, errParticipation
}
prevPreviousParticipation, err := prevState.PreviousEpochParticipation()
if err != nil {
return nil, errParticipation
}
resp = &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
}
for _, v := range req.ValidatorRequests {
// If the validator's last recorded epoch was
// less than or equal to `numStatesToCheck` epochs ago, this method will not
// be able to catch duplicates. This is due to how attestation
// inclusion works, where an attestation for the current epoch
// is able to be included in the current or next epoch. Depending
// on which epoch it is included in, the balance change will be
// reflected in the following epoch.
if v.Epoch+numStatesToCheck >= currEpoch {
// If the validator's last recorded epoch was less than 1 epoch
// ago, the current doppelganger check will not be able to
// identify doppelgangers since an attestation can take up to
// 31 slots to be included.
if v.Epoch+2 >= currEpoch {
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
@@ -155,37 +184,15 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
})
continue
}
valIndex, ok := olderState.ValidatorIndexByPubkey(bytesutil.ToBytes48(v.PublicKey))
valIndex, ok := prevState.ValidatorIndexByPubkey(bytesutil.ToBytes48(v.PublicKey))
if !ok {
// Ignore if validator pubkey doesn't exist.
continue
}
baseBal, err := olderState.BalanceAtIndex(valIndex)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get validator's balance")
}
nextBal, err := prevState.BalanceAtIndex(valIndex)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get validator's balance")
}
// If the next epoch's balance is higher, we mark it as an existing
// duplicate.
if nextBal > baseBal {
log.Infof("current %d with last epoch %d and difference in bal %d gwei", currEpoch, v.Epoch, nextBal-baseBal)
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
DuplicateExists: true,
})
continue
}
currBal, err := headState.BalanceAtIndex(valIndex)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get validator's balance")
}
// If the current epoch's balance is higher, we mark it as an existing
// duplicate.
if currBal > nextBal {
if (headCurrentParticipation[valIndex] != 0) || (headPreviousParticipation[valIndex] != 0) ||
(prevCurrentParticipation[valIndex] != 0) || (prevPreviousParticipation[valIndex] != 0) {
log.WithField("ValidatorIndex", valIndex).Infof("Participation flag found")
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
@@ -374,8 +381,8 @@ func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequ
// Due to how balances are reflected for individual
// validators, we can only effectively determine if a
// validator voted or not if we are able to look
// back more than `numStatesToCheck` epochs into the past.
if v.Epoch+numStatesToCheck < headEpoch {
// back more than 2 epoch into the past.
if v.Epoch+2 < headEpoch {
validatorsAreRecent = false
// Zero out response if we encounter non-recent validators to
// guard against potential misuse.
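To summarize the new rule in isolation, a minimal, self-contained sketch (the slices and index below are illustrative): a key is reported as a duplicate when any of the four participation bytes for its index is non-zero, replacing the old balance-delta comparison.

package main

import "fmt"

// duplicateExists mirrors the participation check above: any non-zero flag in the head
// state's current/previous participation, or in the previous state's, marks a duplicate.
func duplicateExists(valIndex int, headCurr, headPrev, prevCurr, prevPrev []byte) bool {
	return headCurr[valIndex] != 0 || headPrev[valIndex] != 0 ||
		prevCurr[valIndex] != 0 || prevPrev[valIndex] != 0
}

func main() {
	headCurr := []byte{0, 0, 1} // validator 2 attested in the current epoch
	headPrev := []byte{0, 0, 0}
	prevCurr := []byte{0, 0, 0}
	prevPrev := []byte{0, 0, 0}
	fmt.Println(duplicateExists(2, headCurr, headPrev, prevCurr, prevPrev)) // true
	fmt.Println(duplicateExists(1, headCurr, headPrev, prevCurr, prevPrev)) // false
}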

View File

@@ -8,7 +8,6 @@ import (
"github.com/d4l3k/messagediff"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -17,7 +16,6 @@ import (
mockstategen "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen/mock"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/crypto/bls"
@@ -961,27 +959,15 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "normal doppelganger request",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, os, keys, builder := createStateSetup(t, 4)
// Previous Epoch State
for i := 0; i < 3; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 100 gwei, to mock an inactivity leak
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+1000000000))
}
// Older Epoch State
for i := 0; i < 3; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 200 gwei, to mock an inactivity leak
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+2000000000))
}
hs, ps, keys := createStateSetupAltair(t, 3)
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 23)
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: builder,
ReplayerBuilder: rb,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1005,37 +991,19 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "doppelganger exists current epoch",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, os, keys, builder := createStateSetup(t, 4)
// Previous Epoch State
for i := 0; i < 2; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 100 gwei, to mock an inactivity leak
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+1000000000))
}
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(2))
assert.NoError(t, err)
// Sub 100 gwei, to mock an active validator.
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-1000000000))
// Older Epoch State
for i := 0; i < 2; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 200 gwei, to mock an inactivity leak
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+2000000000))
}
bal, err = os.BalanceAtIndex(types.ValidatorIndex(2))
assert.NoError(t, err)
// Sub 100 gwei, to mock an active validator.
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-1000000000))
hs, ps, keys := createStateSetupAltair(t, 3)
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 23)
currentIndices := make([]byte, 64)
currentIndices[2] = 1
require.NoError(t, hs.SetCurrentParticipationBits(currentIndices))
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: builder,
ReplayerBuilder: rb,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1056,7 +1024,7 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
// Add in for duplicate validator
request.ValidatorRequests = append(request.ValidatorRequests, &ethpb.DoppelGangerRequest_ValidatorRequest{
PublicKey: keys[2].PublicKey().Marshal(),
Epoch: 1,
Epoch: 0,
SignedRoot: []byte{'A'},
})
response.Responses = append(response.Responses, &ethpb.DoppelGangerResponse_ValidatorResponse{
@@ -1070,37 +1038,19 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "doppelganger exists previous epoch",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, os, keys, builder := createStateSetup(t, 4)
// Previous Epoch State
for i := 0; i < 2; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 100 gwei, to mock an inactivity leak
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+1000000000))
}
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(2))
assert.NoError(t, err)
// Sub 100 gwei, to mock an active validator.
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-1000000000))
// Older Epoch State
for i := 0; i < 2; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 200 gwei, to mock an inactivity leak
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+2000000000))
}
bal, err = os.BalanceAtIndex(types.ValidatorIndex(2))
assert.NoError(t, err)
// Sub 200 gwei, to mock an active validator.
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-2000000000))
hs, ps, keys := createStateSetupAltair(t, 3)
prevIndices := make([]byte, 64)
prevIndices[2] = 1
require.NoError(t, ps.SetPreviousParticipationBits(prevIndices))
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 23)
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: builder,
ReplayerBuilder: rb,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1121,7 +1071,7 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
// Add in for duplicate validator
request.ValidatorRequests = append(request.ValidatorRequests, &ethpb.DoppelGangerRequest_ValidatorRequest{
PublicKey: keys[2].PublicKey().Marshal(),
Epoch: 1,
Epoch: 0,
SignedRoot: []byte{'A'},
})
response.Responses = append(response.Responses, &ethpb.DoppelGangerResponse_ValidatorResponse{
@@ -1135,29 +1085,26 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "multiple doppelganger exists",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, os, keys, builder := createStateSetup(t, 4)
// Previous Epoch State
for i := 10; i < 15; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 100 gwei, to mock an inactivity leak
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-1000000000))
}
hs, ps, keys := createStateSetupAltair(t, 3)
currentIndices := make([]byte, 64)
currentIndices[10] = 1
currentIndices[11] = 2
require.NoError(t, hs.SetPreviousParticipationBits(currentIndices))
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 23)
// Older Epoch State
for i := 10; i < 15; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 200 gwei, to mock an inactivity leak
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-2000000000))
prevIndices := make([]byte, 64)
for i := 12; i < 20; i++ {
prevIndices[i] = 1
}
require.NoError(t, ps.SetCurrentParticipationBits(prevIndices))
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: builder,
ReplayerBuilder: rb,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1167,7 +1114,7 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
// Add in for duplicate validator
request.ValidatorRequests = append(request.ValidatorRequests, &ethpb.DoppelGangerRequest_ValidatorRequest{
PublicKey: keys[i].PublicKey().Marshal(),
Epoch: 1,
Epoch: 0,
SignedRoot: []byte{'A'},
})
response.Responses = append(response.Responses, &ethpb.DoppelGangerResponse_ValidatorResponse{
@@ -1175,6 +1122,17 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
DuplicateExists: true,
})
}
for i := 15; i < 20; i++ {
request.ValidatorRequests = append(request.ValidatorRequests, &ethpb.DoppelGangerRequest_ValidatorRequest{
PublicKey: keys[i].PublicKey().Marshal(),
Epoch: 3,
SignedRoot: []byte{'A'},
})
response.Responses = append(response.Responses, &ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: keys[i].PublicKey().Marshal(),
DuplicateExists: false,
})
}
return vs, request, response
},
@@ -1183,14 +1141,19 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "attesters are too recent",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, _, _, keys, _ := createStateSetup(t, 4)
hs, ps, keys := createStateSetupAltair(t, 3)
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 23)
currentIndices := make([]byte, 64)
currentIndices[0] = 1
currentIndices[1] = 2
require.NoError(t, hs.SetPreviousParticipationBits(currentIndices))
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: nil,
ReplayerBuilder: rb,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1208,6 +1171,76 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
})
}
return vs, request, response
},
},
{
name: "attesters are too recent(previous state)",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, keys := createStateSetupAltair(t, 3)
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 23)
currentIndices := make([]byte, 64)
currentIndices[0] = 1
currentIndices[1] = 2
require.NoError(t, ps.SetPreviousParticipationBits(currentIndices))
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: rb,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
}
response := &ethpb.DoppelGangerResponse{Responses: make([]*ethpb.DoppelGangerResponse_ValidatorResponse, 0)}
for i := 0; i < 15; i++ {
request.ValidatorRequests = append(request.ValidatorRequests, &ethpb.DoppelGangerRequest_ValidatorRequest{
PublicKey: keys[i].PublicKey().Marshal(),
Epoch: 1,
SignedRoot: []byte{'A'},
})
response.Responses = append(response.Responses, &ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: keys[i].PublicKey().Marshal(),
DuplicateExists: false,
})
}
return vs, request, response
},
},
{
name: "exit early for Phase 0",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, _, keys := createStateSetupPhase0(t, 3)
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{
{
PublicKey: keys[0].PublicKey().Marshal(),
Epoch: 1,
SignedRoot: []byte{'A'},
},
},
}
response := &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{
{
PublicKey: keys[0].PublicKey().Marshal(),
DuplicateExists: false,
},
},
}
return vs, request, response
},
},
@@ -1228,104 +1261,36 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
}
}
func createStateSetup(t *testing.T, head types.Epoch) (state.BeaconState,
state.BeaconState, state.BeaconState, []bls.SecretKey, *mockstategen.MockReplayerBuilder) {
rb := &mockstategen.MockReplayerBuilder{}
func createStateSetupPhase0(t *testing.T, head types.Epoch) (state.BeaconState,
state.BeaconState, []bls.SecretKey) {
gs, keys := util.DeterministicGenesisState(t, 64)
hs := gs.Copy()
// Head State
headEpoch := head
headSlot := types.Slot(headEpoch) * params.BeaconConfig().SlotsPerEpoch
headSlot := types.Slot(head)*params.BeaconConfig().SlotsPerEpoch + params.BeaconConfig().SlotsPerEpoch/2
assert.NoError(t, hs.SetSlot(headSlot))
assignments, _, err := helpers.CommitteeAssignments(context.Background(), hs, headEpoch)
assert.NoError(t, err)
for _, ctr := range assignments {
pendingAtt := &ethpb.PendingAttestation{
AggregationBits: bitfield.NewBitlist64(uint64(len(ctr.Committee))).ToBitlist().Not(),
Data: &ethpb.AttestationData{
Slot: ctr.AttesterSlot,
CommitteeIndex: ctr.CommitteeIndex,
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, fieldparams.RootLength),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: make([]byte, fieldparams.RootLength),
},
},
InclusionDelay: 1,
ProposerIndex: 10,
}
assert.NoError(t, hs.AppendCurrentEpochAttestations(pendingAtt))
}
rb.SetMockState(hs)
// Previous Epoch State
prevEpoch := headEpoch - 1
prevSlot := headSlot - params.BeaconConfig().SlotsPerEpoch
ps := gs.Copy()
prevSlot, err := slots.EpochEnd(prevEpoch)
assert.NoError(t, err)
assert.NoError(t, ps.SetSlot(prevSlot))
assignments, _, err = helpers.CommitteeAssignments(context.Background(), ps, prevEpoch)
assert.NoError(t, err)
for _, ctr := range assignments {
pendingAtt := &ethpb.PendingAttestation{
AggregationBits: bitfield.NewBitlist64(uint64(len(ctr.Committee))).ToBitlist().Not(),
Data: &ethpb.AttestationData{
Slot: ctr.AttesterSlot,
CommitteeIndex: ctr.CommitteeIndex,
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, fieldparams.RootLength),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: make([]byte, fieldparams.RootLength),
},
},
InclusionDelay: 1,
ProposerIndex: 10,
}
assert.NoError(t, ps.AppendCurrentEpochAttestations(pendingAtt))
}
rb.SetMockState(ps)
// Older Epoch State
olderEpoch := prevEpoch - 1
os := gs.Copy()
olderSlot, err := slots.EpochEnd(olderEpoch)
assert.NoError(t, err)
assert.NoError(t, os.SetSlot(olderSlot))
assignments, _, err = helpers.CommitteeAssignments(context.Background(), os, olderEpoch)
assert.NoError(t, err)
for _, ctr := range assignments {
attSlot := ctr.AttesterSlot
if attSlot == olderSlot {
continue
}
pendingAtt := &ethpb.PendingAttestation{
AggregationBits: bitfield.NewBitlist64(uint64(len(ctr.Committee))).ToBitlist().Not(),
Data: &ethpb.AttestationData{
Slot: attSlot,
CommitteeIndex: ctr.CommitteeIndex,
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, fieldparams.RootLength),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: make([]byte, fieldparams.RootLength),
},
},
InclusionDelay: 1,
ProposerIndex: 10,
}
assert.NoError(t, os.AppendCurrentEpochAttestations(pendingAtt))
}
rb.SetMockState(os)
return hs, ps, os, keys, rb
return hs, ps, keys
}
func createStateSetupAltair(t *testing.T, head types.Epoch) (state.BeaconState,
state.BeaconState, []bls.SecretKey) {
gs, keys := util.DeterministicGenesisStateAltair(t, 64)
hs := gs.Copy()
// Head State
headSlot := types.Slot(head)*params.BeaconConfig().SlotsPerEpoch + params.BeaconConfig().SlotsPerEpoch/2
assert.NoError(t, hs.SetSlot(headSlot))
// Previous Epoch State
prevSlot := headSlot - params.BeaconConfig().SlotsPerEpoch
ps := gs.Copy()
assert.NoError(t, ps.SetSlot(prevSlot))
return hs, ps, keys
}

View File

@@ -109,6 +109,7 @@ type Config struct {
StateGen *stategen.State
MaxMsgSize int
ExecutionEngineCaller powchain.EngineCaller
ProposerIdsCache *cache.ProposerPayloadIDsCache
}
// NewService instantiates a new RPC service instance that will
@@ -207,6 +208,7 @@ func (s *Service) Start() {
ReplayerBuilder: ch,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,

View File

@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: 6de36f732d72b5c4c0c967bc0edcc752b7afdd337e829486954eb6affda84da8
// Hash: 2e923b42b8e4fcc278301da6506b212334a78169cb32c70e0d66a636435b8925
package v1
import (

beacon-chain/state/state-native/v2/generated.ssz.go Executable file → Normal file
View File

@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: 6a7886393e8874ccf57ea6c160647da09f5e541234a235ee71f3bf786d56a100
// Hash: ec98b14e43fd11e74e0d9e705a7afe74a77706c3e215d7940b11411859873f4b
package v2
import (

Some files were not shown because too many files have changed in this diff.