Compare commits

...

73 Commits

Author SHA1 Message Date
Kasey Kirkham
9c5f519b11 comment out committe caching for subsequent epoch 2022-04-28 08:13:11 -05:00
Kasey Kirkham
3bb6bc6dfd more debug logs 2022-04-27 14:31:57 -05:00
Kasey Kirkham
daaa806342 more logs and commented out code for debugging 2022-04-15 07:11:17 -05:00
Kasey Kirkham
19ed30a74c more wild attempts at debugging 2022-04-13 17:42:00 -05:00
Kasey Kirkham
cf438e90f7 bump bellatrix epoch further while debugging 2022-04-13 13:20:05 -05:00
Kasey Kirkham
c5840a86c1 more debug log context 2022-04-13 13:19:49 -05:00
Kasey Kirkham
760fe3cc2a rebuild config index after loading dynamic config 2022-04-13 13:18:54 -05:00
terence tsao
276b7e96cc fix: test errors 2022-04-12 18:26:57 -05:00
terence tsao
e39a006a7b Add: wss payload block hash as last default 2022-04-12 18:26:48 -05:00
Kasey Kirkham
e290556111 e2ez changes to backport after cp sync e2e merge 2022-04-12 13:29:30 -05:00
Kasey Kirkham
91a6928efb error logging improvements 2022-04-12 13:25:31 -05:00
Kasey Kirkham
8d5fd3da54 finalization index bug fix w/ logging buffs 2022-04-12 12:13:50 -05:00
Kasey Kirkham
8b6ab3e816 revert ws refactoring while debugging e2e 2022-04-11 10:02:55 -05:00
Kasey Kirkham
51598ed0d5 set MinValidatorWithdrawabilityDelay=1 for ws test 2022-04-11 10:02:55 -05:00
Kasey Kirkham
2a9129ee85 use config files for alt e2e configs 2022-04-11 10:02:55 -05:00
Kasey Kirkham
aac1f4895c rm hardcoded cfg in bootnode; cp sync as mainnet 2022-04-11 10:02:55 -05:00
Kasey Kirkham
86e36fe3a2 fixing up some rebase artifacts 2022-04-11 10:02:54 -05:00
Kasey Kirkham
ece54828d4 fix some broken tests, use fixed error vals 2022-04-11 10:01:41 -05:00
Kasey Kirkham
afe6c5e6cd checkpoint sync end-to-end test 2022-04-11 10:01:40 -05:00
Kasey Kirkham
08aff8f60f zpage server to help e2e debugging 2022-04-11 09:29:56 -05:00
Taranpreet26311
6903d52dde Added horusec (#10499)
* Added horusec

* Improving aesthetics

* Restrict branch
2022-04-08 10:52:04 -04:00
Potuz
ac8d27bcf1 Deal with node balance underflow (#10492)
* deal with balance underflow

* fix log format

* preston's review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-04-07 17:38:48 +00:00
Nishant Das
8d6afb3afd Fix Issues From Multiclient E2E (#10486)
* fix it

* fix index addition

* fix sync issues

* make it nicer
2022-04-07 01:52:24 +00:00
james-prysm
d51f716675 web3signer: fixes json altair block (#10490)
* fix

* fixing naming
2022-04-07 08:24:30 +08:00
terence tsao
0411b7eceb Payload ID caching (#10481) 2022-04-06 16:36:52 -07:00
Radosław Kapka
8dfc80187d Fix receipts root field name in API Middleware (#10488)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-06 21:56:11 +00:00
terence tsao
3833f78803 Remove invalid nodes (#10399) 2022-04-06 14:24:00 -07:00
Potuz
83a83279d4 Remove synced tips and use last valid hash (#10439)
* Remove synced tips

use last valid hash in removing invalid nodes.

* add test

* Remove unused code

* More unused parameters

* Fix proposer boost

* terence's review #1

* Fix conflicts

* terence's review 2

* rename argument

* terence's review #3

* rename optimistic -> status

* Minor clean up

* revert loop variable change

* do not mark lvh as valid

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-04-06 14:18:30 +00:00
terence tsao
bdab34fd01 Use correct head for notifyForkchoiceUpdate (#10485)
* Use correct head for notifyForkchoiceUpdate

* Fix existing tests

* Update receive_attestation_test.go
2022-04-06 19:05:53 +08:00
kasey
de0143e036 save origin block root before finalize (#10463)
* save origin block root before finalize

* add test for SaveOrigin

* goimports :(

* signature to LoadGenesis changed in a diff PR

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-04-05 17:13:53 +00:00
Nishant Das
2a7a09b112 Add Lock Analyzer (#10430)
* add lock analyzer

* fix locks

* progress

* fix failures

* fix error log

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-04-05 16:39:48 +00:00
terence tsao
984575ed57 Optimistic check: handle zeros check point root (#10464)
* Handle zero roots

* Update chain_info_test.go

* Update chain_info_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-05 14:37:25 +00:00
Nishant Das
927e338f9e Enable Bellatrix E2E (#10437)
* Fix finding terminal block hash calculation

* Update mainnet_config.go

* Update beacon_block.pb.go

* Various fixes to pass all spec tests for Merge (#9777)

* Proper upgrade altair to merge state

* Use uint64 for ttd

* Correctly upgrade to merge state + object mapping fixes

* Use proper receive block path for initial syncing

* Disable contract lookback

* Disable deposit contract lookback

* Go fmt

* Merge: switch from go bindings to raw rpc calls (#9803)

* Disable genesis ETH1.0 chain header logging

* Update htrutils.go

* all gossip tests passing

* Remove gas validations

* Update penalty params for Merge

* Fix gossip and tx size limits for the merge part 1

* Remove extraneous p2p condition

* Add and use

* Add and use TBH_ACTIVATION_EPOCH

* Update WORKSPACE

* Update Kintsugi engine API (#9865)

* Kintsugi ssz (#9867)

* All spec tests pass

* Update spec test shas

* Update Kintsugi consensus implementations (#9872)

* Remove secp256k1

* Remove unused merge genesis state gen tool

* Manually override nil transaction field. M2 works

* Fix bad hex conversion

* Change Gossip message size and Chunk SIze from 1 MB t0 10MB (#9860)

* change gossip size and chunk size after merge

* change ssz to accomodate both changes

* gofmt config file

* add testcase for merge MsgId

* Update beacon-chain/p2p/message_id.go

Change MB to Mib in comment

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* change function name from altairMsgID to postAltairMsgID

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* Sync with develop

* Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi

* Update state_trie.go

* Clean up conflicts

* Fix build

* Update config to devnet1

* Fix state merge

* Handle merge test case for update balance

* Fix build

* State pkg cleanup

* Fix a bug with loading mainnet state

* Fix transactions root

* Add v2 endpoint for merge blocks (#9802)

* Add V2 blocks endpoint for merge blocks

* Update beacon-chain/rpc/apimiddleware/structs.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* go mod

* fix transactions

* Terence's comments

* add missing file

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Sync

* Go mod tidy

* change EP field names

* latest kintusgi execution api

* fix conflicts

* converting base fee to big endian format (#10018)

* ReverseByteOrder function does not mess the input

* sync with develop

* use merge gossip sizes

* correct gossip sizes this time

* visibility

* clean ups

* Sync with develop, fix payload nil check bug

* Speed up syncing, hide cosmetic errors

* Sync with develop

* Clean up after sync

* Update generate_keys.go

* sync with develop

* Update mainnet_config.go

* Clean ups

* Sync optimistically candidate blocks (#10193)

* Revert "Sync optimistically candidate blocks (#10193)"

This reverts commit f99a0419ef.

* Sync optimistically candidate blocks (#10193)

* allow optimistic sync

* Fix merge transition block validation

* Update proposer.go

* Sync with develop

* delete deprecated client, update testnet flag

* Change optimistic logic (#10194)

* Logs and err handling

* Fix build

* Clean ups

* Add back get payload

* c

* Done

* Rm uncommented

* Optimistic sync: prysm validator rpcs (#10200)

* Logs to reproduce

* Use pointers

* Use pointers

* Use pointers

* Update json_marshal_unmarshal.go

* Fix marshal

* Update json_marshal_unmarshal.go

* Log

* string total diff

* str

* marshal un

* set string

* json

* gaz

* Comment out optimistic status

* remove kiln flag here (#10269)

* Sync with devleop

* Sync with develop

* clean ups

* refactor engine calls

* Update process_block.go

* Fix deadlock, uncomment duty opt sync

* Update proposer_execution_payload.go

* Sync with develop

* Rm post state check

* Bypass eth1 data checks

* Update proposer_execution_payload.go

* Return early if ttd is not reached

* Sync with devleop

* Update process_block.go

* Update receive_block.go

* Update bzl

* Revert "Update receive_block.go"

This reverts commit 5b4a87c512.

* Fix run time

* add in all the fixes

* fix evaluator bugs

* latest fixes

* sum

* fix to be configurable

* Update go.mod

* Fix AltairCompatible to account for future state version

* Update proposer_execution_payload.go

* fix broken conditional checks

* fix all issues

* Handle pre state Altair with valid payload

* Handle pre state Altair with valid payload

* Log bellatrix fields

* Update log.go

* Revert "fix broken conditional checks"

This reverts commit e118db6c20.

* LH multiclient working

* Friendly fee recipient log

* Remove extra SetOptimisticToValid

* fix race

* fix test

* Fix base fee per gas

* Fix notifypayload headroot

* tx fuzzer

* clean up with develop branch

* save progress

* 200tx/block

* add LH flags

* Sync with devleop

* cleanup

* cleanup

* hash

* fix build

* fix test

* fix go check

* fmt

* gosec

* add deps

* cleanup

* fix up

* change gas price

* remove flag

* last fix

* use new LH version

* fix up

* fix finalized block panic

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@zahoor.in>
Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Zahoor Mohamed <zahoor@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-04-05 14:02:46 +00:00
terence tsao
f44c99d92a Add blinded beacon block protobufs and wrappers (#10473)
Co-authored-by: rkapka <rkapka@wp.pl>
2022-04-05 14:57:19 +02:00
terence tsao
7d669f23ab Sync: process pending block with optimistic parent (#10480) 2022-04-04 18:46:05 -07:00
terence tsao
9b64c33bd1 RPC: GetValidatorPerformance for Bellatrix (#10482) 2022-04-04 19:50:57 -04:00
Potuz
defa602e50 Adapt Doppelganger to Altair (#9969)
Co-authored-by: rkapka <rkapka@wp.pl>
2022-04-04 15:55:55 +02:00
Radosław Kapka
67c8776f3c Fix execution payload field names in API Middleware (#10479) 2022-04-04 13:19:31 +02:00
Mohamed Zahoor
896d186e3b process the optimistic blocks whose parent are optimistic too (#10350)
* process the optimistic blocks whose parent are optimistic too

* adding unit test

* fix test

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-04-03 23:57:52 -03:00
Raul Jordan
edb98cf499 Move Engine API to Powchain and Consolidate Connection Management (#10438)
* gazelle

* tests passing

* use the same engine client

* pass

* initialize in right place

* erge

* build

* imports

* ensure engine checks work

* pass powchain tests

* powchain tests pass

* deepsource

* fix up node issues

* gaz

* endpoint use

* baz

* b

* conf

* lint

* gaz

* move to start function

* test pass
2022-04-01 14:04:24 -04:00
Potuz
ce15823f8d Save head after FCU (#10466)
* Update head after FCU

* rename function

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-04-01 00:08:01 +00:00
terence tsao
7cdc741b2f Sync: don't set block to bad due to timeout (#10470)
* Add filter error and tests

* Update BUILD.bazel

* Kasey's feedback
2022-03-31 23:29:27 +00:00
terence tsao
5704fb34be Remove duplicated engine mock (#10472)
* Rm mock

* udpate

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-03-31 19:14:09 -04:00
Preston Van Loon
3e2a037d42 github workflows: pin go version to 1.17 (#10471)
* github workflows: pin go version to 1.17

* Update go.yml

* Revert "Update go.yml"

This reverts commit 4a2d36d05d.

* pin golangci-lint

* try go 1.17

* Revert "Revert "Update go.yml""

This reverts commit 8a89663874.

* move and increase version of checkout

* attempt to ignore export path

* try with entrypoint only

* try some rearranging of stuff

* Split up jobs

* Use hack mentioned in https://github.com/securego/gosec/issues/469\#issuecomment-643823092

* Delete dappnode release trigger

* rm id

* try pin golangci-lint version

* try pin golangci-lint version

* Do not provide a specific go version and lets see what happens

* comment checkout, wtf is wrong with github actions

* it works locally...

* trying with some cache key for lint...

* Revert "trying with some cache key for lint..."

This reverts commit c4f5ae4495.

* try tellign it to skip go installation

* revert commented line, do something to satisify deepsource

* do something to satisify deepsource
2022-03-31 22:16:14 +00:00
Potuz
177f9ccab0 Return historical non-canonical blocks as optimistic (#10446)
* Return historical non-canonical blocks as optimistic

* terence's review
2022-03-31 12:51:14 +00:00
Potuz
649dee532f insert block to forkchoice after notifyNewPayload (#10453)
* insert block to forkchoice after notifyNewPayload

* Add optimistic candidate check

* Terence's review

* Apply suggestions from code review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-31 09:17:49 -03:00
terence tsao
4b3364ac6b Log terminal total difficulty status (#10457)
* Log terminal total diff status

* Update check_transition_config.go

* Update BUILD.bazel

* Update check_transition_config.go

* Update check_transition_config.go

* Update check_transition_config.go
2022-03-31 03:05:23 +00:00
kasey
0df8d7f0c0 refactor genesis state flag handling, support url (#10449)
* refactor genesis state flag handling, support url

* lint fix

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-03-30 22:23:34 +00:00
Mike Neuder
ade7d705ec Cleanup of Keymanager Deleter interface (#10415)
* Migrating Keymanager account list functionality into each keymanager type

* Addressing review comments

* Adding newline at end of BUILD.bazel

* bazel run //:gazelle -- fix

* account deleter cleanup

* bazel run //:gazelle -- fix

* remove stale logging statement

* adding deleter interface to mock functions

* fixing deepsource findings

* go.sum

* bazel run //:gazelle -- fix

* go mod t-dy -compat=1.17

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-03-30 16:52:54 -05:00
Preston Van Loon
c68894b77d Spectest: Improve test size and timeouts (#10461)
* Adjust test sizes

* Improve the bazel test attributes for forkchoice and other tests

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-30 20:04:42 +00:00
terence tsao
fe98d69c0a Clean up blockchain pkg (#10452)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-30 17:45:24 +00:00
terence tsao
7516bc0316 Fix execution block's base fee endianess marshal/unmarshal (#10459) 2022-03-30 10:17:18 -07:00
Radosław Kapka
be6f3892e8 Align Beacon API with 2.2.0 spec (#10455) 2022-03-30 15:17:19 +00:00
Potuz
588605ceeb Remove TODOs that were already taken care of (#10454) 2022-03-30 13:24:02 +00:00
terence tsao
59b9519284 Forkchoice spec test: set boost with deterministic timing (#10428)
* Set boost with deterministic timing

* confs

* gaz

* Update runner.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-03-29 19:48:33 +00:00
Nishant Das
d4038fb752 add rlock (#10444) 2022-03-29 06:55:41 -07:00
Raul Jordan
56ab5872bb Add Histogram Response Buckets to Engine API Methods (#10414)
* gaz

* defer payload func

* amend

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-28 22:17:31 +00:00
Raul Jordan
eb6d68a4b1 Deterministic Proposer Root Boosting in Forkchoice (#10427)
* begin refactor

* fix doubly linked tree tests

* pass

* fix up

* gaz

* buidl

* buidl more

* comment

* args check

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-28 21:34:41 +00:00
kasey
7920528ede Checkpoint Sync 4/5 - enable checkpoint sync to be used by beacon node (#10386)
* enable checkpoint sync in beacon node

* lint fix

* rm unused error

* addressing PR feedback from Radek

* consistent slice -> fixed conversion

Co-authored-by: kasey <kasey@users.noreply.github.com>
2022-03-28 21:01:55 +00:00
Raul Jordan
1af3c07ec5 Stop Checking Transition Configuration Post-Merge (#10413)
* no longer check transition config after the merge

* nil check

* better payload check

* stop checking post merge based on received blocks

* add proper test

* gaz

* fix test

* lint

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-28 17:14:02 +00:00
terence tsao
7279349ae2 Cleanup helpers in beacon-chain/core/blocks/payload.go (#10435)
* less fragile check for execution payload

* gaz

* gaz

* include field

* clean up helpers for checking merge status

* Update beacon-chain/core/blocks/payload.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* Update payload.go

* Rename

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-03-28 15:25:49 +00:00
Potuz
2d60d04b57 Update LastValidatedCheckpoint on DB (#10412)
* Update LastValidatedCheckpoint on DB

* fix conflicts
2022-03-28 14:48:14 +00:00
Nishant Das
ef8bc97d3e more efficient (#10440) 2022-03-28 20:27:14 +08:00
Nishant Das
071f6de559 Fix Beacon API Responses For Bellatrix (#10436)
* fix bugs

* fix build

* simplify it
2022-03-26 07:34:29 +00:00
Preston Van Loon
b697463da9 wrapper: Remove deprecated fork specific wrapper methods for signed beacon blocks (#10369)
* Remove deprecated wrappers, lots of refactoring

* Revert proto/prysm/v1alpha1/validator.proto

* fix tests

* fix test

* fix conversion in e2e

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-25 23:00:44 +00:00
kasey
bfbf693660 Checkpoint Sync 3/5 - beacon node api client lib and prysmctl cli tool (#10385)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-03-25 17:18:03 +00:00
Radosław Kapka
ed07f4bd77 Add execution_optimistic field to API responses (#10389)
* protos

* grpc

* middleware

* event protos

* event backend

* event middleware

* fix tests

* use `IsOptimisticForRoot` with proper block instead of header

* better algorithm

* return proper error

* review

* optimistic return on bellatrix proposal

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-25 16:31:50 +00:00
terence tsao
1922416cac Use correct pre state to call new payload (#10416)
* Update todo strings

* Go fmt

* add merge specific checks when receiving a block from gossip

* Fix beacon chain build

* Interop merge beacon state

* fix finding Transactions size

* Update go commit

* Merge union debugging (#9751)

* changes test cases per ssz changes

* noisy commit, restoring pb field order codegen

* get rid of codegen garbage

* M2 works with Geth  🎉

* Fix bazel build //...

* restoring generated pb field ordering

* defensive nil check

* separate ExecutionPayload/Header from codegen

* tell bazel about this new file

* Merge: support terminal difficulty override (#9769)

* Fix finding terminal block hash calculation

* Update mainnet_config.go

* Update beacon_block.pb.go

* Various fixes to pass all spec tests for Merge (#9777)

* Proper upgrade altair to merge state

* Use uint64 for ttd

* Correctly upgrade to merge state + object mapping fixes

* Use proper receive block path for initial syncing

* Disable contract lookback

* Disable deposit contract lookback

* Go fmt

* Merge: switch from go bindings to raw rpc calls (#9803)

* Disable genesis ETH1.0 chain header logging

* Update htrutils.go

* all gossip tests passing

* Remove gas validations

* Update penalty params for Merge

* Fix gossip and tx size limits for the merge part 1

* Remove extraneous p2p condition

* Add and use

* Add and use TBH_ACTIVATION_EPOCH

* Update WORKSPACE

* Update Kintsugi engine API (#9865)

* Kintsugi ssz (#9867)

* All spec tests pass

* Update spec test shas

* Update Kintsugi consensus implementations (#9872)

* Remove secp256k1

* Remove unused merge genesis state gen tool

* Manually override nil transaction field. M2 works

* Fix bad hex conversion

* Change Gossip message size and Chunk SIze from 1 MB t0 10MB (#9860)

* change gossip size and chunk size after merge

* change ssz to accomodate both changes

* gofmt config file

* add testcase for merge MsgId

* Update beacon-chain/p2p/message_id.go

Change MB to Mib in comment

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* change function name from altairMsgID to postAltairMsgID

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* Sync with develop

* Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi

* Update state_trie.go

* Clean up conflicts

* Fix build

* Update config to devnet1

* Fix state merge

* Handle merge test case for update balance

* Fix build

* State pkg cleanup

* Fix a bug with loading mainnet state

* Fix transactions root

* Add v2 endpoint for merge blocks (#9802)

* Add V2 blocks endpoint for merge blocks

* Update beacon-chain/rpc/apimiddleware/structs.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* go mod

* fix transactions

* Terence's comments

* add missing file

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Sync

* Go mod tidy

* change EP field names

* latest kintusgi execution api

* fix conflicts

* converting base fee to big endian format (#10018)

* ReverseByteOrder function does not mess the input

* sync with develop

* use merge gossip sizes

* correct gossip sizes this time

* visibility

* clean ups

* Sync with develop, fix payload nil check bug

* Speed up syncing, hide cosmetic errors

* Sync with develop

* Clean up after sync

* Update generate_keys.go

* sync with develop

* Update mainnet_config.go

* Clean ups

* Sync optimistically candidate blocks (#10193)

* Revert "Sync optimistically candidate blocks (#10193)"

This reverts commit f99a0419ef.

* Sync optimistically candidate blocks (#10193)

* allow optimistic sync

* Fix merge transition block validation

* Update proposer.go

* Sync with develop

* delete deprecated client, update testnet flag

* Change optimistic logic (#10194)

* Logs and err handling

* Fix build

* Clean ups

* Add back get payload

* c

* Done

* Rm uncommented

* Optimistic sync: prysm validator rpcs (#10200)

* Logs to reproduce

* Use pointers

* Use pointers

* Use pointers

* Update json_marshal_unmarshal.go

* Fix marshal

* Update json_marshal_unmarshal.go

* Log

* string total diff

* str

* marshal un

* set string

* json

* gaz

* Comment out optimistic status

* remove kiln flag here (#10269)

* Sync with devleop

* Sync with develop

* clean ups

* refactor engine calls

* Update process_block.go

* Fix deadlock, uncomment duty opt sync

* Update proposer_execution_payload.go

* Sync with develop

* Rm post state check

* Bypass eth1 data checks

* Update proposer_execution_payload.go

* Return early if ttd is not reached

* Sync with devleop

* Update process_block.go

* Update receive_block.go

* Update bzl

* Revert "Update receive_block.go"

This reverts commit 5b4a87c512.

* Fix run time

* Update go.mod

* Fix AltairCompatible to account for future state version

* Update proposer_execution_payload.go

* Handle pre state Altair with valid payload

* Handle pre state Altair with valid payload

* Log bellatrix fields

* Update log.go

* Friendly fee recipient log

* Remove extra SetOptimisticToValid

* Fix base fee per gas

* Fix notifypayload headroot

* clean up with develop branch

* Sync with devleop

* Fix

* Fix

* fix

* Fix

* fix tests

* revert some changes

* fix tests

* Update optimistic_sync_test.go

* Simplify IsExecutionEnabledUsingHeader

Co-authored-by: Zahoor Mohamed <zahoor@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@zahoor.in>
Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-25 16:03:15 +00:00
Potuz
a5dc40393d Allow inner protoarray nodes to become VALID (#10418)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-25 15:35:44 +00:00
Raul Jordan
b238235210 prevent run (#10432) 2022-03-25 11:08:38 -04:00
terence tsao
73a6a85069 Ensure finalized root can't be zeros (#10422) 2022-03-24 23:55:54 +00:00
terence tsao
ac4a30588f Properly initialize replayer builder for rpc (#10426) 2022-03-24 23:12:47 +00:00
Raul Jordan
3579551f15 Use --http-web3provider for Execution Engine Connection (#10307)
* combine endpoints

* initialization in the start function instead

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-03-24 20:53:07 +00:00
417 changed files with 15812 additions and 6082 deletions

View File

@@ -1,8 +1,8 @@
#!/bin/sh -l
set -e
export PATH=$PATH:/usr/local/go/bin
export PATH="$PATH:/usr/local/go/bin"
cd $GITHUB_WORKSPACE
cd "$GITHUB_WORKSPACE"
cp go.mod go.mod.orig
cp go.sum go.sum.orig

View File

@@ -1,41 +0,0 @@
name: Update DAppNodePackages
on:
push:
tags:
- '*'
jobs:
dappnode-update-beacon-chain:
name: Trigger a beacon-chain release
runs-on: ubuntu-latest
steps:
- name: Get latest tag
id: get_tag
run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
- name: Send dispatch event to DAppNodePackage-prysm-beacon-chain
env:
DISPATCH_REPO: dappnode/DAppNodePackage-prysm-beacon-chain
run: |
curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
-H "Accept: application/vnd.github.everest-preview+json" \
-H "Content-Type: application/json" \
--data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
https://api.github.com/repos/$DISPATCH_REPO/dispatches
dappnode-update-validator:
name: Trigger a validator release
runs-on: ubuntu-latest
steps:
- name: Get latest tag
id: get_tag
run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
- name: Send dispatch event to DAppNodePackage validator repository
env:
DISPATCH_REPO: dappnode/DAppNodePackage-prysm-validator
run: |
curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
-H "Accept: application/vnd.github.everest-preview+json" \
-H "Content-Type: application/json" \
--data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
https://api.github.com/repos/$DISPATCH_REPO/dispatches

View File

@@ -7,13 +7,12 @@ on:
branches: [ '*' ]
jobs:
check:
name: Check
formatting:
name: Formatting
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@v2
- name: Go mod tidy checker
id: gomodtidy
@@ -31,15 +30,43 @@ jobs:
with:
goimports-path: ./
- name: Gosec security scanner
uses: securego/gosec@master
gosec:
name: Gosec scan
runs-on: ubuntu-latest
env:
GO111MODULE: on
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.17
uses: actions/setup-go@v3
with:
args: '-exclude=G307 -exclude-dir=crypto/bls/herumi ./...'
go-version: 1.17
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@latest
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.17
uses: actions/setup-go@v3
with:
go-version: 1.17
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v2
with:
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.17
version: v1.45.2
skip-go-installation: true
build:
name: Build
@@ -48,7 +75,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.14
go-version: 1.17
id: go
- name: Check out code into the Go module directory

.github/workflows/horusec.yaml (new file, 22 lines)
View File

@@ -0,0 +1,22 @@
name: Horusec Security Scan
on:
schedule:
# runs at 00:00 UTC on Sunday
- cron: '0 0 * * SUN'
jobs:
Horusec_Scan:
name: horusec-Scan
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/develop'
steps:
- name: Check out code
uses: actions/checkout@v2
with: # Required when commit authors is enabled
fetch-depth: 0
- name: Running Security Scan
run: |
curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"

View File

@@ -0,0 +1,54 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"checkpoint.go",
"client.go",
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/state:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//io/file:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_x_mod//semver:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"checkpoint_test.go",
"client_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],
)

View File

@@ -0,0 +1,262 @@
package beacon
import (
"context"
"fmt"
"path"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/io/file"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
log "github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
// OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
wsd *WeakSubjectivityData
sb []byte
bb []byte
st state.BeaconState
b block.SignedBeaconBlock
cf *detect.VersionedUnmarshaler
}
// CheckpointString returns the standard string representation of a Checkpoint for the block root and epoch for the
// SignedBeaconBlock value found by DownloadOriginData.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (od *OriginData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", od.wsd.BlockRoot, od.wsd.Epoch)
}
// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", od.cf, od.b.Block().Slot(), od.wsd.BlockRoot))
return blockPath, file.WriteFile(blockPath, od.BlockBytes())
}
// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.StateRoot))
return statePath, file.WriteFile(statePath, od.StateBytes())
}
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
func (od *OriginData) StateBytes() []byte {
return od.sb
}
// BlockBytes returns the ssz-encoded bytes of the downloaded SignedBeaconBlock value.
func (od *OriginData) BlockBytes() []byte {
return od.bb
}
func fname(prefix string, cf *detect.VersionedUnmarshaler, slot types.Slot, root [32]byte) string {
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, cf.Config.ConfigName, version.String(cf.Fork), slot, root)
}
// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
headBytes, err := client.GetState(ctx, IdHead)
if err != nil {
return 0, err
}
cf, err := detect.FromState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in remote head state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
headState, err := cf.UnmarshalBeaconState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
}
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, cf.Config)
if err != nil {
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
}
log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
return epoch, nil
}
const (
prysmMinimumVersion = "v2.0.7"
prysmImplementationName = "Prysm"
)
// ErrUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
var ErrUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
// for older endpoints or clients that do not support the weak_subjectivity api method
// we gather the necessary data for a checkpoint sync by:
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
// - requesting the state at the first slot of the epoch
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
// - requesting that block by its root
func downloadBackwardsCompatible(ctx context.Context, client *Client) (*OriginData, error) {
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
nv, err := client.GetNodeVersion(ctx)
if err != nil {
return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
}
if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
return nil, errors.Wrapf(ErrUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
}
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
if err != nil {
return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
}
// use first slot of the epoch for the state slot
slot, err := slots.EpochStart(epoch)
if err != nil {
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
}
log.Printf("requesting checkpoint state at slot %d", slot)
// get the state at the first slot of the epoch
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}
// the detected VersionedUnmarshaler is used to unmarshal the BeaconState so we can read the block root in latest_block_header
cf, err := detect.FromState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
st, err := cf.UnmarshalBeaconState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}
// compute state and block roots
stateRoot, err := st.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
}
header := st.LatestBlockHeader()
header.StateRoot = stateRoot[:]
computedBlockRoot, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
blockBytes, err := client.GetBlock(ctx, IdFromRoot(computedBlockRoot))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", computedBlockRoot)
}
block, err := cf.UnmarshalBeaconBlock(blockBytes)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
blockRoot, err := block.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
}
log.Printf("BeaconState slot=%d, Block slot=%d", st.Slot(), block.Block().Slot())
log.Printf("BeaconState htr=%#x, Block state_root=%#x", stateRoot, block.Block().StateRoot())
log.Printf("BeaconBlock root computed from state=%#x, Block htr=%#x", computedBlockRoot, blockRoot)
return &OriginData{
wsd: &WeakSubjectivityData{
BlockRoot: blockRoot,
StateRoot: stateRoot,
Epoch: epoch,
},
st: st,
sb: stateBytes,
b: block,
bb: blockBytes,
cf: cf,
}, nil
}
// DownloadOriginData attempts to use the proposed weak_subjectivity beacon node api
// to obtain the weak_subjectivity metadata (epoch, block_root, state_root) needed to sync
// a beacon node from the canonical weak subjectivity checkpoint. As this is a proposed API
// that will only be supported by prysm at first, in the event of a 404 we fallback to using a
// different technique where we first download the head state which can be used to compute the
// weak subjectivity epoch on the client side.
func DownloadOriginData(ctx context.Context, client *Client) (*OriginData, error) {
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
return downloadBackwardsCompatible(ctx, client)
}
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
// use first slot of the epoch for the state slot
slot, err := slots.EpochStart(ws.Epoch)
if err != nil {
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", ws.Epoch)
}
log.Printf("requesting checkpoint state at slot %d", slot)
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}
cf, err := detect.FromState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
state, err := cf.UnmarshalBeaconState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}
stateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for state at slot=%d", slot)
}
blockRoot, err := state.LatestBlockHeader().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of latest_block_header")
}
blockBytes, err := client.GetBlock(ctx, IdFromRoot(ws.BlockRoot))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", ws.BlockRoot)
}
block, err := cf.UnmarshalBeaconBlock(blockBytes)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
realBlockRoot, err := block.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
log.Printf("BeaconState slot=%d, Block slot=%d", state.Slot(), block.Block().Slot())
log.Printf("BeaconState htr=%#x, Block state_root=%#x", stateRoot, block.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", blockRoot, realBlockRoot)
return &OriginData{
wsd: ws,
st: state,
b: block,
sb: stateBytes,
bb: blockBytes,
cf: cf,
}, nil
}
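The checkpoint client above exposes a small end-to-end flow: build a Client, call DownloadOriginData, then persist the origin state and block. The following minimal sketch shows how a caller could drive it; the main package, host value, and output directory are illustrative assumptions, not part of this changeset.
package main
import (
	"context"
	"log"
	"time"
	"github.com/prysmaticlabs/prysm/api/client/beacon"
)
func main() {
	ctx := context.Background()
	// Placeholder host; any beacon node exposing the standard Beacon API works.
	client, err := beacon.NewClient("localhost:3500", beacon.WithTimeout(2*time.Minute))
	if err != nil {
		log.Fatal(err)
	}
	// Downloads the weak subjectivity state and block, falling back to head-state
	// derivation when the weak_subjectivity API is not supported by the server.
	od, err := beacon.DownloadOriginData(ctx, client)
	if err != nil {
		log.Fatal(err)
	}
	statePath, err := od.SaveState("/tmp")
	if err != nil {
		log.Fatal(err)
	}
	blockPath, err := od.SaveBlock("/tmp")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("checkpoint=%s state=%s block=%s", od.CheckpointString(), statePath, blockPath)
}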

View File

@@ -0,0 +1,407 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"testing"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/network/forks"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/testing/require"
)
type testRT struct {
rt func(*http.Request) (*http.Response, error)
}
func (rt *testRT) RoundTrip(req *http.Request) (*http.Response, error) {
if rt.rt != nil {
return rt.rt(req)
}
return nil, errors.New("RoundTripper not implemented")
}
var _ http.RoundTripper = &testRT{}
func marshalToEnvelope(val interface{}) ([]byte, error) {
raw, err := json.Marshal(val)
if err != nil {
return nil, errors.Wrap(err, "error marshaling value to place in data envelope")
}
env := struct {
Data json.RawMessage `json:"data"`
}{
Data: raw,
}
return json.Marshal(env)
}
func TestMarshalToEnvelope(t *testing.T) {
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
expected := `{"data":{"version":"Prysm/v2.0.5 (linux amd64)"}}`
require.Equal(t, expected, string(encoded))
}
func TestFallbackVersionCheck(t *testing.T) {
c := &Client{
hc: &http.Client{},
host: "localhost:3500",
scheme: "http",
}
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
ctx := context.Background()
_, err := DownloadOriginData(ctx, c)
require.ErrorIs(t, err, ErrUnsupportedPrysmCheckpointVersion)
}
func TestFname(t *testing.T) {
vu := &detect.VersionedUnmarshaler{
Config: params.MainnetConfig(),
Fork: version.Phase0,
}
slot := types.Slot(23)
prefix := "block"
var root [32]byte
copy(root[:], []byte{0x23, 0x23, 0x23})
expected := "block_mainnet_phase0_23-0x2323230000000000000000000000000000000000000000000000000000000000.ssz"
actual := fname(prefix, vu, slot, root)
require.Equal(t, expected, actual)
vu.Config = params.MinimalSpecConfig()
vu.Fork = version.Altair
slot = 17
prefix = "state"
copy(root[29:], []byte{0x17, 0x17, 0x17})
expected = "state_minimal_altair_17-0x2323230000000000000000000000000000000000000000000000000000171717.ssz"
actual = fname(prefix, vu, slot, root)
require.Equal(t, expected, actual)
}
func TestDownloadOriginData(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig()
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
wSlot, err := slots.EpochStart(epoch)
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
require.NoError(t, wrapper.SetBlockSlot(b, wSlot))
require.NoError(t, wrapper.SetProposerIndex(b, 0))
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
wRoot, err := wst.HashTreeRoot(ctx)
require.NoError(t, err)
require.NoError(t, wrapper.SetBlockStateRoot(b, wRoot))
serBlock, err := b.MarshalSSZ()
require.NoError(t, err)
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
expectedWSD := WeakSubjectivityData{
BlockRoot: bRoot,
StateRoot: wRoot,
Epoch: epoch,
}
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
host: "localhost:3500",
scheme: "http",
}
od, err := DownloadOriginData(ctx, c)
require.NoError(t, err)
require.Equal(t, expectedWSD.Epoch, od.wsd.Epoch)
require.Equal(t, expectedWSD.StateRoot, od.wsd.StateRoot)
require.Equal(t, expectedWSD.BlockRoot, od.wsd.BlockRoot)
require.DeepEqual(t, wsSerialized, od.sb)
require.DeepEqual(t, serBlock, od.bb)
require.DeepEqual(t, wst.Fork().CurrentVersion, od.cf.Version[:])
require.DeepEqual(t, version.Phase0, od.cf.Fork)
}
// runs downloadBackwardsCompatible directly
// and via DownloadOriginData with a round tripper that triggers the backwards compatible code path
func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig()
st, expectedEpoch := defaultTestHeadState(t, cfg)
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
wSlot, err := slots.EpochStart(expectedEpoch)
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
require.NoError(t, wrapper.SetBlockSlot(b, wSlot))
require.NoError(t, wrapper.SetProposerIndex(b, 0))
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
wRoot, err := wst.HashTreeRoot(ctx)
require.NoError(t, err)
require.NoError(t, wrapper.SetBlockStateRoot(b, wRoot))
serBlock, err := b.MarshalSSZ()
require.NoError(t, err)
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
host: "localhost:3500",
scheme: "http",
}
odPub, err := DownloadOriginData(ctx, c)
require.NoError(t, err)
odPriv, err := downloadBackwardsCompatible(ctx, c)
require.NoError(t, err)
require.DeepEqual(t, odPriv.wsd, odPub.wsd)
require.DeepEqual(t, odPriv.sb, odPub.sb)
require.DeepEqual(t, odPriv.bb, odPub.bb)
require.DeepEqual(t, odPriv.cf.Fork, odPub.cf.Fork)
require.DeepEqual(t, odPriv.cf.Version, odPub.cf.Version)
}
func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}},
}
c := &Client{
hc: hc,
host: "localhost:3500",
scheme: "http",
}
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
require.Equal(t, expectedEpoch, actualEpoch)
}
func forkForEpoch(cfg *params.BeaconChainConfig, epoch types.Epoch) (*ethpb.Fork, error) {
os := forks.NewOrderedSchedule(cfg)
currentVersion, err := os.VersionForEpoch(epoch)
if err != nil {
return nil, err
}
prevVersion, err := os.Previous(currentVersion)
if err != nil {
if !errors.Is(err, forks.ErrNoPreviousVersion) {
return nil, err
}
// use same version for both in the case of genesis
prevVersion = currentVersion
}
forkEpoch := cfg.ForkVersionSchedule[currentVersion]
return &ethpb.Fork{
PreviousVersion: prevVersion[:],
CurrentVersion: currentVersion[:],
Epoch: forkEpoch,
}, nil
}
func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, types.Epoch) {
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.AltairForkEpoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
slot, err := slots.EpochStart(cfg.AltairForkEpoch)
require.NoError(t, err)
require.NoError(t, st.SetSlot(slot))
var validatorCount, avgBalance uint64 = 100, 35
require.NoError(t, populateValidators(cfg, st, validatorCount, avgBalance))
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: fork.Epoch - 10,
Root: make([]byte, 32),
}))
// to see the math for this, look at helpers.LatestWeakSubjectivityEpoch
// and for the values use mainnet config values, the validatorCount and avgBalance above, and altair fork epoch
expectedEpoch := slots.ToEpoch(st.Slot()) - 224
return st, expectedEpoch
}
// TODO(10429): refactor beacon state options in testing/util to take a state.BeaconState so this can become an option
func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, valCount, avgBalance uint64) error {
validators := make([]*ethpb.Validator, valCount)
balances := make([]uint64, len(validators))
for i := uint64(0); i < valCount; i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, cfg.BLSPubkeyLength),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: avgBalance * 1e9,
ExitEpoch: cfg.FarFutureEpoch,
}
balances[i] = validators[i].EffectiveBalance
}
if err := st.SetValidators(validators); err != nil {
return err
}
if err := st.SetBalances(balances); err != nil {
return err
}
return nil
}

api/client/beacon/client.go (new file, 446 lines)
View File

@@ -0,0 +1,446 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"path"
"regexp"
"sort"
"strconv"
"text/template"
"time"
"github.com/prysmaticlabs/prysm/network/forks"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
const (
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
)
// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
// StateOrBlockId constants are defined for named identifiers, and helper methods are provided
// for slot and root identifiers. Example text from the Eth Beacon Node API documentation:
//
// "Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>."
type StateOrBlockId string
const (
IdFinalized StateOrBlockId = "finalized"
IdGenesis StateOrBlockId = "genesis"
IdHead StateOrBlockId = "head"
IdJustified StateOrBlockId = "justified"
)
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromRoot(r [32]byte) StateOrBlockId {
return StateOrBlockId(fmt.Sprintf("%#x", r))
}
// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromSlot(s types.Slot) StateOrBlockId {
return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
}
// idTemplate is used to create template functions that can interpolate StateOrBlockId values.
func idTemplate(ts string) func(StateOrBlockId) string {
t := template.Must(template.New("").Parse(ts))
f := func(id StateOrBlockId) string {
b := bytes.NewBuffer(nil)
err := t.Execute(b, struct{ Id string }{Id: string(id)})
if err != nil {
panic(fmt.Sprintf("invalid idTemplate: %s", ts))
}
return b.String()
}
// run the template once to ensure that it is valid
// this should happen at load time (using package-scoped vars) to ensure runtime errors aren't possible
_ = f(IdGenesis)
return f
}
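For example, applying idTemplate to the getBlockRootPath constant defined above renders paths like the following; a sketch with illustrative inputs:

	renderBlockRoot := idTemplate(getBlockRootPath)
	_ = renderBlockRoot(IdHead)         // "/eth/v1/beacon/blocks/head/root"
	_ = renderBlockRoot(IdFromSlot(42)) // "/eth/v1/beacon/blocks/42/root"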
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
hc *http.Client
host string
scheme string
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host:port used to construct request URLs. This value can be a full URL string;
// if just `host:port` is given, NewClient assumes an http endpoint.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
host, err := validHostname(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
scheme: "http",
host: host,
}
for _, o := range opts {
o(c)
}
return c, nil
}
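A minimal usage sketch, assuming a beacon node API listening at localhost:3500 (the host, timeout, and error handling are illustrative):

	ctx := context.Background()
	client, err := NewClient("localhost:3500", WithTimeout(30*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	blockSSZ, err := client.GetBlock(ctx, IdFinalized) // ssz-encoded SignedBeaconBlock bytes
	if err != nil {
		log.Fatal(err)
	}
	_ = blockSSZ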
func validHostname(h string) (string, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u.Host, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%s", host, port), nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
u := &url.URL{
Scheme: c.scheme,
Host: c.host,
}
return u.String()
}
func (c *Client) urlForPath(methodPath string) *url.URL {
u := &url.URL{
Scheme: c.scheme,
Host: c.host,
}
u.Path = path.Join(u.Path, methodPath)
return u
}
type reqOption func(*http.Request)
func withSSZEncoding() reqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.urlForPath(path)
log.Printf("requesting %s", u.String())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
}
return b, nil
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
// GetBlock retrieves the SignedBeaconBlock for the given block id.
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := renderGetBlockPath(blockId)
b, err := c.get(ctx, blockPath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
}
return b, nil
}
var getBlockRootTpl = idTemplate(getBlockRootPath)
// GetBlockRoot retrieves the hash_tree_root of the BeaconBlock for the given block id.
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
rootPath := getBlockRootTpl(blockId)
b, err := c.get(ctx, rootPath)
if err != nil {
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
}
jsonr := &struct{ Data struct{ Root string } }{}
err = json.Unmarshal(b, jsonr)
if err != nil {
return [32]byte{}, errors.Wrap(err, "error decoding json data from get block root response")
}
rs, err := hexutil.Decode(jsonr.Data.Root)
if err != nil {
return [32]byte{}, errors.Wrap(err, fmt.Sprintf("error decoding hex-encoded value %s", jsonr.Data.Root))
}
return bytesutil.ToBytes32(rs), nil
}
var getForkTpl = idTemplate(getForkForStatePath)
// GetFork queries the Beacon Node API for the Fork from the state identified by stateId.
// State identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded stateRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
body, err := c.get(ctx, getForkTpl(stateId))
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
fr := &forkResponse{}
dataWrapper := &struct{ Data *forkResponse }{Data: fr}
err = json.Unmarshal(body, dataWrapper)
if err != nil {
return nil, errors.Wrap(err, "error decoding json response in GetFork")
}
return fr.Fork()
}
// GetForkSchedule retrieves all forks, past, present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}
type NodeVersion struct {
implementation string
semver string
systemInfo string
}
var versionRE = regexp.MustCompile(`^(\w+)\/(v\d+\.\d+\.\d+) \((.*)\)$`)
func parseNodeVersion(v string) (*NodeVersion, error) {
groups := versionRE.FindStringSubmatch(v)
if len(groups) != 4 {
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
}
return &NodeVersion{
implementation: groups[1],
semver: groups[2],
systemInfo: groups[3],
}, nil
}
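Applied to a complete version string, parseNodeVersion splits it into the three fields above; a sketch with an illustrative input:

	nv, err := parseNodeVersion("Prysm/v2.0.6 (linux amd64)")
	if err != nil {
		// ErrInvalidNodeVersion is wrapped for strings that do not match versionRE
	}
	// here nv.implementation == "Prysm", nv.semver == "v2.0.6", nv.systemInfo == "linux amd64"
	_ = nv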
// GetNodeVersion requests the beacon node's identifying information about its implementation, in a format
// similar to an HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
d := struct {
Data struct {
Version string `json:"version"`
} `json:"data"`
}{}
err = json.Unmarshal(b, &d)
if err != nil {
return nil, errors.Wrapf(err, "error unmarshaling response body: %s", string(b))
}
return parseNodeVersion(d.Data.Version)
}
func renderGetStatePath(id StateOrBlockId) string {
return path.Join(getStatePath, string(id))
}
// GetState retrieves the BeaconState for the given state id.
// State identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded stateRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
statePath := path.Join(getStatePath, string(stateId))
b, err := c.get(ctx, statePath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
}
return b, nil
}
// GetWeakSubjectivity calls a proposed API endpoint that is unique to Prysm.
// This API method does the following:
// - computes the weak subjectivity epoch
// - finds the highest non-skipped block preceding that epoch
// - returns the hash_tree_root of the found block along with the state_root from the block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}
v := &apimiddleware.WeakSubjectivityResponse{}
err = json.Unmarshal(body, v)
if err != nil {
return nil, err
}
epoch, err := strconv.ParseUint(v.Data.Checkpoint.Epoch, 10, 64)
if err != nil {
return nil, err
}
blockRoot, err := hexutil.Decode(v.Data.Checkpoint.Root)
if err != nil {
return nil, err
}
stateRoot, err := hexutil.Decode(v.Data.StateRoot)
if err != nil {
return nil, err
}
return &WeakSubjectivityData{
Epoch: types.Epoch(epoch),
BlockRoot: bytesutil.ToBytes32(blockRoot),
StateRoot: bytesutil.ToBytes32(stateRoot),
}, nil
}
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint, or to download a BeaconState+SignedBeaconBlock pair that can be used to bootstrap
// a new Beacon Node using Checkpoint Sync.
type WeakSubjectivityData struct {
BlockRoot [32]byte
StateRoot [32]byte
Epoch types.Epoch
}
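As a sketch of one way the returned values might be consumed (the <blockRoot>:<epoch> formatting shown is illustrative, not defined by this package):

	wsd, err := client.GetWeakSubjectivity(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// e.g. build a checkpoint string of the form 0x<blockRoot>:<epoch>
	checkpoint := fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
	_ = checkpoint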
func non200Err(response *http.Response) error {
bodyBytes, err := ioutil.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
type forkResponse struct {
PreviousVersion string `json:"previous_version"`
CurrentVersion string `json:"current_version"`
Epoch string `json:"epoch"`
}
func (f *forkResponse) Fork() (*ethpb.Fork, error) {
epoch, err := strconv.ParseUint(f.Epoch, 10, 64)
if err != nil {
return nil, err
}
cSlice, err := hexutil.Decode(f.CurrentVersion)
if err != nil {
return nil, err
}
if len(cSlice) != 4 {
return nil, fmt.Errorf("got %d byte version for CurrentVersion, expected 4 bytes. hex=%s", len(cSlice), f.CurrentVersion)
}
pSlice, err := hexutil.Decode(f.PreviousVersion)
if err != nil {
return nil, err
}
if len(pSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(pSlice), f.PreviousVersion)
}
return &ethpb.Fork{
CurrentVersion: cSlice,
PreviousVersion: pSlice,
Epoch: types.Epoch(epoch),
}, nil
}
type forkScheduleResponse struct {
Data []forkResponse
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.Atoi(d.Epoch)
if err != nil {
return nil, err
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: types.Epoch(uint64(epoch)),
})
}
sort.Sort(ofs)
return ofs, nil
}


@@ -0,0 +1,58 @@
package beacon
import (
"testing"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestParseNodeVersion(t *testing.T) {
cases := []struct {
name string
v string
err error
nv *NodeVersion
}{
{
name: "empty string",
v: "",
err: ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: ErrInvalidNodeVersion,
},
{
name: "implementation and semver only",
v: "Prysm/v2.0.6",
err: ErrInvalidNodeVersion,
},
{
name: "complete version",
v: "Prysm/v2.0.6 (linux amd64)",
nv: &NodeVersion{
implementation: "Prysm",
semver: "v2.0.6",
systemInfo: "linux amd64",
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
nv, err := parseNodeVersion(c.v)
if c.err != nil {
require.ErrorIs(t, err, c.err)
} else {
require.NoError(t, err)
require.DeepEqual(t, c.nv, nv)
}
})
}
}

api/client/beacon/doc.go (new file)

@@ -0,0 +1,6 @@
/*
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/
*/
package beacon


@@ -0,0 +1,13 @@
package beacon
import "github.com/pkg/errors"
// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
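Because the more specific errors wrap ErrNotOK, callers can branch with errors.Is; a sketch (client, ctx, and the slot value are assumed to be in scope):

	b, err := client.GetBlock(ctx, IdFromSlot(12345))
	if errors.Is(err, ErrNotFound) {
		// no block at this identifier (for example a skipped slot); treat as non-fatal
	} else if err != nil {
		log.Fatal(err)
	}
	_ = b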


@@ -48,15 +48,16 @@ go_library(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
@@ -106,7 +107,6 @@ go_test(
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_engine_test.go",
"mock_test.go",
"optimistic_sync_test.go",
"pow_block_test.go",
@@ -130,6 +130,7 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/fieldparams:go_default_library",
@@ -183,6 +184,7 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",


@@ -328,10 +328,10 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
return s.IsOptimisticForRoot(ctx, s.head.root)
}
// IsOptimisticForRoot takes the root and slot as aguments instead of the current head
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
if err == nil {
return optimistic, nil
}
@@ -358,11 +358,26 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, nil
}
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, bytesutil.ToBytes32(validatedCheckpoint.Root))
// The checkpoint root could be zeros before the first finalized epoch. Use the genesis root in that case.
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
if err != nil {
return false, err
}
return ss.Slot > lastValidated.Slot, nil
if lastValidated == nil {
return false, errInvalidNilSummary
}
if ss.Slot > lastValidated.Slot {
return true, nil
}
isCanonical, err := s.IsCanonical(ctx, root)
if err != nil {
return false, err
}
// historical non-canonical blocks here are returned as optimistic for safety.
return !isCanonical, nil
}
// SetGenesisTime sets the genesis time of beacon chain.


@@ -9,6 +9,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestHeadSlot_DataRace(t *testing.T) {
@@ -16,10 +17,13 @@ func TestHeadSlot_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
s.HeadSlot()
<-wait
@@ -31,28 +35,38 @@ func TestHeadRoot_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
_, err := s.HeadRoot(context.Background())
_, err = s.HeadRoot(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadBlock_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
wsb, err := wrapper.WrappedSignedBeaconBlock(&ethpb.SignedBeaconBlock{})
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
head: &head{block: wsb},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
_, err := s.HeadBlock(context.Background())
_, err = s.HeadBlock(context.Background())
require.NoError(t, err)
<-wait
}
@@ -62,12 +76,16 @@ func TestHeadState_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
_, err := s.HeadState(context.Background())
_, err = s.HeadState(context.Background())
require.NoError(t, err)
<-wait
}


@@ -133,7 +133,9 @@ func TestHeadRoot_UseDB(t *testing.T) {
b := util.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
r, err := c.HeadRoot(context.Background())
@@ -146,8 +148,10 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
b.Block.Slot = 1
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
c := &Service{}
c.head = &head{block: wrapper.WrappedPhase0SignedBeaconBlock(b), state: s}
c.head = &head{block: wsb, state: s}
recevied, err := c.HeadBlock(context.Background())
require.NoError(t, err)
@@ -229,7 +233,9 @@ func TestIsCanonical_Ok(t *testing.T) {
blk.Block.Slot = 0
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
can, err := c.IsCanonical(ctx, root)
require.NoError(t, err)
@@ -447,36 +453,63 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 11
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(optimisticBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(validatedBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot__DB_DoublyLinkedTree(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
@@ -485,20 +518,90 @@ func TestService_IsOptimisticForRoot__DB_DoublyLinkedTree(t *testing.T) {
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 11
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(optimisticBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(validatedBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
@@ -511,5 +614,6 @@ func TestService_IsOptimisticForRoot__DB_DoublyLinkedTree(t *testing.T) {
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
require.Equal(t, true, validated)
}


@@ -13,7 +13,9 @@ var (
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errNilParentInDB is returned when a nil parent block is returned from the DB.
errNilParentInDB = errors.New("nil parent block in DB")
// errWrongBlockCound is returned when the wrong number of blocks or
// errWrongBlockCount is returned when the wrong number of blocks or
// block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// block is not a valid optimistic candidate block
errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
)


@@ -24,8 +24,9 @@ import (
"go.opencensus.io/trace"
)
// UpdateHeadWithBalances updates the beacon state head after getting justified balanced from cache.
func (s *Service) UpdateHeadWithBalances(ctx context.Context) error {
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balances from cache.
// This function is only used in spec tests; it also saves the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
cp := s.store.JustifiedCheckpt()
if cp == nil {
return errors.New("no justified checkpoint")
@@ -35,8 +36,19 @@ func (s *Service) UpdateHeadWithBalances(ctx context.Context) error {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", cp.Root)
return errors.Wrap(err, msg)
}
return s.updateHead(ctx, balances)
headRoot, err := s.updateHead(ctx, balances)
if err != nil {
return errors.Wrap(err, "could not update head")
}
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
return err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
return s.saveHead(ctx, headRoot, headBlock, headState)
}
// This defines the current chain service's view of head.
@@ -49,18 +61,18 @@ type head struct {
// Determines the head from the fork choice service and saves its new data
// (head root, head block, and head state) to the local service cache.
func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
defer span.End()
// Get head from the fork choice service.
f := s.store.FinalizedCheckpt()
if f == nil {
return errNilFinalizedInStore
return [32]byte{}, errNilFinalizedInStore
}
j := s.store.JustifiedCheckpt()
if j == nil {
return errNilJustifiedInStore
return [32]byte{}, errNilJustifiedInStore
}
// To get head before the first justified epoch, the fork choice will start with origin root
// instead of zero hashes.
@@ -76,7 +88,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
if err != nil {
return err
return [32]byte{}, err
}
if features.Get().EnableForkChoiceDoublyLinkedTree {
s.cfg.ForkChoiceStore = doublylinkedtree.New(j.Epoch, f.Epoch)
@@ -84,22 +96,16 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
}
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
return err
return [32]byte{}, err
}
}
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
if err != nil {
return err
}
// Save head to the local service cache.
return s.saveHead(ctx, headRoot)
return s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
}
// This saves head info to the local service cache, it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock block.SignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
@@ -111,6 +117,12 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
if headRoot == bytesutil.ToBytes32(r) {
return nil
}
if err := helpers.BeaconBlockIsNil(headBlock); err != nil {
return err
}
if headState == nil || headState.IsNil() {
return errors.New("cannot save nil head state")
}
// If the head state is not available, just return nil.
// There's nothing to cache
@@ -118,46 +130,33 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
return nil
}
// Get the new head block from DB.
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
return err
}
if err := helpers.BeaconBlockIsNil(newHeadBlock); err != nil {
return err
}
// Get the new head state from cached state or DB.
newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil || newHeadState.IsNil() {
return errors.New("cannot save nil head state")
}
// A chain re-org occurred, so we fire an event notifying the rest of the services.
headSlot := s.HeadSlot()
newHeadSlot := newHeadBlock.Block().Slot()
newHeadSlot := headBlock.Block().Slot()
oldHeadRoot := s.headRoot()
oldStateRoot := s.headBlock().Block().StateRoot()
newStateRoot := newHeadBlock.Block().StateRoot()
if bytesutil.ToBytes32(newHeadBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
newStateRoot := headBlock.Block().StateRoot()
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
}).Debug("Chain reorg occurred")
absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
isOptimistic, err := s.IsOptimistic(ctx)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
Depth: absoluteSlotDifference,
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: headRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: slots.ToEpoch(newHeadSlot),
Slot: newHeadSlot,
Depth: absoluteSlotDifference,
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: headRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: slots.ToEpoch(newHeadSlot),
ExecutionOptimistic: isOptimistic,
},
})
@@ -169,7 +168,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
}
// Cache the new head info.
s.setHead(headRoot, newHeadBlock, newHeadState)
s.setHead(headRoot, headBlock, headState)
// Save the new head root to DB.
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
@@ -179,7 +178,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
@@ -299,6 +298,7 @@ func (s *Service) hasHeadState() bool {
// Notifies a common event feed of a new chain head event. Called right after a new
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(
ctx context.Context,
newHeadSlot types.Slot,
newHeadState state.BeaconState,
newHeadStateRoot,
@@ -332,6 +332,10 @@ func (s *Service) notifyNewHeadEvent(
return errors.Wrap(err, "could not get duty dependent root")
}
}
isOptimistic, err := s.IsOptimistic(ctx)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.NewHead,
Data: &ethpbv1.EventHead{
@@ -341,6 +345,7 @@ func (s *Service) notifyNewHeadEvent(
EpochTransition: slots.IsEpochStart(newHeadSlot),
PreviousDutyDependentRoot: previousDutyDependentRoot,
CurrentDutyDependentRoot: currentDutyDependentRoot,
ExecutionOptimistic: isOptimistic,
},
})
return nil


@@ -9,6 +9,8 @@ import (
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
@@ -27,8 +29,10 @@ func TestSaveHead_Same(t *testing.T) {
r := [32]byte{'A'}
service.head = &head{slot: 0, root: r}
require.NoError(t, service.saveHead(context.Background(), r))
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), r, b, st))
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}
@@ -39,9 +43,10 @@ func TestSaveHead_Different(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
util.NewBeaconBlock()
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
util.NewBeaconBlock(),
)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
@@ -55,7 +60,9 @@ func TestSaveHead_Different(t *testing.T) {
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState, err := util.NewBeaconState()
@@ -63,7 +70,7 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -80,9 +87,10 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
util.NewBeaconBlock(),
)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
@@ -98,7 +106,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
newHeadBlock := newHeadSignedBlock.Block
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState, err := util.NewBeaconState()
@@ -106,7 +116,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -139,15 +149,19 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
b := util.NewBeaconBlock()
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
require.NoError(t, service.updateHead(context.Background(), []uint64{}))
headRoot, err := service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
}
func Test_notifyNewHeadEvent(t *testing.T) {
@@ -162,7 +176,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
}
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err := srv.notifyNewHeadEvent(1, bState, newHeadStateRoot[:], newHeadRoot[:])
err := srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -197,7 +211,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err = srv.notifyNewHeadEvent(epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
err = srv.notifyNewHeadEvent(context.Background(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -233,7 +247,9 @@ func TestSaveOrphanedAtts(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now()
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
@@ -259,7 +275,9 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
@@ -267,3 +285,43 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
atts := b.Block.Body.Attestations
require.DeepNotSSZEqual(t, atts, savedAtts)
}
func TestUpdateHead_noSavedChanges(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcp := &ethpb.Checkpoint{
Root: bellatrixBlkRoot[:],
Epoch: 1,
}
service.store.SetFinalizedCheckpt(fcp)
service.store.SetJustifiedCheckpt(fcp)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
require.NoError(t, beaconDB.SaveState(ctx, bellatrixState, bellatrixBlkRoot))
service.cfg.StateGen.SaveFinalizedState(0, bellatrixBlkRoot, bellatrixState)
headRoot := service.headRoot()
require.Equal(t, [32]byte{}, headRoot)
newRoot, err := service.updateHead(ctx, []uint64{1, 2})
require.NoError(t, err)
require.NotEqual(t, headRoot, newRoot)
require.Equal(t, headRoot, service.headRoot())
}


@@ -1,49 +0,0 @@
package blockchain
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
type mockEngineService struct {
newPayloadError error
forkchoiceError error
blks map[[32]byte]*enginev1.ExecutionBlock
}
func (m *mockEngineService) NewPayload(context.Context, *enginev1.ExecutionPayload) ([]byte, error) {
return nil, m.newPayloadError
}
func (m *mockEngineService) ForkchoiceUpdated(context.Context, *enginev1.ForkchoiceState, *enginev1.PayloadAttributes) (*enginev1.PayloadIDBytes, []byte, error) {
return nil, nil, m.forkchoiceError
}
func (*mockEngineService) GetPayloadV1(
_ context.Context, _ enginev1.PayloadIDBytes,
) *enginev1.ExecutionPayload {
return nil
}
func (*mockEngineService) GetPayload(context.Context, [8]byte) (*enginev1.ExecutionPayload, error) {
return nil, nil
}
func (*mockEngineService) ExchangeTransitionConfiguration(context.Context, *enginev1.TransitionConfiguration) error {
return nil
}
func (*mockEngineService) LatestExecutionBlock(context.Context) (*enginev1.ExecutionBlock, error) {
return nil, nil
}
func (m *mockEngineService) ExecutionBlockByHash(_ context.Context, hash common.Hash) (*enginev1.ExecutionBlock, error) {
blk, ok := m.blks[common.BytesToHash(hash.Bytes())]
if !ok {
return nil, errors.New("block not found")
}
return blk, nil
}


@@ -86,9 +86,9 @@ func TestService_newSlot(t *testing.T) {
for _, test := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
store := store.New(test.args.justified, test.args.finalized)
store.SetBestJustifiedCheckpt(test.args.bestJustified)
service.store = store
s := store.New(test.args.justified, test.args.finalized)
s.SetBestJustifiedCheckpt(test.args.bestJustified)
service.store = s
require.NoError(t, service.NewSlot(ctx, test.args.slot))
if test.args.shouldEqual {


@@ -5,16 +5,21 @@ import (
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -22,7 +27,7 @@ import (
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.BeaconState, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
@@ -30,10 +35,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
return nil, errors.New("nil head block")
}
// Must not call fork choice updated until the transition conditions are met on the Pow network.
if isPreBellatrix(headBlk.Version()) {
return nil, nil
}
isExecutionBlk, err := blocks.ExecutionBlock(headBlk.Body())
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not determine if block is execution block")
}
@@ -44,36 +46,31 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
finalizedHash, err := s.getFinalizedPayloadHash(ctx, finalizedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block")
}
var finalizedHash []byte
if isPreBellatrix(finalizedBlock.Block().Version()) {
finalizedHash = params.BeaconConfig().ZeroHash[:]
} else {
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block execution payload")
}
finalizedHash = payload.BlockHash
return nil, errors.Wrap(err, "could not get finalized payload hash")
}
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: headPayload.BlockHash,
FinalizedBlockHash: finalizedHash,
FinalizedBlockHash: finalizedHash[:],
}
// payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, headState, nextSlot)
if err != nil {
return nil, errors.Wrap(err, "could not get payload attribute")
}
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
case powchain.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
default:
@@ -83,74 +80,131 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
}
if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
}
return payloadID, nil
}
// notifyForkchoiceUpdate signals execution engine on a new payload
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int, header *ethpb.ExecutionPayloadHeader, postState state.BeaconState, blk block.SignedBeaconBlock, root [32]byte) error {
// getFinalizedPayloadHash returns the finalized payload hash for the given finalized block root.
// It checks the following in order:
// 1. The finalized block exists in db
// 2. The finalized block exists in initial sync block cache
// 3. The finalized block is the weak subjectivity block and exists in db
// Error is returned if the finalized block is not found from above.
func (s *Service) getFinalizedPayloadHash(ctx context.Context, finalizedRoot [32]byte) ([32]byte, error) {
b, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
if b != nil {
return getPayloadHash(b.Block())
}
b = s.getInitSyncBlock(finalizedRoot)
if b != nil {
return getPayloadHash(b.Block())
}
r, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
b, err = s.cfg.BeaconDB.Block(ctx, r)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
if b != nil {
return getPayloadHash(b.Block())
}
return [32]byte{}, errors.Errorf("finalized block with root %#x does not exist in the db or our cache", s.ensureRootNotZeros(finalizedRoot))
}
// getPayloadHash returns the payload hash for the given block.
// zeros are returned if the block is older than bellatrix.
func getPayloadHash(b block.BeaconBlock) ([32]byte, error) {
if blocks.IsPreBellatrixVersion(b.Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := b.Body().ExecutionPayload()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block execution payload")
}
return bytesutil.ToBytes32(payload.BlockHash), nil
}
// notifyNewPayload signals the execution engine of a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postStateVersion int,
preStateHeader, postStateHeader *ethpb.ExecutionPayloadHeader, blk block.SignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
if postState == nil {
return errors.New("pre and post states must not be nil")
}
// Execution payload is only supported in Bellatrix and beyond. Pre
// merge blocks are never optimistic
if isPreBellatrix(postState.Version()) {
return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
if blocks.IsPreBellatrixVersion(postStateVersion) {
return true, nil
}
if err := helpers.BeaconBlockIsNil(blk); err != nil {
return err
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.ExecutionEnabled(postState, blk.Block().Body())
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
if err != nil {
return errors.Wrap(err, "could not determine if execution is enabled")
return false, errors.Wrap(err, "could not determine if execution is enabled")
}
if !enabled {
return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
return true, nil
}
payload, err := body.ExecutionPayload()
if err != nil {
return errors.Wrap(err, "could not get execution payload")
return false, errors.Wrap(err, "could not get execution payload")
}
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
case powchain.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"slot": postState.Slot(),
"slot": blk.Block().Slot(),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return nil
return false, nil
case powchain.ErrInvalidPayloadStatus:
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
return false, errors.New("could not validate an INVALID payload from execution engine")
default:
return errors.Wrap(err, "could not validate execution payload from execution engine")
return false, errors.Wrap(err, "could not validate execution payload from execution engine")
}
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root); err != nil {
return errors.Wrap(err, "could not set optimistic status")
}
// During the transition event, the transition block should be verified for sanity.
if isPreBellatrix(preStateVersion) {
if blocks.IsPreBellatrixVersion(preStateVersion) {
// Handle case where pre-state is Altair but block contains payload.
// To reach here, the block must have contained a valid payload.
return s.validateMergeBlock(ctx, blk)
return true, s.validateMergeBlock(ctx, blk)
}
atTransition, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(header, body)
atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(preStateHeader, body)
if err != nil {
return errors.Wrap(err, "could not check if merge block is terminal")
return true, errors.Wrap(err, "could not check if merge block is terminal")
}
if !atTransition {
return nil
return true, nil
}
return s.validateMergeBlock(ctx, blk)
}
// isPreBellatrix returns true if input version is before bellatrix fork.
func isPreBellatrix(v int) bool {
return v == version.Phase0 || v == version.Altair
return true, s.validateMergeBlock(ctx, blk)
}
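// Illustrative sketch (not part of this change): callers treat the boolean as "the EL fully
// validated this payload". On (false, nil), e.g. when the EL answered SYNCING, the block may
// still be imported optimistically, as the onBlock changes later in this diff do.
isValidPayload, err := s.notifyNewPayload(ctx, preStateVersion, postStateVersion, preStateHeader, postStateHeader, signed)
if err != nil {
return errors.Wrap(err, "could not verify new payload")
}
if isValidPayload {
// safe to mark the block as fully validated in fork choice
} else {
// fall back to the optimistic-import rules (see optimisticCandidateBlock below)
}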
// optimisticCandidateBlock returns true if this block can be optimistically synced.
@@ -160,10 +214,6 @@ func isPreBellatrix(v int) bool {
// if is_execution_block(opt_store.blocks[block.parent_root]):
// return True
//
// if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot:
// return True
//
@@ -181,21 +231,75 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.Beacon
return false, errNilParentInDB
}
parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
if err != nil {
return false, err
}
return parentIsExecutionBlock, nil
}
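// Illustrative sketch (not part of this change): the spec conditions above reduce to "the
// parent already has an execution payload, or the block is old enough". The config field
// name below mirrors SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY and is an assumption here.
deepEnough := blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot()
canImportOptimistically := deepEnough || parentIsExecutionBlock
_ = canImportOptimistically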
// getPayloadAttribute returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
if !ok { // There's no need to build attribute if there is no proposer for slot.
return false, nil, 0, nil
}
// Get previous randao.
st = st.Copy()
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
if err != nil {
return false, nil, 0, err
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return false, nil, 0, nil
}
// Get fee recipient.
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
if feeRecipient.String() == fieldparams.EthBurnAddressHex {
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": fieldparams.EthBurnAddressHex,
}).Error("Fee recipient not set. Using burn address")
}
case err != nil:
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
default:
feeRecipient = recipient
}
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
return false, nil, 0, err
}
attr := &enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
}
return true, attr, proposerID, nil
}
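// Illustrative sketch (not part of this change; the engine call shape is a hypothetical
// placeholder): the attributes are only built when a tracked proposer is due for the slot,
// and are then attached to the next forkchoice update so the EL starts building a payload.
hasAttr, attr, proposerID, err := s.getPayloadAttribute(ctx, headState, slot)
if err != nil {
return err
}
if hasAttr {
// pass attr along with engine_forkchoiceUpdated and cache the returned payload ID
// against (slot, proposerID) via ProposerSlotIndexCache
_ = attr
_ = proposerID
}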
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
for _, root := range blkRoots {
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
return err
}
// Deleting the block also deletes its state.
if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
// TODO(10487): If a caller requests to delete a root that's justified and finalized, we should gracefully shut down.
// This is an irreparable condition; it would mean a justified or finalized block has become invalid.
return err
}
}
return nil
}

View File

@@ -5,9 +5,14 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
@@ -18,9 +23,11 @@ import (
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func Test_NotifyForkchoiceUpdate(t *testing.T) {
@@ -41,8 +48,13 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
state: st,
}
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
@@ -131,7 +143,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrAcceptedSyncingPayloadStatus,
newForkchoiceErr: powchain.ErrAcceptedSyncingPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
},
{
@@ -145,7 +157,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrInvalidPayloadStatus,
newForkchoiceErr: powchain.ErrInvalidPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
errString: "could not notify forkchoice update from execution engine: payload status is INVALID",
},
@@ -153,9 +165,9 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
st, _ := util.DeterministicGenesisState(t, 1)
_, err := service.notifyForkchoiceUpdate(ctx, st, tt.blk, service.headRoot(), tt.finalizedRoot)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -181,13 +193,6 @@ func Test_NotifyNewPayload(t *testing.T) {
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
@@ -195,60 +200,84 @@ func Test_NotifyNewPayload(t *testing.T) {
}
altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
require.NoError(t, err)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
BlockNumber: 1,
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
},
},
},
}
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
r, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
tests := []struct {
name string
preState state.BeaconState
postState state.BeaconState
isValidPayload bool
blk block.SignedBeaconBlock
newPayloadErr error
errString string
}{
{
name: "phase 0 post state",
postState: phase0State,
preState: phase0State,
isValidPayload: true,
},
{
name: "altair post state",
postState: altairState,
preState: altairState,
isValidPayload: true,
},
{
name: "nil beacon block",
postState: bellatrixState,
preState: bellatrixState,
errString: "signed beacon block can't be nil",
isValidPayload: false,
},
{
name: "new payload with optimistic block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: powchain.ErrAcceptedSyncingPayloadStatus,
isValidPayload: false,
},
{
name: "new payload with invalid block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: powchain.ErrInvalidPayloadStatus,
errString: "could not validate an INVALID payload from execution engine",
isValidPayload: false,
},
{
name: "altair pre state, altair block",
postState: bellatrixState,
preState: altairState,
blk: altairBlk,
isValidPayload: true,
},
{
name: "altair pre state, happy case",
@@ -268,13 +297,15 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
return b
}(),
isValidPayload: true,
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
isValidPayload: false,
},
{
name: "not at merge transition",
@@ -301,13 +332,15 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
return b
}(),
isValidPayload: true,
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
isValidPayload: false,
},
{
name: "happy case",
@@ -327,20 +360,21 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
return b
}(),
isValidPayload: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &mockPOW.EngineClient{ErrNewPayload: tt.newPayloadErr, BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = e
var payload *ethpb.ExecutionPayloadHeader
if tt.preState.Version() == version.Bellatrix {
payload, err = tt.preState.LatestExecutionPayloadHeader()
@@ -348,11 +382,14 @@ func Test_NotifyNewPayload(t *testing.T) {
}
root := [32]byte{'a'}
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0))
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
require.NoError(t, err)
isValidPayload, err := service.notifyNewPayload(ctx, tt.preState.Version(), postVersion, payload, postHeader, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.isValidPayload, isValidPayload)
}
})
}
@@ -384,25 +421,23 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
e := &mockPOW.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = e
payload, err := bellatrixState.LatestExecutionPayloadHeader()
require.NoError(t, err)
root := [32]byte{'c'}
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 1, root, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
require.NoError(t, err)
validated, err := service.notifyNewPayload(ctx, bellatrixState.Version(), postVersion, payload, postHeader, bellatrixBlk)
require.NoError(t, err)
require.Equal(t, true, validated)
}
func Test_IsOptimisticCandidateBlock(t *testing.T) {
@@ -425,7 +460,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
parentBlk := util.NewBeaconBlockBellatrix()
wrappedParentBlock, err := wrapper.WrappedBellatrixSignedBeaconBlock(parentBlk)
wrappedParentBlock, err := wrapper.WrappedSignedBeaconBlock(parentBlk)
require.NoError(t, err)
parentRoot, err := wrappedParentBlock.Block().HashTreeRoot()
require.NoError(t, err)
@@ -442,7 +477,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 1
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
wr, err := wrapper.WrappedBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
@@ -450,7 +485,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
@@ -462,7 +497,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 200
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedAltairBeaconBlock(blk.Block)
wr, err := wrapper.WrappedBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
@@ -470,7 +505,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedAltairSignedBeaconBlock(blk)
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
@@ -482,7 +517,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
wr, err := wrapper.WrappedBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
@@ -490,48 +525,20 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
want: false,
},
{
name: "shallow block, execution enabled justified chkpt",
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
blk.Block.Body.ExecutionPayload.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
want: true,
},
}
for _, tt := range tests {
jroot, err := tt.justified.Block().HashTreeRoot()
jRoot, err := tt.justified.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
service.store.SetJustifiedCheckpt(
&ethpb.Checkpoint{
Root: jroot[:],
Root: jRoot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
@@ -570,10 +577,10 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
BlockNumber: 100,
}
body := &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: payload}
block := &ethpb.BeaconBlockBellatrix{Body: body, Slot: 200}
rawSigned := &ethpb.SignedBeaconBlockBellatrix{Block: block}
b := &ethpb.BeaconBlockBellatrix{Body: body, Slot: 200}
rawSigned := &ethpb.SignedBeaconBlockBellatrix{Block: b}
blk := util.HydrateSignedBeaconBlockBellatrix(rawSigned)
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wr))
blkRoot, err := wr.Block().HashTreeRoot()
@@ -583,10 +590,276 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
childBlock.Block.ParentRoot = blkRoot[:]
// shallow block
childBlock.Block.Slot = 201
wrappedChild, err := wrapper.WrappedBellatrixSignedBeaconBlock(childBlock)
wrappedChild, err := wrapper.WrappedSignedBeaconBlock(childBlock)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedChild))
candidate, err := service.optimisticCandidateBlock(ctx, wrappedChild.Block())
require.NoError(t, err)
require.Equal(t, true, candidate)
}
func Test_GetPayloadAttribute(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
hasPayload, _, vId, err := service.getPayloadAttribute(ctx, nil, 0)
require.NoError(t, err)
require.Equal(t, false, hasPayload)
require.Equal(t, types.ValidatorIndex(0), vId)
// Cache hit, advance state, no fee recipient
suggestedVid := types.ValidatorIndex(1)
slot := types.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
st, _ := util.DeterministicGenesisState(t, 1)
hook := logTest.NewGlobal()
hasPayload, attr, vId, err := service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
hasPayload, attr, vId, err = service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient))
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
stateGen := stategen.New(beaconDB)
fcs := protoarray.New(0, 0, [32]byte{})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
wr, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wr))
genesisRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash,
params.BeaconConfig().ZeroHash, 0, 0))
genesisSummary := &ethpb.StateSummary{
Root: genesisStateRoot[:],
Slot: 0,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, genesisSummary))
// Get last validated checkpoint
origCheckpoint, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, origCheckpoint))
// Optimistic finalized checkpoint
blk := util.NewBeaconBlock()
blk.Block.Slot = 320
blk.Block.ParentRoot = genesisRoot[:]
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
opRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
opCheckpoint := &ethpb.Checkpoint{
Root: opRoot[:],
Epoch: 10,
}
opStateSummary := &ethpb.StateSummary{
Root: opRoot[:],
Slot: 320,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 320, opRoot, genesisRoot,
params.BeaconConfig().ZeroHash, 10, 10))
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))
require.NoError(t, service.updateFinalized(ctx, opCheckpoint))
cp, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
require.DeepEqual(t, origCheckpoint.Root, cp.Root)
require.Equal(t, origCheckpoint.Epoch, cp.Epoch)
// Validated finalized checkpoint
blk = util.NewBeaconBlock()
blk.Block.Slot = 640
blk.Block.ParentRoot = opRoot[:]
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
validRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
validCheckpoint := &ethpb.Checkpoint{
Root: validRoot[:],
Epoch: 20,
}
validSummary := &ethpb.StateSummary{
Root: validRoot[:],
Slot: 640,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 640, validRoot, params.BeaconConfig().ZeroHash,
params.BeaconConfig().ZeroHash, 20, 20))
require.NoError(t, fcs.SetOptimisticToValid(ctx, validRoot))
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validRoot))
require.NoError(t, service.updateFinalized(ctx, validCheckpoint))
cp, err = service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
optimistic, err := service.IsOptimisticForRoot(ctx, validRoot)
require.NoError(t, err)
require.Equal(t, false, optimistic)
require.DeepEqual(t, validCheckpoint.Root, cp.Root)
require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
}
func TestService_removeInvalidBlockAndState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Deleting unknown block should not error.
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))
// Happy case
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
blk1, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
r1, err := blk1.Block().HashTreeRoot()
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: 1,
Root: r1[:],
}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r1))
b2 := util.NewBeaconBlock()
b2.Block.Slot = 2
blk2, err := wrapper.WrappedSignedBeaconBlock(b2)
require.NoError(t, err)
r2, err := blk2.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk2))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: 2,
Root: r2[:],
}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r2))
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{r1, r2}))
require.Equal(t, false, service.hasBlock(ctx, r1))
require.Equal(t, false, service.hasBlock(ctx, r2))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r1))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r2))
has, err := service.cfg.StateGen.HasState(ctx, r1)
require.NoError(t, err)
require.Equal(t, false, has)
has, err = service.cfg.StateGen.HasState(ctx, r2)
require.NoError(t, err)
require.Equal(t, false, has)
}
func TestService_getFinalizedPayloadHash(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Use the block in DB
b := util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
h, err := service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// Use the block in init sync cache
b = util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hello"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
service.initSyncBlocks[r] = blk
h, err = service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// Use the weak subjectivity sync block
b = util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("howdy"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, r))
h, err = service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// None of the above should error
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, [32]byte{'a'}))
_, err = service.getFinalizedPayloadHash(ctx, [32]byte{'a'})
require.ErrorContains(t, "does not exist in the db or our cache", err)
}
func TestService_getPayloadHash(t *testing.T) {
// Pre-bellatrix
blk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
h, err := getPayloadHash(blk.Block())
require.NoError(t, err)
require.Equal(t, [32]byte{}, h)
// Post bellatrix
b := util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
h, err = getPayloadHash(blk.Block())
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(bytesutil.PadTo([]byte("hi"), 32)), h)
}

View File

@@ -2,6 +2,7 @@ package blockchain
import (
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
@@ -11,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -52,7 +52,7 @@ func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
}
// WithExecutionEngineCaller to call execution engine.
func WithExecutionEngineCaller(c powchain.EngineCaller) Option {
return func(s *Service) error {
s.cfg.ExecutionEngineCaller = c
return nil
@@ -67,6 +67,14 @@ func WithDepositCache(c *depositcache.DepositCache) Option {
}
}
// WithProposerIdsCache for proposer id cache.
func WithProposerIdsCache(c *cache.ProposerPayloadIDsCache) Option {
return func(s *Service) error {
s.cfg.ProposerSlotIndexCache = c
return nil
}
}
// WithAttestationPool for attestation lifecycle after chain inclusion.
func WithAttestationPool(p attestations.Pool) Option {
return func(s *Service) error {

View File

@@ -9,6 +9,7 @@ import (
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
mocks "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
@@ -117,13 +118,13 @@ func Test_validateMergeBlock(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
engine.blks[[32]byte{'a'}] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
@@ -158,12 +159,12 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
h := [32]byte{'a'}
p := [32]byte{'b'}
td := "0x1"
engine.blks[h] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: td,
}
@@ -175,18 +176,18 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
require.ErrorContains(t, "could not get pow block: block not found", err)
engine.blks[h] = nil
engine.BlockByHashMap[h] = nil
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "pow block is nil", err)
engine.blks[h] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: "1",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)
engine.blks[h] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
}

View File

@@ -39,13 +39,17 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
BlkWithOutState := util.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
require.NoError(t, err)
BlkWithStateBadAtt := util.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -56,7 +60,9 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
BlkWithValidState := util.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
@@ -145,13 +151,17 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
BlkWithOutState := util.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
require.NoError(t, err)
BlkWithStateBadAtt := util.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -162,7 +172,9 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
BlkWithValidState := util.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
@@ -438,7 +450,9 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 2
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
@@ -455,7 +469,9 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 2
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
@@ -478,7 +494,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
@@ -486,7 +504,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -509,7 +529,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
@@ -517,7 +539,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -534,7 +558,9 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
@@ -543,7 +569,9 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)

View File

@@ -6,11 +6,13 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
@@ -107,18 +109,42 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err != nil {
return err
}
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
return err
}
isValidPayload, err := s.notifyNewPayload(ctx, preStateVersion, postStateVersion, preStateHeader, postStateHeader, signed)
if err != nil {
return errors.Wrap(err, "could not verify new payload")
}
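// The EL could not fully validate the payload (e.g. it is still syncing), so only keep the
// block if it qualifies for optimistic import.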
if !isValidPayload {
candidate, err := s.optimisticCandidateBlock(ctx, b)
if err != nil {
return errors.Wrap(err, "could not check if block is optimistic candidate")
}
if !candidate {
return errNotOptimisticCandidate
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
// We add a proposer score boost to fork choice for the block root if applicable, right after
// running a successful state transition for the block.
secondsIntoSlot := uint64(time.Since(s.genesisTime).Seconds()) % params.BeaconConfig().SecondsPerSlot
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(ctx, &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: signed.Block().Slot(),
CurrentSlot: slots.SinceGenesis(s.genesisTime),
SecondsIntoSlot: secondsIntoSlot,
}); err != nil {
return err
}
@@ -178,12 +204,24 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
return errors.Wrap(err, msg)
}
headRoot, err := s.updateHead(ctx, balances)
if err != nil {
log.WithError(err).Warn("Could not update head")
}
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
return err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return err
}
if _, err := s.notifyForkchoiceUpdate(ctx, headState, headBlock.Block(), headRoot, bytesutil.ToBytes32(finalized.Root)); err != nil {
return err
}
if err := s.saveHead(ctx, headRoot, headBlock, headState); err != nil {
return errors.Wrap(err, "could not save head")
}
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
return err
@@ -228,14 +266,19 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
go func() {
// Send an event regarding the new finalized checkpoint over a common event feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: signed.Block().StateRoot(),
ExecutionOptimistic: isOptimistic,
},
})
@@ -256,14 +299,17 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return s.handleEpochBoundary(ctx, postState)
}
func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPayloadHeader, error) {
if st == nil {
return 0, nil, errors.New("nil state")
}
var preStateHeader *ethpb.ExecutionPayloadHeader
var err error
preStateVersion := st.Version()
switch preStateVersion {
case version.Phase0, version.Altair:
default:
preStateHeader, err = st.LatestExecutionPayloadHeader()
if err != nil {
return 0, nil, err
}
@@ -308,9 +354,24 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
PublicKeys: []bls.PublicKey{},
Messages: [][32]byte{},
}
type versionAndHeader struct {
version int
header *ethpb.ExecutionPayloadHeader
}
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
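// Capture each block's pre- and post-transition state version and payload header so the
// payloads can be sent to the execution engine after batch signature verification.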
var set *bls.SignatureBatch
boundaries := make(map[[32]byte]state.BeaconState)
for i, b := range blks {
v, h, err := getStateVersionAndPayload(preState)
if err != nil {
return nil, nil, err
}
preVersionAndHeaders[i] = &versionAndHeader{
version: v,
header: h,
}
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return nil, nil, err
@@ -321,6 +382,15 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
fCheckpoints[i] = preState.FinalizedCheckpoint()
v, h, err = getStateVersionAndPayload(preState)
if err != nil {
return nil, nil, err
}
postVersionAndHeaders[i] = &versionAndHeader{
version: v,
header: h,
}
sigSet.Join(set)
}
verify, err := sigSet.Verify()
@@ -333,22 +403,40 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
// blocks have been verified, add them to forkchoice and call the engine
for i, b := range blks {
isValidPayload, err := s.notifyNewPayload(ctx,
preVersionAndHeaders[i].version,
postVersionAndHeaders[i].version,
preVersionAndHeaders[i].header,
postVersionAndHeaders[i].header, b)
if err != nil {
return nil, nil, err
}
if !isValidPayload {
candidate, err := s.optimisticCandidateBlock(ctx, b.Block())
if err != nil {
return nil, nil, errors.Wrap(err, "could not check if block is optimistic candidate")
}
if !candidate {
return nil, nil, errNotOptimisticCandidate
}
}
s.saveInitSyncBlock(blockRoots[i], b)
if err := s.insertBlockToForkChoiceStore(ctx, b.Block(), blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
return nil, nil, err
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoots[i]); err != nil {
return nil, nil, errors.Wrap(err, "could not set optimistic block to valid")
}
}
if _, err := s.notifyForkchoiceUpdate(ctx, preState, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
return nil, nil, err
}
}
for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return nil, nil, err
@@ -418,6 +506,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if postState.Slot()+1 == s.nextEpochBoundarySlot {
// Update caches for the next epoch at epoch boundary slot - 1.
log.Infof("UpdateCommitteeCache from handleEpochBoundary (postState.Slot()+1 == s.nextEpochBoundarySlot), slot=%d, epoch=%d", postState.Slot(), coreTime.CurrentEpoch(postState))
if err := helpers.UpdateCommitteeCache(postState, coreTime.NextEpoch(postState)); err != nil {
return err
}
@@ -426,6 +515,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if err != nil {
return err
}
log.Info("UpdateProposerIndicesInCache from handleEpochBoundary (postState.Slot()+1 == s.nextEpochBoundarySlot)")
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
return err
}
@@ -441,9 +531,12 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// Update caches at epoch boundary slot.
// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
log.Info("UpdateCommitteeCache from handleEpochBoundary (postState.Slot() >= s.nextEpochBoundarySlot)")
if err := helpers.UpdateCommitteeCache(postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
log.Info("UpdateProposerIndicesInCache from handleEpochBoundary (postState.Slot() >= s.nextEpochBoundarySlot)")
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
return err
}
@@ -498,7 +591,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.Be
func getBlockPayloadHash(blk block.BeaconBlock) ([32]byte, error) {
payloadHash := [32]byte{}
if blocks.IsPreBellatrixVersion(blk.Version()) {
return payloadHash, nil
}
payload, err := blk.Body().ExecutionPayload()

View File

@@ -247,10 +247,19 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
}
fRoot := bytesutil.ToBytes32(cp.Root)
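// Only advance the last validated checkpoint when the finalized root itself is no longer
// optimistic; an optimistic finalized root keeps the previously validated checkpoint.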
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil {
return err
}
if !optimistic {
err = s.cfg.BeaconDB.SaveLastValidatedCheckpoint(ctx, cp)
if err != nil {
return err
}
}
if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not migrate to cold")
}
return nil
}

View File

@@ -18,6 +18,7 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
@@ -50,7 +51,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -61,7 +64,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
random := util.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(random)))
wsb, err = wrapper.WrappedSignedBeaconBlock(random)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
@@ -126,7 +131,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
err = service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.blk), root)
wsb, err = wrapper.WrappedSignedBeaconBlock(tt.blk)
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
@@ -147,7 +154,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -158,7 +167,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
random := util.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(random)))
wsb, err = wrapper.WrappedSignedBeaconBlock(random)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
@@ -223,7 +234,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
err = service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.blk), root)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.blk)
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
@@ -241,7 +254,13 @@ func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, 0, [32]byte{'A'}, time.Now()))
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: [32]byte{'A'},
BlockSlot: types.Slot(0),
CurrentSlot: 0,
SecondsIntoSlot: 0,
}
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, args))
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0,
params.BeaconConfig().ZeroHash, []uint64{}, 0)
require.ErrorContains(t, "could not apply proposer boost score: invalid proposer boost root", err)
@@ -260,13 +279,17 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
@@ -278,15 +301,21 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
for i := 1; i < 10; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
@@ -314,13 +343,17 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
@@ -332,15 +365,21 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
for i := 1; i < 10; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
@@ -366,12 +405,14 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesis := blocks.NewGenesisBlock(params.BeaconConfig().ZeroHash[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
bState := st.Copy()
@@ -382,15 +423,17 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
for i := 1; i <= blkCount; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
service.saveInitSyncBlock(root, wsb)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
@@ -427,8 +470,12 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
wsb, err = wrapper.WrappedSignedBeaconBlock(lastJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -455,8 +502,12 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
wsb, err = wrapper.WrappedSignedBeaconBlock(lastJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -484,8 +535,12 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
wsb, err = wrapper.WrappedSignedBeaconBlock(lastJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -512,12 +567,16 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
b := util.NewBeaconBlock()
b.Block.Slot = 1
@@ -543,12 +602,16 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
b := util.NewBeaconBlock()
b.Block.Slot = 1
@@ -571,12 +634,16 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
b := util.NewBeaconBlock()
b.Block.Slot = 1
@@ -590,7 +657,9 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b).Block()))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block()))
}
func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
@@ -606,7 +675,9 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, err)
signedBlock := util.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(signedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
@@ -645,7 +716,9 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -656,12 +729,14 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -686,7 +761,9 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -697,12 +774,15 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -727,7 +807,9 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -738,12 +820,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -771,7 +856,9 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -782,12 +869,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -816,7 +906,9 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st, err := util.NewBeaconState()
@@ -827,23 +919,29 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
// Define a tree branch, slot 63 <- 64 <- 65
b63 := util.NewBeaconBlock()
b63.Block.Slot = 63
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b63)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b63)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := util.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b64)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b64)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b65)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
beaconState, _ := util.DeterministicGenesisState(t, 32)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b65).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
@@ -869,7 +967,9 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st, err := util.NewBeaconState()
@@ -880,23 +980,29 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
// Define a tree branch, slot 63 <- 64 <- 65
b63 := util.NewBeaconBlock()
b63.Block.Slot = 63
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b63)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b63)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := util.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b64)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b64)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b65)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
beaconState, _ := util.DeterministicGenesisState(t, 32)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b65).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
@@ -976,7 +1082,9 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
beaconBlock := util.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
if err := beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)); err != nil {
wsb, err := wrapper.WrappedSignedBeaconBlock(beaconBlock)
require.NoError(t, err)
if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil {
return nil, err
}
if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
@@ -1044,7 +1152,9 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
beaconBlock := util.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(beaconBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
}
// Slots 100 to 200 are skip slots. Requesting root at 150 will yield root at 100. The last physical block.
@@ -1131,7 +1241,9 @@ func TestAncestor_CanUseDB(t *testing.T) {
beaconBlock := util.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
wsb, err := wrapper.WrappedSignedBeaconBlock(beaconBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb)) // Saves blocks to DB.
}
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, 0, 0))
@@ -1171,14 +1283,18 @@ func TestVerifyBlkDescendant(t *testing.T) {
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b1)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
type args struct {
parentRoot [32]byte
@@ -1242,7 +1358,9 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
gBlk := util.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(gBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(gBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: gRoot[:]}))
beaconState, _ := util.DeterministicGenesisState(t, 32)
@@ -1318,7 +1436,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1370,7 +1490,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1434,7 +1556,9 @@ func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wsb))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
@@ -1456,7 +1580,9 @@ func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wsb))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
}
@@ -1502,9 +1628,9 @@ func Test_getStateVersionAndPayload(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
version, header, err := getStateVersionAndPayload(tt.st)
ver, header, err := getStateVersionAndPayload(tt.st)
require.NoError(t, err)
require.Equal(t, tt.version, version)
require.Equal(t, tt.version, ver)
require.DeepEqual(t, tt.header, header)
})
}

View File

@@ -161,19 +161,19 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
log.WithError(err).Errorf("Unable to get justified balances for root %v", justified.Root)
continue
}
prevHead := s.headRoot()
if err := s.updateHead(s.ctx, balances); err != nil {
newHeadRoot, err := s.updateHead(s.ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
s.notifyEngineIfChangedHead(prevHead)
s.notifyEngineIfChangedHead(s.ctx, newHeadRoot)
}
}
}()
}
// This calls notifyForkchoiceUpdate in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(prevHead [32]byte) {
if s.headRoot() == prevHead {
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
if s.headRoot() == newHeadRoot {
return
}
finalized := s.store.FinalizedCheckpt()
@@ -181,14 +181,28 @@ func (s *Service) notifyEngineIfChangedHead(prevHead [32]byte) {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
_, err := s.notifyForkchoiceUpdate(s.ctx,
s.headBlock().Block(),
s.headRoot(),
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get block from db")
return
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get state from db")
return
}
_, err = s.notifyForkchoiceUpdate(s.ctx,
headState,
newHeadBlock.Block(),
newHeadRoot,
bytesutil.ToBytes32(finalized.Root),
)
if err != nil {
log.WithError(err).Error("could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}
}
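The refactor above changes notifyEngineIfChangedHead to receive the new head root computed by updateHead, resolve that root's block and state from the database, and only then call notifyForkchoiceUpdate and saveHead. A condensed, stand-alone sketch of that control flow; the interfaces below are illustrative placeholders for the Service pieces involved, not Prysm types:

package forkchoiceflow

import "context"

// headProvider and engineNotifier stand in for the parts of the blockchain
// Service touched by the hunk above; they are placeholders for illustration.
type headProvider interface {
	HeadRoot() [32]byte
	BlockAndState(ctx context.Context, root [32]byte) (blk, st interface{}, err error)
}

type engineNotifier interface {
	NotifyForkchoiceUpdate(ctx context.Context, st, blk interface{}, head, finalized [32]byte) error
	SaveHead(ctx context.Context, root [32]byte, blk, st interface{}) error
}

// notifyIfChangedHead mirrors the new logic: do nothing when the head is
// unchanged, otherwise load the new head's block and state, tell the engine,
// and persist the head.
func notifyIfChangedHead(ctx context.Context, h headProvider, e engineNotifier, newHeadRoot, finalizedRoot [32]byte) error {
	if h.HeadRoot() == newHeadRoot {
		return nil // head did not move; no forkchoice update needed
	}
	blk, st, err := h.BlockAndState(ctx, newHeadRoot)
	if err != nil {
		return err
	}
	if err := e.NotifyForkchoiceUpdate(ctx, st, blk, newHeadRoot, finalizedRoot); err != nil {
		return err
	}
	return e.SaveHead(ctx, newHeadRoot, blk, st)
}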
// This processes fork choice attestations from the pool to account for validator votes and fork choice.

View File

@@ -6,6 +6,7 @@ import (
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -47,13 +48,17 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -74,13 +79,17 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -125,30 +134,43 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.notifyEngineIfChangedHead(service.headRoot())
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.notifyEngineIfChangedHead(ctx, service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
service.notifyEngineIfChangedHead([32]byte{'a'})
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.LogsContain(t, hook, finalizedErr)
hook.Reset()
service.head = &head{
root: [32]byte{'a'},
block: nil, /* should not panic if notify head uses correct head */
}
b := util.NewBeaconBlock()
b.Block.Slot = 1
wr := wrapper.WrappedPhase0SignedBeaconBlock(b)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wr))
r, err := b.Block.HashTreeRoot()
b.Block.Slot = 2
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
finalized := &ethpb.Checkpoint{Root: r[:], Epoch: 0}
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r1, err := b.Block.HashTreeRoot()
require.NoError(t, err)
finalized := &ethpb.Checkpoint{Root: r1[:], Epoch: 0}
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
slot: 1,
root: r,
block: wr,
root: r1,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.notifyEngineIfChangedHead([32]byte{'b'})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
}
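The test above exercises the new ProposerPayloadIDsCache wired into the service config. A short usage sketch built only from calls visible in this diff (cache.NewProposerPayloadIDsCache, SetProposerAndPayloadIDs, GetProposerPayloadIDs); the expected output in the comment is inferred from the assertions:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

func main() {
	// Key a proposer index and payload ID by slot, then read them back, as the
	// test above does around slot 2.
	c := cache.NewProposerPayloadIDsCache()
	c.SetProposerAndPayloadIDs(2, 1, [8]byte{1})

	vIdx, payloadID, ok := c.GetProposerPayloadIDs(2)
	fmt.Println(vIdx, payloadID, ok) // 1 [1 0 0 0 0 0 0 0] true
}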

View File

@@ -88,7 +88,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
for i, b := range blocks {
blockCopy := b.Copy()
// TODO(10261) check optimistic status
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err

View File

@@ -144,7 +144,9 @@ func TestService_ReceiveBlock(t *testing.T) {
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
err = s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block), root)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
require.NoError(t, err)
err = s.ReceiveBlock(ctx, wsb, root)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
@@ -185,7 +187,9 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
require.NoError(t, s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b), root))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.ReceiveBlock(ctx, wsb, root))
wg.Done()
}()
wg.Wait()
@@ -261,7 +265,9 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
blks := []block.SignedBeaconBlock{wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block)}
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
require.NoError(t, err)
blks := []block.SignedBeaconBlock{wsb}
roots := [][32]byte{root}
err = s.ReceiveBlockBatch(ctx, blks, roots)
if tt.wantedErr != "" {
@@ -283,7 +289,9 @@ func TestService_HasInitSyncBlock(t *testing.T) {
if s.HasInitSyncBlock(r) {
t.Error("Should not have block")
}
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()))
wsb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
s.saveInitSyncBlock(r, wsb)
if !s.HasInitSyncBlock(r) {
t.Error("Should have block")
}

View File

@@ -29,7 +29,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
@@ -75,6 +74,7 @@ type config struct {
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
@@ -88,7 +88,7 @@ type config struct {
WeakSubjectivityCheckpt *ethpb.Checkpoint
BlockFetcher powchain.POWBlockFetcher
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller enginev1.Caller
ExecutionEngineCaller powchain.EngineCaller
}
// NewService instantiates a new block service instance that will
@@ -191,15 +191,15 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
}
s.store = store.New(justified, finalized)
var store f.ForkChoicer
var f f.ForkChoicer
fRoot := bytesutil.ToBytes32(finalized.Root)
if features.Get().EnableForkChoiceDoublyLinkedTree {
store = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
f = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
} else {
store = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
f = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
}
s.cfg.ForkChoiceStore = store
fb, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
s.cfg.ForkChoiceStore = f
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
@@ -211,7 +211,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not get execution payload hash")
}
fSlot := fb.Block().Slot()
if err := store.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
if err := f.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
payloadHash, justified.Epoch, finalized.Epoch); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
@@ -221,7 +221,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
if err := store.SetOptimisticToValid(s.ctx, fRoot); err != nil {
if err := f.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
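StartFromSavedState now builds the fork choice backend behind a local f.ForkChoicer variable, seeds it with the finalized block via InsertOptimisticBlock, and marks it valid when it matches the last validated checkpoint. A compact sketch of the selection step, using the constructors shown elsewhere in this diff; the protoarray, doublylinkedtree, forkchoice, and features import paths are assumptions about this era's layout:

package blockchain

import (
	types "github.com/prysmaticlabs/eth2-types"
	forkchoice "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"       // assumed path
	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doublylinkedtree" // assumed path
	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"       // assumed path
	"github.com/prysmaticlabs/prysm/config/features"                          // assumed path
)

// newForkChoicer mirrors the selection in the hunk above: the doubly linked
// tree when the feature flag is on, proto-array otherwise.
func newForkChoicer(justified, finalized types.Epoch, fRoot [32]byte) forkchoice.ForkChoicer {
	if features.Get().EnableForkChoiceDoublyLinkedTree {
		return doublylinkedtree.New(justified, finalized)
	}
	return protoarray.New(justified, finalized, fRoot)
}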
@@ -257,7 +257,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error) {
// first check if we have started from checkpoint sync and have a root
originRoot, err := s.cfg.BeaconDB.OriginBlockRoot(ctx)
originRoot, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
if err == nil {
return originRoot, nil
}
@@ -265,7 +265,7 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
return originRoot, errors.Wrap(err, "could not retrieve checkpoint sync chain origin data from db")
}
// we got here because OriginBlockRoot gave us an ErrNotFound. this means the node was started from a genesis state,
// we got here because OriginCheckpointBlockRoot gave us an ErrNotFound. this means the node was started from a genesis state,
// so we should have a value for GenesisBlock
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil {
@@ -362,9 +362,9 @@ func (s *Service) startFromPOWChain() error {
defer stateSub.Unsubscribe()
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.ChainStarted {
data, ok := event.Data.(*statefeed.ChainStartedData)
case e := <-stateChannel:
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("event data is not type *statefeed.ChainStartedData")
return
@@ -440,9 +440,11 @@ func (s *Service) initializeBeaconChain(
s.cfg.ChainStartFetcher.ClearPreGenesisData()
// Update committee shuffled indices for genesis epoch.
log.Infof("UpdateCommitteeCache from initializeBeaconChain, slot=%d", genesisState.Slot())
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
return nil, err
}
log.Info("UpdateProposerIndicesInCache from initializeBeaconChain")
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
return nil, err
}

View File

@@ -6,7 +6,9 @@ import (
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/sirupsen/logrus"
)
@@ -20,8 +22,11 @@ func TestChainService_SaveHead_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, err)
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}()
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
}
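saveHead now takes the head block and state alongside the root, so this data-race test builds both up front. A sketch of the same concurrent call shape with an explicit wait for the goroutine; it assumes it lives in the blockchain package next to the Service type, and is illustrative rather than a replacement for the test above:

package blockchain

import (
	"context"
	"sync"
	"testing"

	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
)

// saveHeadConcurrently drives the four-argument saveHead from two goroutines,
// waiting for both to finish; s stands for a Service built as in the hunk above.
func saveHeadConcurrently(t *testing.T, s *Service) {
	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
	require.NoError(t, err)
	st, _ := util.DeterministicGenesisState(t, 1)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
	}()
	require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
	wg.Wait()
}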

View File

@@ -24,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
@@ -74,10 +75,14 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
endpoint := "http://127.0.0.1"
ctx := context.Background()
var web3Service *powchain.Service
var err error
srv, endpoint, err := mockPOW.SetupRPCServer()
require.NoError(t, err)
t.Cleanup(func() {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
@@ -144,7 +149,9 @@ func TestChainStartStop_Initialized(t *testing.T) {
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(1))
@@ -179,12 +186,14 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
chainService.cfg.FinalizedStateAtStartUp = s
@@ -250,7 +259,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(0))
@@ -277,7 +288,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -291,7 +304,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
@@ -323,7 +338,9 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -337,7 +354,9 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
ss := &ethpb.StateSummary{
@@ -376,14 +395,18 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
finalizedBlock := util.NewBeaconBlock()
finalizedBlock.Block.Slot = finalizedSlot
finalizedBlock.Block.ParentRoot = genesisRoot[:]
finalizedRoot, err := finalizedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(finalizedBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(finalizedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
// Set head slot close to the finalization point, no head sync is triggered.
headBlock := util.NewBeaconBlock()
@@ -391,7 +414,9 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
headState, err := util.NewBeaconState()
require.NoError(t, err)
@@ -426,7 +451,9 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err = headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
@@ -455,7 +482,9 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
newState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r, newState))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState))
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
require.NoError(t, err)
@@ -472,12 +501,14 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := util.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -491,12 +522,14 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := util.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -511,10 +544,12 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
cancel: cancel,
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
}
block := util.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
s.saveInitSyncBlock(r, wsb)
require.NoError(t, s.Stop())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}
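Several hunks above rename local variables from block to blk (and, in an earlier file, version to ver). The likely motivation, though not stated in the commits, is to stop shadowing identically named imported packages. A tiny illustration of the issue; the import path is an assumption about where the block interface package lives in this era of the repo:

package blockchain

import (
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block" // assumed path; package name is "block"
	"github.com/prysmaticlabs/prysm/testing/util"
)

func shadowExample() {
	// Naming this local "block" would shadow the imported package, so the type
	// block.SignedBeaconBlock could no longer be referenced later in the same
	// scope. "blk" keeps the package identifier usable.
	blk := util.NewBeaconBlock()
	_ = blk
	var wrapped block.SignedBeaconBlock // still resolvable because the package name is free
	_ = wrapped
}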
@@ -539,9 +574,11 @@ func BenchmarkHasBlockDB(b *testing.B) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
block := util.NewBeaconBlock()
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
r, err := block.Block.HashTreeRoot()
blk := util.NewBeaconBlock()
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wsb))
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
b.ResetTimer()
@@ -558,13 +595,15 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := block.Block.HashTreeRoot()
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -579,13 +618,15 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := block.Block.HashTreeRoot()
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -24,7 +24,7 @@ type stateByRooter interface {
StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}
// newStateBalanceCache exists to remind us that stateBalanceCache needs a stagegen
// newStateBalanceCache exists to remind us that stateBalanceCache needs a state gen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
if sg == nil {

View File

@@ -15,15 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
type mockStateByRoot struct {
state state.BeaconState
err error
}
func (m *mockStateByRoot) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
return m.state, m.err
}
type testStateOpt func(*ethpb.BeaconStateAltair)
func testStateWithValidators(v []*ethpb.Validator) testStateOpt {

View File

@@ -61,6 +61,7 @@ type ChainService struct {
SyncCommitteePubkeys [][]byte
Genesis time.Time
ForkChoiceStore forkchoice.ForkChoicer
ReceiveBlockMockErr error
}
// ForkChoicer mocks the same method in the chain service
@@ -195,32 +196,35 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.Signe
if s.State == nil {
return ErrNilState
}
for _, block := range blks {
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
for _, b := range blks {
if !bytes.Equal(s.Root, b.Block().ParentRoot()) {
return errors.Errorf("wanted %#x but got %#x", s.Root, b.Block().ParentRoot())
}
if err := s.State.SetSlot(block.Block().Slot()); err != nil {
if err := s.State.SetSlot(b.Block().Slot()); err != nil {
return err
}
s.BlocksReceived = append(s.BlocksReceived, block)
signingRoot, err := block.Block().HashTreeRoot()
s.BlocksReceived = append(s.BlocksReceived, b)
signingRoot, err := b.Block().HashTreeRoot()
if err != nil {
return err
}
if s.DB != nil {
if err := s.DB.SaveBlock(ctx, block); err != nil {
if err := s.DB.SaveBlock(ctx, b); err != nil {
return err
}
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, b.Block().Slot())
}
s.Root = signingRoot[:]
s.Block = block
s.Block = b
}
return nil
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}
if s.State == nil {
return ErrNilState
}
@@ -417,7 +421,7 @@ func (s *ChainService) HeadValidatorIndexToPublicKey(_ context.Context, _ types.
}
// HeadSyncCommitteeIndices mocks HeadSyncCommitteeIndices and always return `HeadNextSyncCommitteeIndices`.
func (s *ChainService) HeadSyncCommitteeIndices(_ context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
func (s *ChainService) HeadSyncCommitteeIndices(_ context.Context, _ types.ValidatorIndex, _ types.Slot) ([]types.CommitteeIndex, error) {
return s.SyncCommitteeIndices, nil
}
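The mock ChainService gains a ReceiveBlockMockErr field that short-circuits ReceiveBlock before any state checks. A sketch of how a consumer test might use it to exercise an error path; the mock's import path is an assumption about where these blockchain test doubles live:

package sync_test

import (
	"context"
	"errors"
	"testing"

	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" // assumed path
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
)

// TestReceiveBlockForcedError shows the new hook: when ReceiveBlockMockErr is
// set, ReceiveBlock returns it immediately, so callers can test failure
// handling without building a full mock state.
func TestReceiveBlockForcedError(t *testing.T) {
	chain := &mock.ChainService{ReceiveBlockMockErr: errors.New("forced failure")}
	wsb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
	require.NoError(t, err)
	err = chain.ReceiveBlock(context.Background(), wsb, [32]byte{})
	require.ErrorContains(t, "forced failure", err)
}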

View File

@@ -36,6 +36,7 @@ func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (
// per 7342, a nil checkpoint, zero-root or zero-epoch should all fail validation
// and return an error instead of creating a WeakSubjectivityVerifier that permits any chain history.
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
log.Warn("No valid weak subjectivity checkpoint specified, running without weak subjectivity verification")
return &WeakSubjectivityVerifier{
enabled: false,
}, nil
@@ -79,15 +80,17 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
if !v.db.HasBlock(ctx, v.root) {
return errors.Wrap(errWSBlockNotFound, fmt.Sprintf("missing root %#x", v.root))
}
filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(v.slot + params.BeaconConfig().SlotsPerEpoch)
endSlot := v.slot + params.BeaconConfig().SlotsPerEpoch
filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(endSlot)
// A node should have the weak subjectivity block corresponding to the correct epoch in the DB.
log.Infof("Searching block roots for weak subjectivity root=%#x, between slots %d-%d", v.root, v.slot, endSlot)
roots, err := v.db.BlockRoots(ctx, filter)
if err != nil {
return errors.Wrap(err, "error while retrieving block roots to verify weak subjectivity")
}
for _, root := range roots {
if v.root == root {
log.Info("Weak subjectivity check has passed")
log.Info("Weak subjectivity check has passed!!")
v.verified = true
return nil
}
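As a worked illustration of the slot-range filter above (not part of the diff itself): with mainnet's SlotsPerEpoch = 32, a weak subjectivity checkpoint at epoch 56015 gives v.slot = 56015 * 32 = 1792480, so the verifier scans stored block roots for slots 1792480 through 1792512 and only marks the check as verified if the checkpoint root appears in that window. The updated test below uses exactly this slot.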

View File

@@ -14,58 +14,65 @@ import (
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
b := util.NewBeaconBlock()
b.Block.Slot = 32
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
b.Block.Slot = 1792480
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
blockEpoch := slots.ToEpoch(b.Block.Slot)
tests := []struct {
wsVerified bool
disabled bool
wantErr error
checkpt *ethpb.Checkpoint
finalizedEpoch types.Epoch
name string
}{
{
name: "nil root and epoch",
},
{
name: "already verified",
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 2,
wsVerified: true,
name: "nil root and epoch",
disabled: true,
},
{
name: "not yet to verify, ws epoch higher than finalized epoch",
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 1,
checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: blockEpoch},
finalizedEpoch: blockEpoch - 1,
},
{
name: "can't find the block in DB",
checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), Epoch: 1},
finalizedEpoch: 3,
finalizedEpoch: blockEpoch + 1,
wantErr: errWSBlockNotFound,
},
{
name: "can't find the block corresponds to ws epoch in DB",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 2}, // Root belongs in epoch 1.
finalizedEpoch: 3,
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch - 2}, // Root belongs in blockEpoch, not blockEpoch-2.
finalizedEpoch: blockEpoch - 1,
wantErr: errWSBlockNotFoundInEpoch,
},
{
name: "can verify and pass",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 1},
finalizedEpoch: 3,
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
finalizedEpoch: blockEpoch + 1,
},
{
name: "equal epoch",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
finalizedEpoch: blockEpoch,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},

View File

@@ -13,6 +13,7 @@ go_library(
"common.go",
"doc.go",
"error.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
"proposer_indices_type.go",
@@ -26,6 +27,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
"//tools:__subpackages__",
],
deps = [
@@ -61,6 +63,7 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"payload_id_test.go",
"proposer_indices_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",

73
beacon-chain/cache/payload_id.go vendored Normal file
View File

@@ -0,0 +1,73 @@
package cache
import (
"sync"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs, 8 bytes each.
type ProposerPayloadIDsCache struct {
slotToProposerAndPayloadIDs map[types.Slot][vpIdsLength]byte
sync.RWMutex
}
// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
return &ProposerPayloadIDsCache{
slotToProposerAndPayloadIDs: make(map[types.Slot][vpIdsLength]byte),
}
}
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot types.Slot) (types.ValidatorIndex, [8]byte, bool) {
f.RLock()
defer f.RUnlock()
ids, ok := f.slotToProposerAndPayloadIDs[slot]
if !ok {
return 0, [8]byte{}, false
}
vId := ids[:vIdLength]
b := ids[vIdLength:]
var pId [pIdLength]byte
copy(pId[:], b)
return types.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot types.Slot, vId types.ValidatorIndex, pId [8]byte) {
f.Lock()
defer f.Unlock()
var vIdBytes [vIdLength]byte
copy(vIdBytes[:], bytesutil.Uint64ToBytesBigEndian(uint64(vId)))
var bytes [vpIdsLength]byte
copy(bytes[:], append(vIdBytes[:], pId[:]...))
_, ok := f.slotToProposerAndPayloadIDs[slot]
// Ok to overwrite an existing entry only when the incoming payload ID is non-empty.
// This handles the re-org case, where the payload assignment for a given slot can change.
if !ok || (ok && pId != [pIdLength]byte{}) {
f.slotToProposerAndPayloadIDs[slot] = bytes
}
}
// PrunePayloadIDs removes payload ID entries for slots older than the input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot types.Slot) {
f.Lock()
defer f.Unlock()
for s := range f.slotToProposerAndPayloadIDs {
if slot > s {
delete(f.slotToProposerAndPayloadIDs, s)
}
}
}
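A minimal usage sketch of the new cache, assuming the import paths shown in the BUILD and source hunks above (this sketch is not part of the diff):

package main

import (
	"fmt"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

func main() {
	c := cache.NewProposerPayloadIDsCache()
	slot := types.Slot(100)
	// Remember that validator 7 proposes at slot 100 with the given payload ID.
	c.SetProposerAndPayloadIDs(slot, types.ValidatorIndex(7), [8]byte{0xde, 0xad})
	if vIdx, payloadID, ok := c.GetProposerPayloadIDs(slot); ok {
		fmt.Println(vIdx, payloadID) // 7 [222 173 0 0 0 0 0 0]
	}
	// Drops every entry older than slot+1, which removes the entry above.
	c.PrunePayloadIDs(slot + 1)
}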

60
beacon-chain/cache/payload_id_test.go vendored Normal file
View File

@@ -0,0 +1,60 @@
package cache
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestValidatorPayloadIDsCache_GetAndSaveValidatorPayloadIDs(t *testing.T) {
cache := NewProposerPayloadIDsCache()
i, p, ok := cache.GetProposerPayloadIDs(0)
require.Equal(t, false, ok)
require.Equal(t, types.ValidatorIndex(0), i)
require.Equal(t, [pIdLength]byte{}, p)
slot := types.Slot(1234)
vid := types.ValidatorIndex(34234324)
pid := [8]byte{1, 2, 3, 3, 7, 8, 7, 8}
cache.SetProposerAndPayloadIDs(slot, vid, pid)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, pid, p)
slot = types.Slot(9456456)
vid = types.ValidatorIndex(6786745)
cache.SetProposerAndPayloadIDs(slot, vid, [pIdLength]byte{})
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, [pIdLength]byte{}, p)
// reset cache without pid
slot = types.Slot(9456456)
vid = types.ValidatorIndex(11111)
pid = [8]byte{3, 2, 3, 33, 72, 8, 7, 8}
cache.SetProposerAndPayloadIDs(slot, vid, pid)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, pid, p)
// reset cache with existing pid
slot = types.Slot(9456456)
vid = types.ValidatorIndex(11111)
newPid := [8]byte{1, 2, 3, 33, 72, 8, 7, 1}
cache.SetProposerAndPayloadIDs(slot, vid, newPid)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, newPid, p)
// remove cache entry
cache.PrunePayloadIDs(slot + 1)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, false, ok)
require.Equal(t, types.ValidatorIndex(0), i)
require.Equal(t, [pIdLength]byte{}, p)
}

View File

@@ -48,7 +48,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
params.BeaconConfig().MinAttestationInclusionDelay,
beaconState.Slot(),
)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
@@ -79,7 +79,7 @@ func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
time.PrevEpoch(beaconState),
time.CurrentEpoch(beaconState),
)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
@@ -108,13 +108,13 @@ func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
want := "source check point not equal to current justified checkpoint"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = time.CurrentEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
@@ -149,14 +149,14 @@ func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))
want := "source check point not equal to previous justified checkpoint"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = time.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Target.Epoch = time.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
@@ -188,7 +188,7 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, expected, err)
@@ -232,7 +232,7 @@ func TestProcessAttestations_OK(t *testing.T) {
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -421,7 +421,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
}
s, err := stateAltair.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb)
if err != nil && r != nil {

View File

@@ -45,6 +45,7 @@ go_library(
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//proto/prysm/v1alpha1/slashings:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -42,7 +42,9 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
s, err := v1.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
_, err = ProcessBlockHeader(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = ProcessBlockHeader(context.Background(), s, wsb)
_ = err
}
}
@@ -141,7 +143,9 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
fuzzer.Fuzz(b)
s, err := v1.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessRandao(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := ProcessRandao(context.Background(), s, wsb)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
}
@@ -260,7 +264,9 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
fuzzer.Fuzz(b)
s, err := v1.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessAttestationsNoVerifySignature(ctx, s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := ProcessAttestationsNoVerifySignature(ctx, s, wsb)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
}

View File

@@ -68,7 +68,9 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wsb)
assert.ErrorContains(t, "block.Slot 10 must be greater than state.LatestBlockHeader.Slot 10", err)
}
@@ -94,7 +96,9 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
block.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, block.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx+1])
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), beaconState, wsb)
want := "signature did not verify"
assert.ErrorContains(t, want, err)
}
@@ -136,7 +140,9 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
Signature: blockSig,
})
_, err = blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wsb)
want := "is different than block slot"
assert.ErrorContains(t, want, err)
}
@@ -175,7 +181,9 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
block.Block.ParentRoot = bytesutil.PadTo([]byte{'A'}, 32)
block.Signature = blockSig
_, err = blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wsb)
want := "does not match"
assert.ErrorContains(t, want, err)
}
@@ -217,7 +225,9 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
block.Block.ParentRoot = parentRoot[:]
block.Signature = blockSig
_, err = blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wsb)
want := "was previously slashed"
assert.ErrorContains(t, want, err)
}
@@ -266,7 +276,9 @@ func TestProcessBlockHeader_OK(t *testing.T) {
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
newState, err := blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
newState, err := blocks.ProcessBlockHeader(context.Background(), state, wsb)
require.NoError(t, err, "Failed to process block header got")
var zeroHash [32]byte
nsh := newState.LatestBlockHeader()

View File

@@ -2,7 +2,6 @@ package blocks
import (
"bytes"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -14,86 +13,96 @@ import (
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
)
// MergeTransitionComplete returns true if the transition to Bellatrix has completed.
// IsMergeTransitionComplete returns true if the transition to Bellatrix has completed.
// Meaning the payload header in beacon state is not `ExecutionPayloadHeader()` (i.e. not empty).
//
// Spec code:
// def is_merge_transition_complete(state: BeaconState) -> bool:
// return state.latest_execution_payload_header != ExecutionPayloadHeader()
//
// Deprecated: Use `IsMergeTransitionBlockUsingPayloadHeader` instead.
func MergeTransitionComplete(st state.BeaconState) (bool, error) {
func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
if st == nil {
return false, errors.New("nil state")
}
if IsPreBellatrixVersion(st.Version()) {
return false, nil
}
h, err := st.LatestExecutionPayloadHeader()
if err != nil {
return false, err
}
return !isEmptyHeader(h), nil
}
// MergeTransitionBlock returns true if the input block is the terminal merge block.
// Meaning the header in beacon state is `ExecutionPayloadHeader()` (i.e. empty).
// And the input block has a non-empty execution payload.
//
// Spec code:
// def is_merge_transition_block(state: BeaconState, body: BeaconBlockBody) -> bool:
// return not is_merge_transition_complete(state) and body.execution_payload != ExecutionPayload()
func MergeTransitionBlock(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
mergeComplete, err := MergeTransitionComplete(st)
if err != nil {
return false, err
}
if mergeComplete {
return false, err
}
return ExecutionBlock(body)
}
// IsMergeTransitionBlockUsingPayloadHeader returns true if the input block is the terminal merge block.
// IsMergeTransitionBlockUsingPreStatePayloadHeader returns true if the input block is the terminal merge block.
// Terminal merge block must be associated with an empty payload header.
// This is an optimized version of MergeTransitionComplete where beacon state is not required as an argument.
func IsMergeTransitionBlockUsingPayloadHeader(h *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
// This assumes the header `h` is taken from the pre-state of block body `body`.
func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
if h == nil || body == nil {
return false, errors.New("nil header or block body")
}
if !isEmptyHeader(h) {
return false, nil
}
return ExecutionBlock(body)
return IsExecutionBlock(body)
}
// ExecutionBlock returns whether the block has a non-empty ExecutionPayload.
// IsExecutionBlock returns whether the block has a non-empty ExecutionPayload.
//
// Spec code:
// def is_execution_block(block: BeaconBlock) -> bool:
// return block.body.execution_payload != ExecutionPayload()
func ExecutionBlock(body block.BeaconBlockBody) (bool, error) {
func IsExecutionBlock(body block.BeaconBlockBody) (bool, error) {
if body == nil {
return false, errors.New("nil block body")
}
payload, err := body.ExecutionPayload()
if err != nil {
if strings.HasPrefix(err.Error(), "ExecutionPayload is not supported in") {
return false, nil
}
switch {
case errors.Is(err, wrapper.ErrUnsupportedField):
return false, nil
case err != nil:
return false, err
default:
}
return !isEmptyPayload(payload), nil
}
// ExecutionEnabled returns true if the beacon chain can begin executing.
// IsExecutionEnabled returns true if the beacon chain can begin executing.
// Meaning the payload header in beacon state is non-empty or the payload in block body is non-empty.
//
// Spec code:
// def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool:
// return is_merge_block(state, body) or is_merge_complete(state)
func ExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
mergeBlock, err := MergeTransitionBlock(st, body)
func IsExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
if st == nil || body == nil {
return false, errors.New("nil state or block body")
}
if IsPreBellatrixVersion(st.Version()) {
return false, nil
}
header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return false, err
}
if mergeBlock {
return IsExecutionEnabledUsingHeader(header, body)
}
// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
func IsExecutionEnabledUsingHeader(header *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
if !isEmptyHeader(header) {
return true, nil
}
return MergeTransitionComplete(st)
return IsExecutionBlock(body)
}
// IsPreBellatrixVersion returns true if input version is before bellatrix fork.
func IsPreBellatrixVersion(v int) bool {
return v < version.Bellatrix
}
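To keep the renames in this file straight: MergeTransitionComplete is now IsMergeTransitionComplete, ExecutionBlock is now IsExecutionBlock, ExecutionEnabled is now IsExecutionEnabled, IsMergeTransitionBlockUsingPayloadHeader is now IsMergeTransitionBlockUsingPreStatePayloadHeader, and MergeTransitionBlock is removed. The sketch below (not from the diff) mirrors how ProcessBlockForStateRoot, later in this compare, consumes the renamed helpers; `st` and `body` stand for a Bellatrix beacon state and a wrapped block body, and `blocks` refers to this package the way the surrounding tests do:

func processPayloadIfEnabled(st state.BeaconState, body block.BeaconBlockBody) (state.BeaconState, error) {
	enabled, err := blocks.IsExecutionEnabled(st, body)
	if err != nil {
		return nil, err
	}
	if !enabled {
		// Pre-Bellatrix or pre-merge: nothing to execute yet.
		return st, nil
	}
	payload, err := body.ExecutionPayload()
	if err != nil {
		return nil, err
	}
	// Fold the execution payload into the beacon state once execution is enabled.
	return blocks.ProcessPayload(st, payload)
}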
// ValidatePayloadWhenMergeCompletes validates the execution payload against the input beacon state.
@@ -104,7 +113,7 @@ func ExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, e
// if is_merge_complete(state):
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
complete, err := MergeTransitionComplete(st)
complete, err := IsMergeTransitionComplete(st)
if err != nil {
return err
}
@@ -226,6 +235,9 @@ func PayloadToHeader(payload *enginev1.ExecutionPayload) (*ethpb.ExecutionPayloa
}
func isEmptyPayload(p *enginev1.ExecutionPayload) bool {
if p == nil {
return true
}
if !bytes.Equal(p.ParentHash, make([]byte, fieldparams.RootLength)) {
return false
}

View File

@@ -18,7 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
func Test_MergeComplete(t *testing.T) {
func Test_IsMergeComplete(t *testing.T) {
tests := []struct {
name string
payload *ethpb.ExecutionPayloadHeader
@@ -160,7 +160,7 @@ func Test_MergeComplete(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
got, err := blocks.MergeTransitionComplete(st)
got, err := blocks.IsMergeTransitionComplete(st)
require.NoError(t, err)
if got != tt.want {
t.Errorf("mergeComplete() got = %v, want %v", got, tt.want)
@@ -169,187 +169,6 @@ func Test_MergeComplete(t *testing.T) {
}
}
func Test_MergeBlock(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
want bool
}{
{
name: "empty header, empty payload",
payload: emptyPayload(),
header: emptyPayloadHeader(),
want: false,
},
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: false,
},
{
name: "empty header, payload has parent hash",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has fee recipient",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has state root",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has receipt root",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has logs bloom",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has random",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has base fee",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has block hash",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has tx",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Transactions = [][]byte{{'a'}}
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has extra data",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has block number",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.BlockNumber = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has gas limit",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.GasLimit = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has gas used",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.GasUsed = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has timestamp",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Timestamp = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.MergeTransitionBlock(st, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
}
})
}
}
func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
tests := []struct {
name string
@@ -520,7 +339,7 @@ func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(tt.header, body)
got, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(tt.header, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
@@ -556,20 +375,28 @@ func Test_IsExecutionBlock(t *testing.T) {
blk.Block.Body.ExecutionPayload = tt.payload
wrappedBlock, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(t, err)
got, err := blocks.ExecutionBlock(wrappedBlock.Body())
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
}
func Test_ExecutionEnabled(t *testing.T) {
func Test_IsExecutionEnabled(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
want bool
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
useAltairSt bool
want bool
}{
{
name: "use older than bellatrix state",
payload: emptyPayload(),
header: emptyPayloadHeader(),
useAltairSt: true,
want: false,
},
{
name: "empty header, empty payload",
payload: emptyPayload(),
@@ -619,10 +446,76 @@ func Test_ExecutionEnabled(t *testing.T) {
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.ExecutionEnabled(st, body)
if tt.useAltairSt {
st, _ = util.DeterministicGenesisStateAltair(t, 1)
}
got, err := blocks.IsExecutionEnabled(st, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("ExecutionEnabled() got = %v, want %v", got, tt.want)
t.Errorf("IsExecutionEnabled() got = %v, want %v", got, tt.want)
}
})
}
}
func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
want bool
}{
{
name: "empty header, empty payload",
payload: emptyPayload(),
header: emptyPayloadHeader(),
want: false,
},
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "empty header, non-empty payload",
header: emptyPayloadHeader(),
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Timestamp = 1
return p
}(),
want: true,
},
{
name: "non-empty header, non-empty payload",
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Timestamp = 1
return p
}(),
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.IsExecutionEnabledUsingHeader(tt.header, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("IsExecutionEnabled() got = %v, want %v", got, tt.want)
}
})
}
@@ -830,7 +723,7 @@ func BenchmarkBellatrixComplete(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := blocks.MergeTransitionComplete(st)
_, err := blocks.IsMergeTransitionComplete(st)
require.NoError(b, err)
}
}

View File

@@ -40,7 +40,9 @@ func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) {
}
want := "block randao: signature did not verify"
_, err = blocks.ProcessRandao(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = blocks.ProcessRandao(context.Background(), beaconState, wsb)
assert.ErrorContains(t, want, err)
}
@@ -57,11 +59,12 @@ func TestProcessRandao_SignatureVerifiesAndUpdatesLatestStateMixes(t *testing.T)
RandaoReveal: epochSignature,
},
}
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
newState, err := blocks.ProcessRandao(
context.Background(),
beaconState,
wrapper.WrappedPhase0SignedBeaconBlock(b),
wsb,
)
require.NoError(t, err, "Unexpected error processing block randao")
currentEpoch := time.CurrentEpoch(beaconState)

View File

@@ -83,7 +83,7 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
assert.NoError(t, err)
sig := keys[0].Sign(rt[:]).Marshal()
altairBlk.Signature = sig
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(altairBlk)
wsb, err := wrapper.WrappedSignedBeaconBlock(altairBlk)
require.NoError(t, err)
assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb))
}

View File

@@ -13,5 +13,6 @@ const (
// ReceivedBlockData is the data sent with ReceivedBlock events.
type ReceivedBlockData struct {
SignedBlock block.SignedBeaconBlock
SignedBlock block.SignedBeaconBlock
IsOptimistic bool
}

View File

@@ -6,6 +6,7 @@ import (
"bytes"
"context"
"fmt"
log "github.com/sirupsen/logrus"
"sort"
"github.com/pkg/errors"
@@ -175,9 +176,7 @@ func CommitteeAssignments(
return nil, nil, err
}
proposerIndexToSlots := make(map[types.ValidatorIndex][]types.Slot, params.BeaconConfig().SlotsPerEpoch)
// Proposal epochs do not have a look ahead, so we skip them over here.
validProposalEpoch := epoch < nextEpoch
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch && validProposalEpoch; slot++ {
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
@@ -192,6 +191,15 @@ func CommitteeAssignments(
proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
}
// If the previous proposer indices computation ran outside of the current proposal epoch range,
// we need to reset the state slot back to the start slot so that we can compute the correct committees.
currentProposalEpoch := epoch < nextEpoch
if !currentProposalEpoch {
if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
return nil, nil, err
}
}
activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return nil, nil, err
@@ -279,40 +287,43 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.Va
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for current epoch and next epoch.
func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) error {
for _, e := range []types.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as the shuffled indices. In the current spec,
// sorted indices are required to retrieve the proposer index. They are also
// used as a fallback when signature verification fails.
sortedIndices := make([]types.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
//for _, e := range []types.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
log.Infof("computed seed=%#x for slot=%d", seed, state.Slot())
if committeeCache.HasEntry(string(seed[:])) {
log.Infof("UpdateCommitteeCache: seed=%#x already in cache at slot=%d", seed, state.Slot())
return nil
}
shuffledIndices, err := ShuffledIndices(state, epoch)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as the shuffled indices. In the current spec,
// sorted indices are required to retrieve the proposer index. They are also
// used as a fallback when signature verification fails.
sortedIndices := make([]types.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
log.Infof("UpdateCommitteeCache: epoch=%d, state.slot=%d, indices=%v, seed=%#x", epoch, state.Slot(), sortedIndices, seed)
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
//}
return nil
}
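With the epoch+1 iteration commented out above, each call now caches shuffled indices only for the requested epoch; committees for the following epoch are computed and cached lazily on first access rather than being warmed ahead of time.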
@@ -356,6 +367,7 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
if err != nil {
return err
}
log.Infof("UpdateProposerIndicesInCache: state.slot=%d, slot=%d, root=%#x, indices=%v", state.Slot(), s, r, indices)
return proposerIndicesCache.AddProposerIndices(&cache.ProposerIndices{
BlockRoot: bytesutil.ToBytes32(r),
ProposerIndices: proposerIndices,

View File

@@ -232,7 +232,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
require.NoError(t, err)
require.Equal(t, 0, len(proposerIndxs), "wanted empty proposer index set")
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {

View File

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
log "github.com/sirupsen/logrus"
)
// Seed returns the randao seed used for shuffling of a given epoch.
@@ -33,6 +34,8 @@ func Seed(state state.ReadOnlyBeaconState, epoch types.Epoch, domain [bls.Domain
seed32 := hash.Hash(seed)
log.Infof("seed computation params for slot=%d: domain=%#x, epoch=%d, randaoMix=%#x", state.Slot(), domain, epoch, randaoMix)
return seed32, nil
}

View File

@@ -3,6 +3,8 @@ package helpers
import (
"bytes"
"context"
"fmt"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -15,7 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
log "github.com/sirupsen/logrus"
)
@@ -88,11 +89,27 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
if err != nil {
return nil, errors.Wrap(err, "could not get seed")
}
var ci []types.ValidatorIndex
if s.Slot() == 78 {
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
ci = append(ci, types.ValidatorIndex(idx))
}
return nil
}); err != nil {
log.Errorf("got error doing double-check validator index computation=%v", err)
return nil, err
}
}
activeIndices, err := committeeCache.ActiveIndices(ctx, seed)
if err != nil {
return nil, errors.Wrap(err, "could not interface with committee cache")
}
if activeIndices != nil {
if s.Slot() == 78 {
log.Infof("double check indices for 78, len=%d, low=%d, high=%d", len(ci), ci[0], ci[len(ci)-1])
}
log.Infof("found indices in cache for slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(activeIndices), activeIndices[0], activeIndices[len(activeIndices)-1])
return activeIndices, nil
}
@@ -106,6 +123,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
return nil, errors.New("nil active indices")
}
CommitteeCacheInProgressHit.Inc()
log.Infof("found indices in in-progress cache for slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(activeIndices), activeIndices[0], activeIndices[len(activeIndices)-1])
return activeIndices, nil
}
return nil, errors.Wrap(err, "could not mark committee cache as in progress")
@@ -126,9 +144,16 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
return nil, err
}
log.Infof("computed indices slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(indices), indices[0], indices[len(indices)-1])
log.Infof("UpdateCommitteeCache from ActiveValidatorIndices, slot=%d", s.Slot())
if err := UpdateCommitteeCache(s, epoch); err != nil {
return nil, errors.Wrap(err, "could not update committee cache")
}
/*
if err := UpdateProposerIndicesInCache(ctx, s); err != nil {
return nil, errors.Wrap(err, "failed to update proposer indices cache in ActiveValidatorIndices")
}
*/
return indices, nil
}
@@ -175,6 +200,7 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
return 0, err
}
log.Infof("UpdateCommitteeCache from ActiveValidatorCount, slot=%d", s.Slot())
if err := UpdateCommitteeCache(s, epoch); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
@@ -249,6 +275,7 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
log.Info("UpdateProposerIndicesInCache from BeaconProposerIndex")
if err := UpdateProposerIndicesInCache(ctx, state); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
@@ -259,15 +286,19 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
if err != nil {
return 0, errors.Wrap(err, "could not generate seed")
}
fmt.Printf("BeaconProposerIndex:seed=%#x", seed)
seedWithSlot := append(seed[:], bytesutil.Bytes8(uint64(state.Slot()))...)
fmt.Printf("BeaconProposerIndex:seedWithSlot=%#x", seed)
seedWithSlotHash := hash.Hash(seedWithSlot)
fmt.Printf("BeaconProposerIndex:seedWithSlotHash=%#x", seed)
indices, err := ActiveValidatorIndices(ctx, state, e)
if err != nil {
return 0, errors.Wrap(err, "could not get active indices")
}
log.Infof("validator index length=%d, low=%d, high=%d", len(indices), indices[0], indices[len(indices)-1])
return ComputeProposerIndex(state, indices, seedWithSlotHash)
}

View File

@@ -55,9 +55,9 @@ import (
// )
//
// return ws_period
func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconState) (types.Epoch, error) {
func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconState, cfg *params.BeaconChainConfig) (types.Epoch, error) {
// Weak subjectivity period cannot be smaller than withdrawal delay.
wsp := uint64(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
wsp := uint64(cfg.MinValidatorWithdrawabilityDelay)
// Cardinality of active validator set.
N, err := ActiveValidatorCount(ctx, st, time.CurrentEpoch(st))
@@ -73,10 +73,10 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
if err != nil {
return 0, fmt.Errorf("cannot find total active balance of validators: %w", err)
}
t = t / N / params.BeaconConfig().GweiPerEth
t = t / N / cfg.GweiPerEth
// Maximum effective balance per validator.
T := params.BeaconConfig().MaxEffectiveBalance / params.BeaconConfig().GweiPerEth
T := cfg.MaxEffectiveBalance / cfg.GweiPerEth
// Validator churn limit.
delta, err := ValidatorChurnLimit(N)
@@ -85,14 +85,14 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
}
// Balance top-ups.
Delta := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxDeposits))
Delta := uint64(cfg.SlotsPerEpoch.Mul(cfg.MaxDeposits))
if delta == 0 || Delta == 0 {
return 0, errors.New("either validator churn limit or balance top-ups is zero")
}
// Safety decay, maximum tolerable loss of safety margin of FFG finality.
D := params.BeaconConfig().SafetyDecay
D := cfg.SafetyDecay
if T*(200+3*D) < t*(200+12*D) {
epochsForValidatorSetChurn := N * (t*(200+12*D) - T*(200+3*D)) / (600 * delta * (2*t + T))
@@ -123,7 +123,7 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
// current_epoch = compute_epoch_at_slot(get_current_slot(store))
// return current_epoch <= ws_state_epoch + ws_period
func IsWithinWeakSubjectivityPeriod(
ctx context.Context, currentEpoch types.Epoch, wsState state.ReadOnlyBeaconState, wsStateRoot [fieldparams.RootLength]byte, wsEpoch types.Epoch) (bool, error) {
ctx context.Context, currentEpoch types.Epoch, wsState state.ReadOnlyBeaconState, wsStateRoot [fieldparams.RootLength]byte, wsEpoch types.Epoch, cfg *params.BeaconChainConfig) (bool, error) {
// Make sure that incoming objects are not nil.
if wsState == nil || wsState.IsNil() || wsState.LatestBlockHeader() == nil {
return false, errors.New("invalid weak subjectivity state or checkpoint")
@@ -140,7 +140,7 @@ func IsWithinWeakSubjectivityPeriod(
}
// Compare given epoch to state epoch + weak subjectivity period.
wsPeriod, err := ComputeWeakSubjectivityPeriod(ctx, wsState)
wsPeriod, err := ComputeWeakSubjectivityPeriod(ctx, wsState, cfg)
if err != nil {
return false, fmt.Errorf("cannot compute weak subjectivity period: %w", err)
}
@@ -154,8 +154,8 @@ func IsWithinWeakSubjectivityPeriod(
// Within the weak subjectivity period, if two conflicting blocks are finalized, 1/3 - D (D := safety decay)
// of validators will get slashed. Therefore, it is safe to assume that any finalized checkpoint within that
// period is protected by this safety margin.
func LatestWeakSubjectivityEpoch(ctx context.Context, st state.ReadOnlyBeaconState) (types.Epoch, error) {
wsPeriod, err := ComputeWeakSubjectivityPeriod(ctx, st)
func LatestWeakSubjectivityEpoch(ctx context.Context, st state.ReadOnlyBeaconState, cfg *params.BeaconChainConfig) (types.Epoch, error) {
wsPeriod, err := ComputeWeakSubjectivityPeriod(ctx, st, cfg)
if err != nil {
return 0, err
}
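Call sites now pass the chain config explicitly. A minimal call-site sketch, assuming an initialized ReadOnlyBeaconState `st` and context `ctx` as in the tests below:

wsPeriod, err := helpers.ComputeWeakSubjectivityPeriod(ctx, st, params.BeaconConfig())
if err != nil {
	return err
}
// Per the spec comment above, a weak subjectivity state at epoch E remains usable through epoch E + wsPeriod.
_ = wsPeriod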

View File

@@ -48,7 +48,7 @@ func TestWeakSubjectivity_ComputeWeakSubjectivityPeriod(t *testing.T) {
t.Run(fmt.Sprintf("valCount: %d, avgBalance: %d", tt.valCount, tt.avgBalance), func(t *testing.T) {
// Reset committee cache - as we need to recalculate active validator set for each test.
helpers.ClearCache()
got, err := helpers.ComputeWeakSubjectivityPeriod(context.Background(), genState(t, tt.valCount, tt.avgBalance))
got, err := helpers.ComputeWeakSubjectivityPeriod(context.Background(), genState(t, tt.valCount, tt.avgBalance), params.BeaconConfig())
require.NoError(t, err)
assert.Equal(t, tt.want, got, "valCount: %v, avgBalance: %v", tt.valCount, tt.avgBalance)
})
@@ -178,7 +178,7 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
sr, _, e := tt.genWsCheckpoint()
got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), sr, e)
got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), sr, e, params.BeaconConfig())
if tt.wantedErr != "" {
assert.Equal(t, false, got)
assert.ErrorContains(t, tt.wantedErr, err)

View File

@@ -90,7 +90,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -101,7 +101,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Signature = sig.Marshal()
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -178,7 +178,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -190,7 +190,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
block.Signature = sig.Marshal()
block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32)
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "could not validate state root", err)
@@ -198,7 +198,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
func TestExecuteStateTransitionNoVerifyAnySig_PassesProcessingConditions(t *testing.T) {
beaconState, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)

View File

@@ -92,7 +92,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -103,7 +103,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Signature = sig.Marshal()
wsb, err = wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -180,7 +180,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -192,7 +192,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
block.Signature = sig.Marshal()
block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32)
wsb, err = wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "could not validate state root", err)

View File

@@ -29,7 +29,9 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(b, err)
_, err = coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wsb)
require.NoError(b, err)
}
}
@@ -50,12 +52,16 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
require.NoError(b, helpers.UpdateCommitteeCache(beaconState, time.CurrentEpoch(beaconState)))
require.NoError(b, beaconState.SetSlot(currentSlot))
// Run the state transition once to populate the cache.
_, err = coreState.ExecuteStateTransition(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(b, err)
_, err = coreState.ExecuteStateTransition(context.Background(), beaconState, wsb)
require.NoError(b, err, "Failed to process block, benchmarks will fail")
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(b, err)
_, err = coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wsb)
require.NoError(b, err, "Failed to process block, benchmarks will fail")
}
}

View File

@@ -32,10 +32,14 @@ func TestSkipSlotCache_OK(t *testing.T) {
// with the state
blk, err := util.GenerateFullBlock(bState, privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wsb)
require.NoError(t, err, "Could not run state transition")
require.Equal(t, true, executedState.Version() == version.Phase0)
bState, err = transition.ExecuteStateTransition(context.Background(), bState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(context.Background(), bState, wsb)
require.NoError(t, err, "Could not process state transition")
assert.DeepEqual(t, originalState.CloneInnerState(), bState.CloneInnerState(), "Skipped slots cache leads to different states")
@@ -57,7 +61,9 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
// with the state
blk, err := util.GenerateFullBlock(bState, privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wsb)
require.NoError(t, err, "Could not run state transition")
require.Equal(t, true, executedState.Version() == version.Phase0)
@@ -70,7 +76,9 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
signature, err := util.BlockSignature(originalState, blk.Block, privs)
require.NoError(t, err)
blk.Signature = signature.Marshal()
s1, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
s1, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb)
require.NoError(t, err, "Could not run state transition")
}
@@ -81,7 +89,9 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
signature, err := util.BlockSignature(originalState, blk.Block, privs)
require.NoError(t, err)
blk.Signature = signature.Marshal()
s0, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
s0, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb)
require.NoError(t, err, "Could not run state transition")
}

View File

@@ -138,6 +138,7 @@ func ProcessSlotsUsingNextSlotCache(
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlotsUsingNextSlotCache")
defer span.End()
/*
// Check whether the parent state has been advanced by 1 slot in next slot cache.
nextSlotState, err := NextSlotState(ctx, parentRoot)
if err != nil {
@@ -148,6 +149,11 @@ func ProcessSlotsUsingNextSlotCache(
// We replace next slot state with parent state.
if cachedStateExists {
parentState = nextSlotState
root, err := parentState.HashTreeRoot(ctx)
if err != nil {
log.Errorf("weird, got an error calling HTR for the state=%v where root should be=%#x", err, parentRoot)
}
log.Infof("found state in NextSlotCache at slot=%d with root=%#x (parentRoot=%#x)", parentState.Slot(), root, parentRoot)
}
// In the event our cached state has advanced our
@@ -155,9 +161,12 @@ func ProcessSlotsUsingNextSlotCache(
if cachedStateExists && parentState.Slot() == slot {
return parentState, nil
}
*/
log.Infof("process_slots being called up to slot=%d where state.slot=%d", slot, parentState.Slot())
// Since next slot cache only advances state by 1 slot,
// we check if there's more slots that need to process.
parentState, err = ProcessSlots(ctx, parentState, slot)
parentState, err := ProcessSlots(ctx, parentState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}

View File

@@ -25,7 +25,9 @@ func TestFuzzExecuteStateTransition_1000(t *testing.T) {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(sb)
s, err := ExecuteStateTransition(ctx, state, wrapper.WrappedPhase0SignedBeaconBlock(sb))
wsb, err := wrapper.WrappedSignedBeaconBlock(sb)
require.NoError(t, err)
s, err := ExecuteStateTransition(ctx, state, wsb)
if err != nil && s != nil {
t.Fatalf("state should be nil on err. found: %v on error: %v for state: %v and signed block: %v", s, err, state, sb)
}
@@ -44,7 +46,9 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(sb)
stateRoot, err := CalculateStateRoot(ctx, state, wrapper.WrappedPhase0SignedBeaconBlock(sb))
wsb, err := wrapper.WrappedSignedBeaconBlock(sb)
require.NoError(t, err)
stateRoot, err := CalculateStateRoot(ctx, state, wsb)
if err != nil && stateRoot != [32]byte{} {
t.Fatalf("state root should be empty on err. found: %v on error: %v for signed block: %v", stateRoot, err, sb)
}
@@ -99,7 +103,9 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(bb)
s, err := ProcessOperationsNoVerifyAttsSigs(ctx, state, wrapper.WrappedPhase0SignedBeaconBlock(bb))
wsb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
s, err := ProcessOperationsNoVerifyAttsSigs(ctx, state, wsb)
if err != nil && s != nil {
t.Fatalf("state should be nil on err. found: %v on error: %v for block body: %v", s, err, bb)
}
@@ -117,7 +123,9 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(bb)
_, err := VerifyOperationLengths(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(bb))
wsb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
_, err = VerifyOperationLengths(context.Background(), state, wsb)
_ = err
}
}
@@ -164,7 +172,9 @@ func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(sb)
s, err := ProcessBlockForStateRoot(ctx, state, wrapper.WrappedPhase0SignedBeaconBlock(sb))
wsb, err := wrapper.WrappedSignedBeaconBlock(sb)
require.NoError(t, err)
s, err := ProcessBlockForStateRoot(ctx, state, wsb)
if err != nil && s != nil {
t.Fatalf("state should be nil on err. found: %v on error: %v for signed block: %v", s, err, sb)
}

View File

@@ -282,23 +282,25 @@ func ProcessBlockForStateRoot(
state, err = b.ProcessBlockHeaderNoVerify(ctx, state, blk.Slot(), blk.ProposerIndex(), blk.ParentRoot(), bodyRoot[:])
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process block header")
r, err := signed.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not process block header, also failed to compute its htr")
}
return nil, errors.Wrapf(err, "could not process block header, state slot=%d, root=%#x", state.Slot(), r)
}
if state.Version() == version.Bellatrix {
enabled, err := b.ExecutionEnabled(state, blk.Body())
enabled, err := b.IsExecutionEnabled(state, blk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not check if execution is enabled")
}
if enabled {
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not check if execution is enabled")
return nil, err
}
if enabled {
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return nil, err
}
state, err = b.ProcessPayload(state, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload")
}
state, err = b.ProcessPayload(state, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload")
}
}

View File

@@ -52,7 +52,9 @@ func TestExecuteStateTransitionNoVerify_FullProcess(t *testing.T) {
block.Block.Body.RandaoReveal = randaoReveal
block.Block.Body.Eth1Data = eth1Data
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
@@ -61,7 +63,9 @@ func TestExecuteStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Signature = sig.Marshal()
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
assert.NoError(t, err)
verified, err := set.Verify()
assert.NoError(t, err)
@@ -104,7 +108,9 @@ func TestExecuteStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *test
block.Block.Body.RandaoReveal = randaoReveal
block.Block.Body.Eth1Data = eth1Data
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
@@ -114,13 +120,17 @@ func TestExecuteStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *test
block.Signature = sig.Marshal()
block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32)
_, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "could not validate state root", err)
}
func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
beaconState, block, _, _, _ := createFullBlockWithOperations(t)
set, _, err := transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)
// Test Signature set verifies.
verified, err := set.Verify()
@@ -130,7 +140,7 @@ func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
beaconState, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot())
require.NoError(t, err)
@@ -143,7 +153,7 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
beaconState, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot())
require.NoError(t, err)
@@ -153,7 +163,7 @@ func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
func TestProcessOperationsNoVerifyAttsSigsBellatrix_OK(t *testing.T) {
beaconState, block := createFullBellatrixBlockWithOperations(t)
wsb, err := wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot())
require.NoError(t, err)
@@ -163,7 +173,7 @@ func TestProcessOperationsNoVerifyAttsSigsBellatrix_OK(t *testing.T) {
func TestCalculateStateRootAltair_OK(t *testing.T) {
beaconState, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
r, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -173,7 +183,7 @@ func TestCalculateStateRootAltair_OK(t *testing.T) {
func TestProcessBlockDifferentVersion(t *testing.T) {
beaconState, _ := util.DeterministicGenesisState(t, 64) // Phase 0 state
_, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block) // Altair block
wsb, err := wrapper.WrappedSignedBeaconBlock(block) // Altair block
require.NoError(t, err)
_, _, err = transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "state and block are different version. 0 != 1", err)

View File

@@ -44,7 +44,9 @@ func TestExecuteStateTransition_IncorrectSlot(t *testing.T) {
},
}
want := "expected state.slot"
_, err = transition.ExecuteStateTransition(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb)
assert.ErrorContains(t, want, err)
}
@@ -87,7 +89,9 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) {
block.Block.Body.RandaoReveal = randaoReveal
block.Block.Body.Eth1Data = eth1Data
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
@@ -96,7 +100,9 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Signature = sig.Marshal()
beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch, beaconState.Slot(), "Unexpected Slot number")
@@ -183,7 +189,9 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
cp.Root = []byte("hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))
_, err = transition.VerifyOperationLengths(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), beaconState, wsb)
wanted := "number of voluntary exits (17) in block body exceeds allowed threshold of 16"
assert.ErrorContains(t, wanted, err)
}
@@ -396,7 +404,9 @@ func TestProcessBlock_OverMaxProposerSlashings(t *testing.T) {
len(b.Block.Body.ProposerSlashings), params.BeaconConfig().MaxProposerSlashings)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}
@@ -413,7 +423,9 @@ func TestProcessBlock_OverMaxAttesterSlashings(t *testing.T) {
len(b.Block.Body.AttesterSlashings), params.BeaconConfig().MaxAttesterSlashings)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}
@@ -429,7 +441,9 @@ func TestProcessBlock_OverMaxAttestations(t *testing.T) {
len(b.Block.Body.Attestations), params.BeaconConfig().MaxAttestations)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}
@@ -446,7 +460,9 @@ func TestProcessBlock_OverMaxVoluntaryExits(t *testing.T) {
len(b.Block.Body.VoluntaryExits), maxExits)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}
@@ -466,7 +482,9 @@ func TestProcessBlock_IncorrectDeposits(t *testing.T) {
}
want := fmt.Sprintf("incorrect outstanding deposits in block body, wanted: %d, got: %d",
s.Eth1Data().DepositCount-s.Eth1DepositIndex(), len(b.Block.Body.Deposits))
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}

View File

@@ -13,3 +13,9 @@ var ErrNotFoundState = kv.ErrNotFoundState
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot

View File

@@ -27,6 +27,7 @@ type ReadOnlyDatabase interface {
BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, [][32]byte, error)
HasBlock(ctx context.Context, blockRoot [32]byte) bool
GenesisBlock(ctx context.Context) (block.SignedBeaconBlock, error)
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (block.SignedBeaconBlock, error)
HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]block.SignedBeaconBlock, error)
@@ -53,7 +54,8 @@ type ReadOnlyDatabase interface {
// Fee recipient operations.
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
// origin checkpoint sync support
OriginBlockRoot(ctx context.Context) ([32]byte, error)
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
}
// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -97,12 +99,14 @@ type HeadAccessDatabase interface {
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
// Genesis operations.
LoadGenesis(ctx context.Context, r io.Reader) error
LoadGenesis(ctx context.Context, stateBytes []byte) error
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error
// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, state io.Reader, block io.Reader) error
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
}
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.

View File
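The interface changes above swap the io.Reader-based genesis and origin loaders for byte-slice variants and expose the new origin-checkpoint and backfill root accessors. A rough sketch of how a checkpoint-sync initialization path might call the byte-slice methods in order; the initFromCheckpoint helper and the file-path arguments are assumptions for illustration, not Prysm's actual startup code:

```go
package sketch

import (
	"context"
	"os"
)

// checkpointDB lists only the methods from the updated HeadAccessDatabase
// interface that this sketch relies on.
type checkpointDB interface {
	LoadGenesis(ctx context.Context, stateBytes []byte) error
	SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}

// initFromCheckpoint is a hypothetical helper: read the ssz-serialized genesis
// state plus the checkpoint state and block from disk, then hand the raw bytes
// to the database, matching the new []byte-based signatures.
func initFromCheckpoint(ctx context.Context, db checkpointDB, genesisPath, statePath, blockPath string) error {
	genesis, err := os.ReadFile(genesisPath)
	if err != nil {
		return err
	}
	// LoadGenesis must run first so the genesis block root exists;
	// SaveOrigin refuses to proceed without it.
	if err := db.LoadGenesis(ctx, genesis); err != nil {
		return err
	}
	state, err := os.ReadFile(statePath)
	if err != nil {
		return err
	}
	block, err := os.ReadFile(blockPath)
	if err != nil {
		return err
	}
	return db.SaveOrigin(ctx, state, block)
}
```

Passing raw bytes instead of readers also keeps the interface symmetric with the ssz config/fork detection code, which, as the SaveOrigin changes below show, operates on byte slices as well.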

@@ -94,6 +94,7 @@ go_test(
"state_test.go",
"utils_test.go",
"validated_checkpoint_test.go",
"wss_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
@@ -101,6 +102,7 @@ go_test(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/features:go_default_library",

View File

@@ -21,7 +21,9 @@ func TestStore_Backup(t *testing.T) {
head := util.NewBeaconBlock()
head.Block.Slot = 5000
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(head)))
wsb, err := wrapper.WrappedSignedBeaconBlock(head)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
root, err := head.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -61,7 +63,9 @@ func TestStore_BackupMultipleBuckets(t *testing.T) {
for i := startSlot; i < 5200; i++ {
head := util.NewBeaconBlock()
head.Block.Slot = i
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(head)))
wsb, err := wrapper.WrappedSignedBeaconBlock(head)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
root, err := head.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
ssz "github.com/ferranbt/fastssz"
"github.com/golang/snappy"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
@@ -47,18 +48,18 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (block.SignedBeac
return blk, err
}
// OriginBlockRoot returns the value written to the db in SaveOriginBlockRoot
// OriginCheckpointBlockRoot returns the value written to the db in SaveOriginCheckpointBlockRoot
// This is the root of a finalized block within the weak subjectivity period
// at the time the chain was started, used to initialize the database and chain
// without syncing from genesis.
func (s *Store) OriginBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.OriginBlockRoot")
func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(originBlockRootKey)
rootSlice := bkt.Get(originCheckpointBlockRootKey)
if rootSlice == nil {
return ErrNotFoundOriginBlockRoot
}
@@ -69,6 +70,25 @@ func (s *Store) OriginBlockRoot(ctx context.Context) ([32]byte, error) {
return root, err
}
// BackfillBlockRoot returns the root of the highest block available before the OriginCheckpointBlockRoot, used to track backfill progress.
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(backfillBlockRootKey)
if len(rootSlice) == 0 {
return ErrNotFoundBackfillBlockRoot
}
root = bytesutil.ToBytes32(rootSlice)
return nil
})
return root, err
}
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -213,13 +233,17 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
defer span.End()
if err := s.DeleteState(ctx, root); err != nil {
return errDeleteFinalized
return err
}
if err := s.deleteStateSummary(root); err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if b := bkt.Get(root[:]); b != nil {
return errDeleteFinalized
return ErrDeleteJustifiedAndFinalized
}
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {
@@ -324,6 +348,22 @@ func (s *Store) GenesisBlock(ctx context.Context) (block.SignedBeaconBlock, erro
return blk, err
}
func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
r := bkt.Get(genesisBlockRootKey)
if len(r) == 0 {
return ErrNotFoundGenesisBlockRoot
}
root = bytesutil.ToBytes32(r)
return nil
})
return root, err
}
// SaveGenesisBlockRoot to the db.
func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
@@ -334,16 +374,27 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
})
}
// SaveOriginBlockRoot is used to keep track of the block root used for origin sync.
// SaveOriginCheckpointBlockRoot is used to keep track of the block root used for syncing from a checkpoint origin.
// This should be a finalized block from within the current weak subjectivity period.
// This value is used by a running beacon chain node to locate the state at the beginning
// of the chain history, in places where genesis would typically be used.
func (s *Store) SaveOriginBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginBlockRoot")
func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(originBlockRootKey, blockRoot[:])
return bucket.Put(originCheckpointBlockRootKey, blockRoot[:])
})
}
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillBlockRootKey, blockRoot[:])
})
}
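SaveOriginCheckpointBlockRoot and SaveBackfillBlockRoot bracket the gap a checkpoint-synced node still has to fill: SaveOrigin seeds the backfill root at genesis, and a backfill routine would advance it as older blocks are fetched, until the gap up to the origin checkpoint is closed. A sketch of that bookkeeping, with the batch-fetching step left abstract and every name other than the three Store accessors assumed for illustration:

```go
package sketch

import "context"

// backfillStore mirrors the Store accessors added in this change.
type backfillStore interface {
	BackfillBlockRoot(ctx context.Context) ([32]byte, error)
	OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
	SaveBackfillBlockRoot(ctx context.Context, root [32]byte) error
}

// backfillOnce is a hypothetical driver: it reads the current progress marker,
// asks a fetcher to retrieve and persist the next batch of blocks in the gap
// between that marker and the origin checkpoint, then records the new marker.
func backfillOnce(
	ctx context.Context,
	db backfillStore,
	fetchBatch func(progress, origin [32]byte) (newProgress [32]byte, done bool, err error),
) (bool, error) {
	progress, err := db.BackfillBlockRoot(ctx) // the genesis root until backfill has made progress
	if err != nil {
		return false, err
	}
	origin, err := db.OriginCheckpointBlockRoot(ctx)
	if err != nil {
		return false, err
	}
	newProgress, done, err := fetchBatch(progress, origin)
	if err != nil {
		return false, err
	}
	if err := db.SaveBackfillBlockRoot(ctx, newProgress); err != nil {
		return false, err
	}
	return done, nil
}
```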
@@ -641,31 +692,27 @@ func unmarshalBlock(_ context.Context, enc []byte) (block.SignedBeaconBlock, err
if err != nil {
return nil, err
}
var rawBlock ssz.Unmarshaler
switch {
case hasAltairKey(enc):
// Marshal block bytes to altair beacon block.
rawBlock := &ethpb.SignedBeaconBlockAltair{}
err := rawBlock.UnmarshalSSZ(enc[len(altairKey):])
if err != nil {
rawBlock = &ethpb.SignedBeaconBlockAltair{}
if err := rawBlock.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
return nil, err
}
return wrapper.WrappedAltairSignedBeaconBlock(rawBlock)
case hasBellatrixKey(enc):
rawBlock := &ethpb.SignedBeaconBlockBellatrix{}
err := rawBlock.UnmarshalSSZ(enc[len(bellatrixKey):])
if err != nil {
rawBlock = &ethpb.SignedBeaconBlockBellatrix{}
if err := rawBlock.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
return nil, err
}
return wrapper.WrappedBellatrixSignedBeaconBlock(rawBlock)
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock := &ethpb.SignedBeaconBlock{}
err = rawBlock.UnmarshalSSZ(enc)
if err != nil {
rawBlock = &ethpb.SignedBeaconBlock{}
if err := rawBlock.UnmarshalSSZ(enc); err != nil {
return nil, err
}
return wrapper.WrappedPhase0SignedBeaconBlock(rawBlock), nil
}
return wrapper.WrappedSignedBeaconBlock(rawBlock)
}
// marshal versioned beacon block from struct type down to bytes.

View File

@@ -31,7 +31,7 @@ var blockTests = []struct {
if root != nil {
b.Block.ParentRoot = root
}
return wrapper.WrappedPhase0SignedBeaconBlock(b), nil
return wrapper.WrappedSignedBeaconBlock(b)
},
},
{
@@ -42,7 +42,7 @@ var blockTests = []struct {
if root != nil {
b.Block.ParentRoot = root
}
return wrapper.WrappedAltairSignedBeaconBlock(b)
return wrapper.WrappedSignedBeaconBlock(b)
},
},
{
@@ -53,11 +53,28 @@ var blockTests = []struct {
if root != nil {
b.Block.ParentRoot = root
}
return wrapper.WrappedBellatrixSignedBeaconBlock(b)
return wrapper.WrappedSignedBeaconBlock(b)
},
},
}
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
_, err := db.BackfillBlockRoot(ctx)
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
expected := [32]byte{}
copy(expected[:], []byte{0x23})
err = db.SaveBackfillBlockRoot(ctx, expected)
require.NoError(t, err)
actual, err := db.BackfillBlockRoot(ctx)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
BlockCacheSize = 1
slot := types.Slot(20)
@@ -174,6 +191,16 @@ func TestStore_DeleteBlock(t *testing.T) {
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
ss := make([]*ethpb.StateSummary, len(blks))
for i, blk := range blks {
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
ss[i] = &ethpb.StateSummary{
Slot: blk.Block().Slot(),
Root: r[:],
}
}
require.NoError(t, db.SaveStateSummaries(ctx, ss))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
@@ -199,11 +226,50 @@ func TestStore_DeleteBlock(t *testing.T) {
b, err = db.Block(ctx, root2)
require.NoError(t, err)
require.Equal(t, b, nil)
require.Equal(t, false, db.HasStateSummary(ctx, root2))
require.ErrorIs(t, db.DeleteBlock(ctx, root), errDeleteFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_DeleteJustifiedBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b := util.NewBeaconBlock()
b.Block.Slot = 1
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_DeleteFinalizedBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b := util.NewBeaconBlock()
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_GenesisBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
@@ -212,7 +278,9 @@ func TestStore_GenesisBlock(t *testing.T) {
blockRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, blockRoot))
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlock)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
retrievedBlock, err := db.GenesisBlock(ctx)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(genesisBlock, retrievedBlock.Proto()), "Wanted: %v, received: %v", genesisBlock, retrievedBlock)

View File

@@ -53,7 +53,9 @@ func TestStore_FinalizedCheckpoint_CanSaveRetrieve(t *testing.T) {
}
// a valid chain is required to save finalized checkpoint.
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))

View File

@@ -2,8 +2,8 @@ package kv
import "github.com/pkg/errors"
// errDeleteFinalized is raised when we attempt to delete a finalized block/state
var errDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a justified or finalized block/state
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.
@@ -13,5 +13,11 @@ var ErrNotFoundState = errors.Wrap(ErrNotFound, "state not found")
// ErrNotFoundOriginBlockRoot is an error specifically for the origin block root getter
var ErrNotFoundOriginBlockRoot = errors.Wrap(ErrNotFound, "OriginBlockRoot")
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = errors.Wrap(ErrNotFound, "OriginGenesisRoot")
// ErrNotFoundBackfillBlockRoot is an error specifically for the backfill block root getter
var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")

View File

@@ -23,7 +23,7 @@ var previousFinalizedCheckpointKey = []byte("previous-finalized-checkpoint")
var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to determine canonical")
// The finalized block roots index tracks beacon blocks which are finalized in the canonical chain.
// The finalized checkpoint contains the the epoch which was finalized and the highest beacon block
// The finalized checkpoint contains the epoch which was finalized and the highest beacon block
// root where block.slot <= start_slot(epoch). As a result, we cannot index the finalized canonical
// beacon block chain using the finalized root alone as this would exclude all other blocks in the
// finalized epoch from being indexed as "final and canonical".
@@ -32,7 +32,7 @@ var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to
// - De-index all finalized beacon block roots from previous_finalized_epoch to
// new_finalized_epoch. (I.e. delete these roots from the index, to be re-indexed.)
// - Build the canonical finalized chain by walking up the ancestry chain from the finalized block
// root until a parent is found in the index or the parent is genesis.
// root until a parent is found in the index, or the parent is genesis or the origin checkpoint.
// - Add all block roots in the database where epoch(block.slot) == checkpoint.epoch.
//
// This method ensures that all blocks from the current finalized epoch are considered "final" while
@@ -46,6 +46,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
root := checkpoint.Root
var previousRoot []byte
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
initCheckpointRoot := tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)
// De-index recent finalized block roots, to be re-indexed.
previousFinalizedCheckpoint := &ethpb.Checkpoint{}
@@ -104,6 +105,12 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
return err
}
// breaking here allows the initial checkpoint root to be correctly inserted,
// but stops the loop from trying to search for its parent.
if bytes.Equal(root, initCheckpointRoot) {
break
}
// Found parent, loop exit condition.
if parentBytes := bkt.Get(block.ParentRoot()); parentBytes != nil {
parent := &ethpb.FinalizedBlockRootContainer{}

View File
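The index update described in the comment block above walks parent pointers upward from the newly finalized root, inserting each root, until it reconnects with an already-indexed ancestor; the added break also stops at the origin checkpoint root for checkpoint-synced nodes, since no blocks below it exist locally. A simplified sketch of that walk over in-memory maps, with made-up types standing in for the bolt buckets and protobuf containers:

```go
package sketch

import "bytes"

// blockInfo is a stand-in for a stored block: only the parent link matters here.
type blockInfo struct {
	parentRoot []byte
}

// indexFinalizedChain walks up from finalizedRoot, adding each root to the
// index, and stops at genesis, at the origin checkpoint root, or when the
// parent is already indexed (the "found parent" exit in the real code).
func indexFinalizedChain(
	finalizedRoot, genesisRoot, originCheckpointRoot []byte,
	blocks map[string]blockInfo,
	index map[string]bool,
) {
	root := finalizedRoot
	for {
		index[string(root)] = true
		if bytes.Equal(root, genesisRoot) || bytes.Equal(root, originCheckpointRoot) {
			return // nothing exists below genesis or the checkpoint origin
		}
		blk, ok := blocks[string(root)]
		if !ok {
			return // block not present in this simplified map; stop the walk
		}
		if index[string(blk.parentRoot)] {
			return // reconnected with the previously indexed finalized chain
		}
		root = blk.parentRoot
	}
}
```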

@@ -63,7 +63,9 @@ func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
blk.Block.Slot = 0
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Finalized genesis block doesn't exist in db")
}
@@ -207,7 +209,8 @@ func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []block.Signed
var err error
previousRoot, err = blocks[j-i].Block.HashTreeRoot()
require.NoError(t, err)
ifaceBlocks[j-i] = wrapper.WrappedPhase0SignedBeaconBlock(blocks[j-i])
ifaceBlocks[j-i], err = wrapper.WrappedSignedBeaconBlock(blocks[j-i])
require.NoError(t, err)
}
return ifaceBlocks
}
@@ -224,7 +227,7 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
var err error
previousRoot, err = blocks[j-startIdx].Block.HashTreeRoot()
require.NoError(t, err)
ifaceBlocks[j-startIdx], err = wrapper.WrappedAltairSignedBeaconBlock(blocks[j-startIdx])
ifaceBlocks[j-startIdx], err = wrapper.WrappedSignedBeaconBlock(blocks[j-startIdx])
require.NoError(t, err)
}
return ifaceBlocks

View File

@@ -4,8 +4,6 @@ import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
@@ -28,7 +26,11 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
if err != nil {
return errors.Wrap(err, "could not get genesis block root")
}
if err := s.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)); err != nil {
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
if err != nil {
return errors.Wrap(err, "could not wrap genesis block")
}
if err := s.SaveBlock(ctx, wsb); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
@@ -50,14 +52,10 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
return nil
}
// LoadGenesis loads a genesis state from a given file path, if no genesis exists already.
func (s *Store) LoadGenesis(ctx context.Context, r io.Reader) error {
b, err := ioutil.ReadAll(r)
if err != nil {
return err
}
// LoadGenesis loads a genesis state from an ssz-serialized byte slice, if no genesis exists already.
func (s *Store) LoadGenesis(ctx context.Context, sb []byte) error {
st := &ethpb.BeaconState{}
if err := st.UnmarshalSSZ(b); err != nil {
if err := st.UnmarshalSSZ(sb); err != nil {
return err
}
gs, err := statev1.InitializeFromProtoUnsafe(st)

View File

@@ -53,21 +53,16 @@ func TestLoadGenesisFromFile(t *testing.T) {
if err == nil {
fp = rfp
}
r, err := os.Open(fp)
sb, err := os.ReadFile(fp)
assert.NoError(t, err)
defer func() {
assert.NoError(t, r.Close())
}()
db := setupDB(t)
assert.NoError(t, db.LoadGenesis(context.Background(), r))
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
testGenesisDataSaved(t, db)
// Loading the same genesis again should not throw an error
_, err = r.Seek(0, 0)
assert.NoError(t, err)
assert.NoError(t, db.LoadGenesis(context.Background(), r))
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
}
func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
@@ -76,15 +71,12 @@ func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
if err == nil {
fp = rfp
}
r, err := os.Open(fp)
sb, err := os.ReadFile(fp)
assert.NoError(t, err)
defer func() {
assert.NoError(t, r.Close())
}()
// Loading a genesis with the wrong fork version as beacon config should throw an error.
db := setupDB(t)
assert.ErrorContains(t, "does not match config genesis fork version", db.LoadGenesis(context.Background(), r))
assert.ErrorContains(t, "does not match config genesis fork version", db.LoadGenesis(context.Background(), sb))
}
func TestEnsureEmbeddedGenesis(t *testing.T) {

View File

@@ -51,7 +51,9 @@ var (
altairKey = []byte("altair")
bellatrixKey = []byte("merge")
// block root included in the beacon state used by weak subjectivity initial sync
originBlockRootKey = []byte("origin-block-root")
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
backfillBlockRootKey = []byte("backfill-block-root")
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived")

View File

@@ -346,19 +346,31 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(checkpointBucket)
enc := bkt.Get(finalizedCheckpointKey)
checkpoint := &ethpb.Checkpoint{}
finalized := &ethpb.Checkpoint{}
if enc == nil {
checkpoint = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, checkpoint); err != nil {
finalized = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, finalized); err != nil {
return err
}
enc = bkt.Get(justifiedCheckpointKey)
justified := &ethpb.Checkpoint{}
if enc == nil {
justified = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, justified); err != nil {
return err
}
blockBkt := tx.Bucket(blocksBucket)
headBlkRoot := blockBkt.Get(headBlockRootKey)
bkt = tx.Bucket(stateBucket)
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
// Safeguard against deleting the genesis, finalized, or justified state.
if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
return ErrDeleteJustifiedAndFinalized
}
// Nothing to delete if state doesn't exist.
enc = bkt.Get(blockRoot[:])
if enc == nil {
return nil
}
slot, err := s.slotByBlockRoot(ctx, tx, blockRoot[:])
@@ -638,7 +650,11 @@ func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []by
if err != nil {
return 0, err
}
if err := helpers.BeaconBlockIsNil(wrapper.WrappedPhase0SignedBeaconBlock(b)); err != nil {
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
if err != nil {
return 0, err
}
if err := helpers.BeaconBlockIsNil(wsb); err != nil {
return 0, err
}
return b.Block.Slot, nil

View File

@@ -110,3 +110,12 @@ func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
s.stateSummaryCache.clear()
return nil
}
// deleteStateSummary deletes a state summary object from the db using the input block root.
func (s *Store) deleteStateSummary(blockRoot [32]byte) error {
s.stateSummaryCache.delete(blockRoot)
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSummaryBucket)
return bucket.Delete(blockRoot[:])
})
}

View File

@@ -37,6 +37,13 @@ func (c *stateSummaryCache) has(r [32]byte) bool {
return ok
}
// delete removes a state summary from the cache.
func (c *stateSummaryCache) delete(r [32]byte) {
c.initSyncStateSummariesLock.Lock()
defer c.initSyncStateSummariesLock.Unlock()
delete(c.initSyncStateSummaries, r)
}
// get retrieves a state summary from the initial sync state summaries cache using the root of
// the block.
func (c *stateSummaryCache) get(r [32]byte) *ethpb.StateSummary {

View File

@@ -62,3 +62,17 @@ func TestStateSummary_CacheToDB(t *testing.T) {
require.Equal(t, true, db.HasStateSummary(context.Background(), bytesutil.ToBytes32(r)))
}
}
func TestStateSummary_CanDelete(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
r1 := bytesutil.ToBytes32([]byte{'A'})
s1 := &ethpb.StateSummary{Slot: 1, Root: r1[:]}
require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
require.NoError(t, db.SaveStateSummary(ctx, s1))
require.Equal(t, true, db.HasStateSummary(ctx, r1), "State summary should be saved")
require.NoError(t, db.deleteStateSummary(r1))
require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
}

View File

@@ -374,7 +374,9 @@ func TestStore_StatesBatchDelete(t *testing.T) {
for i := 0; i < len(totalBlocks); i++ {
b := util.NewBeaconBlock()
b.Block.Slot = types.Slot(i)
totalBlocks[i] = wrapper.WrappedPhase0SignedBeaconBlock(b)
var err error
totalBlocks[i], err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := totalBlocks[i].Block().HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -410,7 +412,7 @@ func TestStore_DeleteGenesisState(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(ctx, st, genesisBlockRoot))
wantedErr := "cannot delete genesis, finalized, or head state"
wantedErr := "cannot delete finalized block or state"
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, genesisBlockRoot))
}
@@ -425,7 +427,9 @@ func TestStore_DeleteFinalizedState(t *testing.T) {
blk.Block.ParentRoot = genesis[:]
blk.Block.Slot = 100
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
finalizedBlockRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
@@ -436,7 +440,7 @@ func TestStore_DeleteFinalizedState(t *testing.T) {
require.NoError(t, db.SaveState(ctx, finalizedState, finalizedBlockRoot))
finalizedCheckpoint := &ethpb.Checkpoint{Root: finalizedBlockRoot[:]}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, finalizedCheckpoint))
wantedErr := "cannot delete genesis, finalized, or head state"
wantedErr := "cannot delete finalized block or state"
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, finalizedBlockRoot))
}
@@ -450,7 +454,9 @@ func TestStore_DeleteHeadState(t *testing.T) {
blk := util.NewBeaconBlock()
blk.Block.ParentRoot = genesis[:]
blk.Block.Slot = 100
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
headBlockRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
@@ -459,8 +465,7 @@ func TestStore_DeleteHeadState(t *testing.T) {
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(ctx, st, headBlockRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
wantedErr := "cannot delete genesis, finalized, or head state"
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, headBlockRoot))
require.NoError(t, db.DeleteState(ctx, headBlockRoot)) // Ok to delete head state if it's optimistic.
}
func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
@@ -470,7 +475,9 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
@@ -480,7 +487,9 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
b.Block.Slot = 100
r1, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
st, err = util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(100))
@@ -490,7 +499,9 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
b.Block.Slot = 1000
r2, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
st, err = util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1000))
@@ -524,7 +535,9 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
st, err := util.NewBeaconState()
require.NoError(t, err)
@@ -561,7 +574,9 @@ func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) {
b.Block.ParentRoot = prevRoot[:]
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
bRoots = append(bRoots, r)
prevRoot = r
@@ -600,7 +615,9 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) {
b.Block.Slot = i
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
st, err := util.NewBeaconState()
require.NoError(t, err)
@@ -628,7 +645,9 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
b.Block.Slot = i
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
unfinalizedRoots = append(unfinalizedRoots, r)
st, err := util.NewBeaconState()

View File

@@ -52,7 +52,9 @@ func TestStore_LastValidatedCheckpoint_DefaultIsFinalized(t *testing.T) {
}
// a valid chain is required to save finalized checkpoint.
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))

View File

@@ -2,74 +2,87 @@ package kv
import (
"context"
"io"
"io/ioutil"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
)
// SaveOrigin accepts an ssz-serialized Block & BeaconState and prepares the
// database so that the beacon node can begin syncing, using the provided values
// as their point of origin. This is an alternative to syncing from genesis,
// and should only be run on an empty database.
func (s *Store) SaveOrigin(ctx context.Context, stateReader, blockReader io.Reader) error {
sb, err := ioutil.ReadAll(stateReader)
func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error {
genesisRoot, err := s.GenesisBlockRoot(ctx)
if err != nil {
return errors.Wrap(err, "failed to read origin state bytes")
if errors.Is(err, ErrNotFoundGenesisBlockRoot) {
return errors.Wrap(err, "genesis block root not found: genesis must be provided for checkpoint sync")
}
return errors.Wrap(err, "genesis block root query error: checkpoint sync must verify genesis to proceed")
}
bb, err := ioutil.ReadAll(blockReader)
err = s.SaveBackfillBlockRoot(ctx, genesisRoot)
if err != nil {
return errors.Wrap(err, "error reading block given to SaveOrigin")
return errors.Wrap(err, "unable to save genesis root as initial backfill starting point for checkpoint sync")
}
cf, err := detect.FromState(sb)
cf, err := detect.FromState(serState)
if err != nil {
return errors.Wrap(err, "failed to detect config and fork for origin state")
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
}
bs, err := cf.UnmarshalBeaconState(sb)
if err != nil {
return errors.Wrap(err, "could not unmarshal origin state")
}
wblk, err := cf.UnmarshalBeaconBlock(bb)
if err != nil {
return errors.Wrap(err, "unable to unmarshal origin SignedBeaconBlock")
_, ok := params.BeaconConfig().ForkVersionSchedule[cf.Version]
if !ok {
return fmt.Errorf("config mismatch, beacon node configured to connect to %s, detected state is for %s", params.BeaconConfig().ConfigName, cf.Config.ConfigName)
}
blockRoot, err := wblk.Block().HashTreeRoot()
log.Infof("detected supported config for state & block version, config name=%s, fork name=%s", cf.Config.ConfigName, version.String(cf.Fork))
state, err := cf.UnmarshalBeaconState(serState)
if err != nil {
return errors.Wrap(err, "failed to initialize origin state w/ bytes + config+fork")
}
wblk, err := cf.UnmarshalBeaconBlock(serBlock)
if err != nil {
return errors.Wrap(err, "failed to initialize origin block w/ bytes + config+fork")
}
blk := wblk.Block()
// save block
blockRoot, err := blk.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}
// save block
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")
}
// save state
if err = s.SaveState(ctx, bs, blockRoot); err != nil {
log.Infof("calling SaveState w/ blockRoot=%x", blockRoot)
if err = s.SaveState(ctx, state, blockRoot); err != nil {
return errors.Wrap(err, "could not save state")
}
if err = s.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: bs.Slot(),
Slot: state.Slot(),
Root: blockRoot[:],
}); err != nil {
return errors.Wrap(err, "could not save state summary")
}
// save origin block root in special key, to be used when the canonical
// origin (start of chain, ie alternative to genesis) block or state is needed
if err = s.SaveOriginBlockRoot(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save origin block root")
}
// mark block as head of chain, so that processing will pick up from this point
if err = s.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
}
// save origin block root in a special key, to be used when the canonical
// origin (start of chain, ie alternative to genesis) block or state is needed
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save origin block root")
}
// rebuild the checkpoint from the block
// use it to mark the block as justified and finalized
slotEpoch, err := wblk.Block().Slot().SafeDivSlot(params.BeaconConfig().SlotsPerEpoch)

View File

@@ -0,0 +1,49 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/genesis"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestSaveOrigin(t *testing.T) {
// Embedded Genesis works with Mainnet config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.ConfigName = params.ConfigNames[params.Mainnet]
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
db := setupDB(t)
st, err := genesis.State(params.Mainnet.String())
require.NoError(t, err)
sb, err := st.MarshalSSZ()
require.NoError(t, err)
require.NoError(t, db.LoadGenesis(ctx, sb))
// this is necessary for mainnet, because LoadGenesis is short-circuited by the embedded state,
// so the genesis root key is never written to the db.
require.NoError(t, db.EnsureEmbeddedGenesis(ctx))
cst, err := util.NewBeaconState()
require.NoError(t, err)
csb, err := cst.MarshalSSZ()
require.NoError(t, err)
cb := util.NewBeaconBlock()
scb, err := wrapper.WrappedSignedBeaconBlock(cb)
require.NoError(t, err)
cbb, err := scb.MarshalSSZ()
require.NoError(t, err)
require.NoError(t, db.SaveOrigin(ctx, csb, cbb))
broot, err := scb.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, true, db.IsFinalizedBlock(ctx, broot))
}

View File

@@ -27,7 +27,9 @@ func TestRestore(t *testing.T) {
require.NoError(t, err)
head := util.NewBeaconBlock()
head.Block.Slot = 5000
require.NoError(t, backupDb.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(head)))
wsb, err := wrapper.WrappedSignedBeaconBlock(head)
require.NoError(t, err)
require.NoError(t, backupDb.SaveBlock(ctx, wsb))
root, err := head.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()

View File

@@ -12,6 +12,7 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",

View File

@@ -19,14 +19,16 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -45,6 +47,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/forkchoice/types:go_default_library",
"//config/params:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -3,8 +3,8 @@ package doublylinkedtree
import "errors"
var ErrNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
var errUnknownPayloadHash = errors.New("unknown payload hash")

View File

@@ -2,12 +2,15 @@ package doublylinkedtree
import (
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -18,6 +21,7 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
finalizedEpoch: finalizedEpoch,
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
pruneThreshold: defaultPruneThreshold,
}
@@ -168,7 +172,7 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
}
// IsOptimistic returns true if the given root has been optimistically synced.
func (f *ForkChoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error) {
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
@@ -249,9 +253,21 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
return ErrNilNode
}
if currentNode.balance < oldBalance {
return errInvalidBalance
f.store.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
"oldBalance": oldBalance,
"nodeBalance": currentNode.balance,
"nodeWeight": currentNode.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
"previousProposerBoostScore": f.store.previousProposerBoostScore,
}).Warning("node with invalid balance, setting it to zero")
f.store.proposerBoostLock.RUnlock()
currentNode.balance = 0
} else {
currentNode.balance -= oldBalance
}
currentNode.balance -= oldBalance
}
}
@@ -302,6 +318,6 @@ func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
}
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.removeNode(ctx, root)
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.setOptimisticToInvalid(ctx, root, payloadHash)
}


@@ -58,6 +58,30 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{125, 125, 125}
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0},
{indexToHash(2), indexToHash(2), 0},
{indexToHash(3), indexToHash(3), 0},
}
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_IsCanonical(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()


@@ -3,9 +3,12 @@ package doublylinkedtree
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
var (
log = logrus.WithField("prefix", "forkchoice-doublylinkedtree")
headSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_head_slot",


@@ -109,7 +109,7 @@ func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) boo
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
}
-// setNodeAndParentValidated sets the current node and the parent as validated (i.e. non-optimistic).
+// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
if ctx.Err() != nil {
return ctx.Err()
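The docstring fix above reflects that validation now propagates through every ancestor, not just the immediate parent. A simplified sketch of such a walk, on a stripped-down node type (the real Node carries more fields, locking, and context handling):

package main

import "fmt"

// node is a stripped-down stand-in for the fork choice Node; only the parent
// link and the optimistic flag matter for this sketch.
type node struct {
    parent     *node
    optimistic bool
}

// setValidated marks the node and every ancestor as no longer optimistic,
// which is what the updated setNodeAndParentValidated docstring describes.
func (n *node) setValidated() {
    for cur := n; cur != nil; cur = cur.parent {
        cur.optimistic = false
    }
}

func main() {
    root := &node{optimistic: true}
    mid := &node{parent: root, optimistic: true}
    leaf := &node{parent: mid, optimistic: true}
    leaf.setValidated()
    fmt.Println(root.optimistic, mid.optimistic, leaf.optimistic) // false false false
}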


@@ -180,27 +180,27 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
-opt, err := f.IsOptimistic(ctx, indexToHash(5))
+opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
-opt, err = f.IsOptimistic(ctx, indexToHash(4))
+opt, err = f.IsOptimistic(indexToHash(4))
require.NoError(t, err)
require.Equal(t, true, opt)
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
// block 5 should still be optimistic
-opt, err = f.IsOptimistic(ctx, indexToHash(5))
+opt, err = f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
// block 4 and 3 should now be valid
-opt, err = f.IsOptimistic(ctx, indexToHash(4))
+opt, err = f.IsOptimistic(indexToHash(4))
require.NoError(t, err)
require.Equal(t, false, opt)
-opt, err = f.IsOptimistic(ctx, indexToHash(3))
+opt, err = f.IsOptimistic(indexToHash(3))
require.NoError(t, err)
require.Equal(t, false, opt)
}


@@ -2,22 +2,54 @@ package doublylinkedtree
import (
"context"
"github.com/prysmaticlabs/prysm/config/params"
)
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, payloadHash [32]byte) ([][32]byte, error) {
s.nodesLock.Lock()
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
s.nodesLock.Unlock()
return invalidRoots, ErrNilNode
}
// Check if last valid hash is an ancestor of the passed node.
lastValid, ok := s.nodeByPayload[payloadHash]
if !ok || lastValid == nil {
s.nodesLock.Unlock()
return invalidRoots, errUnknownPayloadHash
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
s.nodesLock.Unlock()
return invalidRoots, ctx.Err()
}
}
// If the last valid payload is in a different fork, we remove only the
// passed node.
if firstInvalid.parent == nil {
firstInvalid = node
}
s.nodesLock.Unlock()
return s.removeNode(ctx, firstInvalid)
}
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
-func (s *Store) removeNode(ctx context.Context, root [32]byte) ([][32]byte, error) {
+func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
invalidRoots := make([][32]byte, 0)
-node, ok := s.nodeByRoot[root]
-if !ok || node == nil {
+if node == nil {
return invalidRoots, ErrNilNode
}
if !node.optimistic || node.parent == nil {
return invalidRoots, errInvalidOptimisticStatus
}
children := node.parent.children
if len(children) == 1 {
node.parent.children = []*Node{}
@@ -47,6 +79,16 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
}
}
invalidRoots = append(invalidRoots, node.root)
s.proposerBoostLock.Lock()
if node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
s.proposerBoostLock.Unlock()
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return invalidRoots, nil
}
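setOptimisticToInvalid above climbs parent pointers from the newly invalidated block until it finds the child of the node whose payload matches the last valid hash; everything from that child down is removed, and if the last valid payload is not among the ancestors, only the passed block is removed. A toy reduction of that walk (toyNode and firstInvalid are illustrative names, not part of the change):

package main

import "fmt"

// toyNode is a miniature stand-in for the fork choice node; real payload
// hashes are [32]byte values, a single byte is enough here.
type toyNode struct {
    name        string
    payloadHash byte
    parent      *toyNode
}

// firstInvalid returns the oldest ancestor of n that is not descended from
// the node carrying lastValidHash; that node and its descendants get pruned.
// If the walk reaches the root without finding lastValidHash, only n itself
// is treated as invalid (the "different fork" case in the diff).
func firstInvalid(n *toyNode, lastValidHash byte) *toyNode {
    cur := n
    for ; cur.parent != nil && cur.parent.payloadHash != lastValidHash; cur = cur.parent {
    }
    if cur.parent == nil {
        return n
    }
    return cur
}

func main() {
    a := &toyNode{name: "a", payloadHash: 'A'}
    b := &toyNode{name: "b", payloadHash: 'B', parent: a}
    c := &toyNode{name: "c", payloadHash: 'C', parent: b}
    d := &toyNode{name: "d", payloadHash: 'D', parent: c}

    // Engine reported 'B' as the last valid payload: c is the first invalid
    // node, so c and its descendant d would be removed.
    fmt.Println(firstInvalid(d, 'B').name) // c

    // Last valid payload unknown among d's ancestors: only d is removed.
    fmt.Println(firstInvalid(d, 'X').name) // d
}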


@@ -24,32 +24,71 @@ import (
func TestPruneInvalid(t *testing.T) {
tests := []struct {
root [32]byte // the root of the new INVALID block
payload [32]byte // the last valid hash
wantedNodeNumber int
wantedRoots [][32]byte
}{
{
[32]byte{'j'},
[32]byte{'B'},
12,
[][32]byte{[32]byte{'j'}},
},
{
[32]byte{'c'},
[32]byte{'B'},
4,
[][32]byte{[32]byte{'f'}, [32]byte{'e'}, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'},
[32]byte{'k'}, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'c'}},
},
{
[32]byte{'i'},
[32]byte{'H'},
12,
[][32]byte{[32]byte{'i'}},
},
{
[32]byte{'h'},
[32]byte{'G'},
11,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}},
},
{
[32]byte{'g'},
[32]byte{'D'},
8,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
},
{
[32]byte{'i'},
[32]byte{'D'},
8,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
},
{
[32]byte{'f'},
[32]byte{'D'},
11,
[][32]byte{[32]byte{'f'}, [32]byte{'e'}},
},
{
[32]byte{'h'},
[32]byte{'C'},
5,
[][32]byte{
[32]byte{'f'},
[32]byte{'e'},
[32]byte{'i'},
[32]byte{'h'},
[32]byte{'l'},
[32]byte{'k'},
[32]byte{'g'},
[32]byte{'d'},
},
},
{
[32]byte{'g'},
[32]byte{'E'},
8,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
},
@@ -58,22 +97,45 @@ func TestPruneInvalid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
-require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
-require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
+require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
-roots, err := f.store.removeNode(context.Background(), tc.root)
+roots, err := f.store.setOptimisticToInvalid(context.Background(), tc.root, tc.payload)
require.NoError(t, err)
require.DeepEqual(t, tc.wantedRoots, roots)
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
}
}
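For reference, the block tree built by the InsertOptimisticBlock calls above, reconstructed from the parent arguments; the node counts in the test table suggest setup() contributes one additional zero-hash root on top of these twelve blocks:

// zeroHash ── a ── b ─┬─ c ── d ─┬─ e ── f
//                     │          └─ g ─┬─ h ── i
//                     │                └─ k ── l
//                     └─ j
//
// Example: invalidating 'c' with last valid payload 'B' prunes c, d, e, f,
// g, h, i, k, l (nine roots), leaving the four nodes zeroHash, a, b and j,
// which matches the {'c', 'B', 4, ...} test case.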
// This is a regression test (10445)
func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = [32]byte{'c'}
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
f.store.proposerBoostLock.Unlock()
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'A'})
require.NoError(t, err)
f.store.proposerBoostLock.RLock()
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}


@@ -2,12 +2,10 @@ package doublylinkedtree
import (
"context"
"time"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/time/slots"
)
// BoostProposerRoot sets the block root which should be boosted during
@@ -19,17 +17,18 @@ import (
// is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
// if get_current_slot(store) == block.slot and is_before_attesting_interval:
// store.proposer_boost_root = hash_tree_root(block)
-func (f *ForkChoice) BoostProposerRoot(_ context.Context, blockSlot types.Slot, blockRoot [32]byte, genesisTime time.Time) error {
+func (f *ForkChoice) BoostProposerRoot(_ context.Context, args *forkchoicetypes.ProposerBoostRootArgs) error {
+if args == nil {
+return errors.New("nil function args provided to BoostProposerRoot")
+}
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
-timeIntoSlot := uint64(time.Since(genesisTime).Seconds()) % secondsPerSlot
-isBeforeAttestingInterval := timeIntoSlot < secondsPerSlot/params.BeaconConfig().IntervalsPerSlot
-currentSlot := slots.SinceGenesis(genesisTime)
+isBeforeAttestingInterval := args.SecondsIntoSlot < secondsPerSlot/params.BeaconConfig().IntervalsPerSlot
// Only update the boosted proposer root to the incoming block root
// if the block is for the current, clock-based slot and the block was timely.
-if currentSlot == blockSlot && isBeforeAttestingInterval {
+if args.CurrentSlot == args.BlockSlot && isBeforeAttestingInterval {
f.store.proposerBoostLock.Lock()
-f.store.proposerBoostRoot = blockRoot
+f.store.proposerBoostRoot = args.BlockRoot
f.store.proposerBoostLock.Unlock()
}
return nil
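BoostProposerRoot now receives the clock-derived inputs instead of computing them from genesisTime itself. A sketch of how a caller could derive those inputs, mirroring the removed computation; boostArgs is a local stand-in for forkchoicetypes.ProposerBoostRootArgs and assumes only the fields used above:

package main

import (
    "fmt"
    "time"
)

// boostArgs stands in for forkchoicetypes.ProposerBoostRootArgs; the real
// struct lives in beacon-chain/forkchoice/types and carries at least the
// fields referenced in the diff (BlockRoot, BlockSlot, CurrentSlot,
// SecondsIntoSlot).
type boostArgs struct {
    BlockRoot       [32]byte
    BlockSlot       uint64
    CurrentSlot     uint64
    SecondsIntoSlot uint64
}

// buildBoostArgs derives the clock-based inputs that BoostProposerRoot
// previously computed internally from genesisTime. secondsPerSlot is 12 on
// mainnet.
func buildBoostArgs(blockRoot [32]byte, blockSlot uint64, genesisTime time.Time, secondsPerSlot uint64) boostArgs {
    elapsed := uint64(time.Since(genesisTime).Seconds())
    return boostArgs{
        BlockRoot:       blockRoot,
        BlockSlot:       blockSlot,
        CurrentSlot:     elapsed / secondsPerSlot,
        SecondsIntoSlot: elapsed % secondsPerSlot,
    }
}

func main() {
    genesis := time.Now().Add(-50 * time.Second) // pretend genesis was 50s ago
    args := buildBoostArgs([32]byte{'r'}, 4, genesis, 12)
    fmt.Printf("slot=%d secondsIntoSlot=%d\n", args.CurrentSlot, args.SecondsIntoSlot)
    // With 12-second slots: slot=4, secondsIntoSlot=2, which is inside the
    // attesting interval (12/3 = 4s), so a slot-4 block would be boosted.
}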

Some files were not shown because too many files have changed in this diff.