Compare commits

...

100 Commits

Author SHA1 Message Date
nisdas
38bc62b3ff save progress 2022-04-04 19:29:32 +08:00
nisdas
7f637d72b2 fix error log 2022-04-04 19:24:44 +08:00
nisdas
6ea8c3b2fc Merge remote-tracking branch 'origin/addLockAnalyzer' into addLockAnalyzer 2022-04-04 17:15:21 +08:00
nisdas
99f33630f6 fix failures 2022-04-04 17:15:10 +08:00
Nishant Das
fa2f17662a Merge branch 'develop' into addLockAnalyzer 2022-04-04 15:43:00 +08:00
nisdas
88b4b68204 progress 2022-04-04 15:42:35 +08:00
Mohamed Zahoor
896d186e3b process the optimistic blocks whose parents are optimistic too (#10350)
* process the optimistic blocks whose parents are optimistic too

* adding unit test

* fix test

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-04-03 23:57:52 -03:00
nisdas
624e124f38 fix locks 2022-04-04 08:46:19 +08:00
nisdas
e48ae09762 Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into addLockAnalyzer 2022-04-03 23:28:45 +08:00
Raul Jordan
edb98cf499 Move Engine API to Powchain and Consolidate Connection Management (#10438)
* gazelle

* tests passing

* use the same engine client

* pass

* initialize in right place

* merge

* build

* imports

* ensure engine checks work

* pass powchain tests

* powchain tests pass

* deepsource

* fix up node issues

* gaz

* endpoint use

* baz

* b

* conf

* lint

* gaz

* move to start function

* test pass
2022-04-01 14:04:24 -04:00
Potuz
ce15823f8d Save head after FCU (#10466)
* Update head after FCU

* rename function

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-04-01 00:08:01 +00:00
terence tsao
7cdc741b2f Sync: don't set block to bad due to timeout (#10470)
* Add filter error and tests

* Update BUILD.bazel

* Kasey's feedback
2022-03-31 23:29:27 +00:00
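
The fix above rests on a simple rule: a processing failure caused by a timeout says nothing about the block itself, so the block must not be cached as bad. A minimal sketch of that error filter in Go, using only the standard library (the helper name is illustrative, not Prysm's actual API):

package main

import (
	"context"
	"errors"
	"fmt"
)

// shouldMarkBlockBad is a hypothetical helper: mark a block bad only when the
// failure is attributable to the block, not to a node-side timeout or cancellation.
func shouldMarkBlockBad(err error) bool {
	if err == nil {
		return false
	}
	if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
		return false // the node ran out of time; the block may still be valid
	}
	return true
}

func main() {
	fmt.Println(shouldMarkBlockBad(context.DeadlineExceeded))     // false
	fmt.Println(shouldMarkBlockBad(errors.New("bad state root"))) // true
}
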
terence tsao
5704fb34be Remove duplicated engine mock (#10472)
* Rm mock

* update

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-03-31 19:14:09 -04:00
Preston Van Loon
3e2a037d42 github workflows: pin go version to 1.17 (#10471)
* github workflows: pin go version to 1.17

* Update go.yml

* Revert "Update go.yml"

This reverts commit 4a2d36d05d.

* pin golangci-lint

* try go 1.17

* Revert "Revert "Update go.yml""

This reverts commit 8a89663874.

* move and increase version of checkout

* attempt to ignore export path

* try with entrypoint only

* try some rearranging of stuff

* Split up jobs

* Use hack mentioned in https://github.com/securego/gosec/issues/469#issuecomment-643823092

* Delete dappnode release trigger

* rm id

* try pin golangci-lint version

* try pin golangci-lint version

* Do not provide a specific go version and lets see what happens

* comment checkout, wtf is wrong with github actions

* it works locally...

* trying with some cache key for lint...

* Revert "trying with some cache key for lint..."

This reverts commit c4f5ae4495.

* try telling it to skip go installation

* revert commented line, do something to satisfy deepsource

* do something to satisfy deepsource
2022-03-31 22:16:14 +00:00
Potuz
177f9ccab0 Return historical non-canonical blocks as optimistic (#10446)
* Return historical non-canonical blocks as optimistic

* terence's review
2022-03-31 12:51:14 +00:00
Potuz
649dee532f insert block to forkchoice after notifyNewPayload (#10453)
* insert block to forkchoice after notifyNewPayload

* Add optimistic candidate check

* Terence's review

* Apply suggestions from code review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-31 09:17:49 -03:00
terence tsao
4b3364ac6b Log terminal total difficulty status (#10457)
* Log terminal total diff status

* Update check_transition_config.go

* Update BUILD.bazel

* Update check_transition_config.go

* Update check_transition_config.go

* Update check_transition_config.go
2022-03-31 03:05:23 +00:00
kasey
0df8d7f0c0 refactor genesis state flag handling, support url (#10449)
* refactor genesis state flag handling, support url

* lint fix

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-03-30 22:23:34 +00:00
Mike Neuder
ade7d705ec Cleanup of Keymanager Deleter interface (#10415)
* Migrating Keymanager account list functionality into each keymanager type

* Addressing review comments

* Adding newline at end of BUILD.bazel

* bazel run //:gazelle -- fix

* account deleter cleanup

* bazel run //:gazelle -- fix

* remove stale logging statement

* adding deleter interface to mock functions

* fixing deepsource findings

* go.sum

* bazel run //:gazelle -- fix

* go mod tidy -compat=1.17

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-03-30 16:52:54 -05:00
Preston Van Loon
c68894b77d Spectest: Improve test size and timeouts (#10461)
* Adjust test sizes

* Improve the bazel test attributes for forkchoice and other tests

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-30 20:04:42 +00:00
terence tsao
fe98d69c0a Clean up blockchain pkg (#10452)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-30 17:45:24 +00:00
terence tsao
7516bc0316 Fix execution block's base fee endianness marshal/unmarshal (#10459) 2022-03-30 10:17:18 -07:00
Radosław Kapka
be6f3892e8 Align Beacon API with 2.2.0 spec (#10455) 2022-03-30 15:17:19 +00:00
Potuz
588605ceeb Remove TODOs that were already taken care of (#10454) 2022-03-30 13:24:02 +00:00
terence tsao
59b9519284 Forkchoice spec test: set boost with deterministic timing (#10428)
* Set boost with deterministic timing

* confs

* gaz

* Update runner.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-03-29 19:48:33 +00:00
Nishant Das
d4038fb752 add rlock (#10444) 2022-03-29 06:55:41 -07:00
Raul Jordan
56ab5872bb Add Histogram Response Buckets to Engine API Methods (#10414)
* gaz

* defer payload func

* amend

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-28 22:17:31 +00:00
Raul Jordan
eb6d68a4b1 Deterministic Proposer Root Boosting in Forkchoice (#10427)
* begin refactor

* fix doubly linked tree tests

* pass

* fix up

* gaz

* buidl

* buidl more

* comment

* args check

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-28 21:34:41 +00:00
kasey
7920528ede Checkpoint Sync 4/5 - enable checkpoint sync to be used by beacon node (#10386)
* enable checkpoint sync in beacon node

* lint fix

* rm unused error

* addressing PR feedback from Radek

* consistent slice -> fixed conversion

Co-authored-by: kasey <kasey@users.noreply.github.com>
2022-03-28 21:01:55 +00:00
Raul Jordan
1af3c07ec5 Stop Checking Transition Configuration Post-Merge (#10413)
* no longer check transition config after the merge

* nil check

* better payload check

* stop checking post merge based on received blocks

* add proper test

* gaz

* fix test

* lint

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-28 17:14:02 +00:00
terence tsao
7279349ae2 Cleanup helpers in beacon-chain/core/blocks/payload.go (#10435)
* less fragile check for execution payload

* gaz

* gaz

* include field

* clean up helpers for checking merge status

* Update beacon-chain/core/blocks/payload.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* Update payload.go

* Rename

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-03-28 15:25:49 +00:00
Potuz
2d60d04b57 Update LastValidatedCheckpoint on DB (#10412)
* Update LastValidatedCheckpoint on DB

* fix conflicts
2022-03-28 14:48:14 +00:00
Nishant Das
ef8bc97d3e more efficient (#10440) 2022-03-28 20:27:14 +08:00
Nishant Das
071f6de559 Fix Beacon API Responses For Bellatrix (#10436)
* fix bugs

* fix build

* simplify it
2022-03-26 07:34:29 +00:00
Preston Van Loon
b697463da9 wrapper: Remove deprecated fork specific wrapper methods for signed beacon blocks (#10369)
* Remove deprecated wrappers, lots of refactoring

* Revert proto/prysm/v1alpha1/validator.proto

* fix tests

* fix test

* fix conversion in e2e

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-25 23:00:44 +00:00
kasey
bfbf693660 Checkpoint Sync 3/5 - beacon node api client lib and prysmctl cli tool (#10385)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-03-25 17:18:03 +00:00
Radosław Kapka
ed07f4bd77 Add execution_optimistic field to API responses (#10389)
* protos

* grpc

* middleware

* event protos

* event backend

* event middleware

* fix tests

* use `IsOptimisticForRoot` with proper block instead of header

* better algorithm

* return proper error

* review

* optimistic return on bellatrix proposal

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-25 16:31:50 +00:00
terence tsao
1922416cac Use correct pre state to call new payload (#10416)
* Update todo strings

* Go fmt

* add merge specific checks when receiving a block from gossip

* Fix beacon chain build

* Interop merge beacon state

* fix finding Transactions size

* Update go commit

* Merge union debugging (#9751)

* changes test cases per ssz changes

* noisy commit, restoring pb field order codegen

* get rid of codegen garbage

* M2 works with Geth  🎉

* Fix bazel build //...

* restoring generated pb field ordering

* defensive nil check

* separate ExecutionPayload/Header from codegen

* tell bazel about this new file

* Merge: support terminal difficulty override (#9769)

* Fix finding terminal block hash calculation

* Update mainnet_config.go

* Update beacon_block.pb.go

* Various fixes to pass all spec tests for Merge (#9777)

* Proper upgrade altair to merge state

* Use uint64 for ttd

* Correctly upgrade to merge state + object mapping fixes

* Use proper receive block path for initial syncing

* Disable contract lookback

* Disable deposit contract lookback

* Go fmt

* Merge: switch from go bindings to raw rpc calls (#9803)

* Disable genesis ETH1.0 chain header logging

* Update htrutils.go

* all gossip tests passing

* Remove gas validations

* Update penalty params for Merge

* Fix gossip and tx size limits for the merge part 1

* Remove extraneous p2p condition

* Add and use

* Add and use TBH_ACTIVATION_EPOCH

* Update WORKSPACE

* Update Kintsugi engine API (#9865)

* Kintsugi ssz (#9867)

* All spec tests pass

* Update spec test shas

* Update Kintsugi consensus implementations (#9872)

* Remove secp256k1

* Remove unused merge genesis state gen tool

* Manually override nil transaction field. M2 works

* Fix bad hex conversion

* Change Gossip message size and Chunk Size from 1 MB to 10 MB (#9860)

* change gossip size and chunk size after merge

* change ssz to accommodate both changes

* gofmt config file

* add testcase for merge MsgId

* Update beacon-chain/p2p/message_id.go

Change MB to Mib in comment

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* change function name from altairMsgID to postAltairMsgID

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* Sync with develop

* Merge branch 'develop' of github.com:prysmaticlabs/prysm into kintsugi

* Update state_trie.go

* Clean up conflicts

* Fix build

* Update config to devnet1

* Fix state merge

* Handle merge test case for update balance

* Fix build

* State pkg cleanup

* Fix a bug with loading mainnet state

* Fix transactions root

* Add v2 endpoint for merge blocks (#9802)

* Add V2 blocks endpoint for merge blocks

* Update beacon-chain/rpc/apimiddleware/structs.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* go mod

* fix transactions

* Terence's comments

* add missing file

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Sync

* Go mod tidy

* change EP field names

* latest kintusgi execution api

* fix conflicts

* converting base fee to big endian format (#10018)

* ReverseByteOrder function does not mess the input

* sync with develop

* use merge gossip sizes

* correct gossip sizes this time

* visibility

* clean ups

* Sync with develop, fix payload nil check bug

* Speed up syncing, hide cosmetic errors

* Sync with develop

* Clean up after sync

* Update generate_keys.go

* sync with develop

* Update mainnet_config.go

* Clean ups

* Sync optimistically candidate blocks (#10193)

* Revert "Sync optimistically candidate blocks (#10193)"

This reverts commit f99a0419ef.

* Sync optimistically candidate blocks (#10193)

* allow optimistic sync

* Fix merge transition block validation

* Update proposer.go

* Sync with develop

* delete deprecated client, update testnet flag

* Change optimistic logic (#10194)

* Logs and err handling

* Fix build

* Clean ups

* Add back get payload

* c

* Done

* Rm uncommented

* Optimistic sync: prysm validator rpcs (#10200)

* Logs to reproduce

* Use pointers

* Use pointers

* Use pointers

* Update json_marshal_unmarshal.go

* Fix marshal

* Update json_marshal_unmarshal.go

* Log

* string total diff

* str

* marshal un

* set string

* json

* gaz

* Comment out optimistic status

* remove kiln flag here (#10269)

* Sync with develop

* Sync with develop

* clean ups

* refactor engine calls

* Update process_block.go

* Fix deadlock, uncomment duty opt sync

* Update proposer_execution_payload.go

* Sync with develop

* Rm post state check

* Bypass eth1 data checks

* Update proposer_execution_payload.go

* Return early if ttd is not reached

* Sync with develop

* Update process_block.go

* Update receive_block.go

* Update bzl

* Revert "Update receive_block.go"

This reverts commit 5b4a87c512.

* Fix run time

* Update go.mod

* Fix AltairCompatible to account for future state version

* Update proposer_execution_payload.go

* Handle pre state Altair with valid payload

* Handle pre state Altair with valid payload

* Log bellatrix fields

* Update log.go

* Friendly fee recipient log

* Remove extra SetOptimisticToValid

* Fix base fee per gas

* Fix notifypayload headroot

* clean up with develop branch

* Sync with develop

* Fix

* Fix

* fix

* Fix

* fix tests

* revert some changes

* fix tests

* Update optimistic_sync_test.go

* Simplify IsExecutionEnabledUsingHeader

Co-authored-by: Zahoor Mohamed <zahoor@prysmaticlabs.com>
Co-authored-by: Zahoor Mohamed <zahoor@zahoor.in>
Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-25 16:03:15 +00:00
Potuz
a5dc40393d Allow inner protoarray nodes to become VALID (#10418)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-25 15:35:44 +00:00
Raul Jordan
b238235210 prevent run (#10432) 2022-03-25 11:08:38 -04:00
nisdas
e4bdeed3a6 add lock analyzer 2022-03-25 10:36:19 +08:00
terence tsao
73a6a85069 Ensure finalized root can't be zeros (#10422) 2022-03-24 23:55:54 +00:00
terence tsao
ac4a30588f Properly initialize replayer builder for rpc (#10426) 2022-03-24 23:12:47 +00:00
Raul Jordan
3579551f15 Use --http-web3provider for Execution Engine Connection (#10307)
* combine endpoints

* initialization in the start function instead

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-03-24 20:53:07 +00:00
Nishant Das
956e7a7563 Update Go-Ethereum (#10417)
* add changes

* fix CI

* wait here

* Fix gazelle with the correct directive for libp2p

Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-24 19:00:53 +00:00
kasey
acd7e62cfb fix issue #10420 (#10421)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-03-24 18:32:39 +00:00
Radosław Kapka
165f0667f0 Implement getDebugChainHeadsV2 API endpoint (#10419)
* protos

* grpc

* middleware

* update comments
2022-03-24 13:08:40 +00:00
kasey
49826ebe28 Checkpoint Sync 2/5 - API support for retrieving weak subjectivity data (#10384)
also refactor replayer code for better reuse:
- separately expose stategen's canonical block func
- CanonicalHistory in ws api, NewCanonicalBuilder
- refactor CanonicalHistory into a ReplayerBuilder

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-03-23 22:54:07 +00:00
terence tsao
3ad6abd9c0 Add tracings for engine api methods (#10407)
* Add engine call tracings

* Update BUILD.bazel

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-23 22:17:09 +00:00
terence tsao
c61f54176d Ignore attestations voting for the wrong finalized checkpoint (#10295)
* Ignore attestations voting for the wrong finalized checkpoint

* aggregate

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-23 21:48:26 +00:00
Potuz
0165351db3 Setup Optimistic sync when starting the service (#10397)
* Load Last Validated Checkpoint at service startup and initialize
Forkchoice

* fix spectests

* Update service_test.go

* wip

* Fix blockchain tests

* remove unnecessary summary saves

* Terence's suggestion

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-23 21:11:36 +00:00
Potuz
8f49167117 return invalid roots from SetOptimisticToInvalid (#10403)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-23 15:55:05 -03:00
Nishant Das
b4c27f64de Add Method To Modify Participation Bits (#10409)
* fix up all tests

* Update beacon-chain/state/v2/BUILD.bazel

* add tests
2022-03-23 23:18:02 +08:00
Preston Van Loon
e4ac5e74b7 RPC: Deduplicate block production logic (#10368)
* RPC: Deduplicate block production logic

* Surface error to caller

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-03-23 12:51:20 +00:00
Nishant Das
0b9b635646 Block Processing And State Transition Improvements (#10408)
* cleanup

* clean up more

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-03-23 11:57:48 +00:00
Leo Lara
c8c1d04c07 Complete coverage for checkpoint getters in beacon chain state (#10373)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-03-23 09:58:45 +00:00
james-prysm
601987faf2 Fee Recipient: UX log fixes and prevent validator from calling beacon node if flags are not provided. (#10406)
* initial commit

* improving logs
2022-03-22 13:13:59 -05:00
james-prysm
a060d765b3 rename prepare beacon proposer to fee recipient (#10404) 2022-03-22 15:00:50 +00:00
Potuz
5550334956 set optimistic status on notify payload (#10398)
* set optimistic status on notify payload

* pre-execution blocks are never optimistic

* add unit test

* add test for lengths

* Update optimistic_sync_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-22 03:35:02 +00:00
Mike Neuder
002253bba3 Migrating Keymanager account list functionality into each type (#10382)
* Migrating Keymanager account list functionality into each keymanager type

* Addressing review comments

* Adding newline at end of BUILD.bazel

* bazel run //:gazelle -- fix

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2022-03-22 03:04:09 +00:00
Radosław Kapka
c055642f79 Return correct header root from API calls (#10401)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-22 01:56:44 +00:00
terence tsao
c7d64c03ac Insert payload hash to forkchoice store nodes (#10400)
* Insert payload hash to forkchoice store nodes

* Fix tests

* Update new_slot_test.go

* Update beacon-chain/blockchain/process_block.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-03-22 01:20:42 +00:00
james-prysm
131fb43ba7 Fee recipient flag rename for beacon node (#10402)
* initial commit

* fixing comment

* fixing bazel
2022-03-21 21:25:33 +00:00
kasey
cf0bd633f0 Checkpoint Sync 1/5 - fork/version detection and unmarshaling support (#10380)
* fork/version detection and unmarshaling support

* Update config/params/config.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update proto/detect/configfork.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* PR feedback

* move ssz initialization into the detect package

* clarify comment

* VersionForEpoch is much simpler/clearer in reverse

* simpler VersionForEpoch; build AllConfigs in init

* use fieldparams for Version

* Update proto/detect/configfork_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* remove custom ForkName type, use runtime/version

* pr cleanup

* random fix from bad gh ui suggestion; privatize

* privatize fieldSpec methods; + unit tests

* Update proto/detect/configfork.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* fix bad github ui suggestion

* ensure unique versions for simpler config match

* fmt & adding unit test for ByState()

* table-driven unit test for ByState

* TestUnmarshalState

* OrderedSchedule -> network/forks per PR feedback

* goimports

* lint fixes

* move proto/detect -> ssz/encoding/detect

* use typeUndefined in String

* backport config tests from e2e PR

* fix config parity test; make debugging it easier

* lint

* fix fork schedule initialization

* cleanup

* fix build

* fix big ole derp

* anything for you, deep source

* goimportsss

* InitializeForkSchedule in LoadChainConfigFile

* PR feedback

Co-authored-by: kasey <kasey@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-03-21 19:43:41 +00:00
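
The detection scheme sketched in the bullets above ("ensure unique versions for simpler config match") amounts to a lookup from the 4-byte fork version embedded in a serialized BeaconState to a (config, fork) pair. A minimal illustrative sketch of that idea, with the standard mainnet version bytes hard-coded (the table and names are assumptions for illustration, not the detect package's real structures):

package main

import "fmt"

type forkEntry struct {
	configName string
	forkName   string
}

func main() {
	// Because fork versions are unique across known configs, the version bytes
	// alone identify both the config and the fork.
	table := map[[4]byte]forkEntry{
		{0x00, 0x00, 0x00, 0x00}: {"mainnet", "phase0"},
		{0x01, 0x00, 0x00, 0x00}: {"mainnet", "altair"},
		{0x02, 0x00, 0x00, 0x00}: {"mainnet", "bellatrix"},
	}
	version := [4]byte{0x01, 0x00, 0x00, 0x00} // e.g. read from state.fork.current_version
	if e, ok := table[version]; ok {
		fmt.Printf("detected config=%s fork=%s\n", e.configName, e.forkName)
	}
}
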
james-prysm
df8da80db8 Implement Fee Recipient: CLI flags and Calling Beacon API (#10312)
* send proposer data to config

* wip implementation with file and url based config import

* improving logic on get validator index

* fix function

* optimizing function for map and address bug

* fixing log

* update cache if it doesn't exist

* updating flags

* initial unit test scaffold

* fixing validator to call rpc call, removed temporary dependency

* adding the API calls for the runner

* fixing broken build

* fixing deepsource

* fixing interface

* fixing fatal

* fixing more deepsource issues

* adding test placeholders

* updating proposer config to add validation

* changing how if statement throws error

* removing unneeded validation, validating in a different way

* wip improving tests

* more unit test work

* fixing unit test

* fixing unit tests and edge cases

* adding unit tests and adjusting how the config is created

* fixing bazel builds

* fixing proto generation

* fixing imports

* fixing unit tests

* Update cmd/validator/flags/flags.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* updating flags based on comments, fixing unit tests

* fixing bazel

* removing unneeded function

* fixing unit tests

* refactors and unit test fixes based on comments

* fixing bazel build

* refactor the cache out fo the fee recipient function

* adding usecase for multiple fee recipient

* refactor burn name

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* fixing bug with validator index based on code review

* edited flag descriptions to better communicate usage

* fixing manual reference to flag name

* fixing code review comments

* fixing linting

* Update validator/client/validator.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* addressing comments and renaming functions

* fixing linting

* Update cmd/validator/flags/flags.go

* Update cmd/validator/flags/flags.go

* Update cmd/validator/flags/flags.go

* improving comments

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-03-21 13:48:02 -05:00
Raul Jordan
dc527a3c80 Remove Visibility Check Script for Bazel (#10391)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-21 17:03:52 +00:00
Potuz
94f80bd208 Call FCU on update head after process attestation (#10394)
* Call FCU on update head after process attestation

* add test

* fix test

* terence review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-21 01:50:08 +00:00
terence tsao
c6bfefa0bc Add notifyNewPayload to onBlockBatch (#10392) 2022-03-18 09:36:46 -07:00
Radosław Kapka
3446eaa5f2 Add missing tests to state retrieval (#10390) 2022-03-17 18:40:07 +00:00
terence tsao
193866c731 Add fcu to batch blk processing (#10371) 2022-03-17 07:35:50 -07:00
terence tsao
b90ce1b60f Bellatrix fork choice spec tests (#10381)
* Add bellatrix forkchoice spec tests

* Update service.go

* better conversion
2022-03-17 05:30:44 +00:00
terence tsao
8449d24ed0 Fix BaseFeePerGas endianness (#10376)
* Fix base fee endianness and add more test entropies

* Update json_marshal_unmarshal_test.go

* Update setters_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-16 22:12:08 +00:00
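
Both base-fee endianness fixes in this list (#10376 and #10459) come down to the same conversion: the execution payload stores BaseFeePerGas as 32 little-endian bytes, while big.Int and the JSON hex form are big-endian. An illustrative sketch of the non-mutating byte-order reversal involved (not the exact Prysm helper):

package main

import (
	"fmt"
	"math/big"
)

// reverseByteOrder returns a reversed copy of its input without mutating it.
func reverseByteOrder(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

func main() {
	// A 1 gwei base fee as a big-endian value, left-padded to 32 bytes.
	baseFee := big.NewInt(1_000_000_000)
	be := make([]byte, 32)
	baseFee.FillBytes(be)

	le := reverseByteOrder(be)                           // little-endian form kept in the payload
	back := new(big.Int).SetBytes(reverseByteOrder(le)) // round-trips to the original value
	fmt.Println(back)                                    // 1000000000
}
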
Radosław Kapka
b92b01f379 API middleware support for bellatrix in produceBlockV2 (#10374) 2022-03-16 21:44:15 +00:00
terence tsao
bcd180ee4d Remove deprecated set_optimistic_to_valid calls (#10370) 2022-03-16 21:14:13 +00:00
Raul Jordan
5f10e51a49 Add API Middleware Support for Fee Recipients Endpoint (#10377)
* add in success log

* add in info log

* uint64 for schema

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-16 20:02:26 +00:00
terence tsao
c4f03868ce Update default fee warning (#10378)
* Add fcu to batch blk processing

* Log error only if is burned

* Revert some changes

* Burn address
2022-03-16 19:26:51 +00:00
james-prysm
d55c0d74dd PrepareBeaconProposer API (#10367)
* initial commit implementing prepare beacon proposal

* removing files accidentally checked in by goland

* removing files accidentally checked in by goland

* Delete BUILD 2

* Delete armeabi_cc_toolchain_config 2.bzl

* Delete builtin_include_directory_paths 2

* Delete cc_toolchain_config 2.bzl

* Delete BUILD 2.bazel

* Delete cc_wrapper 2.sh

* Delete module 2.modulemap

* Delete BUILD 2

* Delete BUILD 2

* Delete WORKSPACE 2

* Delete cc_toolchain_config_linux_arm64 2.bzl

* Delete cc_toolchain_config_osx 2.bzl

* Delete cc_toolchain_config_windows 2.bzl

* Delete 0258716f94e00f9df0da869fd97f9e0d0c6ac83eb528677d918f0ac9be5f4b8d 2

* Delete test-unmarshal-bad 2.json

* Delete test-unmarshal-good 2.json

* adding simple unit tests

* removing duplicate imports

* adding bazel changes

* adding validation and switched to table driven tests

* updating based on comments

* adding to test retrieve from db

* fixing bazel
2022-03-16 11:17:41 -04:00
Potuz
807b71244b Add database methods for optimistic sync (#10357)
* Add database methods for optimistic sync

* Add epoch comparison

* add extra epoch comparison

* Summary instead of block

* fix tests

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-15 20:52:59 +00:00
Potuz
2744eba391 Update optimisticCandidateBlock to specs version (#10355)
* Update optimisticCandidateBlock to specs version

* fix double import

* Change error message
2022-03-15 19:12:52 +00:00
Leo Lara
5507558678 Some improvements for the state package testing (#10316)
* Refactor to unify state getters block tests

* Add reference tests to Altair and Bellatrix versions of the state

* Fix function naming convention

* Add state-native/v2/references_test.go and state-native/v3/references_test.go

* Gazelle run

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-03-15 18:32:09 +00:00
terence tsao
8cecd4e8bf Log bellatrix fields (#10365)
* Log bellatrix fields

* Update log.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-15 14:02:39 +00:00
terence tsao
69a9388515 Spec test v1.1.10 (#10298)
* spectest

* uncomment shas

* Fc spec test service shouldn't spawn attestation routine

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-03-15 13:17:51 +00:00
terence tsao
bd308b6d73 Update default fee recipient error log (#10366) 2022-03-14 20:35:56 -07:00
Radosław Kapka
9abea200a5 Add Bellatrix version to ProduceBlock API (#10361)
* Add Bellatrix version to `ProduceBlock` API

* fix errors

* fix test

* Revert "Auxiliary commit to revert individual files from 8cc6fc9ca3b6ddb67aeafeaddc2a2e1ca6610ded"

This reverts commit 6358ee92d899e8ee6587ad5503cebb40ac92add4.

# Conflicts:
#	beacon-chain/blockchain/optimistic_sync_test.go
#	beacon-chain/blockchain/pow_block_test.go
#	beacon-chain/blockchain/testing/mock_engine.go

* use proper mock

* bzl

* fix build file

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-14 23:08:01 +00:00
terence tsao
730678bf21 Handle pre state Altair with valid payload (#10364)
* Handle pre state Altair with valid payload

* Handle pre state Altair with valid payload
2022-03-14 22:40:08 +00:00
kasey
0b1a777d62 ran gofmt @ v1.17, these are the resulting changes (#10362)
* ran gofmt @ v1.17, these are the resulting changes

* fix some flaky tests that are tripping up this PR

* gofmt

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-03-14 15:58:13 -05:00
terence tsao
5461c5b84f Insert stored fee recipient for ForkchoiceUpdated call (#10349)
* Add default

* Add and use fee recipient in db

* Update BUILD.bazel

* Feedback

* Add basic addr check

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-14 18:44:40 +00:00
Preston Van Loon
03e7acbf9f increase bazel java max mem to 4gb (#10363)
* increase bazel java max mem to 4gb

* DEBUG: set nocache and modify nogo to force any disk cache misses. Revert this commit after testing in CI

* Revert "DEBUG: set nocache and modify nogo to force any disk cache misses. Revert this commit after testing in CI"

This reverts commit dc41cce43d.
2022-03-14 17:53:42 +00:00
Potuz
6cc88e6454 enable optimistic sync (#10358)
* enable optimistic sync

* revert computation of HTR in notifyNewPayload

* fix tests

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-14 17:25:40 +00:00
terence tsao
65cea17268 Return early if ttd is not reached (#10359) 2022-03-14 16:38:05 +00:00
Preston Van Loon
aa86c94a91 Update bazel to 5.0.0 (#10352)
* Update to bazel 5

* Add new bazel 5 experimental flags to improve remote caching

* regen cross compile for bazel 5

* gazelle

* remove some old flags

* use hermetic builds sandbox

* minimal downloads for better disk usage

* Restore manual tags for darwin builds
2022-03-14 15:05:08 +00:00
Radosław Kapka
75bb25d515 Bellatrix publish block - API Middleware fixes (#10348)
* Bellatrix publish block - API Middleware fixes

* update field name

* correct docs
2022-03-14 13:36:46 +00:00
terence tsao
1e845bc276 Fix sync committee assignment for Bellatrix (#10356)
* Fix AltairCompatible to account for future state version

* Tests

* gaz

* More tests

* Update comments

* Rename to HigherThanAltairVersionAndEpoch

* HigherEqualThanAltairVersionAndEpoch

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-03-14 01:00:33 +00:00
Nishant Das
58f7e942f2 Fix Mnemonic Validation (#10354)
* fix mnemonic validation

* potuz's review

* Add fuzz test for wallet recover

Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-03-14 07:30:11 +08:00
Potuz
3f1e3cf82f remove optimistic control flag (#10340)
* remove optimistic control flag

* Rename

* More

* More

* redundant return

* fix conflict

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-03-12 17:32:04 +00:00
terence tsao
60003c481b Add unlock to IsOptimistic returns (#10341)
* Add unlock to returns

* add regression test

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-11 10:36:47 +00:00
Preston Van Loon
c1197d7881 Add static analysis for unsafe uint casting (#10318)
* Add static analysis for unsafe uint casting

* Fix violations of uintcast

* go mod tidy

* Add exclusion to nogo for darwin build

* Add test for math.Int

* Move some things to const so they are assured not to exceed int64

* Self review

* lint

* fix tests

* fix test

* Add init check for non 64 bit OS

* Move new deps from WORKSPACE to deps.bzl

* fix bazel build for go analysis runs

* Update BUILD.bazel

Remove TODO

* add math.AddInt method

* Add new test casts

* Add case where builtin functions and declared functions are covered

* Fix new findings

* cleanup

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-03-11 09:34:30 +00:00
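
The uintcast analyzer added here flags direct uint64-to-int conversions that can overflow silently. A hedged sketch of the kind of guarded conversion such a check pushes code toward, assuming a 64-bit platform (the helper name is illustrative, not the actual prysm math API):

package main

import (
	"errors"
	"fmt"
	"math"
)

// toInt is an illustrative guarded cast: it rejects values that would overflow
// a signed 64-bit integer instead of wrapping around silently.
func toInt(v uint64) (int, error) {
	if v > math.MaxInt64 {
		return 0, errors.New("uint64 value overflows int64")
	}
	return int(v), nil
}

func main() {
	if n, err := toInt(42); err == nil {
		fmt.Println(n) // 42
	}
	if _, err := toInt(math.MaxUint64); err != nil {
		fmt.Println(err) // overflow rejected
	}
}
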
Nishant Das
693cc79cc9 Minor Cleanup from #10255 (#10337)
* minor cleanup

* kasey's review

* kasey's review
2022-03-11 07:34:15 +00:00
kasey
23778959eb this is safer if a goroutine require fails (#10344)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-03-11 05:29:52 +00:00
kasey
92278e2255 fix TestValidator_WaitForKeymanagerInitialization_Web race (#10343)
* TestValidator_WaitForKeymanagerInitialization_Web

* gofmt

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-03-10 20:04:27 -06:00
667 changed files with 20794 additions and 7993 deletions


@@ -217,3 +217,6 @@ build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
# Set bazel gotag
build --define gotags=bazel


@@ -1 +1 @@
4.2.2
5.0.0


@@ -10,26 +10,26 @@
# Prysm specific remote-cache properties.
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_toplevel
build:remote-cache --remote_download_minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache
build:remote-cache --experimental_action_cache_store_output_metadata
build:remote-cache --experimental_remote_cache_compression
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote-cache --incompatible_strict_action_env=true
build --experimental_use_hermetic_linux_sandbox
# Import workspace options.
import %workspace%/.bazelrc
startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
build --experimental_repository_cache_hardlinks
startup --host_jvm_args=-Xmx4g --host_jvm_args=-Xms2g
build --experimental_strict_action_env
build --disk_cache=/tmp/bazelbuilds
build --experimental_multi_threaded_digest
build --sandbox_tmpfs_path=/tmp
build --verbose_failures
build --announce_rc


@@ -1,13 +1,13 @@
#!/bin/sh -l
set -e
export PATH=$PATH:/usr/local/go/bin
export PATH="$PATH:/usr/local/go/bin"
cd $GITHUB_WORKSPACE
cd "$GITHUB_WORKSPACE"
cp go.mod go.mod.orig
cp go.sum go.sum.orig
go mod tidy
go mod tidy -compat=1.17
echo "Checking go.mod and go.sum:"
checks=0


@@ -1,41 +0,0 @@
name: Update DAppNodePackages
on:
push:
tags:
- '*'
jobs:
dappnode-update-beacon-chain:
name: Trigger a beacon-chain release
runs-on: ubuntu-latest
steps:
- name: Get latest tag
id: get_tag
run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
- name: Send dispatch event to DAppNodePackage-prysm-beacon-chain
env:
DISPATCH_REPO: dappnode/DAppNodePackage-prysm-beacon-chain
run: |
curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
-H "Accept: application/vnd.github.everest-preview+json" \
-H "Content-Type: application/json" \
--data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
https://api.github.com/repos/$DISPATCH_REPO/dispatches
dappnode-update-validator:
name: Trigger a validator release
runs-on: ubuntu-latest
steps:
- name: Get latest tag
id: get_tag
run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
- name: Send dispatch event to DAppNodePackage validator repository
env:
DISPATCH_REPO: dappnode/DAppNodePackage-prysm-validator
run: |
curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
-H "Accept: application/vnd.github.everest-preview+json" \
-H "Content-Type: application/json" \
--data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
https://api.github.com/repos/$DISPATCH_REPO/dispatches


@@ -7,13 +7,12 @@ on:
branches: [ '*' ]
jobs:
check:
name: Check
formatting:
name: Formatting
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@v2
- name: Go mod tidy checker
id: gomodtidy
@@ -31,15 +30,43 @@ jobs:
with:
goimports-path: ./
- name: Gosec security scanner
uses: securego/gosec@master
gosec:
name: Gosec scan
runs-on: ubuntu-latest
env:
GO111MODULE: on
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.17
uses: actions/setup-go@v3
with:
args: '-exclude=G307 -exclude-dir=crypto/bls/herumi ./...'
go-version: 1.17
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@latest
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.17
uses: actions/setup-go@v3
with:
go-version: 1.17
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v2
with:
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.17
version: v1.45.2
skip-go-installation: true
build:
name: Build
@@ -48,7 +75,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.14
go-version: 1.17
id: go
- name: Check out code into the Go module directory


@@ -3,7 +3,7 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
prefix = "github.com/prysmaticlabs/prysm"
@@ -16,6 +16,8 @@ exports_files([
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
# gazelle:build_tags bazel
# gazelle:exclude tools/analyzers/**/testdata/**
gazelle(
name = "gazelle",
prefix = prefix,
@@ -125,6 +127,7 @@ nogo(
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
"//tools/analyzers/recursivelock:go_default_library",
"//tools/analyzers/uintcast:go_default_library",
] + select({
# nogo checks that fail with coverage enabled.
":coverage_enabled": [],


@@ -117,13 +117,6 @@ http_archive(
urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)
git_repository(
name = "graknlabs_bazel_distribution",
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
remote = "https://github.com/graknlabs/bazel-distribution",
shallow_since = "1569509514 +0300",
)
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
@@ -222,7 +215,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.1.9"
consensus_spec_version = "v1.1.10"
bls_test_version = "v0.1.1"
@@ -238,7 +231,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "207d9c326ba4fa1f34bab7b6169201c32f2611755db030909a3405873445e0ba",
sha256 = "28043009cc2f6fc9804e73c8c1fc2cb27062f1591e6884f3015ae1dd7a276883",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -254,7 +247,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "a3995b39f412db236b2f1db909f288218da53cb53b9923b71dda9d144d68f40a",
sha256 = "bc1a283ca068f310f04d70c4f6a8eaa0b8f7e9318073a8bdc2ee233111b4e339",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -270,7 +263,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "76cea7a4c8e32d458ad456b54bfbb30bc772481a91954a4cd97e229aa3023b1d",
sha256 = "bbabb482c229ff9d4e2c7b77c992edb452f9d0af7c6d8dd4f922f06a7b101e81",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -285,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "0fc429684775f943250dce1f9c485ac25e26c6395d7f585c8d1317becec2ace7",
sha256 = "408a5524548ad3fcf387f65ac7ec52781d9ee899499720bb12451b48a15818d4",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)


@@ -0,0 +1,54 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"checkpoint.go",
"client.go",
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/state:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//io/file:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_x_mod//semver:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"checkpoint_test.go",
"client_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],
)


@@ -0,0 +1,262 @@
package beacon
import (
"context"
"fmt"
"path"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/io/file"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
log "github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
// OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
wsd *WeakSubjectivityData
sb []byte
bb []byte
st state.BeaconState
b block.SignedBeaconBlock
cf *detect.VersionedUnmarshaler
}
// CheckpointString returns the standard string representation of a Checkpoint for the block root and epoch for the
// SignedBeaconBlock value found by DownloadOriginData.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (od *OriginData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", od.wsd.BlockRoot, od.wsd.Epoch)
}
// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", od.cf, od.st.Slot(), od.wsd.BlockRoot))
return blockPath, file.WriteFile(blockPath, od.bb)
}
// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.StateRoot))
return statePath, file.WriteFile(statePath, od.sb)
}
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
func (od *OriginData) StateBytes() []byte {
return od.sb
}
// BlockBytes returns the ssz-encoded bytes of the downloaded SignedBeaconBlock value.
func (od *OriginData) BlockBytes() []byte {
return od.bb
}
func fname(prefix string, cf *detect.VersionedUnmarshaler, slot types.Slot, root [32]byte) string {
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, cf.Config.ConfigName, version.String(cf.Fork), slot, root)
}
// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
headBytes, err := client.GetState(ctx, IdHead)
if err != nil {
return 0, err
}
cf, err := detect.FromState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in remote head state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
headState, err := cf.UnmarshalBeaconState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
}
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, cf.Config)
if err != nil {
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
}
log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
return epoch, nil
}
const (
prysmMinimumVersion = "v2.0.7"
prysmImplementationName = "Prysm"
)
// ErrUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
var ErrUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
// for older endpoints or clients that do not support the weak_subjectivity api method
// we gather the necessary data for a checkpoint sync by:
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
// - requesting the state at the first slot of the epoch
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
// - requesting that block by its root
func downloadBackwardsCompatible(ctx context.Context, client *Client) (*OriginData, error) {
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
nv, err := client.GetNodeVersion(ctx)
if err != nil {
return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
}
if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
return nil, errors.Wrapf(ErrUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
}
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
if err != nil {
return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
}
// use first slot of the epoch for the state slot
slot, err := slots.EpochStart(epoch)
if err != nil {
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
}
log.Printf("requesting checkpoint state at slot %d", slot)
// get the state at the first slot of the epoch
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}
// The detected VersionedUnmarshaler is used to unmarshal the BeaconState so we can read the block root in latest_block_header
cf, err := detect.FromState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
st, err := cf.UnmarshalBeaconState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}
// compute state and block roots
stateRoot, err := st.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
}
header := st.LatestBlockHeader()
header.StateRoot = stateRoot[:]
computedBlockRoot, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
blockBytes, err := client.GetBlock(ctx, IdFromRoot(computedBlockRoot))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %d", computedBlockRoot)
}
block, err := cf.UnmarshalBeaconBlock(blockBytes)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
blockRoot, err := block.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
}
log.Printf("BeaconState slot=%d, Block slot=%d", st.Slot(), block.Block().Slot())
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
log.Printf("BeaconBlock root computed from state=%#x, Block htr=%#x", computedBlockRoot, blockRoot)
return &OriginData{
wsd: &WeakSubjectivityData{
BlockRoot: blockRoot,
StateRoot: stateRoot,
Epoch: epoch,
},
st: st,
sb: stateBytes,
b: block,
bb: blockBytes,
cf: cf,
}, nil
}
// DownloadOriginData attempts to use the proposed weak_subjectivity beacon node api
// to obtain the weak_subjectivity metadata (epoch, block_root, state_root) needed to sync
// a beacon node from the canonical weak subjectivity checkpoint. As this is a proposed API
// that will only be supported by prysm at first, in the event of a 404 we fallback to using a
// different technique where we first download the head state which can be used to compute the
// weak subjectivity epoch on the client side.
func DownloadOriginData(ctx context.Context, client *Client) (*OriginData, error) {
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
return downloadBackwardsCompatible(ctx, client)
}
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
// use first slot of the epoch for the block slot
slot, err := slots.EpochStart(ws.Epoch)
if err != nil {
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", ws.Epoch)
}
log.Printf("requesting checkpoint state at slot %d", slot)
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}
cf, err := detect.FromState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
state, err := cf.UnmarshalBeaconState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}
stateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for state at slot=%d", slot)
}
blockRoot, err := state.LatestBlockHeader().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of latest_block_header")
}
blockBytes, err := client.GetBlock(ctx, IdFromRoot(ws.BlockRoot))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
}
block, err := cf.UnmarshalBeaconBlock(blockBytes)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
realBlockRoot, err := block.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
log.Printf("BeaconState slot=%d, Block slot=%d", state.Slot(), block.Block().Slot())
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", blockRoot, realBlockRoot)
return &OriginData{
wsd: ws,
st: state,
b: block,
sb: stateBytes,
bb: blockBytes,
cf: cf,
}, nil
}
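
For context, a hedged sketch of how the API added in this file might be driven by a caller; it assumes a *beacon.Client has already been constructed (client.go is not shown in this diff) and uses only the functions defined above:

// Package example sketches a caller of the checkpoint download API.
package example

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/api/client/beacon"
)

// saveCheckpoint downloads the weak subjectivity origin data and writes the
// state and block SSZ files to dir, logging the checkpoint string to pass to
// a syncing node.
func saveCheckpoint(ctx context.Context, client *beacon.Client, dir string) error {
	od, err := beacon.DownloadOriginData(ctx, client)
	if err != nil {
		return err
	}
	statePath, err := od.SaveState(dir)
	if err != nil {
		return err
	}
	blockPath, err := od.SaveBlock(dir)
	if err != nil {
		return err
	}
	log.Printf("wrote %s and %s, checkpoint %s", statePath, blockPath, od.CheckpointString())
	return nil
}
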


@@ -0,0 +1,407 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"testing"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/network/forks"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/testing/require"
)
type testRT struct {
rt func(*http.Request) (*http.Response, error)
}
func (rt *testRT) RoundTrip(req *http.Request) (*http.Response, error) {
if rt.rt != nil {
return rt.rt(req)
}
return nil, errors.New("RoundTripper not implemented")
}
var _ http.RoundTripper = &testRT{}
func marshalToEnvelope(val interface{}) ([]byte, error) {
raw, err := json.Marshal(val)
if err != nil {
return nil, errors.Wrap(err, "error marshaling value to place in data envelope")
}
env := struct {
Data json.RawMessage `json:"data"`
}{
Data: raw,
}
return json.Marshal(env)
}
func TestMarshalToEnvelope(t *testing.T) {
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
expected := `{"data":{"version":"Prysm/v2.0.5 (linux amd64)"}}`
require.Equal(t, expected, string(encoded))
}
func TestFallbackVersionCheck(t *testing.T) {
c := &Client{
hc: &http.Client{},
host: "localhost:3500",
scheme: "http",
}
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
ctx := context.Background()
_, err := DownloadOriginData(ctx, c)
require.ErrorIs(t, err, ErrUnsupportedPrysmCheckpointVersion)
}
func TestFname(t *testing.T) {
vu := &detect.VersionedUnmarshaler{
Config: params.MainnetConfig(),
Fork: version.Phase0,
}
slot := types.Slot(23)
prefix := "block"
var root [32]byte
copy(root[:], []byte{0x23, 0x23, 0x23})
expected := "block_mainnet_phase0_23-0x2323230000000000000000000000000000000000000000000000000000000000.ssz"
actual := fname(prefix, vu, slot, root)
require.Equal(t, expected, actual)
vu.Config = params.MinimalSpecConfig()
vu.Fork = version.Altair
slot = 17
prefix = "state"
copy(root[29:], []byte{0x17, 0x17, 0x17})
expected = "state_minimal_altair_17-0x2323230000000000000000000000000000000000000000000000000000171717.ssz"
actual = fname(prefix, vu, slot, root)
require.Equal(t, expected, actual)
}
func TestDownloadOriginData(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig()
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
wSlot, err := slots.EpochStart(epoch)
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
require.NoError(t, wrapper.SetBlockSlot(b, wSlot))
require.NoError(t, wrapper.SetProposerIndex(b, 0))
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
wRoot, err := wst.HashTreeRoot(ctx)
require.NoError(t, err)
require.NoError(t, wrapper.SetBlockStateRoot(b, wRoot))
serBlock, err := b.MarshalSSZ()
require.NoError(t, err)
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
expectedWSD := WeakSubjectivityData{
BlockRoot: bRoot,
StateRoot: wRoot,
Epoch: epoch,
}
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
host: "localhost:3500",
scheme: "http",
}
od, err := DownloadOriginData(ctx, c)
require.NoError(t, err)
require.Equal(t, expectedWSD.Epoch, od.wsd.Epoch)
require.Equal(t, expectedWSD.StateRoot, od.wsd.StateRoot)
require.Equal(t, expectedWSD.BlockRoot, od.wsd.BlockRoot)
require.DeepEqual(t, wsSerialized, od.sb)
require.DeepEqual(t, serBlock, od.bb)
require.DeepEqual(t, wst.Fork().CurrentVersion, od.cf.Version[:])
require.DeepEqual(t, version.Phase0, od.cf.Fork)
}
// runs downloadBackwardsCompatible directly
// and via DownloadOriginData with a round tripper that triggers the backwards compatible code path
func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig()
st, expectedEpoch := defaultTestHeadState(t, cfg)
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
wSlot, err := slots.EpochStart(expectedEpoch)
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
require.NoError(t, wrapper.SetBlockSlot(b, wSlot))
require.NoError(t, wrapper.SetProposerIndex(b, 0))
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
wRoot, err := wst.HashTreeRoot(ctx)
require.NoError(t, err)
require.NoError(t, wrapper.SetBlockStateRoot(b, wRoot))
serBlock, err := b.MarshalSSZ()
require.NoError(t, err)
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
host: "localhost:3500",
scheme: "http",
}
odPub, err := DownloadOriginData(ctx, c)
require.NoError(t, err)
odPriv, err := downloadBackwardsCompatible(ctx, c)
require.NoError(t, err)
require.DeepEqual(t, odPriv.wsd, odPub.wsd)
require.DeepEqual(t, odPriv.sb, odPub.sb)
require.DeepEqual(t, odPriv.bb, odPub.bb)
require.DeepEqual(t, odPriv.cf.Fork, odPub.cf.Fork)
require.DeepEqual(t, odPriv.cf.Version, odPub.cf.Version)
}
func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}},
}
c := &Client{
hc: hc,
host: "localhost:3500",
scheme: "http",
}
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
require.Equal(t, expectedEpoch, actualEpoch)
}
func forkForEpoch(cfg *params.BeaconChainConfig, epoch types.Epoch) (*ethpb.Fork, error) {
os := forks.NewOrderedSchedule(cfg)
currentVersion, err := os.VersionForEpoch(epoch)
if err != nil {
return nil, err
}
prevVersion, err := os.Previous(currentVersion)
if err != nil {
if !errors.Is(err, forks.ErrNoPreviousVersion) {
return nil, err
}
// use same version for both in the case of genesis
prevVersion = currentVersion
}
forkEpoch := cfg.ForkVersionSchedule[currentVersion]
return &ethpb.Fork{
PreviousVersion: prevVersion[:],
CurrentVersion: currentVersion[:],
Epoch: forkEpoch,
}, nil
}
func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, types.Epoch) {
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.AltairForkEpoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
slot, err := slots.EpochStart(cfg.AltairForkEpoch)
require.NoError(t, err)
require.NoError(t, st.SetSlot(slot))
var validatorCount, avgBalance uint64 = 100, 35
require.NoError(t, populateValidators(cfg, st, validatorCount, avgBalance))
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: fork.Epoch - 10,
Root: make([]byte, 32),
}))
// to see the math for this, look at helpers.LatestWeakSubjectivityEpoch
// and for the values use mainnet config values, the validatorCount and avgBalance above, and altair fork epoch
expectedEpoch := slots.ToEpoch(st.Slot()) - 224
return st, expectedEpoch
}
// TODO(10429): refactor beacon state options in testing/util to take a state.BeaconState so this can become an option
func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, valCount, avgBalance uint64) error {
validators := make([]*ethpb.Validator, valCount)
balances := make([]uint64, len(validators))
for i := uint64(0); i < valCount; i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, cfg.BLSPubkeyLength),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: avgBalance * 1e9,
ExitEpoch: cfg.FarFutureEpoch,
}
balances[i] = validators[i].EffectiveBalance
}
if err := st.SetValidators(validators); err != nil {
return err
}
if err := st.SetBalances(balances); err != nil {
return err
}
return nil
}

api/client/beacon/client.go

@@ -0,0 +1,446 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"path"
"regexp"
"sort"
"strconv"
"text/template"
"time"
"github.com/prysmaticlabs/prysm/network/forks"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
const (
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
)
// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
// StateOrBlockId constants are defined for named identifiers, and helper methods are provided
// for slot and root identifiers. Example text from the Eth Beacon Node API documentation:
//
// "Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>."
type StateOrBlockId string
const (
IdFinalized StateOrBlockId = "finalized"
IdGenesis StateOrBlockId = "genesis"
IdHead StateOrBlockId = "head"
IdJustified StateOrBlockId = "justified"
)
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromRoot(r [32]byte) StateOrBlockId {
return StateOrBlockId(fmt.Sprintf("%#x", r))
}
// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromSlot(s types.Slot) StateOrBlockId {
return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
}
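// Illustrative sketch, not part of the original change: the helpers above and the named
// constants cover the three identifier forms the API accepts. The slot and root values
// below are hypothetical.
func exampleIdentifiers() []StateOrBlockId {
	var root [32]byte
	copy(root[:], []byte{0x23, 0x23, 0x23})
	return []StateOrBlockId{
		IdHead,                       // named identifier: "head"
		IdFromSlot(types.Slot(4096)), // slot identifier: "4096"
		IdFromRoot(root),             // root identifier: 0x-prefixed hex encoding of the root
	}
}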
// idTemplate is used to create template functions that can interpolate StateOrBlockId values.
func idTemplate(ts string) func(StateOrBlockId) string {
t := template.Must(template.New("").Parse(ts))
f := func(id StateOrBlockId) string {
b := bytes.NewBuffer(nil)
err := t.Execute(b, struct{ Id string }{Id: string(id)})
if err != nil {
panic(fmt.Sprintf("invalid idTemplate: %s", ts))
}
return b.String()
}
// run the template once to ensure that it is valid;
// this should happen at load time (via package-scoped vars) so that runtime errors aren't possible
_ = f(IdGenesis)
return f
}
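// Illustrative sketch, not part of the original change: a template built by idTemplate
// interpolates an identifier into a concrete request path. getBlockRootTpl is the
// package-scoped template defined further down in this file.
func exampleRenderPath() string {
	return getBlockRootTpl(IdHead) // "/eth/v1/beacon/blocks/head/root"
}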
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
hc *http.Client
host string
scheme string
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
host, err := validHostname(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
scheme: "http",
host: host,
}
for _, o := range opts {
o(c)
}
return c, nil
}
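// Illustrative sketch, not part of the original change: construct a client against a
// hypothetical local beacon node, with a request timeout applied via the functional option.
func exampleNewClient() (*Client, error) {
	return NewClient("localhost:3500", WithTimeout(2*time.Minute))
}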
func validHostname(h string) (string, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u.Host, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%s", host, port), nil
}
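// Illustrative sketch, not part of the original change: both a full URL and a bare
// host:port value reduce to the same host, so either form can be passed to NewClient.
func exampleValidHostname() (string, string, error) {
	withScheme, err := validHostname("http://localhost:3500") // "localhost:3500"
	if err != nil {
		return "", "", err
	}
	bare, err := validHostname("localhost:3500") // "localhost:3500"
	return withScheme, bare, err
}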
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
u := &url.URL{
Scheme: c.scheme,
Host: c.host,
}
return u.String()
}
func (c *Client) urlForPath(methodPath string) *url.URL {
u := &url.URL{
Scheme: c.scheme,
Host: c.host,
}
u.Path = path.Join(u.Path, methodPath)
return u
}
type reqOption func(*http.Request)
func withSSZEncoding() reqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.urlForPath(path)
log.Printf("requesting %s", u.String())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
if closeErr := r.Body.Close(); closeErr != nil {
log.WithError(closeErr).Error("could not close response body")
}
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
}
return b, nil
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
// GetBlock retrieves the SignedBeaconBlock for the given block id.
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := renderGetBlockPath(blockId)
b, err := c.get(ctx, blockPath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
}
return b, nil
}
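// Illustrative sketch, not part of the original change: resolve the finalized block root
// first (GetBlockRoot is defined just below), then download the ssz-encoded block by root.
// Callers decode the returned bytes with a fork-aware unmarshaler such as the detect
// package used elsewhere in this client.
func exampleGetFinalizedBlock(ctx context.Context, c *Client) ([32]byte, []byte, error) {
	root, err := c.GetBlockRoot(ctx, IdFinalized)
	if err != nil {
		return [32]byte{}, nil, err
	}
	ssz, err := c.GetBlock(ctx, IdFromRoot(root))
	if err != nil {
		return [32]byte{}, nil, err
	}
	return root, ssz, nil
}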
var getBlockRootTpl = idTemplate(getBlockRootPath)
// GetBlockRoot retrieves the hash_tree_root of the BeaconBlock for the given block id.
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
rootPath := getBlockRootTpl(blockId)
b, err := c.get(ctx, rootPath)
if err != nil {
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
}
jsonr := &struct{ Data struct{ Root string } }{}
err = json.Unmarshal(b, jsonr)
if err != nil {
return [32]byte{}, errors.Wrap(err, "error decoding json data from get block root response")
}
rs, err := hexutil.Decode(jsonr.Data.Root)
if err != nil {
return [32]byte{}, errors.Wrap(err, fmt.Sprintf("error decoding hex-encoded value %s", jsonr.Data.Root))
}
return bytesutil.ToBytes32(rs), nil
}
var getForkTpl = idTemplate(getForkForStatePath)
// GetFork queries the Beacon Node API for the Fork from the state identified by stateId.
// State identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded stateRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
body, err := c.get(ctx, getForkTpl(stateId))
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
fr := &forkResponse{}
dataWrapper := &struct{ Data *forkResponse }{Data: fr}
err = json.Unmarshal(body, dataWrapper)
if err != nil {
return nil, errors.Wrap(err, "error decoding json response in GetFork")
}
return fr.Fork()
}
// GetForkSchedule retrieves all forks, past, present, and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}
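// Illustrative sketch, not part of the original change: the ordered schedule returned by
// GetForkSchedule can be queried for the fork version active at a given epoch
// (VersionForEpoch is the same lookup used by forkForEpoch in the tests for this package).
func exampleVersionForEpoch(ctx context.Context, c *Client, epoch types.Epoch) ([4]byte, error) {
	ofs, err := c.GetForkSchedule(ctx)
	if err != nil {
		return [4]byte{}, err
	}
	return ofs.VersionForEpoch(epoch)
}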
type NodeVersion struct {
implementation string
semver string
systemInfo string
}
var versionRE = regexp.MustCompile(`^(\w+)\/(v\d+\.\d+\.\d+) \((.*)\)$`)
func parseNodeVersion(v string) (*NodeVersion, error) {
groups := versionRE.FindStringSubmatch(v)
if len(groups) != 4 {
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
}
return &NodeVersion{
implementation: groups[1],
semver: groups[2],
systemInfo: groups[3],
}, nil
}
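// Illustrative sketch, not part of the original change: a version string in the
// User-Agent-like format splits into its three components.
func exampleParseNodeVersion() (*NodeVersion, error) {
	// yields implementation="Lighthouse", semver="v0.1.5", systemInfo="Linux x86_64"
	return parseNodeVersion("Lighthouse/v0.1.5 (Linux x86_64)")
}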
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to an HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
d := struct {
Data struct {
Version string `json:"version"`
} `json:"data"`
}{}
err = json.Unmarshal(b, &d)
if err != nil {
return nil, errors.Wrapf(err, "error unmarshaling response body: %s", string(b))
}
return parseNodeVersion(d.Data.Version)
}
func renderGetStatePath(id StateOrBlockId) string {
return path.Join(getStatePath, string(id))
}
// GetState retrieves the BeaconState for the given state id.
// State identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded stateRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
statePath := renderGetStatePath(stateId)
b, err := c.get(ctx, statePath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
}
return b, nil
}
// GetWeakSubjectivity calls a proposed API endpoint that is unique to Prysm.
// This API method does the following:
// - computes the weak subjectivity epoch
// - finds the highest non-skipped block preceding that epoch
// - returns the hash_tree_root of that block, along with the state_root value from the block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}
v := &apimiddleware.WeakSubjectivityResponse{}
err = json.Unmarshal(body, v)
if err != nil {
return nil, err
}
epoch, err := strconv.ParseUint(v.Data.Checkpoint.Epoch, 10, 64)
if err != nil {
return nil, err
}
blockRoot, err := hexutil.Decode(v.Data.Checkpoint.Root)
if err != nil {
return nil, err
}
stateRoot, err := hexutil.Decode(v.Data.StateRoot)
if err != nil {
return nil, err
}
return &WeakSubjectivityData{
Epoch: types.Epoch(epoch),
BlockRoot: bytesutil.ToBytes32(blockRoot),
StateRoot: bytesutil.ToBytes32(stateRoot),
}, nil
}
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint, or to download a BeaconState+SignedBeaconBlock pair that can be used to bootstrap
// a new Beacon Node using Checkpoint Sync.
type WeakSubjectivityData struct {
BlockRoot [32]byte
StateRoot [32]byte
Epoch types.Epoch
}
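// Illustrative sketch, not part of the original change: render the checkpoint data in the
// <block_root>:<epoch> form commonly used to express a weak subjectivity checkpoint on the
// command line (the exact flag that consumes this string is an assumption, not confirmed here).
func exampleCheckpointString(wsd *WeakSubjectivityData) string {
	return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
}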
func non200Err(response *http.Response) error {
bodyBytes, err := ioutil.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
type forkResponse struct {
PreviousVersion string `json:"previous_version"`
CurrentVersion string `json:"current_version"`
Epoch string `json:"epoch"`
}
func (f *forkResponse) Fork() (*ethpb.Fork, error) {
epoch, err := strconv.ParseUint(f.Epoch, 10, 64)
if err != nil {
return nil, err
}
cSlice, err := hexutil.Decode(f.CurrentVersion)
if err != nil {
return nil, err
}
if len(cSlice) != 4 {
return nil, fmt.Errorf("got %d byte version for CurrentVersion, expected 4 bytes. hex=%s", len(cSlice), f.CurrentVersion)
}
pSlice, err := hexutil.Decode(f.PreviousVersion)
if err != nil {
return nil, err
}
if len(pSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(pSlice), f.PreviousVersion)
}
return &ethpb.Fork{
CurrentVersion: cSlice,
PreviousVersion: pSlice,
Epoch: types.Epoch(epoch),
}, nil
}
type forkScheduleResponse struct {
Data []forkResponse
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.Atoi(d.Epoch)
if err != nil {
return nil, err
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: types.Epoch(uint64(epoch)),
})
}
sort.Sort(ofs)
return ofs, nil
}


@@ -0,0 +1,58 @@
package beacon
import (
"testing"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestParseNodeVersion(t *testing.T) {
cases := []struct {
name string
v string
err error
nv *NodeVersion
}{
{
name: "empty string",
v: "",
err: ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: ErrInvalidNodeVersion,
},
{
name: "implementation and semver only",
v: "Prysm/v2.0.6",
err: ErrInvalidNodeVersion,
},
{
name: "complete version",
v: "Prysm/v2.0.6 (linux amd64)",
nv: &NodeVersion{
implementation: "Prysm",
semver: "v2.0.6",
systemInfo: "linux amd64",
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
nv, err := parseNodeVersion(c.v)
if c.err != nil {
require.ErrorIs(t, err, c.err)
} else {
require.NoError(t, err)
require.DeepEqual(t, c.nv, nv)
}
})
}
}

api/client/beacon/doc.go

@@ -0,0 +1,6 @@
/*
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/
*/
package beacon


@@ -0,0 +1,13 @@
package beacon
import "github.com/pkg/errors"
// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
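// Illustrative sketch, not part of the original change: because the more specific errors
// above wrap ErrNotOK, callers can branch on the sentinel values with errors.Is
// (supported by github.com/pkg/errors as well as the standard library).
func exampleIsNotFound(err error) bool {
	return errors.Is(err, ErrNotFound) // a 404 also satisfies errors.Is(err, ErrNotOK)
}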


@@ -51,12 +51,12 @@ go_library(
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
@@ -106,7 +106,6 @@ go_test(
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_engine_test.go",
"mock_test.go",
"optimistic_sync_test.go",
"pow_block_test.go",
@@ -130,6 +129,7 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/fieldparams:go_default_library",
@@ -183,6 +183,7 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",


@@ -7,6 +7,8 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
@@ -323,13 +325,55 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
return false, nil
}
return s.cfg.ForkChoiceStore.IsOptimistic(ctx, s.head.root)
return s.IsOptimisticForRoot(ctx, s.head.root)
}
// IsOptimisticForRoot takes the root and slot as aguments instead of the current head
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
return s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
if err == nil {
return optimistic, nil
}
if err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return false, err
}
ss, err := s.cfg.BeaconDB.StateSummary(ctx, root)
if err != nil {
return false, err
}
if ss == nil {
return false, errInvalidNilSummary
}
validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
if err != nil {
return false, err
}
if slots.ToEpoch(ss.Slot) > validatedCheckpoint.Epoch {
return true, nil
}
if slots.ToEpoch(ss.Slot)+1 < validatedCheckpoint.Epoch {
return false, nil
}
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, bytesutil.ToBytes32(validatedCheckpoint.Root))
if err != nil {
return false, err
}
if ss.Slot > lastValidated.Slot {
return true, nil
}
isCanonical, err := s.IsCanonical(ctx, root)
if err != nil {
return false, err
}
// historical non-canonical blocks here are returned as optimistic for safety.
return !isCanonical, nil
}
// SetGenesisTime sets the genesis time of beacon chain.


@@ -16,10 +16,13 @@ func TestHeadSlot_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
s.HeadSlot()
<-wait
}
func TestHeadRoot_DataRace(t *testing.T) {
@@ -28,24 +31,32 @@ func TestHeadRoot_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err := s.HeadRoot(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadBlock_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
wsb, err := wrapper.WrappedSignedBeaconBlock(&ethpb.SignedBeaconBlock{})
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
head: &head{block: wsb},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err := s.HeadBlock(context.Background())
_, err = s.HeadBlock(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadState_DataRace(t *testing.T) {
@@ -53,9 +64,12 @@ func TestHeadState_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err := s.HeadState(context.Background())
require.NoError(t, err)
<-wait
}


@@ -133,7 +133,9 @@ func TestHeadRoot_UseDB(t *testing.T) {
b := util.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
r, err := c.HeadRoot(context.Background())
@@ -146,8 +148,10 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
b.Block.Slot = 1
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
c := &Service{}
c.head = &head{block: wrapper.WrappedPhase0SignedBeaconBlock(b), state: s}
c.head = &head{block: wsb, state: s}
recevied, err := c.HeadBlock(context.Background())
require.NoError(t, err)
@@ -229,7 +233,9 @@ func TestIsCanonical_Ok(t *testing.T) {
blk.Block.Slot = 0
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
can, err := c.IsCanonical(ctx, root)
require.NoError(t, err)
@@ -288,11 +294,11 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0,
params.BeaconConfig().ZeroHash)}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
roots, slots := c.ChainHeads()
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
@@ -302,11 +308,11 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
roots, slots := c.ChainHeads()
require.Equal(t, 3, len(roots))
@@ -384,8 +390,8 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
@@ -400,8 +406,8 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
@@ -419,8 +425,8 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
@@ -430,10 +436,156 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
}
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
}
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, true, validated)
}


@@ -1,3 +1,4 @@
//go:build !develop
// +build !develop
package blockchain


@@ -9,4 +9,13 @@ var (
errNilBestJustifiedInStore = errors.New("nil best justified checkpoint returned from store")
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errNilParentInDB is returned when a nil parent block is returned from the DB.
errNilParentInDB = errors.New("nil parent block in DB")
// errWrongBlockCount is returned when the wrong number of blocks or
// block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// errNotOptimisticCandidate is returned when a block is not a valid candidate for optimistic sync.
errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
)


@@ -24,8 +24,9 @@ import (
"go.opencensus.io/trace"
)
// UpdateHeadWithBalances updates the beacon state head after getting justified balanced from cache.
func (s *Service) UpdateHeadWithBalances(ctx context.Context) error {
// UpdateAndSaveHeadWithBalances updates the beacon chain head after getting the justified balances from cache.
// This function is only used in spec tests; unlike updateHead, it also saves the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
cp := s.store.JustifiedCheckpt()
if cp == nil {
return errors.New("no justified checkpoint")
@@ -35,8 +36,11 @@ func (s *Service) UpdateHeadWithBalances(ctx context.Context) error {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", cp.Root)
return errors.Wrap(err, msg)
}
return s.updateHead(ctx, balances)
headRoot, err := s.updateHead(ctx, balances)
if err != nil {
return errors.Wrap(err, "could not update head")
}
return s.saveHead(ctx, headRoot)
}
// This defines the current chain service's view of head.
@@ -49,18 +53,18 @@ type head struct {
// Determined the head from the fork choice service and saves its new data
// (head root, head block, and head state) to the local service cache.
func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
defer span.End()
// Get head from the fork choice service.
f := s.store.FinalizedCheckpt()
if f == nil {
return errNilFinalizedInStore
return [32]byte{}, errNilFinalizedInStore
}
j := s.store.JustifiedCheckpt()
if j == nil {
return errNilJustifiedInStore
return [32]byte{}, errNilJustifiedInStore
}
// To get head before the first justified epoch, the fork choice will start with origin root
// instead of zero hashes.
@@ -76,26 +80,19 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
if err != nil {
return err
return [32]byte{}, err
}
if features.Get().EnableForkChoiceDoublyLinkedTree {
s.cfg.ForkChoiceStore = doublylinkedtree.New(j.Epoch, f.Epoch)
} else {
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
}
// TODO(10261) send optimistic status
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j, false /* optimistic status */); err != nil {
return err
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
return [32]byte{}, err
}
}
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
if err != nil {
return err
}
// Save head to the local service cache.
return s.saveHead(ctx, headRoot)
return s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
}
// This saves head info to the local service cache, it also saves the
@@ -149,16 +146,21 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
"oldSlot": fmt.Sprintf("%d", headSlot),
}).Debug("Chain reorg occurred")
absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
isOptimistic, err := s.IsOptimistic(ctx)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
Depth: absoluteSlotDifference,
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: headRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: slots.ToEpoch(newHeadSlot),
Slot: newHeadSlot,
Depth: absoluteSlotDifference,
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: headRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: slots.ToEpoch(newHeadSlot),
ExecutionOptimistic: isOptimistic,
},
})
@@ -180,7 +182,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
@@ -300,6 +302,7 @@ func (s *Service) hasHeadState() bool {
// Notifies a common event feed of a new chain head event. Called right after a new
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(
ctx context.Context,
newHeadSlot types.Slot,
newHeadState state.BeaconState,
newHeadStateRoot,
@@ -333,6 +336,10 @@ func (s *Service) notifyNewHeadEvent(
return errors.Wrap(err, "could not get duty dependent root")
}
}
isOptimistic, err := s.IsOptimistic(ctx)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.NewHead,
Data: &ethpbv1.EventHead{
@@ -342,6 +349,7 @@ func (s *Service) notifyNewHeadEvent(
EpochTransition: slots.IsEpochStart(newHeadSlot),
PreviousDutyDependentRoot: previousDutyDependentRoot,
CurrentDutyDependentRoot: currentDutyDependentRoot,
ExecutionOptimistic: isOptimistic,
},
})
return nil


@@ -9,6 +9,8 @@ import (
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
@@ -39,9 +41,10 @@ func TestSaveHead_Different(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
util.NewBeaconBlock()
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
util.NewBeaconBlock(),
)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
@@ -55,7 +58,9 @@ func TestSaveHead_Different(t *testing.T) {
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState, err := util.NewBeaconState()
@@ -80,9 +85,10 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
util.NewBeaconBlock(),
)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
@@ -98,7 +104,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
newHeadBlock := newHeadSignedBlock.Block
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState, err := util.NewBeaconState()
@@ -139,15 +147,18 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
b := util.NewBeaconBlock()
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
require.NoError(t, service.updateHead(context.Background(), []uint64{}))
headRoot, err := service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
require.NoError(t, service.saveHead(context.Background(), headRoot))
}
func Test_notifyNewHeadEvent(t *testing.T) {
@@ -162,7 +173,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
}
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err := srv.notifyNewHeadEvent(1, bState, newHeadStateRoot[:], newHeadRoot[:])
err := srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -197,7 +208,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err = srv.notifyNewHeadEvent(epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
err = srv.notifyNewHeadEvent(context.Background(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -233,7 +244,9 @@ func TestSaveOrphanedAtts(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now()
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
@@ -259,7 +272,9 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
@@ -267,3 +282,43 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
atts := b.Block.Body.Attestations
require.DeepNotSSZEqual(t, atts, savedAtts)
}
func TestUpdateHead_noSavedChanges(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcp := &ethpb.Checkpoint{
Root: bellatrixBlkRoot[:],
Epoch: 1,
}
service.store.SetFinalizedCheckpt(fcp)
service.store.SetJustifiedCheckpt(fcp)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
require.NoError(t, beaconDB.SaveState(ctx, bellatrixState, bellatrixBlkRoot))
service.cfg.StateGen.SaveFinalizedState(0, bellatrixBlkRoot, bellatrixState)
headRoot := service.headRoot()
require.Equal(t, [32]byte{}, headRoot)
newRoot, err := service.updateHead(ctx, []uint64{1, 2})
require.NoError(t, err)
require.NotEqual(t, headRoot, newRoot)
require.Equal(t, headRoot, service.headRoot())
}
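
The bulk of the test churn in this file (and in the test files below) follows one mechanical pattern: the phase0-only constructor wrapper.WrappedPhase0SignedBeaconBlock, which returned the wrapped block directly, is replaced by the generic wrapper.WrappedSignedBeaconBlock, which also returns an error. A minimal sketch of the new call shape, using only identifiers that appear in this diff and assuming the imports already present in these test files; the helper and the blockSaver interface are hypothetical:

// blockSaver is a tiny local interface so the sketch does not pin down the concrete DB type.
type blockSaver interface {
	SaveBlock(ctx context.Context, blk block.SignedBeaconBlock) error
}

// saveWrappedBlock shows the migrated call shape: wrap, check the error, then save.
func saveWrappedBlock(t *testing.T, ctx context.Context, db blockSaver, b *ethpb.SignedBeaconBlock) block.SignedBeaconBlock {
	wsb, err := wrapper.WrappedSignedBeaconBlock(b) // generic wrapper returns (block, error)
	require.NoError(t, err)
	require.NoError(t, db.SaveBlock(ctx, wsb))
	return wsb
}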

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
@@ -17,7 +18,7 @@ import (
var log = logrus.WithField("prefix", "blockchain")
// logs state transition related data every slot.
func logStateTransitionData(b block.BeaconBlock) {
func logStateTransitionData(b block.BeaconBlock) error {
log := log.WithField("slot", b.Slot())
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
@@ -34,13 +35,23 @@ func logStateTransitionData(b block.BeaconBlock) {
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() == version.Altair {
if b.Version() == version.Altair || b.Version() == version.Bellatrix {
agg, err := b.Body().SyncAggregate()
if err == nil {
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
if err != nil {
return err
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() == version.Bellatrix {
p, err := b.Body().ExecutionPayload()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash)))
log = log.WithField("txCount", len(p.Transactions))
}
log.Info("Finished applying state transition")
return nil
}
func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
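
Because logStateTransitionData now returns an error instead of logging unconditionally, its caller has to decide what a failure means. A minimal, hypothetical call-site sketch inside package blockchain (the real caller is not shown in this diff, and treating the error as non-fatal is an assumption):

// logTransition logs state transition data and reports, without aborting block
// processing, any failure to extract the sync aggregate or execution payload.
func logTransition(b block.BeaconBlock) {
	if err := logStateTransitionData(b); err != nil {
		log.WithError(err).Error("Unable to log state transition data")
	}
}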

View File

@@ -3,6 +3,7 @@ package blockchain
import (
"testing"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
@@ -11,6 +12,17 @@ import (
)
func Test_logStateTransitionData(t *testing.T) {
payloadBlk := &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
SyncAggregate: &ethpb.SyncAggregate{},
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: []byte{1, 2, 3},
Transactions: [][]byte{{}, {}},
},
},
}
wrappedPayloadBlk, err := wrapper.WrappedBeaconBlock(payloadBlk)
require.NoError(t, err)
tests := []struct {
name string
b block.BeaconBlock
@@ -55,11 +67,15 @@ func Test_logStateTransitionData(t *testing.T) {
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: wrappedPayloadBlk,
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
},
}
for _, tt := range tests {
hook := logTest.NewGlobal()
t.Run(tt.name, func(t *testing.T) {
logStateTransitionData(tt.b)
require.NoError(t, logStateTransitionData(tt.b))
require.LogsContain(t, hook, tt.want)
})
}

View File

@@ -1,49 +0,0 @@
package blockchain
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
type mockEngineService struct {
newPayloadError error
forkchoiceError error
blks map[[32]byte]*enginev1.ExecutionBlock
}
func (m *mockEngineService) NewPayload(context.Context, *enginev1.ExecutionPayload) ([]byte, error) {
return nil, m.newPayloadError
}
func (m *mockEngineService) ForkchoiceUpdated(context.Context, *enginev1.ForkchoiceState, *enginev1.PayloadAttributes) (*enginev1.PayloadIDBytes, []byte, error) {
return nil, nil, m.forkchoiceError
}
func (*mockEngineService) GetPayloadV1(
_ context.Context, _ enginev1.PayloadIDBytes,
) *enginev1.ExecutionPayload {
return nil
}
func (*mockEngineService) GetPayload(context.Context, [8]byte) (*enginev1.ExecutionPayload, error) {
return nil, nil
}
func (*mockEngineService) ExchangeTransitionConfiguration(context.Context, *enginev1.TransitionConfiguration) error {
return nil
}
func (*mockEngineService) LatestExecutionBlock(context.Context) (*enginev1.ExecutionBlock, error) {
return nil, nil
}
func (m *mockEngineService) ExecutionBlockByHash(_ context.Context, hash common.Hash) (*enginev1.ExecutionBlock, error) {
blk, ok := m.blks[common.BytesToHash(hash.Bytes())]
if !ok {
return nil, errors.New("block not found")
}
return blk, nil
}

View File

@@ -25,11 +25,11 @@ func TestService_newSlot(t *testing.T) {
}
ctx := context.Background()
require.NoError(t, fcs.ProcessBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0, false)) // genesis
require.NoError(t, fcs.ProcessBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0, false)) // finalized
require.NoError(t, fcs.ProcessBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false)) // justified
require.NoError(t, fcs.ProcessBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0, false)) // best justified
require.NoError(t, fcs.ProcessBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0, false)) // bad
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)) // genesis
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)) // finalized
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // justified
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // best justified
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)) // bad
type args struct {
slot types.Slot
@@ -86,9 +86,9 @@ func TestService_newSlot(t *testing.T) {
for _, test := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
store := store.New(test.args.justified, test.args.finalized)
store.SetBestJustifiedCheckpt(test.args.bestJustified)
service.store = store
s := store.New(test.args.justified, test.args.finalized)
s.SetBestJustifiedCheckpt(test.args.bestJustified)
service.store = s
require.NoError(t, service.NewSlot(ctx, test.args.slot))
if test.args.shouldEqual {

View File

@@ -7,29 +7,28 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// notifyForkchoiceUpdate signals the execution engine of fork choice updates. The execution engine should:
// 1. Re-organize the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Apply finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
return nil, errors.New("nil head block")
}
// Must not call fork choice updated until the transition conditions are met on the Pow network.
if isPreBellatrix(headBlk.Version()) {
return nil, nil
}
isExecutionBlk, err := blocks.ExecutionBlock(headBlk.Body())
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not determine if block is execution block")
}
@@ -45,7 +44,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
return nil, errors.Wrap(err, "could not get finalized block")
}
var finalizedHash []byte
if isPreBellatrix(finalizedBlock.Block().Version()) {
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
finalizedHash = params.BeaconConfig().ZeroHash[:]
} else {
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
@@ -65,7 +64,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
case powchain.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
@@ -76,85 +75,96 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
}
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
}
return payloadID, nil
}
// notifyForkchoiceUpdate signals execution engine on a new payload
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int, header *ethpb.ExecutionPayloadHeader, postState state.BeaconState, blk block.SignedBeaconBlock) error {
if postState == nil {
return errors.New("pre and post states must not be nil")
}
// Execution payload is only supported in Bellatrix and beyond.
if isPreBellatrix(postState.Version()) {
return nil
// notifyNewPayload signals the execution engine of a new payload.
// It returns true if the EL has returned VALID for the block.
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postStateVersion int,
preStateHeader, postStateHeader *ethpb.ExecutionPayloadHeader, blk block.SignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
// Execution payload is only supported in Bellatrix and beyond. Pre-merge
// blocks are never optimistic.
if blocks.IsPreBellatrixVersion(postStateVersion) {
return true, nil
}
if err := helpers.BeaconBlockIsNil(blk); err != nil {
return err
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.ExecutionEnabled(postState, blk.Block().Body())
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
if err != nil {
return errors.Wrap(err, "could not determine if execution is enabled")
return false, errors.Wrap(err, "could not determine if execution is enabled")
}
if !enabled {
return nil
return true, nil
}
payload, err := body.ExecutionPayload()
if err != nil {
return errors.Wrap(err, "could not get execution payload")
return false, errors.Wrap(err, "could not get execution payload")
}
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
case powchain.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"slot": postState.Slot(),
"slot": blk.Block().Slot(),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return nil
return false, nil
default:
return errors.Wrap(err, "could not validate execution payload from execution engine")
return false, errors.Wrap(err, "could not validate execution payload from execution engine")
}
}
// During the transition event, the transition block should be verified for sanity.
if isPreBellatrix(preStateVersion) {
return nil
if blocks.IsPreBellatrixVersion(preStateVersion) {
// Handle case where pre-state is Altair but block contains payload.
// To reach here, the block must have contained a valid payload.
return true, s.validateMergeBlock(ctx, blk)
}
atTransition, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(header, body)
atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(preStateHeader, body)
if err != nil {
return errors.Wrap(err, "could not check if merge block is terminal")
return true, errors.Wrap(err, "could not check if merge block is terminal")
}
if !atTransition {
return nil
return true, nil
}
return s.validateMergeBlock(ctx, blk)
}
// isPreBellatrix returns true if input version is before bellatrix fork.
func isPreBellatrix(v int) bool {
return v == version.Phase0 || v == version.Altair
return true, s.validateMergeBlock(ctx, blk)
}
// optimisticCandidateBlock returns true if this block can be optimistically synced.
//
// Spec pseudocode definition:
// def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
// justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
// justified_is_execution_block = is_execution_block(opt_store.blocks[justified_root])
// block_is_deep = block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot
// return justified_is_execution_block or block_is_deep
// if is_execution_block(opt_store.blocks[block.parent_root]):
// return True
//
// if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot:
// return True
//
// return False
func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.BeaconBlock) (bool, error) {
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
return true, nil
}
j := s.store.JustifiedCheckpt()
if j == nil {
return false, errNilJustifiedInStore
}
jBlock, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(j.Root))
parent, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
if err != nil {
return false, err
}
return blocks.ExecutionBlock(jBlock.Block().Body())
if parent == nil {
return false, errNilParentInDB
}
parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
if err != nil {
return false, err
}
return parentIsExecutionBlock, nil
}
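
Taken together, these functions define a per-block contract: notifyNewPayload reports whether the EL returned VALID, optimisticCandidateBlock decides whether a not-yet-validated block may still be imported optimistically, and SetOptimisticToValid records EL validation in fork choice. A condensed, hypothetical sketch of that contract using the names from this file; the real wiring lives in onBlock and onBlockBatch later in this changeset:

// processPayload is a condensed sketch, not a function in this changeset.
func (s *Service) processPayload(ctx context.Context, preVer, postVer int,
	preHdr, postHdr *ethpb.ExecutionPayloadHeader, signed block.SignedBeaconBlock, root [32]byte) error {
	isValid, err := s.notifyNewPayload(ctx, preVer, postVer, preHdr, postHdr, signed)
	if err != nil {
		return errors.Wrap(err, "could not verify new payload")
	}
	if !isValid {
		// EL answered SYNCING/ACCEPTED: only import the block if it qualifies for optimistic sync.
		candidate, err := s.optimisticCandidateBlock(ctx, signed.Block())
		if err != nil {
			return errors.Wrap(err, "could not check if block is optimistic candidate")
		}
		if !candidate {
			return errNotOptimisticCandidate
		}
		return nil
	}
	// EL answered VALID: the block can be marked valid in fork choice.
	return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
}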

View File

@@ -5,9 +5,11 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
@@ -18,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
@@ -36,7 +39,6 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
@@ -45,6 +47,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
tests := []struct {
name string
@@ -131,7 +134,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrAcceptedSyncingPayloadStatus,
newForkchoiceErr: powchain.ErrAcceptedSyncingPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
},
{
@@ -145,7 +148,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrInvalidPayloadStatus,
newForkchoiceErr: powchain.ErrInvalidPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
errString: "could not notify forkchoice update from execution engine: payload status is INVALID",
},
@@ -153,9 +156,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
engine := &mockEngineService{forkchoiceError: tt.newForkchoiceErr}
service.cfg.ExecutionEngineCaller = engine
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, tt.finalizedRoot)
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, service.headRoot(), tt.finalizedRoot)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -188,67 +190,97 @@ func Test_NotifyNewPayload(t *testing.T) {
},
},
}
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
},
}
altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
require.NoError(t, err)
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
tests := []struct {
name string
preState state.BeaconState
postState state.BeaconState
blk block.SignedBeaconBlock
newPayloadErr error
errString string
name string
preState state.BeaconState
postState state.BeaconState
isValidPayload bool
blk block.SignedBeaconBlock
newPayloadErr error
errString string
}{
{
name: "phase 0 post state",
postState: phase0State,
preState: phase0State,
name: "phase 0 post state",
postState: phase0State,
preState: phase0State,
isValidPayload: true,
},
{
name: "altair post state",
postState: altairState,
preState: altairState,
name: "altair post state",
postState: altairState,
preState: altairState,
isValidPayload: true,
},
{
name: "nil post state",
preState: phase0State,
errString: "pre and post states must not be nil",
name: "nil beacon block",
postState: bellatrixState,
preState: bellatrixState,
errString: "signed beacon block can't be nil",
isValidPayload: false,
},
{
name: "nil beacon block",
postState: bellatrixState,
preState: bellatrixState,
errString: "signed beacon block can't be nil",
name: "new payload with optimistic block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: powchain.ErrAcceptedSyncingPayloadStatus,
isValidPayload: false,
},
{
name: "new payload with optimistic block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: engine.ErrAcceptedSyncingPayloadStatus,
name: "new payload with invalid block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: powchain.ErrInvalidPayloadStatus,
errString: "could not validate execution payload from execution engine: payload status is INVALID",
isValidPayload: false,
},
{
name: "new payload with invalid block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: engine.ErrInvalidPayloadStatus,
errString: "could not validate execution payload from execution engine: payload status is INVALID",
name: "altair pre state, altair block",
postState: bellatrixState,
preState: altairState,
blk: altairBlk,
isValidPayload: true,
},
{
name: "altair pre state",
name: "altair pre state, happy case",
postState: bellatrixState,
preState: altairState,
blk: bellatrixBlk,
blk: func() block.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
isValidPayload: true,
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
isValidPayload: false,
},
{
name: "not at merge transition",
@@ -275,13 +307,15 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
return b
}(),
isValidPayload: true,
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
isValidPayload: false,
},
{
name: "happy case",
@@ -301,33 +335,86 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
return b
}(),
isValidPayload: true,
},
}
for _, tt := range tests {
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = engine
var payload *ethpb.ExecutionPayloadHeader
if tt.preState.Version() == version.Bellatrix {
payload, err = tt.preState.LatestExecutionPayloadHeader()
t.Run(tt.name, func(t *testing.T) {
e := &mockPOW.EngineClient{ErrNewPayload: tt.newPayloadErr, BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = e
var payload *ethpb.ExecutionPayloadHeader
if tt.preState.Version() == version.Bellatrix {
payload, err = tt.preState.LatestExecutionPayloadHeader()
require.NoError(t, err)
}
root := [32]byte{'a'}
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0))
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
require.NoError(t, err)
}
err := service.notifyNewPayload(ctx, tt.preState.Version(), payload, tt.postState, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
isValidPayload, err := service.notifyNewPayload(ctx, tt.preState.Version(), postVersion, payload, postHeader, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.isValidPayload, isValidPayload)
}
})
}
}
func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
e := &mockPOW.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = e
payload, err := bellatrixState.LatestExecutionPayloadHeader()
require.NoError(t, err)
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
require.NoError(t, err)
validated, err := service.notifyNewPayload(ctx, bellatrixState.Version(), postVersion, payload, postHeader, bellatrixBlk)
require.NoError(t, err)
require.Equal(t, true, validated)
}
func Test_IsOptimisticCandidateBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
@@ -347,6 +434,12 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
parentBlk := util.NewBeaconBlockBellatrix()
wrappedParentBlock, err := wrapper.WrappedSignedBeaconBlock(parentBlk)
require.NoError(t, err)
parentRoot, err := wrappedParentBlock.Block().HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blk block.BeaconBlock
@@ -358,14 +451,16 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 1
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
@@ -376,14 +471,16 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 200
wr, err := wrapper.WrappedAltairBeaconBlock(blk.Block)
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 32
wr, err := wrapper.WrappedAltairSignedBeaconBlock(blk)
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
@@ -394,57 +491,183 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
want: false,
},
{
name: "shallow block, execution enabled justified chkpt",
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
blk.Block.Body.ExecutionPayload.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
want: true,
},
}
for _, tt := range tests {
jroot, err := tt.justified.Block().HashTreeRoot()
jRoot, err := tt.justified.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
service.store.SetJustifiedCheckpt(
&ethpb.Checkpoint{
Root: jroot[:],
Root: jRoot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
candidate, err := service.optimisticCandidateBlock(ctx, tt.blk)
require.NoError(t, err)
require.Equal(t, tt.want, candidate, tt.name)
}
}
func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
payload := &v1.ExecutionPayload{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BaseFeePerGas: bytesutil.PadTo([]byte{1, 2, 3, 4}, fieldparams.RootLength),
BlockHash: make([]byte, 32),
BlockNumber: 100,
}
body := &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: payload}
b := &ethpb.BeaconBlockBellatrix{Body: body, Slot: 200}
rawSigned := &ethpb.SignedBeaconBlockBellatrix{Block: b}
blk := util.HydrateSignedBeaconBlockBellatrix(rawSigned)
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wr))
blkRoot, err := wr.Block().HashTreeRoot()
require.NoError(t, err)
childBlock := util.NewBeaconBlockBellatrix()
childBlock.Block.ParentRoot = blkRoot[:]
// shallow block
childBlock.Block.Slot = 201
wrappedChild, err := wrapper.WrappedSignedBeaconBlock(childBlock)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedChild))
candidate, err := service.optimisticCandidateBlock(ctx, wrappedChild.Block())
require.NoError(t, err)
require.Equal(t, true, candidate)
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
stateGen := stategen.New(beaconDB)
fcs := protoarray.New(0, 0, [32]byte{})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
wr, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wr))
genesisRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash,
params.BeaconConfig().ZeroHash, 0, 0))
genesisSummary := &ethpb.StateSummary{
Root: genesisStateRoot[:],
Slot: 0,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, genesisSummary))
// Get last validated checkpoint
origCheckpoint, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, origCheckpoint))
// Optimistic finalized checkpoint
blk := util.NewBeaconBlock()
blk.Block.Slot = 320
blk.Block.ParentRoot = genesisRoot[:]
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
opRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
opCheckpoint := &ethpb.Checkpoint{
Root: opRoot[:],
Epoch: 10,
}
opStateSummary := &ethpb.StateSummary{
Root: opRoot[:],
Slot: 320,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 320, opRoot, genesisRoot,
params.BeaconConfig().ZeroHash, 10, 10))
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))
require.NoError(t, service.updateFinalized(ctx, opCheckpoint))
cp, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
require.DeepEqual(t, origCheckpoint.Root, cp.Root)
require.Equal(t, origCheckpoint.Epoch, cp.Epoch)
// Validated finalized checkpoint
blk = util.NewBeaconBlock()
blk.Block.Slot = 640
blk.Block.ParentRoot = opRoot[:]
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
validRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
validCheckpoint := &ethpb.Checkpoint{
Root: validRoot[:],
Epoch: 20,
}
validSummary := &ethpb.StateSummary{
Root: validRoot[:],
Slot: 640,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 640, validRoot, params.BeaconConfig().ZeroHash,
params.BeaconConfig().ZeroHash, 20, 20))
require.NoError(t, fcs.SetOptimisticToValid(ctx, validRoot))
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validRoot))
require.NoError(t, service.updateFinalized(ctx, validCheckpoint))
cp, err = service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
optimistic, err := service.IsOptimisticForRoot(ctx, validRoot)
require.NoError(t, err)
require.Equal(t, false, optimistic)
require.DeepEqual(t, validCheckpoint.Root, cp.Root)
require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -52,7 +51,7 @@ func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
}
// WithExecutionEngineCaller to call execution engine.
func WithExecutionEngineCaller(c enginev1.Caller) Option {
func WithExecutionEngineCaller(c powchain.EngineCaller) Option {
return func(s *Service) error {
s.cfg.ExecutionEngineCaller = c
return nil
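
With the option now typed against powchain.EngineCaller, tests can inject the shared powchain mock instead of the package-local mockEngineService that this changeset deletes. A hypothetical construction fragment, assuming ctx, beaconDB, and t come from the usual test setup shown elsewhere in this diff:

engine := &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrAcceptedSyncingPayloadStatus}
service, err := NewService(ctx,
	WithDatabase(beaconDB),
	WithStateGen(stategen.New(beaconDB)),
	WithExecutionEngineCaller(engine), // any powchain.EngineCaller works here
)
require.NoError(t, err)
// service is then exercised exactly as in the tests above.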

View File

@@ -9,6 +9,7 @@ import (
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
mocks "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
@@ -117,13 +118,13 @@ func Test_validateMergeBlock(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
engine.blks[[32]byte{'a'}] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
@@ -158,12 +159,12 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
h := [32]byte{'a'}
p := [32]byte{'b'}
td := "0x1"
engine.blks[h] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: td,
}
@@ -175,18 +176,18 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
require.ErrorContains(t, "could not get pow block: block not found", err)
engine.blks[h] = nil
engine.BlockByHashMap[h] = nil
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "pow block is nil", err)
engine.blks[h] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: "1",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)
engine.blks[h] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
}

View File

@@ -39,13 +39,17 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
BlkWithOutState := util.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
require.NoError(t, err)
BlkWithStateBadAtt := util.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -56,7 +60,9 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
BlkWithValidState := util.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
@@ -145,13 +151,17 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
BlkWithOutState := util.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
require.NoError(t, err)
BlkWithStateBadAtt := util.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -162,7 +172,9 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
BlkWithValidState := util.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
@@ -256,7 +268,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
@@ -282,7 +294,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
@@ -438,7 +450,9 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 2
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
@@ -455,7 +469,9 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 2
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
@@ -478,7 +494,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
@@ -486,7 +504,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -509,7 +529,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
@@ -517,7 +539,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -534,7 +558,9 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
@@ -543,7 +569,9 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -571,8 +599,8 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, 0, 0))
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
require.NoError(t, err)

View File

@@ -6,11 +6,13 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
@@ -107,20 +109,42 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err != nil {
return err
}
if err := s.notifyNewPayload(ctx, preStateVersion, preStateHeader, postState, signed); err != nil {
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
return err
}
isValidPayload, err := s.notifyNewPayload(ctx, preStateVersion, postStateVersion, preStateHeader, postStateHeader, signed)
if err != nil {
return errors.Wrap(err, "could not verify new payload")
}
if !isValidPayload {
candidate, err := s.optimisticCandidateBlock(ctx, b)
if err != nil {
return errors.Wrap(err, "could not check if block is optimistic candidate")
}
if !candidate {
return errNotOptimisticCandidate
}
}
// TODO(10261) Check optimistic status
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */, false /*optimistic sync*/); err != nil {
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
// We add a proposer score boost to fork choice for the block root if applicable, right after
// running a successful state transition for the block.
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(
ctx, signed.Block().Slot(), blockRoot, s.genesisTime,
); err != nil {
secondsIntoSlot := uint64(time.Since(s.genesisTime).Seconds()) % params.BeaconConfig().SecondsPerSlot
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(ctx, &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: signed.Block().Slot(),
CurrentSlot: slots.SinceGenesis(s.genesisTime),
SecondsIntoSlot: secondsIntoSlot,
}); err != nil {
return err
}
@@ -180,12 +204,16 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
return errors.Wrap(err, msg)
}
if err := s.updateHead(ctx, balances); err != nil {
headRoot, err := s.updateHead(ctx, balances)
if err != nil {
log.WithError(err).Warn("Could not update head")
}
if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), bytesutil.ToBytes32(finalized.Root)); err != nil {
if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), s.headRoot(), bytesutil.ToBytes32(finalized.Root)); err != nil {
return err
}
if err := s.saveHead(ctx, headRoot); err != nil {
return errors.Wrap(err, "could not save head")
}
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
return err
@@ -230,14 +258,19 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
go func() {
// Send an event regarding the new finalized checkpoint over a common event feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: signed.Block().StateRoot(),
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: signed.Block().StateRoot(),
ExecutionOptimistic: isOptimistic,
},
})
@@ -258,14 +291,17 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return s.handleEpochBoundary(ctx, postState)
}
func getStateVersionAndPayload(preState state.BeaconState) (int, *ethpb.ExecutionPayloadHeader, error) {
func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPayloadHeader, error) {
if st == nil {
return 0, nil, errors.New("nil state")
}
var preStateHeader *ethpb.ExecutionPayloadHeader
var err error
preStateVersion := preState.Version()
preStateVersion := st.Version()
switch preStateVersion {
case version.Phase0, version.Altair:
default:
preStateHeader, err = preState.LatestExecutionPayloadHeader()
preStateHeader, err = st.LatestExecutionPayloadHeader()
if err != nil {
return 0, nil, err
}
@@ -281,6 +317,11 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
if len(blks) == 0 || len(blockRoots) == 0 {
return nil, nil, errors.New("no blocks provided")
}
if len(blks) != len(blockRoots) {
return nil, nil, errWrongBlockCount
}
if err := helpers.BeaconBlockIsNil(blks[0]); err != nil {
return nil, nil, err
}
@@ -305,9 +346,24 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
PublicKeys: []bls.PublicKey{},
Messages: [][32]byte{},
}
type versionAndHeader struct {
version int
header *ethpb.ExecutionPayloadHeader
}
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
var set *bls.SignatureBatch
boundaries := make(map[[32]byte]state.BeaconState)
for i, b := range blks {
v, h, err := getStateVersionAndPayload(preState)
if err != nil {
return nil, nil, err
}
preVersionAndHeaders[i] = &versionAndHeader{
version: v,
header: h,
}
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return nil, nil, err
@@ -315,12 +371,18 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
boundaries[blockRoots[i]] = preState.Copy()
if err := s.handleEpochBoundary(ctx, preState); err != nil {
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
}
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
fCheckpoints[i] = preState.FinalizedCheckpoint()
v, h, err = getStateVersionAndPayload(preState)
if err != nil {
return nil, nil, err
}
postVersionAndHeaders[i] = &versionAndHeader{
version: v,
header: h,
}
sigSet.Join(set)
}
verify, err := sigSet.Verify()
@@ -330,6 +392,43 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
if !verify {
return nil, nil, errors.New("batch block signature verification failed")
}
// Blocks have been verified; add them to fork choice and call the engine.
for i, b := range blks {
s.saveInitSyncBlock(blockRoots[i], b)
isValidPayload, err := s.notifyNewPayload(ctx,
preVersionAndHeaders[i].version,
postVersionAndHeaders[i].version,
preVersionAndHeaders[i].header,
postVersionAndHeaders[i].header, b)
if err != nil {
return nil, nil, err
}
if !isValidPayload {
candidate, err := s.optimisticCandidateBlock(ctx, b.Block())
if err != nil {
return nil, nil, errors.Wrap(err, "could not check if block is optimistic candidate")
}
if !candidate {
return nil, nil, errNotOptimisticCandidate
}
}
if err := s.insertBlockToForkChoiceStore(ctx, b.Block(), blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
return nil, nil, err
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoots[i]); err != nil {
return nil, nil, errors.Wrap(err, "could not set optimistic block to valid")
}
}
if _, err := s.notifyForkchoiceUpdate(ctx, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
return nil, nil, err
}
}
for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return nil, nil, err
@@ -350,13 +449,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
// handles a block after the block's batch has been verified, where we can save the block and
// its state summary and split them off to relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
b := signed.Block()
s.saveInitSyncBlock(blockRoot, signed)
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint, optimistic); err != nil {
return err
}
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: signed.Block().Slot(),
@@ -442,13 +535,13 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// This feeds the block and the block's attestations into the fork choice store, allowing the
// fork choice store to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk block.BeaconBlock, root [32]byte,
st state.BeaconState, optimistic bool) error {
st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
defer span.End()
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint, optimistic); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block's attestations to fork choice store.
@@ -467,24 +560,37 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
}
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.BeaconBlock,
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
// TODO(10261): check if the blocks are optimistic or not when filling fork choice
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block to fork choice store.
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()),
jCheckpoint.Epoch,
fCheckpoint.Epoch, optimistic); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
payloadHash, err := getBlockPayloadHash(blk)
if err != nil {
return err
}
return nil
return s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()), payloadHash,
jCheckpoint.Epoch,
fCheckpoint.Epoch)
}
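// getBlockPayloadHash returns the block hash of the execution payload for Bellatrix
// and later blocks; for pre-Bellatrix blocks, which have no execution payload, it
// returns the zero hash.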
func getBlockPayloadHash(blk block.BeaconBlock) ([32]byte, error) {
payloadHash := [32]byte{}
if blocks.IsPreBellatrixVersion(blk.Version()) {
return payloadHash, nil
}
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return payloadHash, err
}
return bytesutil.ToBytes32(payload.BlockHash), nil
}
// This saves post state info to the DB or cache and to the fork choice store. Post state info
// consists of the processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool, optimistic bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
@@ -495,7 +601,7 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st, optimistic); err != nil {
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
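
For reference, a minimal self-contained sketch (not part of this diff) of the gate that onBlockBatch applies above before inserting each verified block: a payload the engine has not yet validated is only accepted when the block qualifies as an optimistic candidate. The payloadNotifier interface and acceptBlock function below are illustrative stand-ins, not the real Prysm types.

package sketch

import (
    "context"
    "errors"
)

var errNotOptimisticCandidate = errors.New("block is not an optimistic candidate")

// payloadNotifier stands in for the engine call made through notifyNewPayload.
type payloadNotifier interface {
    // NotifyNewPayload reports whether the execution engine validated the payload.
    NotifyNewPayload(ctx context.Context, root [32]byte) (bool, error)
}

// acceptBlock mirrors the per-block gate: a block whose payload is not (yet) valid is
// only accepted when it is an optimistic candidate; otherwise the batch is rejected.
func acceptBlock(ctx context.Context, n payloadNotifier, root [32]byte, isOptimisticCandidate bool) (bool, error) {
    valid, err := n.NotifyNewPayload(ctx, root)
    if err != nil {
        return false, err
    }
    if !valid && !isOptimisticCandidate {
        return false, errNotOptimisticCandidate
    }
    return valid, nil
}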

View File

@@ -247,10 +247,19 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
}
fRoot := bytesutil.ToBytes32(cp.Root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
if err != nil {
return err
}
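// Only persist the last validated checkpoint when the finalized root has itself been
// validated by the execution engine; optimistically imported roots are skipped here.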
if !optimistic {
err = s.cfg.BeaconDB.SaveLastValidatedCheckpoint(ctx, cp)
if err != nil {
return err
}
}
if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not migrate to cold")
}
return nil
}
@@ -366,14 +375,17 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.B
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r := pendingRoots[i]
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()),
payloadHash, err := getBlockPayloadHash(blk)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()), payloadHash,
jCheckpoint.Epoch,
fCheckpoint.Epoch, false /* optimistic status */); err != nil {
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
}
return nil
}
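
The forkchoice call-shape change running through this and the following files, side by side (both call sites are taken verbatim from the diffs below; the trailing `false` was the explicit optimistic flag):

// Before:
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0, false))
// After: the execution payload hash replaces the flag, and validity is recorded separately via SetOptimisticToValid.
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, 0, 0))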

View File

@@ -18,6 +18,7 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
@@ -50,7 +51,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -61,7 +64,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
random := util.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(random)))
wsb, err = wrapper.WrappedSignedBeaconBlock(random)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
@@ -126,7 +131,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
err = service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.blk), root)
wsb, err = wrapper.WrappedSignedBeaconBlock(tt.blk)
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
@@ -147,7 +154,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -158,7 +167,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
random := util.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(random)))
wsb, err = wrapper.WrappedSignedBeaconBlock(random)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
@@ -223,7 +234,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
err = service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.blk), root)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.blk)
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
@@ -241,7 +254,13 @@ func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, 0, [32]byte{'A'}, time.Now()))
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: [32]byte{'A'},
BlockSlot: types.Slot(0),
CurrentSlot: 0,
SecondsIntoSlot: 0,
}
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, args))
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0,
params.BeaconConfig().ZeroHash, []uint64{}, 0)
require.ErrorContains(t, "could not apply proposer boost score: invalid proposer boost root", err)
@@ -260,13 +279,17 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
@@ -278,15 +301,21 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
for i := 1; i < 10; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
@@ -295,6 +324,8 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
}
@@ -312,13 +343,17 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
@@ -330,15 +365,21 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
for i := 1; i < 10; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
@@ -347,10 +388,66 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
}
func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesis := blocks.NewGenesisBlock(params.BeaconConfig().ZeroHash[:])
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
bState := st.Copy()
var blks []block.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
blkCount := 4
for i := 1; i <= blkCount; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
rBlock, err := blks[0].PbPhase0Block()
assert.NoError(t, err)
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
cp1, cp2, err := service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
require.Equal(t, blkCount-1, len(cp1))
require.Equal(t, blkCount-1, len(cp2))
}
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -373,8 +470,12 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
wsb, err = wrapper.WrappedSignedBeaconBlock(lastJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -401,8 +502,12 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
wsb, err = wrapper.WrappedSignedBeaconBlock(lastJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -430,8 +535,12 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(newJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
wsb, err = wrapper.WrappedSignedBeaconBlock(lastJustifiedBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -458,12 +567,16 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
b := util.NewBeaconBlock()
b.Block.Slot = 1
@@ -489,12 +602,16 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
b := util.NewBeaconBlock()
b.Block.Slot = 1
@@ -517,12 +634,16 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
b := util.NewBeaconBlock()
b.Block.Slot = 1
@@ -536,7 +657,9 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b).Block()))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block()))
}
func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
@@ -552,7 +675,9 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, err)
signedBlock := util.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(signedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
@@ -591,7 +716,9 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -602,12 +729,14 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -632,7 +761,9 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -643,12 +774,15 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -673,7 +807,9 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -684,12 +820,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -717,7 +856,9 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -728,12 +869,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -762,7 +906,9 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st, err := util.NewBeaconState()
@@ -773,23 +919,29 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
// Define a tree branch, slot 63 <- 64 <- 65
b63 := util.NewBeaconBlock()
b63.Block.Slot = 63
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b63)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b63)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := util.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b64)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b64)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b65)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
beaconState, _ := util.DeterministicGenesisState(t, 32)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b65).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
@@ -815,7 +967,9 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st, err := util.NewBeaconState()
@@ -826,23 +980,29 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
// Define a tree branch, slot 63 <- 64 <- 65
b63 := util.NewBeaconBlock()
b63.Block.Slot = 63
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b63)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b63)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := util.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b64)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b64)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b65)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
beaconState, _ := util.DeterministicGenesisState(t, 32)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b65).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
@@ -922,7 +1082,9 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
beaconBlock := util.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
if err := beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)); err != nil {
wsb, err := wrapper.WrappedSignedBeaconBlock(beaconBlock)
require.NoError(t, err)
if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil {
return nil, err
}
if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
@@ -990,7 +1152,9 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
beaconBlock := util.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(beaconBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
}
// Slots 100 to 200 are skip slots. Requesting the root at slot 150 will yield the root at slot 100, the last physical block.
@@ -1035,7 +1199,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0, false)) // Saves blocks to fork choice store.
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, 0, 0)) // Saves blocks to fork choice store.
}
r, err := service.ancestor(context.Background(), r200[:], 150)
@@ -1077,10 +1241,12 @@ func TestAncestor_CanUseDB(t *testing.T) {
beaconBlock := util.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
wsb, err := wrapper.WrappedSignedBeaconBlock(beaconBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb)) // Saves blocks to DB.
}
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, 0, 0))
r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)
@@ -1117,14 +1283,18 @@ func TestVerifyBlkDescendant(t *testing.T) {
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b1)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
type args struct {
parentRoot [32]byte
@@ -1188,7 +1358,9 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
gBlk := util.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(gBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(gBlk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: gRoot[:]}))
beaconState, _ := util.DeterministicGenesisState(t, 32)
@@ -1264,7 +1436,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1316,7 +1490,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1380,7 +1556,9 @@ func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wsb))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
@@ -1402,7 +1580,9 @@ func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wsb))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
}
@@ -1448,9 +1628,9 @@ func Test_getStateVersionAndPayload(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
version, header, err := getStateVersionAndPayload(tt.st)
ver, header, err := getStateVersionAndPayload(tt.st)
require.NoError(t, err)
require.Equal(t, tt.version, version)
require.Equal(t, tt.version, ver)
require.DeepEqual(t, tt.header, header)
})
}
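
Most of the test churn above follows one mechanical migration: the fork-agnostic wrapper constructor can fail, so each call site gains an error check. Roughly (pattern copied from the diffs in this file):

// Before: infallible, phase0-only wrapper, used inline at the call site.
// wrapper.WrappedPhase0SignedBeaconBlock(b)

// After: fork-agnostic wrapper whose constructor returns an error.
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)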

View File

@@ -161,14 +161,39 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
log.WithError(err).Errorf("Unable to get justified balances for root %v", justified.Root)
continue
}
if err := s.updateHead(s.ctx, balances); err != nil {
newHeadRoot, err := s.updateHead(s.ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
s.notifyEngineIfChangedHead(s.ctx, newHeadRoot)
}
}
}()
}
// notifyEngineIfChangedHead calls notifyForkchoiceUpdate and saves the new head root in the event that the head has changed.
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
if s.headRoot() == newHeadRoot {
return
}
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
_, err := s.notifyForkchoiceUpdate(s.ctx,
s.headBlock().Block(),
s.headRoot(),
bytesutil.ToBytes32(finalized.Root),
)
if err != nil {
log.WithError(err).Error("could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot); err != nil {
log.WithError(err).Error("could not save head")
}
}
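// Errors in notifyEngineIfChangedHead are logged rather than returned, so a failed
// forkchoice update or head save does not interrupt the attestation routine above.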
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context) {
atts := s.cfg.AttPool.ForkchoiceAttestations()

View File

@@ -47,13 +47,17 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -74,13 +78,17 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -111,9 +119,45 @@ func TestProcessAttestations_Ok(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx)
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
}
func TestNotifyEngineIfChangedHead(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.notifyEngineIfChangedHead(ctx, service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.LogsContain(t, hook, finalizedErr)
hook.Reset()
b := util.NewBeaconBlock()
b.Block.Slot = 1
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
finalized := &ethpb.Checkpoint{Root: r[:], Epoch: 0}
service.head = &head{
slot: 1,
root: r,
block: wsb,
}
service.store.SetFinalizedCheckpt(finalized)
service.notifyEngineIfChangedHead(ctx, [32]byte{'b'})
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
}

View File

@@ -64,7 +64,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block block.SignedBeaconBloc
return err
}
// Log state transition data.
logStateTransitionData(blockCopy.Block())
if err := logStateTransitionData(blockCopy.Block()); err != nil {
return err
}
return nil
}
@@ -86,8 +88,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
for i, b := range blocks {
blockCopy := b.Copy()
// TODO(10261) check optimistic status
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i], false /*optimistic status*/); err != nil {
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}

View File

@@ -132,6 +132,7 @@ func TestService_ReceiveBlock(t *testing.T) {
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
WithFinalizedStateAtStartUp(genesis),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -143,7 +144,9 @@ func TestService_ReceiveBlock(t *testing.T) {
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
err = s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block), root)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
require.NoError(t, err)
err = s.ReceiveBlock(ctx, wsb, root)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
@@ -184,7 +187,9 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
require.NoError(t, s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b), root))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.ReceiveBlock(ctx, wsb, root))
wg.Done()
}()
wg.Wait()
@@ -260,7 +265,9 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
blks := []block.SignedBeaconBlock{wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block)}
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
require.NoError(t, err)
blks := []block.SignedBeaconBlock{wsb}
roots := [][32]byte{root}
err = s.ReceiveBlockBatch(ctx, blks, roots)
if tt.wantedErr != "" {
@@ -282,7 +289,9 @@ func TestService_HasInitSyncBlock(t *testing.T) {
if s.HasInitSyncBlock(r) {
t.Error("Should not have block")
}
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()))
wsb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
s.saveInitSyncBlock(r, wsb)
if !s.HasInitSyncBlock(r) {
t.Error("Should have block")
}

View File

@@ -3,6 +3,7 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"runtime"
@@ -28,7 +29,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
@@ -87,7 +87,7 @@ type config struct {
WeakSubjectivityCheckpt *ethpb.Checkpoint
BlockFetcher powchain.POWBlockFetcher
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller enginev1.Caller
ExecutionEngineCaller powchain.EngineCaller
}
// NewService instantiates a new block service instance that will
@@ -127,7 +127,7 @@ func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
if saved != nil && !saved.IsNil() {
if err := s.startFromSavedState(saved); err != nil {
if err := s.StartFromSavedState(saved); err != nil {
log.Fatal(err)
}
} else {
@@ -164,9 +164,9 @@ func (s *Service) Status() error {
return nil
}
func (s *Service) startFromSavedState(saved state.BeaconState) error {
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0)
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
originRoot, err := s.originRootFromSavedState(s.ctx)
@@ -190,22 +190,45 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {
}
s.store = store.New(justified, finalized)
var store f.ForkChoicer
var f f.ForkChoicer
fRoot := bytesutil.ToBytes32(finalized.Root)
if features.Get().EnableForkChoiceDoublyLinkedTree {
store = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
f = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
} else {
store = protoarray.New(justified.Epoch, finalized.Epoch, bytesutil.ToBytes32(finalized.Root))
f = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
}
s.cfg.ForkChoiceStore = store
ss, err := slots.EpochStart(finalized.Epoch)
s.cfg.ForkChoiceStore = f
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
if err != nil {
return errors.Wrap(err, "could not get start slot of finalized epoch")
return errors.Wrap(err, "could not get finalized checkpoint block")
}
if fb == nil {
return errNilFinalizedInStore
}
payloadHash, err := getBlockPayloadHash(fb.Block())
if err != nil {
return errors.Wrap(err, "could not get execution payload hash")
}
fSlot := fb.Block().Slot()
if err := f.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
payloadHash, justified.Epoch, finalized.Epoch); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
if err := f.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
h := s.headBlock().Block()
if h.Slot() > ss {
if h.Slot() > fSlot {
log.WithFields(logrus.Fields{
"startSlot": ss,
"startSlot": fSlot,
"endSlot": h.Slot(),
}).Info("Loading blocks to fork choice store, this may take a while.")
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, finalized, justified); err != nil {
@@ -233,7 +256,7 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {
func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error) {
// first check if we have started from checkpoint sync and have a root
originRoot, err := s.cfg.BeaconDB.OriginBlockRoot(ctx)
originRoot, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
if err == nil {
return originRoot, nil
}
@@ -241,7 +264,7 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
return originRoot, errors.Wrap(err, "could not retrieve checkpoint sync chain origin data from db")
}
// we got here because OriginBlockRoot gave us an ErrNotFound. this means the node was started from a genesis state,
// we got here because OriginCheckpointBlockRoot gave us an ErrNotFound. this means the node was started from a genesis state,
// so we should have a value for GenesisBlock
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil {
@@ -338,9 +361,9 @@ func (s *Service) startFromPOWChain() error {
defer stateSub.Unsubscribe()
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.ChainStarted {
data, ok := event.Data.(*statefeed.ChainStartedData)
case e := <-stateChannel:
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("event data is not type *statefeed.ChainStartedData")
return
@@ -449,14 +472,22 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.store = store.New(genesisCheckpoint, genesisCheckpoint)
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
payloadHash, err := getBlockPayloadHash(genesisBlk.Block())
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
genesisBlk.Block().Slot(),
genesisBlkRoot,
params.BeaconConfig().ZeroHash,
payloadHash,
genesisCheckpoint.Epoch,
genesisCheckpoint.Epoch, false /* optimistic status */); err != nil {
genesisCheckpoint.Epoch); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
log.Fatalf("Could not set optimistic status of genesis block to false: %v", err)
}
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
return nil
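
A condensed, self-contained sketch (not part of this diff) of the forkchoice seeding that StartFromSavedState performs earlier in this file: the finalized checkpoint block always enters forkchoice optimistically with its payload hash, and is promoted to valid only when it matches the last checkpoint recorded as validated. The forkchoiceSeeder interface and plain uint64 slot/epoch parameters are illustrative stand-ins for the real Prysm types.

package sketch

import (
    "bytes"
    "context"
)

// forkchoiceSeeder stands in for the two forkchoice calls used during startup.
type forkchoiceSeeder interface {
    InsertOptimisticBlock(ctx context.Context, slot uint64, root, parentRoot, payloadHash [32]byte, justifiedEpoch, finalizedEpoch uint64) error
    SetOptimisticToValid(ctx context.Context, root [32]byte) error
}

// seedFinalized inserts the finalized block optimistically and marks it valid only if
// it equals the last checkpoint the execution engine has already validated.
func seedFinalized(ctx context.Context, f forkchoiceSeeder, slot uint64, root, payloadHash, lastValidatedRoot [32]byte, justifiedEpoch, finalizedEpoch uint64) error {
    if err := f.InsertOptimisticBlock(ctx, slot, root, [32]byte{}, payloadHash, justifiedEpoch, finalizedEpoch); err != nil {
        return err
    }
    if bytes.Equal(root[:], lastValidatedRoot[:]) {
        return f.SetOptimisticToValid(ctx, root)
    }
    return nil
}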

View File

@@ -24,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
@@ -74,10 +75,14 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
endpoint := "http://127.0.0.1"
ctx := context.Background()
var web3Service *powchain.Service
var err error
srv, endpoint, err := mockPOW.SetupRPCServer()
require.NoError(t, err)
t.Cleanup(func() {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
@@ -144,7 +149,9 @@ func TestChainStartStop_Initialized(t *testing.T) {
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(1))
@@ -153,6 +160,11 @@ func TestChainStartStop_Initialized(t *testing.T) {
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
ss := &ethpb.StateSummary{
Slot: 1,
Root: blkRoot[:],
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
chainService.cfg.FinalizedStateAtStartUp = s
// Test the start function.
chainService.Start()
@@ -174,12 +186,16 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
chainService.cfg.FinalizedStateAtStartUp = s
// Test the start function.
chainService.Start()
@@ -243,7 +259,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(0))
@@ -270,7 +288,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -284,13 +304,17 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.startFromSavedState(headState))
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, c.StartFromSavedState(headState))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headBlock, headBlk.Proto(), "Head block incorrect")
@@ -314,7 +338,9 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -328,17 +354,27 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}))
ss := &ethpb.StateSummary{
Slot: finalizedSlot,
Root: headRoot[:],
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: headRoot[:], Epoch: slots.ToEpoch(finalizedSlot)}))
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.startFromSavedState(headState))
require.NoError(t, c.StartFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.originBlockRoot, "Genesis block root incorrect")
assert.DeepEqual(t, genesis, c.head.block.Proto())
assert.DeepEqual(t, headBlock, c.head.block.Proto())
}
func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
@@ -359,14 +395,18 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
finalizedBlock := util.NewBeaconBlock()
finalizedBlock.Block.Slot = finalizedSlot
finalizedBlock.Block.ParentRoot = genesisRoot[:]
finalizedRoot, err := finalizedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(finalizedBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(finalizedBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
// Set the head slot close to the finalization point so that no head sync is triggered.
headBlock := util.NewBeaconBlock()
@@ -374,15 +414,16 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, finalizedRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, finalizedRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: slots.ToEpoch(finalizedBlock.Block.Slot),
@@ -391,9 +432,10 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.startFromSavedState(headState))
require.NoError(t, c.StartFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
@@ -409,7 +451,9 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err = headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
@@ -438,7 +482,9 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
newState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r, newState))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState))
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
require.NoError(t, err)
@@ -455,12 +501,14 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := util.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -474,12 +522,14 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := util.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -494,10 +544,12 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
cancel: cancel,
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
}
block := util.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
s.saveInitSyncBlock(r, wsb)
require.NoError(t, s.Stop())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}
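The recurring change in these test hunks swaps the panic-free wrapper.WrappedPhase0SignedBeaconBlock helper for the fork-agnostic wrapper.WrappedSignedBeaconBlock, which returns an error that must be checked before the wrapped block is saved or handed to the chain service. A minimal sketch of that calling pattern, using only helpers that already appear in these hunks (the test package name below is a hypothetical stand-in):
package blockchain_test // hypothetical package name for illustration
import (
	"testing"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
)
// TestWrapSignedBeaconBlock sketches the wrap-then-check-error pattern used above
// before a block is saved to the DB or fed to the service.
func TestWrapSignedBeaconBlock(t *testing.T) {
	blk := util.NewBeaconBlock() // phase0 test block with zero-valued defaults
	wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
	require.NoError(t, err) // unlike WrappedPhase0SignedBeaconBlock, this constructor can fail
	_, err = wsb.Block().HashTreeRoot() // the wrapper exposes the version-agnostic block interface
	require.NoError(t, err)
}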
@@ -522,9 +574,11 @@ func BenchmarkHasBlockDB(b *testing.B) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
block := util.NewBeaconBlock()
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
r, err := block.Block.HashTreeRoot()
blk := util.NewBeaconBlock()
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wsb))
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
b.ResetTimer()
@@ -541,13 +595,15 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := block.Block.HashTreeRoot()
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -562,13 +618,15 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := block.Block.HashTreeRoot()
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -24,7 +24,7 @@ type stateByRooter interface {
StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}
// newStateBalanceCache exists to remind us that stateBalanceCache needs a stagegen
// newStateBalanceCache exists to remind us that stateBalanceCache needs a state gen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
if sg == nil {

View File

@@ -15,15 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
type mockStateByRoot struct {
state state.BeaconState
err error
}
func (m *mockStateByRoot) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
return m.state, m.err
}
type testStateOpt func(*ethpb.BeaconStateAltair)
func testStateWithValidators(v []*ethpb.Validator) testStateOpt {

View File

@@ -61,6 +61,7 @@ type ChainService struct {
SyncCommitteePubkeys [][]byte
Genesis time.Time
ForkChoiceStore forkchoice.ForkChoicer
ReceiveBlockMockErr error
}
// ForkChoicer mocks the same method in the chain service
@@ -195,32 +196,35 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.Signe
if s.State == nil {
return ErrNilState
}
for _, block := range blks {
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
for _, b := range blks {
if !bytes.Equal(s.Root, b.Block().ParentRoot()) {
return errors.Errorf("wanted %#x but got %#x", s.Root, b.Block().ParentRoot())
}
if err := s.State.SetSlot(block.Block().Slot()); err != nil {
if err := s.State.SetSlot(b.Block().Slot()); err != nil {
return err
}
s.BlocksReceived = append(s.BlocksReceived, block)
signingRoot, err := block.Block().HashTreeRoot()
s.BlocksReceived = append(s.BlocksReceived, b)
signingRoot, err := b.Block().HashTreeRoot()
if err != nil {
return err
}
if s.DB != nil {
if err := s.DB.SaveBlock(ctx, block); err != nil {
if err := s.DB.SaveBlock(ctx, b); err != nil {
return err
}
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, b.Block().Slot())
}
s.Root = signingRoot[:]
s.Block = block
s.Block = b
}
return nil
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}
if s.State == nil {
return ErrNilState
}
@@ -417,7 +421,7 @@ func (s *ChainService) HeadValidatorIndexToPublicKey(_ context.Context, _ types.
}
// HeadSyncCommitteeIndices mocks HeadSyncCommitteeIndices and always return `HeadNextSyncCommitteeIndices`.
func (s *ChainService) HeadSyncCommitteeIndices(_ context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
func (s *ChainService) HeadSyncCommitteeIndices(_ context.Context, _ types.ValidatorIndex, _ types.Slot) ([]types.CommitteeIndex, error) {
return s.SyncCommitteeIndices, nil
}

View File

@@ -36,6 +36,7 @@ func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (
// per 7342, a nil checkpoint, zero-root or zero-epoch should all fail validation
// and return an error instead of creating a WeakSubjectivityVerifier that permits any chain history.
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
log.Warn("No valid weak subjectivity checkpoint specified, running without weak subjectivity verification")
return &WeakSubjectivityVerifier{
enabled: false,
}, nil
@@ -79,15 +80,17 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
if !v.db.HasBlock(ctx, v.root) {
return errors.Wrap(errWSBlockNotFound, fmt.Sprintf("missing root %#x", v.root))
}
filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(v.slot + params.BeaconConfig().SlotsPerEpoch)
endSlot := v.slot + params.BeaconConfig().SlotsPerEpoch
filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(endSlot)
// A node should have the weak subjectivity block corresponding to the correct epoch in the DB.
log.Infof("Searching block roots for weak subjectivity root=%#x, between slots %d-%d", v.root, v.slot, endSlot)
roots, err := v.db.BlockRoots(ctx, filter)
if err != nil {
return errors.Wrap(err, "error while retrieving block roots to verify weak subjectivity")
}
for _, root := range roots {
if v.root == root {
log.Info("Weak subjectivity check has passed")
log.Info("Weak subjectivity check has passed!!")
v.verified = true
return nil
}
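The check above reduces to a slot-window membership test: the weak subjectivity root must appear among the block roots stored for slots between the checkpoint slot and one epoch later. A rough standalone sketch of that window, assuming the mainnet value of 32 slots per epoch (the names here are illustrative, not Prysm APIs):
package main
import "fmt"
const slotsPerEpoch = 32 // mainnet value, assumed for illustration
// wsSlotWindow mirrors SetStartSlot(v.slot).SetEndSlot(v.slot + SlotsPerEpoch) above:
// the search spans the first slot of the checkpoint epoch through one epoch later.
func wsSlotWindow(wsEpoch uint64) (start, end uint64) {
	start = wsEpoch * slotsPerEpoch
	return start, start + slotsPerEpoch
}
func main() {
	// Epoch of slot 1792480, the slot used in the updated test below.
	start, end := wsSlotWindow(1792480 / slotsPerEpoch)
	fmt.Printf("weak subjectivity root must be found in slots %d-%d\n", start, end)
}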

View File

@@ -14,58 +14,65 @@ import (
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
b := util.NewBeaconBlock()
b.Block.Slot = 32
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
b.Block.Slot = 1792480
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
blockEpoch := slots.ToEpoch(b.Block.Slot)
tests := []struct {
wsVerified bool
disabled bool
wantErr error
checkpt *ethpb.Checkpoint
finalizedEpoch types.Epoch
name string
}{
{
name: "nil root and epoch",
},
{
name: "already verified",
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 2,
wsVerified: true,
name: "nil root and epoch",
disabled: true,
},
{
name: "not yet to verify, ws epoch higher than finalized epoch",
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 1,
checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: blockEpoch},
finalizedEpoch: blockEpoch - 1,
},
{
name: "can't find the block in DB",
checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), Epoch: 1},
finalizedEpoch: 3,
finalizedEpoch: blockEpoch + 1,
wantErr: errWSBlockNotFound,
},
{
name: "can't find the block corresponds to ws epoch in DB",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 2}, // Root belongs in epoch 1.
finalizedEpoch: 3,
checkpt:        &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch - 2}, // Root's block is in a later epoch than the checkpoint.
finalizedEpoch: blockEpoch - 1,
wantErr: errWSBlockNotFoundInEpoch,
},
{
name: "can verify and pass",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 1},
finalizedEpoch: 3,
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
finalizedEpoch: blockEpoch + 1,
},
{
name: "equal epoch",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
finalizedEpoch: blockEpoch,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},

View File

@@ -1,3 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache
@@ -17,10 +18,12 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
)
var (
const (
// maxBalanceCacheSize defines the max number of active balances that can be cached.
maxBalanceCacheSize = uint64(4)
maxBalanceCacheSize = int(4)
)
var (
// BalanceCacheMiss tracks the number of balance requests that aren't present in the cache.
balanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "total_effective_balance_cache_miss",
@@ -42,7 +45,7 @@ type BalanceCache struct {
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
return &BalanceCache{
cache: lruwrpr.New(int(maxBalanceCacheSize)),
cache: lruwrpr.New(maxBalanceCacheSize),
}
}

View File

@@ -1,3 +1,4 @@
//go:build fuzz
// +build fuzz
package cache

View File

@@ -1,3 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache
@@ -19,11 +20,13 @@ import (
mathutil "github.com/prysmaticlabs/prysm/math"
)
var (
const (
// maxCommitteesCacheSize defines the max number of shuffled committees that can be cached on a per-randao basis.
// Due to reorgs and long finality, it's good to keep the old cache around for a quick switchover.
maxCommitteesCacheSize = uint64(32)
maxCommitteesCacheSize = int(32)
)
var (
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "committee_cache_miss",
@@ -55,7 +58,7 @@ func committeeKeyFn(obj interface{}) (string, error) {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
return &CommitteeCache{
CommitteeCache: lruwrpr.New(int(maxCommitteesCacheSize)),
CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
inProgress: make(map[string]bool),
}
}

View File

@@ -1,3 +1,4 @@
//go:build fuzz
// +build fuzz
// This file is used in fuzzer builds to bypass global committee caches.

View File

@@ -33,7 +33,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
require.NoError(t, err)
}
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
}
func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
@@ -50,5 +50,5 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
assert.DeepEqual(t, c.SortedIndices, indices)
}
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
}

View File

@@ -102,7 +102,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
}
k := cache.CommitteeCache.Keys()
assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))
assert.Equal(t, maxCommitteesCacheSize, len(k))
sort.Slice(k, func(i, j int) bool {
return k[i].(string) < k[j].(string)

View File

@@ -1,3 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,3 +1,4 @@
//go:build fuzz
// +build fuzz
// This file is used in fuzzer builds to bypass proposer indices caches.

View File

@@ -27,7 +27,7 @@ var SubnetIDs = newSubnetIDs()
func newSubnetIDs() *subnetIDs {
// A node can calculate committee assignments for the current epoch and the next epoch,
// so the max size is set to two epochs' worth of entries.
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2)) // lint:ignore uintcast -- constant values that would panic on startup if negative.
attesterCache := lruwrpr.New(cacheSize)
aggregatorCache := lruwrpr.New(cacheSize)
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))

View File

@@ -1,3 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,3 +1,4 @@
//go:build fuzz
// +build fuzz
package cache

View File

@@ -104,34 +104,30 @@ func SetParticipationAndRewardProposer(
targetEpoch types.Epoch,
indices []uint64,
participatedFlags map[uint8]bool, totalBalance uint64) (state.BeaconState, error) {
var epochParticipation []byte
var proposerRewardNumerator uint64
currentEpoch := time.CurrentEpoch(beaconState)
var err error
var stateErr error
if targetEpoch == currentEpoch {
epochParticipation, err = beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
stateErr = beaconState.ModifyCurrentParticipationBits(func(val []byte) ([]byte, error) {
propRewardNum, epochParticipation, err := EpochParticipation(beaconState, indices, val, participatedFlags, totalBalance)
if err != nil {
return nil, err
}
proposerRewardNumerator = propRewardNum
return epochParticipation, nil
})
} else {
epochParticipation, err = beaconState.PreviousEpochParticipation()
if err != nil {
return nil, err
}
stateErr = beaconState.ModifyPreviousParticipationBits(func(val []byte) ([]byte, error) {
propRewardNum, epochParticipation, err := EpochParticipation(beaconState, indices, val, participatedFlags, totalBalance)
if err != nil {
return nil, err
}
proposerRewardNumerator = propRewardNum
return epochParticipation, nil
})
}
proposerRewardNumerator, epochParticipation, err := EpochParticipation(beaconState, indices, epochParticipation, participatedFlags, totalBalance)
if err != nil {
return nil, err
}
if targetEpoch == currentEpoch {
if err := beaconState.SetCurrentParticipationBits(epochParticipation); err != nil {
return nil, err
}
} else {
if err := beaconState.SetPreviousParticipationBits(epochParticipation); err != nil {
return nil, err
}
if stateErr != nil {
return nil, stateErr
}
if err := RewardProposer(ctx, beaconState, proposerRewardNumerator); err != nil {
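This hunk replaces the earlier read-participation / mutate / write-participation sequence with ModifyCurrentParticipationBits and ModifyPreviousParticipationBits, which take a callback so the state can update its bitlist in place while the proposer reward is captured from inside the closure. A stripped-down sketch of that callback pattern (the state type and method below are invented stand-ins, not Prysm's state interfaces):
package main
import "fmt"
// toyState mimics a state object that owns its participation bits and only lets
// callers change them through a modify callback, as in the hunk above.
type toyState struct {
	participation []byte
}
// ModifyCurrentParticipationBits applies the mutator to the stored bits and keeps the result.
func (s *toyState) ModifyCurrentParticipationBits(mutator func(val []byte) ([]byte, error)) error {
	updated, err := mutator(s.participation)
	if err != nil {
		return err
	}
	s.participation = updated
	return nil
}
func main() {
	st := &toyState{participation: make([]byte, 4)}
	var proposerRewardNumerator uint64 // captured from inside the closure, as above
	err := st.ModifyCurrentParticipationBits(func(val []byte) ([]byte, error) {
		val[0] |= 0b001 // flag validator 0 as having participated
		proposerRewardNumerator += 1000
		return val, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(st.participation, proposerRewardNumerator) // [1 0 0 0] 1000
}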

View File

@@ -48,7 +48,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
params.BeaconConfig().MinAttestationInclusionDelay,
beaconState.Slot(),
)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
@@ -79,7 +79,7 @@ func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
time.PrevEpoch(beaconState),
time.CurrentEpoch(beaconState),
)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
@@ -108,13 +108,13 @@ func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
want := "source check point not equal to current justified checkpoint"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = time.CurrentEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
@@ -149,14 +149,14 @@ func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))
want := "source check point not equal to previous justified checkpoint"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = time.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Target.Epoch = time.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
@@ -188,7 +188,7 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.ErrorContains(t, expected, err)
@@ -232,7 +232,7 @@ func TestProcessAttestations_OK(t *testing.T) {
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -421,7 +421,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
}
s, err := stateAltair.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb)
if err != nil && r != nil {

View File

@@ -45,6 +45,7 @@ go_library(
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//proto/prysm/v1alpha1/slashings:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -42,7 +42,9 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
s, err := v1.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
_, err = ProcessBlockHeader(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = ProcessBlockHeader(context.Background(), s, wsb)
_ = err
}
}
@@ -141,7 +143,9 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
fuzzer.Fuzz(b)
s, err := v1.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessRandao(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := ProcessRandao(context.Background(), s, wsb)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
}
@@ -260,7 +264,9 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
fuzzer.Fuzz(b)
s, err := v1.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
r, err := ProcessAttestationsNoVerifySignature(ctx, s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := ProcessAttestationsNoVerifySignature(ctx, s, wsb)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
}

View File

@@ -225,7 +225,7 @@ func verifyDeposit(beaconState state.ReadOnlyBeaconState, deposit *ethpb.Deposit
if ok := trie.VerifyMerkleProofWithDepth(
receiptRoot,
leaf[:],
int(beaconState.Eth1DepositIndex()),
beaconState.Eth1DepositIndex(),
deposit.Proof,
params.BeaconConfig().DepositContractTreeDepth,
); !ok {

View File

@@ -68,7 +68,9 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wsb)
assert.ErrorContains(t, "block.Slot 10 must be greater than state.LatestBlockHeader.Slot 10", err)
}
@@ -94,7 +96,9 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
block.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, block.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx+1])
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), beaconState, wsb)
want := "signature did not verify"
assert.ErrorContains(t, want, err)
}
@@ -136,7 +140,9 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
Signature: blockSig,
})
_, err = blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wsb)
want := "is different than block slot"
assert.ErrorContains(t, want, err)
}
@@ -175,7 +181,9 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
block.Block.ParentRoot = bytesutil.PadTo([]byte{'A'}, 32)
block.Signature = blockSig
_, err = blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wsb)
want := "does not match"
assert.ErrorContains(t, want, err)
}
@@ -217,7 +225,9 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
block.Block.ParentRoot = parentRoot[:]
block.Signature = blockSig
_, err = blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = blocks.ProcessBlockHeader(context.Background(), state, wsb)
want := "was previously slashed"
assert.ErrorContains(t, want, err)
}
@@ -266,7 +276,9 @@ func TestProcessBlockHeader_OK(t *testing.T) {
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
newState, err := blocks.ProcessBlockHeader(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
newState, err := blocks.ProcessBlockHeader(context.Background(), state, wsb)
require.NoError(t, err, "Failed to process block header got")
var zeroHash [32]byte
nsh := newState.LatestBlockHeader()

View File

@@ -2,7 +2,6 @@ package blocks
import (
"bytes"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -14,86 +13,96 @@ import (
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
)
// MergeTransitionComplete returns true if the transition to Bellatrix has completed.
// IsMergeTransitionComplete returns true if the transition to Bellatrix has completed.
// Meaning the payload header in the beacon state is not `ExecutionPayloadHeader()` (i.e. not empty).
//
// Spec code:
// def is_merge_transition_complete(state: BeaconState) -> bool:
// return state.latest_execution_payload_header != ExecutionPayloadHeader()
//
// Deprecated: Use `IsMergeTransitionBlockUsingPayloadHeader` instead.
func MergeTransitionComplete(st state.BeaconState) (bool, error) {
func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
if st == nil {
return false, errors.New("nil state")
}
if IsPreBellatrixVersion(st.Version()) {
return false, nil
}
h, err := st.LatestExecutionPayloadHeader()
if err != nil {
return false, err
}
return !isEmptyHeader(h), nil
}
// MergeTransitionBlock returns true if the input block is the terminal merge block.
// Meaning the payload header in the beacon state is `ExecutionPayloadHeader()` (i.e. empty)
// and the input block body has a non-empty execution payload.
//
// Spec code:
// def is_merge_transition_block(state: BeaconState, body: BeaconBlockBody) -> bool:
// return not is_merge_transition_complete(state) and body.execution_payload != ExecutionPayload()
func MergeTransitionBlock(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
mergeComplete, err := MergeTransitionComplete(st)
if err != nil {
return false, err
}
if mergeComplete {
return false, err
}
return ExecutionBlock(body)
}
// IsMergeTransitionBlockUsingPayloadHeader returns true if the input block is the terminal merge block.
// IsMergeTransitionBlockUsingPreStatePayloadHeader returns true if the input block is the terminal merge block.
// Terminal merge block must be associated with an empty payload header.
// This is an optimized version of MergeTransitionComplete where beacon state is not required as an argument.
func IsMergeTransitionBlockUsingPayloadHeader(h *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
// This assumes the header `h` is taken from the pre-state (the parent state) of block body `body`.
func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
if h == nil || body == nil {
return false, errors.New("nil header or block body")
}
if !isEmptyHeader(h) {
return false, nil
}
return ExecutionBlock(body)
return IsExecutionBlock(body)
}
// ExecutionBlock returns whether the block has a non-empty ExecutionPayload.
// IsExecutionBlock returns whether the block has a non-empty ExecutionPayload.
//
// Spec code:
// def is_execution_block(block: BeaconBlock) -> bool:
// return block.body.execution_payload != ExecutionPayload()
func ExecutionBlock(body block.BeaconBlockBody) (bool, error) {
func IsExecutionBlock(body block.BeaconBlockBody) (bool, error) {
if body == nil {
return false, errors.New("nil block body")
}
payload, err := body.ExecutionPayload()
if err != nil {
if strings.HasPrefix(err.Error(), "ExecutionPayload is not supported in") {
return false, nil
}
switch {
case errors.Is(err, wrapper.ErrUnsupportedField):
return false, nil
case err != nil:
return false, err
default:
}
return !isEmptyPayload(payload), nil
}
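The switch above replaces string-prefix matching on the ExecutionPayload error with errors.Is against wrapper.ErrUnsupportedField. A tiny standalone sketch of why sentinel matching is the more robust choice (the sentinel's role is taken from the hunk; the names and message below are invented):
package main
import (
	"errors"
	"fmt"
)
// errUnsupportedField stands in for wrapper.ErrUnsupportedField.
var errUnsupportedField = errors.New("field is not supported for this block version")
// executionPayload returns a wrapped sentinel for pre-Bellatrix versions, so the
// caller's errors.Is check keeps working even if the message wording changes.
func executionPayload(version string) error {
	if version != "bellatrix" {
		return fmt.Errorf("ExecutionPayload: %w", errUnsupportedField)
	}
	return nil
}
func main() {
	err := executionPayload("altair")
	fmt.Println(errors.Is(err, errUnsupportedField)) // true, regardless of the message prefix
	fmt.Println(err)
}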
// ExecutionEnabled returns true if the beacon chain can begin executing.
// IsExecutionEnabled returns true if the beacon chain can begin executing.
// Meaning the payload header in the beacon state is non-empty or the payload in the block body is non-empty.
//
// Spec code:
// def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool:
// return is_merge_block(state, body) or is_merge_complete(state)
func ExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
mergeBlock, err := MergeTransitionBlock(st, body)
func IsExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
if st == nil || body == nil {
return false, errors.New("nil state or block body")
}
if IsPreBellatrixVersion(st.Version()) {
return false, nil
}
header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return false, err
}
if mergeBlock {
return IsExecutionEnabledUsingHeader(header, body)
}
// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
func IsExecutionEnabledUsingHeader(header *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
if !isEmptyHeader(header) {
return true, nil
}
return MergeTransitionComplete(st)
return IsExecutionBlock(body)
}
// IsPreBellatrixVersion returns true if input version is before bellatrix fork.
func IsPreBellatrixVersion(v int) bool {
return v < version.Bellatrix
}
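Taken together, the renamed predicates mirror the consensus-spec helpers quoted in their doc comments: is_merge_transition_complete looks only at the state's latest payload header, is_execution_block looks only at the block body, and is_execution_enabled is true when either side is non-empty. A self-contained toy model of that relationship (using zero-value comparisons instead of Prysm's field-by-field emptiness checks):
package main
import "fmt"
// Toy stand-ins for the state's latest execution payload header and a block body's payload.
type payloadHeader struct{ blockHash [32]byte }
type executionPayload struct{ blockHash [32]byte }
func isMergeTransitionComplete(latestHeader payloadHeader) bool {
	return latestHeader != (payloadHeader{}) // state.latest_execution_payload_header != ExecutionPayloadHeader()
}
func isExecutionBlock(p executionPayload) bool {
	return p != (executionPayload{}) // block.body.execution_payload != ExecutionPayload()
}
func isExecutionEnabled(latestHeader payloadHeader, p executionPayload) bool {
	return isMergeTransitionComplete(latestHeader) || isExecutionBlock(p)
}
func main() {
	var preMergeHeader payloadHeader // empty header: merge transition not yet complete
	payload := executionPayload{blockHash: [32]byte{'a'}}
	fmt.Println(isMergeTransitionComplete(preMergeHeader))   // false
	fmt.Println(isExecutionBlock(payload))                   // true
	fmt.Println(isExecutionEnabled(preMergeHeader, payload)) // true: this is the merge transition block
}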
// ValidatePayloadWhenMergeCompletes validates if payload is valid versus input beacon state.
@@ -104,7 +113,7 @@ func ExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, e
// if is_merge_complete(state):
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
complete, err := MergeTransitionComplete(st)
complete, err := IsMergeTransitionComplete(st)
if err != nil {
return err
}
@@ -226,6 +235,9 @@ func PayloadToHeader(payload *enginev1.ExecutionPayload) (*ethpb.ExecutionPayloa
}
func isEmptyPayload(p *enginev1.ExecutionPayload) bool {
if p == nil {
return true
}
if !bytes.Equal(p.ParentHash, make([]byte, fieldparams.RootLength)) {
return false
}

View File

@@ -18,7 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
func Test_MergeComplete(t *testing.T) {
func Test_IsMergeComplete(t *testing.T) {
tests := []struct {
name string
payload *ethpb.ExecutionPayloadHeader
@@ -160,7 +160,7 @@ func Test_MergeComplete(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
got, err := blocks.MergeTransitionComplete(st)
got, err := blocks.IsMergeTransitionComplete(st)
require.NoError(t, err)
if got != tt.want {
t.Errorf("mergeComplete() got = %v, want %v", got, tt.want)
@@ -169,187 +169,6 @@ func Test_MergeComplete(t *testing.T) {
}
}
func Test_MergeBlock(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
want bool
}{
{
name: "empty header, empty payload",
payload: emptyPayload(),
header: emptyPayloadHeader(),
want: false,
},
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: false,
},
{
name: "empty header, payload has parent hash",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has fee recipient",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has state root",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has receipt root",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has logs bloom",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has random",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has base fee",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has block hash",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has tx",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Transactions = [][]byte{{'a'}}
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has extra data",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has block number",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.BlockNumber = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has gas limit",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.GasLimit = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has gas used",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.GasUsed = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has timestamp",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Timestamp = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.MergeTransitionBlock(st, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
}
})
}
}
func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
tests := []struct {
name string
@@ -520,7 +339,7 @@ func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(tt.header, body)
got, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(tt.header, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
@@ -556,20 +375,28 @@ func Test_IsExecutionBlock(t *testing.T) {
blk.Block.Body.ExecutionPayload = tt.payload
wrappedBlock, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(t, err)
got, err := blocks.ExecutionBlock(wrappedBlock.Body())
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
}
func Test_ExecutionEnabled(t *testing.T) {
func Test_IsExecutionEnabled(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
want bool
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
useAltairSt bool
want bool
}{
{
name: "use older than bellatrix state",
payload: emptyPayload(),
header: emptyPayloadHeader(),
useAltairSt: true,
want: false,
},
{
name: "empty header, empty payload",
payload: emptyPayload(),
@@ -619,10 +446,76 @@ func Test_ExecutionEnabled(t *testing.T) {
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.ExecutionEnabled(st, body)
if tt.useAltairSt {
st, _ = util.DeterministicGenesisStateAltair(t, 1)
}
got, err := blocks.IsExecutionEnabled(st, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("ExecutionEnabled() got = %v, want %v", got, tt.want)
t.Errorf("IsExecutionEnabled() got = %v, want %v", got, tt.want)
}
})
}
}
func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
want bool
}{
{
name: "empty header, empty payload",
payload: emptyPayload(),
header: emptyPayloadHeader(),
want: false,
},
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "empty header, non-empty payload",
header: emptyPayloadHeader(),
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Timestamp = 1
return p
}(),
want: true,
},
{
name: "non-empty header, non-empty payload",
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Timestamp = 1
return p
}(),
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.IsExecutionEnabledUsingHeader(tt.header, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("IsExecutionEnabled() got = %v, want %v", got, tt.want)
}
})
}
@@ -830,7 +723,7 @@ func BenchmarkBellatrixComplete(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := blocks.MergeTransitionComplete(st)
_, err := blocks.IsMergeTransitionComplete(st)
require.NoError(b, err)
}
}

View File

@@ -40,7 +40,9 @@ func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) {
}
want := "block randao: signature did not verify"
_, err = blocks.ProcessRandao(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = blocks.ProcessRandao(context.Background(), beaconState, wsb)
assert.ErrorContains(t, want, err)
}
@@ -57,11 +59,12 @@ func TestProcessRandao_SignatureVerifiesAndUpdatesLatestStateMixes(t *testing.T)
RandaoReveal: epochSignature,
},
}
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
newState, err := blocks.ProcessRandao(
context.Background(),
beaconState,
wrapper.WrappedPhase0SignedBeaconBlock(b),
wsb,
)
require.NoError(t, err, "Unexpected error processing block randao")
currentEpoch := time.CurrentEpoch(beaconState)

View File

@@ -83,7 +83,7 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
assert.NoError(t, err)
sig := keys[0].Sign(rt[:]).Marshal()
altairBlk.Signature = sig
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(altairBlk)
wsb, err := wrapper.WrappedSignedBeaconBlock(altairBlk)
require.NoError(t, err)
assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb))
}

View File

@@ -13,5 +13,6 @@ const (
// ReceivedBlockData is the data sent with ReceivedBlock events.
type ReceivedBlockData struct {
SignedBlock block.SignedBeaconBlock
SignedBlock block.SignedBeaconBlock
IsOptimistic bool
}

View File

@@ -15,29 +15,12 @@ go_library(
"weak_subjectivity.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
"//contracts/deposit:__pkg__",
"//crypto/keystore:__pkg__",
"//network/forks:__pkg__",
"//proto/prysm/v1alpha1:__subpackages__",
"//proto/prysm/v1alpha1/attestation:__pkg__",
"//runtime/interop:__pkg__",
"//slasher:__subpackages__",
"//testing/altair:__pkg__",
"//testing/benchmark/benchmark_files:__subpackages__",
"//testing/endtoend/evaluators:__pkg__",
"//testing/slasher/simulator:__pkg__",
"//testing/spectest:__subpackages__",
"//testing/util:__pkg__",
"//tools:__subpackages__",
"//validator:__subpackages__",
],
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",

View File

@@ -1,7 +1,6 @@
package helpers
import (
"bytes"
"context"
"encoding/hex"
"errors"
@@ -12,9 +11,11 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/math"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)
@@ -54,9 +55,9 @@ import (
// )
//
// return ws_period
func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconState) (types.Epoch, error) {
func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconState, cfg *params.BeaconChainConfig) (types.Epoch, error) {
// Weak subjectivity period cannot be smaller than withdrawal delay.
wsp := uint64(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
wsp := uint64(cfg.MinValidatorWithdrawabilityDelay)
// Cardinality of active validator set.
N, err := ActiveValidatorCount(ctx, st, time.CurrentEpoch(st))
@@ -72,10 +73,10 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
if err != nil {
return 0, fmt.Errorf("cannot find total active balance of validators: %w", err)
}
t = t / N / params.BeaconConfig().GweiPerEth
t = t / N / cfg.GweiPerEth
// Maximum effective balance per validator.
T := params.BeaconConfig().MaxEffectiveBalance / params.BeaconConfig().GweiPerEth
T := cfg.MaxEffectiveBalance / cfg.GweiPerEth
// Validator churn limit.
delta, err := ValidatorChurnLimit(N)
@@ -84,14 +85,14 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
}
// Balance top-ups.
Delta := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxDeposits))
Delta := uint64(cfg.SlotsPerEpoch.Mul(cfg.MaxDeposits))
if delta == 0 || Delta == 0 {
return 0, errors.New("either validator churn limit or balance top-ups is zero")
}
// Safety decay, maximum tolerable loss of safety margin of FFG finality.
D := params.BeaconConfig().SafetyDecay
D := cfg.SafetyDecay
if T*(200+3*D) < t*(200+12*D) {
epochsForValidatorSetChurn := N * (t*(200+12*D) - T*(200+3*D)) / (600 * delta * (2*t + T))
@@ -122,24 +123,24 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
// current_epoch = compute_epoch_at_slot(get_current_slot(store))
// return current_epoch <= ws_state_epoch + ws_period
func IsWithinWeakSubjectivityPeriod(
ctx context.Context, currentEpoch types.Epoch, wsState state.ReadOnlyBeaconState, wsCheckpoint *eth.WeakSubjectivityCheckpoint) (bool, error) {
ctx context.Context, currentEpoch types.Epoch, wsState state.ReadOnlyBeaconState, wsStateRoot [fieldparams.RootLength]byte, wsEpoch types.Epoch, cfg *params.BeaconChainConfig) (bool, error) {
// Make sure that incoming objects are not nil.
if wsState == nil || wsState.IsNil() || wsState.LatestBlockHeader() == nil || wsCheckpoint == nil {
if wsState == nil || wsState.IsNil() || wsState.LatestBlockHeader() == nil {
return false, errors.New("invalid weak subjectivity state or checkpoint")
}
// Assert that state and checkpoint have the same root and epoch.
if !bytes.Equal(wsState.LatestBlockHeader().StateRoot, wsCheckpoint.StateRoot) {
if bytesutil.ToBytes32(wsState.LatestBlockHeader().StateRoot) != wsStateRoot {
return false, fmt.Errorf("state (%#x) and checkpoint (%#x) roots do not match",
wsState.LatestBlockHeader().StateRoot, wsCheckpoint.StateRoot)
wsState.LatestBlockHeader().StateRoot, wsStateRoot)
}
if slots.ToEpoch(wsState.Slot()) != wsCheckpoint.Epoch {
if slots.ToEpoch(wsState.Slot()) != wsEpoch {
return false, fmt.Errorf("state (%v) and checkpoint (%v) epochs do not match",
slots.ToEpoch(wsState.Slot()), wsCheckpoint.Epoch)
slots.ToEpoch(wsState.Slot()), wsEpoch)
}
// Compare given epoch to state epoch + weak subjectivity period.
wsPeriod, err := ComputeWeakSubjectivityPeriod(ctx, wsState)
wsPeriod, err := ComputeWeakSubjectivityPeriod(ctx, wsState, cfg)
if err != nil {
return false, fmt.Errorf("cannot compute weak subjectivity period: %w", err)
}
@@ -153,8 +154,8 @@ func IsWithinWeakSubjectivityPeriod(
// Within the weak subjectivity period, if two conflicting blocks are finalized, 1/3 - D (D := safety decay)
// of validators will get slashed. Therefore, it is safe to assume that any finalized checkpoint within that
// period is protected by this safety margin.
func LatestWeakSubjectivityEpoch(ctx context.Context, st state.ReadOnlyBeaconState) (types.Epoch, error) {
wsPeriod, err := ComputeWeakSubjectivityPeriod(ctx, st)
func LatestWeakSubjectivityEpoch(ctx context.Context, st state.ReadOnlyBeaconState, cfg *params.BeaconChainConfig) (types.Epoch, error) {
wsPeriod, err := ComputeWeakSubjectivityPeriod(ctx, st, cfg)
if err != nil {
return 0, err
}
@@ -164,7 +165,7 @@ func LatestWeakSubjectivityEpoch(ctx context.Context, st state.ReadOnlyBeaconSta
}
// ParseWeakSubjectivityInputString parses "blocks_root:epoch_number" string into a checkpoint.
func ParseWeakSubjectivityInputString(wsCheckpointString string) (*eth.Checkpoint, error) {
func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Checkpoint, error) {
if wsCheckpointString == "" {
return nil, nil
}
@@ -197,7 +198,7 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*eth.Checkpoin
return nil, err
}
return &eth.Checkpoint{
return &v1alpha1.Checkpoint{
Epoch: types.Epoch(epoch),
Root: bRoot,
}, nil
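The "blocks_root:epoch_number" flag format handled above is easy to misread, so here is a rough standalone parser for the same shape (hex root, colon, decimal epoch); it illustrates the input format only and is not Prysm's validation logic:
package main
import (
	"encoding/hex"
	"fmt"
	"strconv"
	"strings"
)
// parseCheckpointString splits "0x<block_root_hex>:<epoch_number>" into its parts,
// mirroring the input format documented on ParseWeakSubjectivityInputString.
func parseCheckpointString(s string) (root [32]byte, epoch uint64, err error) {
	parts := strings.Split(s, ":")
	if len(parts) != 2 {
		return root, 0, fmt.Errorf("expected <root>:<epoch>, got %q", s)
	}
	rootBytes, err := hex.DecodeString(strings.TrimPrefix(parts[0], "0x"))
	if err != nil || len(rootBytes) != 32 {
		return root, 0, fmt.Errorf("invalid 32-byte hex root %q", parts[0])
	}
	copy(root[:], rootBytes)
	epoch, err = strconv.ParseUint(parts[1], 10, 64)
	return root, epoch, err
}
func main() {
	root, epoch, err := parseCheckpointString("0x" + strings.Repeat("ab", 32) + ":1024")
	if err != nil {
		panic(err)
	}
	fmt.Printf("root=%#x epoch=%d\n", root[:], epoch)
}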

View File

@@ -48,18 +48,21 @@ func TestWeakSubjectivity_ComputeWeakSubjectivityPeriod(t *testing.T) {
t.Run(fmt.Sprintf("valCount: %d, avgBalance: %d", tt.valCount, tt.avgBalance), func(t *testing.T) {
// Reset committee cache - as we need to recalculate active validator set for each test.
helpers.ClearCache()
got, err := helpers.ComputeWeakSubjectivityPeriod(context.Background(), genState(t, tt.valCount, tt.avgBalance))
got, err := helpers.ComputeWeakSubjectivityPeriod(context.Background(), genState(t, tt.valCount, tt.avgBalance), params.BeaconConfig())
require.NoError(t, err)
assert.Equal(t, tt.want, got, "valCount: %v, avgBalance: %v", tt.valCount, tt.avgBalance)
})
}
}
type mockWsCheckpoint func() (stateRoot [32]byte, blockRoot [32]byte, e types.Epoch)
func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
tests := []struct {
name string
epoch types.Epoch
genWsState func() state.ReadOnlyBeaconState
genWsCheckpoint func() *ethpb.WeakSubjectivityCheckpoint
genWsCheckpoint mockWsCheckpoint
want bool
wantedErr string
}{
@@ -68,22 +71,8 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
genWsState: func() state.ReadOnlyBeaconState {
return nil
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
BlockRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
Epoch: 42,
}
},
wantedErr: "invalid weak subjectivity state or checkpoint",
},
{
name: "nil weak subjectivity checkpoint",
genWsState: func() state.ReadOnlyBeaconState {
return genState(t, 128, 32)
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return nil
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
return [32]byte{}, [32]byte{}, 42
},
wantedErr: "invalid weak subjectivity state or checkpoint",
},
@@ -99,11 +88,10 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
Epoch: 42,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot2"), 32))
return sr, [32]byte{}, 42
},
wantedErr: fmt.Sprintf("state (%#x) and checkpoint (%#x) roots do not match",
bytesutil.PadTo([]byte("stateroot1"), 32), bytesutil.PadTo([]byte("stateroot2"), 32)),
@@ -120,11 +108,10 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
Epoch: 43,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot"), 32))
return sr, [32]byte{}, 43
},
wantedErr: "state (42) and checkpoint (43) epochs do not match",
},
@@ -140,11 +127,10 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
Epoch: 42,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot"), 32))
return sr, [32]byte{}, 42
},
wantedErr: "cannot compute weak subjectivity period: no active validators found",
},
@@ -161,11 +147,10 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
Epoch: 42,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot"), 32))
return sr, [32]byte{}, 42
},
want: false,
},
@@ -182,18 +167,18 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
Epoch: 42,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot"), 32))
return sr, [32]byte{}, 42
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), tt.genWsCheckpoint())
sr, _, e := tt.genWsCheckpoint()
got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), sr, e, params.BeaconConfig())
if tt.wantedErr != "" {
assert.Equal(t, false, got)
assert.ErrorContains(t, tt.wantedErr, err)

View File

@@ -17,13 +17,15 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["slot_epoch_test.go"],
embed = [":go_default_library"],
deps = [
":go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],

View File

@@ -46,9 +46,9 @@ func NextEpoch(state state.ReadOnlyBeaconState) types.Epoch {
return slots.ToEpoch(state.Slot()) + 1
}
// AltairCompatible returns if the input state `s` is altair compatible and input epoch `e` is higher equal than fork epoch.
func AltairCompatible(s state.BeaconState, e types.Epoch) bool {
return s.Version() == version.Altair && e >= params.BeaconConfig().AltairForkEpoch
// HigherEqualThanAltairVersionAndEpoch returns true if the input state `s` has a version greater than or equal to Altair and the input epoch `e` is greater than or equal to the Altair fork epoch.
func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e types.Epoch) bool {
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
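The renamed helper answers a slightly broader question than AltairCompatible did: any state whose version is Altair or later counts, provided the epoch has reached the Altair fork. A short illustrative call site follows; the coretime alias for beacon-chain/core/time, the st variable, and the surrounding branch are assumptions rather than lines from this diff.

// Choose the post-Altair processing path only when both the state version and
// the epoch have crossed the Altair fork.
if coretime.HigherEqualThanAltairVersionAndEpoch(st, slots.ToEpoch(st.Slot())) {
	// Altair/Bellatrix handling (sync aggregates, updated reward accounting, ...)
} else {
	// Phase 0 handling
}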

View File

@@ -1,14 +1,17 @@
package time
package time_test
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
@@ -42,7 +45,7 @@ func TestCurrentEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, CurrentEpoch(state), "ActiveCurrentEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, time.CurrentEpoch(state), "ActiveCurrentEpoch(%d)", state.Slot())
}
}
@@ -58,7 +61,7 @@ func TestPrevEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, PrevEpoch(state), "ActivePrevEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, time.PrevEpoch(state), "ActivePrevEpoch(%d)", state.Slot())
}
}
@@ -76,7 +79,7 @@ func TestNextEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, NextEpoch(state), "NextEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, time.NextEpoch(state), "NextEpoch(%d)", state.Slot())
}
}
@@ -108,7 +111,7 @@ func TestCanUpgradeToAltair(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := CanUpgradeToAltair(tt.slot); got != tt.want {
if got := time.CanUpgradeToAltair(tt.slot); got != tt.want {
t.Errorf("canUpgradeToAltair() = %v, want %v", got, tt.want)
}
})
@@ -143,7 +146,7 @@ func TestCanUpgradeBellatrix(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := CanUpgradeToBellatrix(tt.slot); got != tt.want {
if got := time.CanUpgradeToBellatrix(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToBellatrix() = %v, want %v", got, tt.want)
}
})
@@ -178,6 +181,85 @@ func TestCanProcessEpoch_TrueOnEpochsLastSlot(t *testing.T) {
b := &eth.BeaconState{Slot: tt.slot}
s, err := v1.InitializeFromProto(b)
require.NoError(t, err)
assert.Equal(t, tt.canProcessEpoch, CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot)
assert.Equal(t, tt.canProcessEpoch, time.CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot)
}
}
func TestAltairCompatible(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 1
cfg.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(cfg)
type args struct {
s state.BeaconState
e types.Epoch
}
tests := []struct {
name string
args args
want bool
}{
{
name: "phase0 state",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisState(t, 1)
return st
}(),
},
want: false,
},
{
name: "altair state, altair epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateAltair(t, 1)
return st
}(),
e: params.BeaconConfig().AltairForkEpoch,
},
want: true,
},
{
name: "bellatrix state, bellatrix epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
e: params.BeaconConfig().BellatrixForkEpoch,
},
want: true,
},
{
name: "bellatrix state, altair epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
e: params.BeaconConfig().AltairForkEpoch,
},
want: true,
},
{
name: "bellatrix state, phase0 epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.HigherEqualThanAltairVersionAndEpoch(tt.args.s, tt.args.e); got != tt.want {
t.Errorf("HigherEqualThanAltairVersionAndEpoch() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -90,7 +90,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -101,7 +101,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Signature = sig.Marshal()
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -178,7 +178,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -190,7 +190,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
block.Signature = sig.Marshal()
block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32)
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "could not validate state root", err)
@@ -198,7 +198,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
func TestExecuteStateTransitionNoVerifyAnySig_PassesProcessingConditions(t *testing.T) {
beaconState, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)
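These test hunks, and most of the remaining files in this diff, swap the per-fork constructors (WrappedPhase0SignedBeaconBlock, WrappedAltairSignedBeaconBlock, WrappedBellatrixSignedBeaconBlock) for a single WrappedSignedBeaconBlock that returns (block.SignedBeaconBlock, error). The wrapper's body is not shown anywhere in this diff, so the dispatch below is a hedged sketch of how such a constructor could look, not the actual implementation.

// Hypothetical shape of the fork-agnostic constructor in the wrapper package.
func WrappedSignedBeaconBlock(i interface{}) (block.SignedBeaconBlock, error) {
	switch b := i.(type) {
	case *ethpb.SignedBeaconBlock:
		return WrappedPhase0SignedBeaconBlock(b), nil
	case *ethpb.SignedBeaconBlockAltair:
		return WrappedAltairSignedBeaconBlock(b)
	case *ethpb.SignedBeaconBlockBellatrix:
		return WrappedBellatrixSignedBeaconBlock(b)
	default:
		return nil, errors.Errorf("unsupported signed beacon block type %T", i)
	}
}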

View File

@@ -92,7 +92,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -103,7 +103,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Signature = sig.Marshal()
wsb, err = wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -180,7 +180,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -192,7 +192,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
block.Signature = sig.Marshal()
block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32)
wsb, err = wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "could not validate state root", err)
@@ -245,7 +245,7 @@ func createFullBellatrixBlockWithOperations(t *testing.T) (state.BeaconState,
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BaseFeePerGas: bytesutil.PadTo([]byte{1, 2, 3, 4}, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
ExtraData: make([]byte, 0),

View File

@@ -29,7 +29,9 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(b, err)
_, err = coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wsb)
require.NoError(b, err)
}
}
@@ -50,12 +52,16 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
require.NoError(b, helpers.UpdateCommitteeCache(beaconState, time.CurrentEpoch(beaconState)))
require.NoError(b, beaconState.SetSlot(currentSlot))
// Run the state transition once to populate the cache.
_, err = coreState.ExecuteStateTransition(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(b, err)
_, err = coreState.ExecuteStateTransition(context.Background(), beaconState, wsb)
require.NoError(b, err, "Failed to process block, benchmarks will fail")
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(b, err)
_, err = coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wsb)
require.NoError(b, err, "Failed to process block, benchmarks will fail")
}
}

View File

@@ -32,10 +32,14 @@ func TestSkipSlotCache_OK(t *testing.T) {
// with the state
blk, err := util.GenerateFullBlock(bState, privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wsb)
require.NoError(t, err, "Could not run state transition")
require.Equal(t, true, executedState.Version() == version.Phase0)
bState, err = transition.ExecuteStateTransition(context.Background(), bState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(context.Background(), bState, wsb)
require.NoError(t, err, "Could not process state transition")
assert.DeepEqual(t, originalState.CloneInnerState(), bState.CloneInnerState(), "Skipped slots cache leads to different states")
@@ -57,7 +61,9 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
// with the state
blk, err := util.GenerateFullBlock(bState, privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wsb)
require.NoError(t, err, "Could not run state transition")
require.Equal(t, true, executedState.Version() == version.Phase0)
@@ -70,7 +76,9 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
signature, err := util.BlockSignature(originalState, blk.Block, privs)
require.NoError(t, err)
blk.Signature = signature.Marshal()
s1, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
s1, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb)
require.NoError(t, err, "Could not run state transition")
}
@@ -81,7 +89,9 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
signature, err := util.BlockSignature(originalState, blk.Block, privs)
require.NoError(t, err)
blk.Signature = signature.Marshal()
s0, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wrapper.WrappedPhase0SignedBeaconBlock(blk))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
s0, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb)
require.NoError(t, err, "Could not run state transition")
}

View File

@@ -92,7 +92,7 @@ func ExecuteStateTransition(
func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlot")
defer span.End()
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot())))
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
prevStateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
@@ -190,7 +190,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
if state == nil || state.IsNil() {
return nil, errors.New("nil state")
}
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot())))
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
// The block must have a higher slot than parent state.
if state.Slot() >= slot {
@@ -354,7 +354,7 @@ func VerifyOperationLengths(_ context.Context, state state.BeaconState, b block.
func ProcessEpochPrecompute(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessEpochPrecompute")
defer span.End()
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state))))
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state)))) // lint:ignore uintcast -- This is OK for tracing.
if state == nil || state.IsNil() {
return nil, errors.New("nil state")
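The added lint:ignore uintcast annotations accept that converting a uint64 slot or epoch to int64 for a span attribute can overflow in theory; for tracing, the loss is harmless. Where an unchecked cast would matter, a clamped conversion such as the following illustrative helper (not part of this diff; requires the standard library "math" import) avoids producing negative values.

// int64OrMax converts a uint64 to int64, clamping instead of wrapping negative.
func int64OrMax(v uint64) int64 {
	if v > math.MaxInt64 {
		return math.MaxInt64
	}
	return int64(v)
}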

View File

@@ -25,7 +25,9 @@ func TestFuzzExecuteStateTransition_1000(t *testing.T) {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(sb)
s, err := ExecuteStateTransition(ctx, state, wrapper.WrappedPhase0SignedBeaconBlock(sb))
wsb, err := wrapper.WrappedSignedBeaconBlock(sb)
require.NoError(t, err)
s, err := ExecuteStateTransition(ctx, state, wsb)
if err != nil && s != nil {
t.Fatalf("state should be nil on err. found: %v on error: %v for state: %v and signed block: %v", s, err, state, sb)
}
@@ -44,7 +46,9 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(sb)
stateRoot, err := CalculateStateRoot(ctx, state, wrapper.WrappedPhase0SignedBeaconBlock(sb))
wsb, err := wrapper.WrappedSignedBeaconBlock(sb)
require.NoError(t, err)
stateRoot, err := CalculateStateRoot(ctx, state, wsb)
if err != nil && stateRoot != [32]byte{} {
t.Fatalf("state root should be empty on err. found: %v on error: %v for signed block: %v", stateRoot, err, sb)
}
@@ -99,7 +103,9 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(bb)
s, err := ProcessOperationsNoVerifyAttsSigs(ctx, state, wrapper.WrappedPhase0SignedBeaconBlock(bb))
wsb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
s, err := ProcessOperationsNoVerifyAttsSigs(ctx, state, wsb)
if err != nil && s != nil {
t.Fatalf("state should be nil on err. found: %v on error: %v for block body: %v", s, err, bb)
}
@@ -117,7 +123,9 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(bb)
_, err := VerifyOperationLengths(context.Background(), state, wrapper.WrappedPhase0SignedBeaconBlock(bb))
wsb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
_, err = VerifyOperationLengths(context.Background(), state, wsb)
_ = err
}
}
@@ -164,7 +172,9 @@ func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(sb)
s, err := ProcessBlockForStateRoot(ctx, state, wrapper.WrappedPhase0SignedBeaconBlock(sb))
wsb, err := wrapper.WrappedSignedBeaconBlock(sb)
require.NoError(t, err)
s, err := ProcessBlockForStateRoot(ctx, state, wsb)
if err != nil && s != nil {
t.Fatalf("state should be nil on err. found: %v on error: %v for signed block: %v", s, err, sb)
}

View File

@@ -285,20 +285,18 @@ func ProcessBlockForStateRoot(
return nil, errors.Wrap(err, "could not process block header")
}
if state.Version() == version.Bellatrix {
enabled, err := b.ExecutionEnabled(state, blk.Body())
enabled, err := b.IsExecutionEnabled(state, blk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not check if execution is enabled")
}
if enabled {
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not check if execution is enabled")
return nil, err
}
if enabled {
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return nil, err
}
state, err = b.ProcessPayload(state, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload")
}
state, err = b.ProcessPayload(state, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload")
}
}
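Because the old and new lines are interleaved above, the resulting control flow in ProcessBlockForStateRoot is easier to read written out once. This restates the new side of the hunk, with the same error wrapping; nothing here is added beyond what the diff shows.

// Post-change flow for Bellatrix blocks inside ProcessBlockForStateRoot.
if state.Version() == version.Bellatrix {
	enabled, err := b.IsExecutionEnabled(state, blk.Body())
	if err != nil {
		return nil, errors.Wrap(err, "could not check if execution is enabled")
	}
	if enabled {
		payload, err := blk.Body().ExecutionPayload()
		if err != nil {
			return nil, err
		}
		state, err = b.ProcessPayload(state, payload)
		if err != nil {
			return nil, errors.Wrap(err, "could not process execution payload")
		}
	}
}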

View File

@@ -52,7 +52,9 @@ func TestExecuteStateTransitionNoVerify_FullProcess(t *testing.T) {
block.Block.Body.RandaoReveal = randaoReveal
block.Block.Body.Eth1Data = eth1Data
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
@@ -61,7 +63,9 @@ func TestExecuteStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Signature = sig.Marshal()
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
assert.NoError(t, err)
verified, err := set.Verify()
assert.NoError(t, err)
@@ -104,7 +108,9 @@ func TestExecuteStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *test
block.Block.Body.RandaoReveal = randaoReveal
block.Block.Body.Eth1Data = eth1Data
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
@@ -114,13 +120,17 @@ func TestExecuteStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *test
block.Signature = sig.Marshal()
block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32)
_, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "could not validate state root", err)
}
func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
beaconState, block, _, _, _ := createFullBlockWithOperations(t)
set, _, err := transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)
// Test Signature set verifies.
verified, err := set.Verify()
@@ -130,7 +140,7 @@ func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
beaconState, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot())
require.NoError(t, err)
@@ -143,7 +153,7 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
beaconState, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot())
require.NoError(t, err)
@@ -153,7 +163,7 @@ func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
func TestProcessOperationsNoVerifyAttsSigsBellatrix_OK(t *testing.T) {
beaconState, block := createFullBellatrixBlockWithOperations(t)
wsb, err := wrapper.WrappedBellatrixSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot())
require.NoError(t, err)
@@ -163,7 +173,7 @@ func TestProcessOperationsNoVerifyAttsSigsBellatrix_OK(t *testing.T) {
func TestCalculateStateRootAltair_OK(t *testing.T) {
beaconState, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
r, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
@@ -173,7 +183,7 @@ func TestCalculateStateRootAltair_OK(t *testing.T) {
func TestProcessBlockDifferentVersion(t *testing.T) {
beaconState, _ := util.DeterministicGenesisState(t, 64) // Phase 0 state
_, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block) // Altair block
wsb, err := wrapper.WrappedSignedBeaconBlock(block) // Altair block
require.NoError(t, err)
_, _, err = transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "state and block are different version. 0 != 1", err)

View File

@@ -44,7 +44,9 @@ func TestExecuteStateTransition_IncorrectSlot(t *testing.T) {
},
}
want := "expected state.slot"
_, err = transition.ExecuteStateTransition(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb)
assert.ErrorContains(t, want, err)
}
@@ -87,7 +89,9 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) {
block.Block.Body.RandaoReveal = randaoReveal
block.Block.Body.Eth1Data = eth1Data
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
@@ -96,7 +100,9 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Signature = sig.Marshal()
beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err = wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch, beaconState.Slot(), "Unexpected Slot number")
@@ -183,7 +189,9 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
cp.Root = []byte("hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))
_, err = transition.VerifyOperationLengths(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), beaconState, wsb)
wanted := "number of voluntary exits (17) in block body exceeds allowed threshold of 16"
assert.ErrorContains(t, wanted, err)
}
@@ -396,7 +404,9 @@ func TestProcessBlock_OverMaxProposerSlashings(t *testing.T) {
len(b.Block.Body.ProposerSlashings), params.BeaconConfig().MaxProposerSlashings)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}
@@ -413,7 +423,9 @@ func TestProcessBlock_OverMaxAttesterSlashings(t *testing.T) {
len(b.Block.Body.AttesterSlashings), params.BeaconConfig().MaxAttesterSlashings)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}
@@ -429,7 +441,9 @@ func TestProcessBlock_OverMaxAttestations(t *testing.T) {
len(b.Block.Body.Attestations), params.BeaconConfig().MaxAttestations)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}
@@ -446,7 +460,9 @@ func TestProcessBlock_OverMaxVoluntaryExits(t *testing.T) {
len(b.Block.Body.VoluntaryExits), maxExits)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}
@@ -466,7 +482,9 @@ func TestProcessBlock_IncorrectDeposits(t *testing.T) {
}
want := fmt.Sprintf("incorrect outstanding deposits in block body, wanted: %d, got: %d",
s.Eth1Data().DepositCount-s.Eth1DepositIndex(), len(b.Block.Body.Deposits))
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
assert.ErrorContains(t, want, err)
}

View File

@@ -13,3 +13,9 @@ var ErrNotFoundState = kv.ErrNotFoundState
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot
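Both new sentinels wrap the package-level ErrNotFound, so callers can detect the specific missing record with the standard library's errors.Is, as the kv tests later in this diff do via require.ErrorIs. A minimal sketch; beaconDB, ctx, and the surrounding function are assumed.

root, err := beaconDB.GenesisBlockRoot(ctx)
switch {
case errors.Is(err, db.ErrNotFoundGenesisBlockRoot):
	// The database was never initialized with genesis; load or generate it.
case err != nil:
	return err
default:
	_ = root // genesis block root available
}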

View File

@@ -27,6 +27,7 @@ type ReadOnlyDatabase interface {
BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, [][32]byte, error)
HasBlock(ctx context.Context, blockRoot [32]byte) bool
GenesisBlock(ctx context.Context) (block.SignedBeaconBlock, error)
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (block.SignedBeaconBlock, error)
HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]block.SignedBeaconBlock, error)
@@ -45,14 +46,16 @@ type ReadOnlyDatabase interface {
HasArchivedPoint(ctx context.Context, slot types.Slot) bool
LastArchivedRoot(ctx context.Context) [32]byte
LastArchivedSlot(ctx context.Context) (types.Slot, error)
LastValidatedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error)
// Deposit contract related handlers.
DepositContractAddress(ctx context.Context) ([]byte, error)
// Powchain operations.
PowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error)
// Fee recipient operations.
FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error)
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
// origin checkpoint sync support
OriginBlockRoot(ctx context.Context) ([32]byte, error)
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
}
// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -74,6 +77,7 @@ type NoHeadAccessDatabase interface {
// Checkpoint operations.
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
// Deposit contract related handlers.
SaveDepositContractAddress(ctx context.Context, addr common.Address) error
// Powchain operations.
@@ -81,7 +85,7 @@ type NoHeadAccessDatabase interface {
// Run any required database migrations.
RunMigrations(ctx context.Context) error
// Fee recipient operations.
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, addrs []common.Address) error
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, addrs []common.Address) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error
}
@@ -95,12 +99,13 @@ type HeadAccessDatabase interface {
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
// Genesis operations.
LoadGenesis(ctx context.Context, r io.Reader) error
LoadGenesis(ctx context.Context, stateBytes []byte) error
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error
// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, state io.Reader, block io.Reader) error
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
}
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
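FeeRecipientByValidatorID and SaveFeeRecipientsByValidatorIDs now key on types.ValidatorIndex rather than a bare uint64. A short usage sketch, matching the updated kv test later in this diff; beaconDB, ctx, and the error-handling context are assumed.

ids := []types.ValidatorIndex{12, 13}
recipients := []common.Address{common.HexToAddress("0x01"), common.HexToAddress("0x02")}
if err := beaconDB.SaveFeeRecipientsByValidatorIDs(ctx, ids, recipients); err != nil {
	return err
}
addr, err := beaconDB.FeeRecipientByValidatorID(ctx, 12)
if err != nil {
	return err // wraps ErrNotFoundFeeRecipient when the index has no entry
}
_ = addr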

View File

@@ -25,6 +25,7 @@ go_library(
"state_summary.go",
"state_summary_cache.go",
"utils.go",
"validated_checkpoint.go",
"wss.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/kv",
@@ -46,6 +47,7 @@ go_library(
"//config/params:go_default_library",
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
@@ -91,6 +93,7 @@ go_test(
"state_summary_test.go",
"state_test.go",
"utils_test.go",
"validated_checkpoint_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],

View File

@@ -21,7 +21,9 @@ func TestStore_Backup(t *testing.T) {
head := util.NewBeaconBlock()
head.Block.Slot = 5000
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(head)))
wsb, err := wrapper.WrappedSignedBeaconBlock(head)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
root, err := head.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
@@ -61,7 +63,9 @@ func TestStore_BackupMultipleBuckets(t *testing.T) {
for i := startSlot; i < 5200; i++ {
head := util.NewBeaconBlock()
head.Block.Slot = i
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(head)))
wsb, err := wrapper.WrappedSignedBeaconBlock(head)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
root, err := head.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
ssz "github.com/ferranbt/fastssz"
"github.com/golang/snappy"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
@@ -47,18 +48,18 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (block.SignedBeac
return blk, err
}
// OriginBlockRoot returns the value written to the db in SaveOriginBlockRoot
// OriginCheckpointBlockRoot returns the value written to the db in SaveOriginCheckpointBlockRoot
// This is the root of a finalized block within the weak subjectivity period
// at the time the chain was started, used to initialize the database and chain
// without syncing from genesis.
func (s *Store) OriginBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.OriginBlockRoot")
func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(originBlockRootKey)
rootSlice := bkt.Get(originCheckpointBlockRootKey)
if rootSlice == nil {
return ErrNotFoundOriginBlockRoot
}
@@ -69,6 +70,25 @@ func (s *Store) OriginBlockRoot(ctx context.Context) ([32]byte, error) {
return root, err
}
// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(backfillBlockRootKey)
if len(rootSlice) == 0 {
return ErrNotFoundBackfillBlockRoot
}
root = bytesutil.ToBytes32(rootSlice)
return nil
})
return root, err
}
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -324,6 +344,22 @@ func (s *Store) GenesisBlock(ctx context.Context) (block.SignedBeaconBlock, erro
return blk, err
}
func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
r := bkt.Get(genesisBlockRootKey)
if len(r) == 0 {
return ErrNotFoundGenesisBlockRoot
}
root = bytesutil.ToBytes32(r)
return nil
})
return root, err
}
// SaveGenesisBlockRoot to the db.
func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
@@ -334,16 +370,27 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
})
}
// SaveOriginBlockRoot is used to keep track of the block root used for origin sync.
// SaveOriginCheckpointBlockRoot is used to keep track of the block root used for syncing from a checkpoint origin.
// This should be a finalized block from within the current weak subjectivity period.
// This value is used by a running beacon chain node to locate the state at the beginning
// of the chain history, in places where genesis would typically be used.
func (s *Store) SaveOriginBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginBlockRoot")
func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(originBlockRootKey, blockRoot[:])
return bucket.Put(originCheckpointBlockRootKey, blockRoot[:])
})
}
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillBlockRootKey, blockRoot[:])
})
}
@@ -395,13 +442,13 @@ func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]
// FeeRecipientByValidatorID returns the fee recipient for a validator id.
// `ErrNotFoundFeeRecipient` is returned if the validator id is not found.
func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error) {
func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.FeeRecipientByValidatorID")
defer span.End()
var addr []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(id))
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
if addr == nil {
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
}
@@ -412,7 +459,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id uint64) (commo
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
// Error is returned if `ids` and `recipients` are not the same length.
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, feeRecipients []common.Address) error {
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, feeRecipients []common.Address) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
defer span.End()
@@ -423,7 +470,7 @@ func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint6
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
for i, id := range ids {
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(id), feeRecipients[i].Bytes()); err != nil {
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(uint64(id)), feeRecipients[i].Bytes()); err != nil {
return err
}
}
@@ -641,31 +688,27 @@ func unmarshalBlock(_ context.Context, enc []byte) (block.SignedBeaconBlock, err
if err != nil {
return nil, err
}
var rawBlock ssz.Unmarshaler
switch {
case hasAltairKey(enc):
// Marshal block bytes to altair beacon block.
rawBlock := &ethpb.SignedBeaconBlockAltair{}
err := rawBlock.UnmarshalSSZ(enc[len(altairKey):])
if err != nil {
rawBlock = &ethpb.SignedBeaconBlockAltair{}
if err := rawBlock.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
return nil, err
}
return wrapper.WrappedAltairSignedBeaconBlock(rawBlock)
case hasBellatrixKey(enc):
rawBlock := &ethpb.SignedBeaconBlockBellatrix{}
err := rawBlock.UnmarshalSSZ(enc[len(bellatrixKey):])
if err != nil {
rawBlock = &ethpb.SignedBeaconBlockBellatrix{}
if err := rawBlock.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
return nil, err
}
return wrapper.WrappedBellatrixSignedBeaconBlock(rawBlock)
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock := &ethpb.SignedBeaconBlock{}
err = rawBlock.UnmarshalSSZ(enc)
if err != nil {
rawBlock = &ethpb.SignedBeaconBlock{}
if err := rawBlock.UnmarshalSSZ(enc); err != nil {
return nil, err
}
return wrapper.WrappedPhase0SignedBeaconBlock(rawBlock), nil
}
return wrapper.WrappedSignedBeaconBlock(rawBlock)
}
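The interleaved hunk above is easier to follow as the resulting function: unmarshalBlock now declares a single ssz.Unmarshaler, picks the concrete proto type from the fork-prefix key, and hands the result to the fork-agnostic wrapper. Reconstructed from the new-side lines of the diff; the snappy decoding that precedes the switch is elided here as it is unchanged.

func unmarshalBlock(_ context.Context, enc []byte) (block.SignedBeaconBlock, error) {
	// ... snappy decoding of enc happens above, as in the original function ...
	var rawBlock ssz.Unmarshaler
	switch {
	case hasAltairKey(enc):
		rawBlock = &ethpb.SignedBeaconBlockAltair{}
		if err := rawBlock.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
			return nil, err
		}
	case hasBellatrixKey(enc):
		rawBlock = &ethpb.SignedBeaconBlockBellatrix{}
		if err := rawBlock.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
			return nil, err
		}
	default:
		rawBlock = &ethpb.SignedBeaconBlock{}
		if err := rawBlock.UnmarshalSSZ(enc); err != nil {
			return nil, err
		}
	}
	return wrapper.WrappedSignedBeaconBlock(rawBlock)
}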
// marshal versioned beacon block from struct type down to bytes.

View File

@@ -31,7 +31,7 @@ var blockTests = []struct {
if root != nil {
b.Block.ParentRoot = root
}
return wrapper.WrappedPhase0SignedBeaconBlock(b), nil
return wrapper.WrappedSignedBeaconBlock(b)
},
},
{
@@ -42,7 +42,7 @@ var blockTests = []struct {
if root != nil {
b.Block.ParentRoot = root
}
return wrapper.WrappedAltairSignedBeaconBlock(b)
return wrapper.WrappedSignedBeaconBlock(b)
},
},
{
@@ -53,11 +53,28 @@ var blockTests = []struct {
if root != nil {
b.Block.ParentRoot = root
}
return wrapper.WrappedBellatrixSignedBeaconBlock(b)
return wrapper.WrappedSignedBeaconBlock(b)
},
},
}
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
_, err := db.BackfillBlockRoot(ctx)
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
expected := [32]byte{}
copy(expected[:], []byte{0x23})
err = db.SaveBackfillBlockRoot(ctx, expected)
require.NoError(t, err)
actual, err := db.BackfillBlockRoot(ctx)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
BlockCacheSize = 1
slot := types.Slot(20)
@@ -212,7 +229,9 @@ func TestStore_GenesisBlock(t *testing.T) {
blockRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, blockRoot))
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlock)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
retrievedBlock, err := db.GenesisBlock(ctx)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(genesisBlock, retrievedBlock.Proto()), "Wanted: %v, received: %v", genesisBlock, retrievedBlock)
@@ -595,11 +614,11 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
func TestStore_FeeRecipientByValidatorID(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
ids := []uint64{0, 0, 0}
ids := []types.ValidatorIndex{0, 0, 0}
feeRecipients := []common.Address{{}, {}, {}, {}}
require.ErrorContains(t, "validatorIDs and feeRecipients must be the same length", db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
ids = []uint64{0, 1, 2}
ids = []types.ValidatorIndex{0, 1, 2}
feeRecipients = []common.Address{{'a'}, {'b'}, {'c'}}
require.NoError(t, db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
f, err := db.FeeRecipientByValidatorID(ctx, 0)

View File

@@ -53,7 +53,9 @@ func TestStore_FinalizedCheckpoint_CanSaveRetrieve(t *testing.T) {
}
// a valid chain is required to save finalized checkpoint.
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))

View File

@@ -13,5 +13,11 @@ var ErrNotFoundState = errors.Wrap(ErrNotFound, "state not found")
// ErrNotFoundOriginBlockRoot is an error specifically for the origin block root getter
var ErrNotFoundOriginBlockRoot = errors.Wrap(ErrNotFound, "OriginBlockRoot")
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = errors.Wrap(ErrNotFound, "OriginGenesisRoot")
// ErrNotFoundBackfillBlockRoot is an error specifically for the backfill block root getter
var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")

View File

@@ -32,7 +32,7 @@ var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to
// - De-index all finalized beacon block roots from previous_finalized_epoch to
// new_finalized_epoch. (I.e. delete these roots from the index, to be re-indexed.)
// - Build the canonical finalized chain by walking up the ancestry chain from the finalized block
// root until a parent is found in the index or the parent is genesis.
// root until a parent is found in the index, or the parent is genesis or the origin checkpoint.
// - Add all block roots in the database where epoch(block.slot) == checkpoint.epoch.
//
// This method ensures that all blocks from the current finalized epoch are considered "final" while
@@ -46,6 +46,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
root := checkpoint.Root
var previousRoot []byte
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
initCheckpointRoot := tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)
// De-index recent finalized block roots, to be re-indexed.
previousFinalizedCheckpoint := &ethpb.Checkpoint{}
@@ -74,7 +75,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
// Walk up the ancestry chain until we reach a block root present in the finalized block roots
// index bucket or genesis block root.
for {
if bytes.Equal(root, genesisRoot) {
if bytes.Equal(root, genesisRoot) || bytes.Equal(root, initCheckpointRoot) {
break
}

View File

@@ -63,7 +63,9 @@ func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
blk.Block.Slot = 0
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Finalized genesis block doesn't exist in db")
}
@@ -207,7 +209,8 @@ func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []block.Signed
var err error
previousRoot, err = blocks[j-i].Block.HashTreeRoot()
require.NoError(t, err)
ifaceBlocks[j-i] = wrapper.WrappedPhase0SignedBeaconBlock(blocks[j-i])
ifaceBlocks[j-i], err = wrapper.WrappedSignedBeaconBlock(blocks[j-i])
require.NoError(t, err)
}
return ifaceBlocks
}
@@ -224,7 +227,7 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
var err error
previousRoot, err = blocks[j-startIdx].Block.HashTreeRoot()
require.NoError(t, err)
ifaceBlocks[j-startIdx], err = wrapper.WrappedAltairSignedBeaconBlock(blocks[j-startIdx])
ifaceBlocks[j-startIdx], err = wrapper.WrappedSignedBeaconBlock(blocks[j-startIdx])
require.NoError(t, err)
}
return ifaceBlocks

View File

@@ -4,8 +4,6 @@ import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
@@ -28,7 +26,11 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
if err != nil {
return errors.Wrap(err, "could not get genesis block root")
}
if err := s.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)); err != nil {
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
if err != nil {
return errors.Wrap(err, "could not wrap genesis block")
}
if err := s.SaveBlock(ctx, wsb); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
@@ -50,14 +52,10 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
return nil
}
// LoadGenesis loads a genesis state from a given file path, if no genesis exists already.
func (s *Store) LoadGenesis(ctx context.Context, r io.Reader) error {
b, err := ioutil.ReadAll(r)
if err != nil {
return err
}
// LoadGenesis loads a genesis state from an ssz-serialized byte slice, if no genesis exists already.
func (s *Store) LoadGenesis(ctx context.Context, sb []byte) error {
st := &ethpb.BeaconState{}
if err := st.UnmarshalSSZ(b); err != nil {
if err := st.UnmarshalSSZ(sb); err != nil {
return err
}
gs, err := statev1.InitializeFromProtoUnsafe(st)
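Passing the serialized state as a byte slice avoids the consumed-reader problem the old API had, which the previous test worked around with a Seek back to the start before reloading. A caller sketch under the assumption that the genesis state lives in an ssz file on disk; genesisPath, beaconDB, and ctx are illustrative names.

sb, err := os.ReadFile(genesisPath)
if err != nil {
	return err
}
if err := beaconDB.LoadGenesis(ctx, sb); err != nil {
	return errors.Wrap(err, "could not load genesis state")
}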

View File

@@ -53,21 +53,16 @@ func TestLoadGenesisFromFile(t *testing.T) {
if err == nil {
fp = rfp
}
r, err := os.Open(fp)
sb, err := os.ReadFile(fp)
assert.NoError(t, err)
defer func() {
assert.NoError(t, r.Close())
}()
db := setupDB(t)
assert.NoError(t, db.LoadGenesis(context.Background(), r))
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
testGenesisDataSaved(t, db)
// Loading the same genesis again should not throw an error
_, err = r.Seek(0, 0)
assert.NoError(t, err)
assert.NoError(t, db.LoadGenesis(context.Background(), r))
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
}
func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
@@ -76,15 +71,12 @@ func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
if err == nil {
fp = rfp
}
r, err := os.Open(fp)
sb, err := os.ReadFile(fp)
assert.NoError(t, err)
defer func() {
assert.NoError(t, r.Close())
}()
// Loading a genesis with the wrong fork version as beacon config should throw an error.
db := setupDB(t)
assert.ErrorContains(t, "does not match config genesis fork version", db.LoadGenesis(context.Background(), r))
assert.ErrorContains(t, "does not match config genesis fork version", db.LoadGenesis(context.Background(), sb))
}
func TestEnsureEmbeddedGenesis(t *testing.T) {

View File

@@ -175,7 +175,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
powchainBucket,
stateSummaryBucket,
stateValidatorsBucket,
validatedTips,
// Indices buckets.
attestationHeadBlockRootBucket,
attestationSourceRootIndicesBucket,

View File

@@ -19,7 +19,6 @@ var (
     powchainBucket = []byte("powchain")
     stateValidatorsBucket = []byte("state-validators")
     feeRecipientBucket = []byte("fee-recipient")
-    validatedTips = []byte("validated-synced-tips")
     // Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
     slotsHasObjectBucket = []byte("slots-has-objects")
@@ -39,19 +38,22 @@ var (
     blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")
     // Specific item keys.
-    headBlockRootKey          = []byte("head-root")
-    genesisBlockRootKey       = []byte("genesis-root")
-    depositContractAddressKey = []byte("deposit-contract")
-    justifiedCheckpointKey    = []byte("justified-checkpoint")
-    finalizedCheckpointKey    = []byte("finalized-checkpoint")
-    powchainDataKey           = []byte("powchain-data")
+    headBlockRootKey           = []byte("head-root")
+    genesisBlockRootKey        = []byte("genesis-root")
+    depositContractAddressKey  = []byte("deposit-contract")
+    justifiedCheckpointKey     = []byte("justified-checkpoint")
+    finalizedCheckpointKey     = []byte("finalized-checkpoint")
+    powchainDataKey            = []byte("powchain-data")
+    lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
     // Below keys are used to identify objects are to be fork compatible.
     // Objects that are only compatible with specific forks should be prefixed with such keys.
     altairKey = []byte("altair")
     bellatrixKey = []byte("merge")
     // block root included in the beacon state used by weak subjectivity initial sync
     originBlockRootKey = []byte("origin-block-root")
     originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
     // block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
     backfillBlockRootKey = []byte("backfill-block-root")
     // Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
     lastArchivedIndexKey = []byte("last-archived")


@@ -638,7 +638,11 @@ func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []by
     if err != nil {
         return 0, err
     }
-    if err := helpers.BeaconBlockIsNil(wrapper.WrappedPhase0SignedBeaconBlock(b)); err != nil {
+    wsb, err := wrapper.WrappedSignedBeaconBlock(b)
+    if err != nil {
+        return 0, err
+    }
+    if err := helpers.BeaconBlockIsNil(wsb); err != nil {
         return 0, err
     }
     return b.Block.Slot, nil
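
Not part of the diff: the same mechanical change recurs across these hunks. wrapper.WrappedPhase0SignedBeaconBlock returned a wrapped block directly, while the generic wrapper.WrappedSignedBeaconBlock also returns an error, so every call site gains an error check. A minimal sketch of the new shape; the import paths are assumptions inferred from the package names in the hunks, not taken from the diff:

// Sketch only; import paths are assumptions.
package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" // assumed path of the wrapper package
    "github.com/prysmaticlabs/prysm/testing/util"                 // assumed path of the test util package
)

func wrapBlockExample() error {
    b := util.NewBeaconBlock() // phase0 protobuf block, as used in the tests below
    // Old: wsb := wrapper.WrappedPhase0SignedBeaconBlock(b), with no error to handle.
    // New: the fork-generic constructor can fail, for example on an unsupported block type.
    wsb, err := wrapper.WrappedSignedBeaconBlock(b)
    if err != nil {
        return err
    }
    // The wrapped value exposes the generic block interface used by SaveBlock and friends.
    root, err := wsb.Block().HashTreeRoot()
    if err != nil {
        return err
    }
    fmt.Printf("wrapped block root: %x\n", root)
    return nil
}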


@@ -374,7 +374,9 @@ func TestStore_StatesBatchDelete(t *testing.T) {
     for i := 0; i < len(totalBlocks); i++ {
         b := util.NewBeaconBlock()
         b.Block.Slot = types.Slot(i)
-        totalBlocks[i] = wrapper.WrappedPhase0SignedBeaconBlock(b)
+        var err error
+        totalBlocks[i], err = wrapper.WrappedSignedBeaconBlock(b)
+        require.NoError(t, err)
         r, err := totalBlocks[i].Block().HashTreeRoot()
         require.NoError(t, err)
         st, err := util.NewBeaconState()
@@ -425,7 +427,9 @@ func TestStore_DeleteFinalizedState(t *testing.T) {
     blk.Block.ParentRoot = genesis[:]
     blk.Block.Slot = 100
-    require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
+    wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
+    require.NoError(t, err)
+    require.NoError(t, db.SaveBlock(ctx, wsb))
     finalizedBlockRoot, err := blk.Block.HashTreeRoot()
     require.NoError(t, err)
@@ -450,7 +454,9 @@ func TestStore_DeleteHeadState(t *testing.T) {
     blk := util.NewBeaconBlock()
     blk.Block.ParentRoot = genesis[:]
     blk.Block.Slot = 100
-    require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
+    wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
+    require.NoError(t, err)
+    require.NoError(t, db.SaveBlock(ctx, wsb))
     headBlockRoot, err := blk.Block.HashTreeRoot()
     require.NoError(t, err)
@@ -470,7 +476,9 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
     b.Block.Slot = 1
     r, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
+    wsb, err := wrapper.WrappedSignedBeaconBlock(b)
+    require.NoError(t, err)
+    require.NoError(t, db.SaveBlock(context.Background(), wsb))
     st, err := util.NewBeaconState()
     require.NoError(t, err)
     require.NoError(t, st.SetSlot(1))
@@ -480,7 +488,9 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
     b.Block.Slot = 100
     r1, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
+    wsb, err = wrapper.WrappedSignedBeaconBlock(b)
+    require.NoError(t, err)
+    require.NoError(t, db.SaveBlock(context.Background(), wsb))
     st, err = util.NewBeaconState()
     require.NoError(t, err)
     require.NoError(t, st.SetSlot(100))
@@ -490,7 +500,9 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
     b.Block.Slot = 1000
     r2, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
+    wsb, err = wrapper.WrappedSignedBeaconBlock(b)
+    require.NoError(t, err)
+    require.NoError(t, db.SaveBlock(context.Background(), wsb))
     st, err = util.NewBeaconState()
     require.NoError(t, err)
     require.NoError(t, st.SetSlot(1000))
@@ -524,7 +536,9 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
     b.Block.Slot = 1
     r, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
+    wsb, err := wrapper.WrappedSignedBeaconBlock(b)
+    require.NoError(t, err)
+    require.NoError(t, db.SaveBlock(context.Background(), wsb))
     st, err := util.NewBeaconState()
     require.NoError(t, err)
@@ -561,7 +575,9 @@ func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) {
         b.Block.ParentRoot = prevRoot[:]
         r, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
+        wsb, err := wrapper.WrappedSignedBeaconBlock(b)
+        require.NoError(t, err)
+        require.NoError(t, db.SaveBlock(context.Background(), wsb))
         bRoots = append(bRoots, r)
         prevRoot = r
@@ -600,7 +616,9 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) {
         b.Block.Slot = i
         r, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
+        wsb, err := wrapper.WrappedSignedBeaconBlock(b)
+        require.NoError(t, err)
+        require.NoError(t, db.SaveBlock(context.Background(), wsb))
         st, err := util.NewBeaconState()
         require.NoError(t, err)
@@ -628,7 +646,9 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
        b.Block.Slot = i
         r, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
+        wsb, err := wrapper.WrappedSignedBeaconBlock(b)
+        require.NoError(t, err)
+        require.NoError(t, db.SaveBlock(context.Background(), wsb))
         unfinalizedRoots = append(unfinalizedRoots, r)
         st, err := util.NewBeaconState()


@@ -0,0 +1,49 @@
+package kv
+
+import (
+    "context"
+
+    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
+    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+    bolt "go.etcd.io/bbolt"
+    "go.opencensus.io/trace"
+)
+
+// LastValidatedCheckpoint returns the latest fully validated checkpoint in beacon chain.
+func (s *Store) LastValidatedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error) {
+    ctx, span := trace.StartSpan(ctx, "BeaconDB.LastValidatedCheckpoint")
+    defer span.End()
+    var checkpoint *ethpb.Checkpoint
+    err := s.db.View(func(tx *bolt.Tx) error {
+        bkt := tx.Bucket(checkpointBucket)
+        enc := bkt.Get(lastValidatedCheckpointKey)
+        if enc == nil {
+            var finErr error
+            checkpoint, finErr = s.FinalizedCheckpoint(ctx)
+            return finErr
+        }
+        checkpoint = &ethpb.Checkpoint{}
+        return decode(ctx, enc, checkpoint)
+    })
+    return checkpoint, err
+}
+
+// SaveLastValidatedCheckpoint saves the last validated checkpoint in beacon chain.
+func (s *Store) SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error {
+    ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastValidatedCheckpoint")
+    defer span.End()
+    enc, err := encode(ctx, checkpoint)
+    if err != nil {
+        return err
+    }
+    return s.db.Update(func(tx *bolt.Tx) error {
+        bucket := tx.Bucket(checkpointBucket)
+        hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
+        hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
+        if !(hasStateInDB || hasStateSummary) {
+            return errMissingStateForCheckpoint
+        }
+        return bucket.Put(lastValidatedCheckpointKey, enc)
+    })
+}
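
Not part of the diff: a sketch of how the two new accessors above might be used together from outside the package. The method names and fallback behavior come from the file above; the store value, checkpoint contents, and the kv import path are illustrative assumptions.

// Sketch only; the kv import path and function name are assumptions.
package main

import (
    "context"

    "github.com/prysmaticlabs/prysm/beacon-chain/db/kv" // assumed location of Store
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

func trackValidatedCheckpoint(ctx context.Context, db *kv.Store, cp *ethpb.Checkpoint) (*ethpb.Checkpoint, error) {
    // Saving requires a state or state summary for cp.Root to already be in the DB;
    // otherwise the store returns its missing-state error.
    if err := db.SaveLastValidatedCheckpoint(ctx, cp); err != nil {
        return nil, err
    }
    // Reading falls back to the finalized checkpoint when nothing has been saved yet.
    return db.LastValidatedCheckpoint(ctx)
}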

Some files were not shown because too many files have changed in this diff.