Compare commits


43 Commits

Author SHA1 Message Date
symbolpunk
3bbaa5e9c5 bazel 2022-05-20 14:29:23 -04:00
Mike Neuder
9fab9df61e Refactor validator accounts delete to remove cli context dependency (#10686)
* add functional options accounts delete

* bazel run //:gazelle -- fix

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-17 23:13:36 +00:00
Radosław Kapka
eedafac822 SSZ responses for block production (#10697)
* Simplify SSZ handling

* fix tests

* add version to SSZ proto and to GetBeaconStateSSZV2

* TestProduceBlockV2SSZ

* rest of tests

* middleware

* use constants

* some middleware changes

* test fix

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-17 20:13:06 +00:00
terencechain
e9ad7aeff8 Fix a bad forkchoice proposer boost test (#10705)
* Fix a bad forkchoice proposer boost test

* Improve tests

* Add fork regression

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-05-17 16:26:56 -03:00
terencechain
3cfef20938 Add better debugging logs for validate block p2p pipeline (#10698) 2022-05-17 17:27:52 +00:00
Potuz
e90284bc00 Add Error checks for nil inner state (#10701) 2022-05-17 17:38:35 +08:00
terencechain
01e15a033f Improve "beacon node doesn't have a parent in db" logs (#10689)
* Improve "beacon node doesn't have a parent in db" logs

* Update round_robin.go

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-16 18:51:27 +00:00
Radosław Kapka
f09b06d6f6 Allow SSZ-serialized blocks in publishBlindedBlock (#10679)
* SubmitBlockSSZ grpc

* SubmitBlockSSZ middleware

* test fixes

* use VersionedUnmarshaller

* use VersionedUnmarshaller

(cherry picked from commit 7388eeb963)

* tests

* fuzz: Add fuzz tests for sparse merkle trie (#10662)

* Add fuzz tests for sparse merkle trie and change HTR signature to return an error

* fix capitalization of error message

* Add engine timeout values (#10645)

* Add timeout values

* Update engine_client.go

* Update engine_client.go

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Cleanup of `stategen` package (#10607)

* powchain and stategen

* revert powchain changes

* rename field to blockRootsOfSavedStates

* rename params to blockRoot

* review feedback

* fix loop

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Process atts and update head before proposing (#10653)

* Process atts and update head

* Fix ctx

* New test and old tests

* Update validator_test.go

* Update validator_test.go

* Update service.go

* Rename to UpdateHead

* Update receive_attestation.go

* Update receive_attestation.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Add link to e2e docs in `README` (#10672)

* Improve `ReceiveBlock`'s comment (#10671)

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Call fcu on invalid payload (#10565)

* Starting

* remove finalized root

* Just call fcu

* Review feedbacks

* fix one test

* Fix conflicts

* Update execution_engine_test.go

* Add a test for invalid recursive call

* Add comprehensive recursive test

* disallow override empty hash

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Cache and use justified and finalized payload block hash (#10657)

* Cache and use justified and finalized payload block hash

* Fix tests

* Use real byte

* Fix conflicts

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* do not export slotFromBlock

* simplify tests

* grpc

* middleware

* extract package-level consts

* Simplify SSZ handling

* fix tests

* test fixes

* test hack

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-05-16 17:59:43 +00:00
james-prysm
16e66ee1b8 fee-recipient: update error log to warn log (#10684)
* initial commit

* updating fee recipient on beacon node

* updating logs

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-16 14:40:13 +00:00
terencechain
3d3890205f Remove invalid terminal block error code (#10646)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-15 03:14:44 +00:00
terencechain
a90335b15e Blockchain: Use get_block to retrieve block (#10688)
* Use get block

* Remove unused errNilParentInDB

* Fix TestVerifyBlkDescendant

* Update beacon-chain/blockchain/service.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-05-15 02:23:01 +00:00
Radosław Kapka
8cd43d216f Changes to SSZ handling (#10687)
* Simplify SSZ handling

* fix tests

* add version to SSZ proto and to GetBeaconStateSSZV2

* remove unwanted methods

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-13 20:29:18 +00:00
Radosław Kapka
d25c0ec1a5 Fix bugs in /eth/v1/beacon/blinded_blocks (#10673)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-13 18:20:39 +00:00
Radosław Kapka
e1c4427ea5 Allow SSZ-serialized blocks in publishBlock (#10663)
* SubmitBlockSSZ grpc

* SubmitBlockSSZ middleware

* test fixes

* use VersionedUnmarshaller

(cherry picked from commit 7388eeb963)

* tests

* do not export slotFromBlock

* simplify tests

* extract package-level consts

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-13 10:54:45 +00:00
Preston Van Loon
7042791e31 fuzz: Fix off by one error in sparse merkle trie item insertion (#10668)
* fuzz: Fix off by one error in sparse merkle trie item insertion

* remove new line

* Move validation to the proto constructor

* fix build

* Add a unit test because @nisdas is going to ask for it

* fix up

* gaz

* Update container/trie/sparse_merkle.go

* Update container/trie/sparse_merkle_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: nisdas <nishdas93@gmail.com>
2022-05-13 07:02:43 +00:00
terencechain
e771585b77 Fix error.Is ordering (#10685)
* Fix error.is ordering for ErrUndefinedExecutionEngineError

* Update BUILD.bazel
2022-05-12 22:43:34 +00:00
Radosław Kapka
98622a052f Extract OptimisticSyncFetcher interface (#10654)
* Extract `OptimisticSyncFetcher` interface

* extract IsOptimistic

* fix tests

* more test fixes

* even more test fixes

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 17:23:45 +00:00
Potuz
61033ebea1 handle failure to update head (#10651)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 16:36:46 +00:00
Potuz
e808025b17 regression test off-by-one (#10675)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 15:49:37 +00:00
Radosław Kapka
7db0435ee0 Unify WARNING comments (#10678)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 15:25:44 +00:00
Nishant Das
1f086e4333 add more fuzz targets (#10682) 2022-05-12 10:47:29 -04:00
james-prysm
184e5be9de Fee recipient: checksum log (#10664)
* adding checksum check at validator client and beacon node

* adding validation and logs on validator client startup

* moving the log and validation

* fixing unit tests

* adding test for bad checksum on validator client

* fixing bazel

* addressing comments

* fixing log display

* Update beacon-chain/node/config.go

* Apply suggestions from code review

* breaking up lines

* fixing unit test

* ugh another fix to unit test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-11 19:36:57 +00:00
terencechain
e33850bf51 Don't return nil with new head (#10680) 2022-05-11 11:33:10 -07:00
Preston Van Loon
cc643ac4cc native-state: Simplify MarshalSSZ (#10677)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-11 14:25:11 +00:00
Nishant Das
abefe1e9d5 Add Sync Block Topic Fuzz Target (#10676)
* add new target

* remove

* Update beacon-chain/sync/BUILD.bazel
2022-05-11 13:18:12 +00:00
Nishant Das
b4e89fb28b Add in State Fuzzing (#10375)
* add it in

* build tags

* gaz

* remove dependency on fast-ssz for now

* copy and comment

* add in native state

* fix build

* no assert on invalid input, just return

* Add failing case

* fix it up

Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-05-11 12:47:54 +00:00
terencechain
4ad1c4df01 Cache and use justified and finalized payload block hash (#10657)
* Cache and use justified and finalized payload block hash

* Fix tests

* Use real byte

* Fix conflicts

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 21:20:28 +00:00
terencechain
6a197b47d9 Call fcu on invalid payload (#10565)
* Starting

* remove finalized root

* Just call fcu

* Review feedbacks

* fix one test

* Fix conflicts

* Update execution_engine_test.go

* Add a test for invalid recursive call

* Add comprehensive recursive test

* disallow override empty hash

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 20:02:00 +00:00
Radosław Kapka
d102421a25 Improve ReceiveBlock's comment (#10671)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 19:13:28 +00:00
Radosław Kapka
7b1490429c Add link to e2e docs in README (#10672) 2022-05-10 14:16:40 +00:00
terencechain
bbdf19cfd0 Process atts and update head before proposing (#10653)
* Process atts and update head

* Fix ctx

* New test and old tests

* Update validator_test.go

* Update validator_test.go

* Update service.go

* Rename to UpdateHead

* Update receive_attestation.go

* Update receive_attestation.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-09 23:12:50 +00:00
Radosław Kapka
5d94030b4f Cleanup of stategen package (#10607)
* powchain and stategen

* revert powchain changes

* rename field to blockRootsOfSavedStates

* rename params to blockRoot

* review feedback

* fix loop

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-09 22:25:47 +00:00
terencechain
16273a2040 Add engine timeout values (#10645)
* Add timeout values

* Update engine_client.go

* Update engine_client.go

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-05-09 20:02:20 +00:00
Preston Van Loon
97663548a1 fuzz: Add fuzz tests for sparse merkle trie (#10662)
* Add fuzz tests for sparse merkle trie and change HTR signature to return an error

* fix capitalization of error message
2022-05-09 16:51:48 +00:00
Radosław Kapka
7d9d8454b1 Improvements to powchain package (#10652)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-09 13:57:34 +00:00
Radosław Kapka
21bdbd548a Deduplicate native state (a.k.a. One State to rule them all) (#10483)
* v0

* getters/setters

* init and copy

* hasher

* all the nice stuff

* make bazel happy

* remove tests for smaller PR

* remove old states

* move files

* import fixes

* custom MarshalSSZ

* fixed deadlock

* copy version when copying state

* correct issues in state_trie

* fix Copy()

* better e2e comment

* add code to minimal state

* spectest test

* Revert "Auxiliary commit to revert individual files from 84154423464e8372f7e0a03367403656ac5cd78e"

This reverts commit 9602599d183081291dfa0ba4f1036430f63a7822.

* native state assert

* always error

* always log

* more native state usage

* cleanup

* remove empty line

* Revert "spectests"

This reverts commit 1c49bed5d1cf6224afaf21e18562bf72fae5d2b6.

# Conflicts:
#	beacon-chain/powchain/service.go
#	beacon-chain/state/v1/state_trie.go
#	beacon-chain/state/v2/state_trie.go
#	beacon-chain/state/v3/state_trie.go
#	testing/spectest/shared/phase0/finality/BUILD.bazel
#	testing/spectest/shared/phase0/finality/runner.go

* dedup field trie

* fix test issues

* cleanup

* use correct field num in FinalizedRootProof

* use existing version constant

* halfway there

* "working" version

* some fixes

* fix field nums in tests

* rename v0types to nativetypes

* Revert "Auxiliary commit to revert individual files from dc549b1cf8e724bd08cee1ecc760ff3771d5592d"

This reverts commit 7254d3070d8693b283fc686a2e01a822ecbac1b3.

* uncomment code

* remove map size

* Revert "Revert "spectests""

This reverts commit 39c271ae6b.

* use reverse map

* Revert "Revert "Revert "spectests"""

This reverts commit 19ba8cf95c.

* finally found the bug

(cherry picked from commit a5414c4be1bdb61a50b391ea5301895e772cc5e9)

* simplify populateFieldIndexes

* fix copy

(cherry picked from commit 7da4fb8cf51557ef931bb781872ea52fc6731af5)

* remove native state from e2e

* remove index map

* unsupported functions

* Use ProtobufBeaconState() from native state

* tests

* typo

* reduce complexity of `SaveStatesEfficient`

* remove unused receiver name

* update doc.go

* fix test assertion

* fix test assertion 2

* Phase0 justification bits

* bring back state tests

* rename fieldIndexRev

* versioning of ToProto

* remove version check from unexported function

* hasher tests

* don't return error from JustificationBits

* extract fieldConvertersNative

* helper error function

* use fieldConvertersNative

* Introduce RealPosition method on FieldIndex

* use RealPosition in hasher

* remove unused fields

* remove TestAppendBeyondIndicesLimit

(cherry picked from commit 3017e700282969c30006b64c95c21ffe6b166f8b)

* simplify RealPosition

* rename field interface

* use helper in proofs.go

* Update beacon-chain/core/altair/upgrade.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-09 13:02:34 +00:00
Nishant Das
e2caaf972f Fix Multiclient E2E Runs (#10660) 2022-05-09 12:38:11 +02:00
Nishant Das
c5ddc266ae Perform Request Size Limiting When Caching Eth1 Headers (#10649)
* rate limit reqs

* fix issues

* make it better

* final check

* terence's review

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-09 03:19:23 +00:00
Nishant Das
74518f0e1b Log Out Missing Validators in Participation Drops (#10624)
* debug failures

* clean up

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-09 01:44:47 +00:00
terencechain
122c3f44cc Insert over-the-wire slashing to forkchoice (#10615)
* remove equivocating votes from forkchoice

* shutup deepsource

* Add insertSlashingsToForkChoiceStore

* Fix equality checks, add tests

* Update process_block.go

* Fix equality check for doublylinked

* More checks

* Rm err return

* Consider slashings over the wire

* Update new_slot_test.go

* Fix spec tests

* Add ReceiveAttesterSlashing

* Update mock.go

* Revert spec test changes

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-05-07 15:56:34 +00:00
kasey
b1b13cfca7 simplify config names, use strings (#10656)
* simplify config names, use strings

* lint

Co-authored-by: kasey <kasey@users.noreply.github.com>
2022-05-06 21:42:27 +00:00
Preston Van Loon
495013e832 Update github actions to go 1.18 (#10655) 2022-05-06 17:50:52 +00:00
kasey
ae82c17dc3 run process_slots before caching committee data (#10639)
* demonstrate issue with committee caching in e2e

* lint

* run process_slots before caching committee data

* pass transitioned state to UpdateCommitteeCache

* don't run extra epochs w/ web3signer

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-05-06 14:33:09 +08:00
383 changed files with 11556 additions and 13457 deletions


@@ -1 +0,0 @@
-5.0.0


@@ -38,10 +38,10 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v2
-      - name: Set up Go 1.17
+      - name: Set up Go 1.18
        uses: actions/setup-go@v3
        with:
-          go-version: 1.17
+          go-version: 1.18
       - name: Run Gosec Security Scanner
         run: | # https://github.com/securego/gosec/issues/469
           export PATH=$PATH:$(go env GOPATH)/bin
@@ -55,16 +55,16 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v2
-      - name: Set up Go 1.17
+      - name: Set up Go 1.18
        uses: actions/setup-go@v3
        with:
-          go-version: 1.17
+          go-version: 1.18
        id: go
       - name: Golangci-lint
         uses: golangci/golangci-lint-action@v2
         with:
-          args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.17
+          args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.18
           version: v1.45.2
           skip-go-installation: true
@@ -75,7 +75,7 @@ jobs:
       - name: Set up Go 1.x
         uses: actions/setup-go@v2
         with:
-          go-version: 1.17
+          go-version: 1.18
        id: go
       - name: Check out code into the Go module directory

README_WINDOWS.md (new file)

@@ -0,0 +1,59 @@
- install wsl 2 on your windows machine, verify it's wsl 2 and not 1
> sudo apt install npm
- can't install npm
> export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
- then follow guidance here https://docs.prylabs.network/docs/install/install-with-bazel/
- version mismatch error, even when forcing the .bazelversion version to be installed
- delete .bazelversion, just make sure you keep WSL's bazel version in sync with the one specified in .bazelversion
> sudo apt install bazel --version=5.0.0
> bazel build //beacon-chain:beacon-chain --config=release
- workspace_status.sh not found
- use --workspace_status_command=/bin/true per https://docs.bazel.build/versions/main/user-manual.html#workspace_status
> bazel build //cmd/beacon-chain:beacon-chain --config=release --workspace_status_command=/bin/true
- error related to https://stackoverflow.com/questions/48674104/clang-error-while-loading-shared-libraries-libtinfo-so-5-cannot-open-shared-o
> sudo apt install libncurses5
> bazel build //cmd/beacon-chain:beacon-chain --config=release --workspace_status_command=/bin/true
- 'gmpxx.h' file not found
> sudo apt-get install libgmp-dev
> bazel build //cmd/beacon-chain:beacon-chain --config=release --workspace_status_command=/bin/true
works!
start geth on localhost:8545
> bazel run //beacon-chain --config=release -- --http-web3provider=http://localhost:8545
works!
open vs code with prysm code
if you see errors, make sure you install https://www.mingw-w64.org/downloads/#mingw-builds
I used https://www.msys2.org/ and followed the instructions on that page
in combination with instructions on this page https://code.visualstudio.com/docs/cpp/config-mingw#_prerequisites
blst errors... try --define=blst_modern=true
> bazel build //validator:validator --config=release --workspace_status_command=/bin/true --define=blst_modern=true
-----
> bazel build //cmd/beacon-chain:beacon-chain --workspace_status_command=/bin/true --define=blst_modern=true
> bazel build //cmd/validator:validator --workspace_status_command=/bin/true --define=blst_modern=true
---
then issue this command:
> bazel run //beacon-chain -- --datadir /tmp/chaindata --force-clear-db --interop-genesis-state /tmp/genesis.ssz --interop-eth1data-votes --min-sync-peers=0 --http-web3provider=http://localhost:8545 --deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 --bootstrap-node= --chain-config-file=/tmp/merge.yml


@@ -137,6 +137,7 @@ go_test(
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/wrapper:go_default_library",
+        "//container/trie:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//testing/assert:go_default_library",
@@ -188,6 +189,7 @@ go_test(
         "//beacon-chain/powchain/testing:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/wrapper:go_default_library",
+        "//container/trie:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//testing/assert:go_default_library",


@@ -20,7 +20,7 @@ import (
 )

 // ChainInfoFetcher defines a common interface for methods in blockchain service which
-// directly retrieves chain info related data.
+// directly retrieve chain info related data.
 type ChainInfoFetcher interface {
     HeadFetcher
     FinalizationFetcher
@@ -31,6 +31,12 @@ type ChainInfoFetcher interface {
     HeadDomainFetcher
 }

+// HeadUpdater defines a common interface for methods in blockchain service
+// which allow to update the head info
+type HeadUpdater interface {
+    UpdateHead(context.Context) error
+}
+
 // TimeFetcher retrieves the Ethereum consensus data that's related to time.
 type TimeFetcher interface {
     GenesisTime() time.Time
@@ -43,7 +49,7 @@ type GenesisFetcher interface {
 }

 // HeadFetcher defines a common interface for methods in blockchain service which
-// directly retrieves head related data.
+// directly retrieve head related data.
 type HeadFetcher interface {
     HeadSlot() types.Slot
     HeadRoot(ctx context.Context) ([]byte, error)
@@ -55,8 +61,6 @@ type HeadFetcher interface {
     HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
     HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
     ChainHeads() ([][32]byte, []types.Slot)
-    IsOptimistic(ctx context.Context) (bool, error)
-    IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
     HeadSyncCommitteeFetcher
     HeadDomainFetcher
 }
@@ -73,7 +77,7 @@ type CanonicalFetcher interface {
 }

 // FinalizationFetcher defines a common interface for methods in blockchain service which
-// directly retrieves finalization and justification related data.
+// directly retrieve finalization and justification related data.
 type FinalizationFetcher interface {
     FinalizedCheckpt() *ethpb.Checkpoint
     CurrentJustifiedCheckpt() *ethpb.Checkpoint
@@ -81,6 +85,12 @@ type FinalizationFetcher interface {
     VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
 }

+// OptimisticModeFetcher retrieves information about optimistic status of the node.
+type OptimisticModeFetcher interface {
+    IsOptimistic(ctx context.Context) (bool, error)
+    IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
+}
+
 // FinalizedCheckpt returns the latest finalized checkpoint from chain store.
 func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
     cp := s.store.FinalizedCheckpt()
@@ -232,7 +242,7 @@ func (s *Service) GenesisTime() time.Time {
     return s.genesisTime
 }

-// GenesisValidatorsRoot returns the genesis validator
+// GenesisValidatorsRoot returns the genesis validators
 // root of the chain.
 func (s *Service) GenesisValidatorsRoot() [32]byte {
     s.headLock.RLock()
@@ -299,7 +309,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
     return v.PublicKey(), nil
 }

-// ForkChoicer returns the forkchoice interface
+// ForkChoicer returns the forkchoice interface.
 func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
     return s.cfg.ForkChoiceStore
 }
@@ -315,7 +325,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
     return s.IsOptimisticForRoot(ctx, s.head.root)
 }

-// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
+// IsOptimisticForRoot takes the root as argument instead of the current head
 // and returns true if it is optimistic.
 func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
     optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
@@ -345,7 +355,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
         return false, nil
     }

-    // checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
+    // Checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
     lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
     if err != nil {
         return false, err
@@ -363,7 +373,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
         return false, err
     }

-    // historical non-canonical blocks here are returned as optimistic for safety.
+    // Historical non-canonical blocks here are returned as optimistic for safety.
     return !isCanonical, nil
 }
@@ -372,7 +382,7 @@ func (s *Service) SetGenesisTime(t time.Time) {
     s.genesisTime = t
 }

-// ForkChoiceStore returns the fork choice store in the service
+// ForkChoiceStore returns the fork choice store in the service.
 func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
     return s.cfg.ForkChoiceStore
 }
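The net effect of this file's changes: IsOptimistic and IsOptimisticForRoot move out of HeadFetcher into the new OptimisticModeFetcher, so callers that only care about optimistic-sync status can depend on a two-method interface rather than the full head-fetching surface. A minimal, self-contained sketch of such a caller; the stub fetcher and the guardResponse handler below are illustrative only, not code from this PR:

package main

import (
	"context"
	"fmt"
)

// OptimisticModeFetcher mirrors the interface extracted in this diff.
type OptimisticModeFetcher interface {
	IsOptimistic(ctx context.Context) (bool, error)
	IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

// alwaysOptimistic is a hypothetical stub used only for this example.
type alwaysOptimistic struct{}

func (alwaysOptimistic) IsOptimistic(context.Context) (bool, error) { return true, nil }
func (alwaysOptimistic) IsOptimisticForRoot(context.Context, [32]byte) (bool, error) {
	return true, nil
}

// guardResponse needs only optimistic status, so it can take the narrow
// interface instead of the much larger HeadFetcher.
func guardResponse(ctx context.Context, f OptimisticModeFetcher) error {
	optimistic, err := f.IsOptimistic(ctx)
	if err != nil {
		return err
	}
	if optimistic {
		return fmt.Errorf("node is optimistic; data may not be finalized")
	}
	return nil
}

func main() {
	fmt.Println(guardResponse(context.Background(), alwaysOptimistic{}))
}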


@@ -51,7 +51,7 @@ func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
 	cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
 	c := setupBeaconChain(t, beaconDB)
-	c.store.SetFinalizedCheckpt(cp)
+	c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})

 	assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
 }
@@ -62,7 +62,7 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
 	genesisRoot := [32]byte{'A'}
 	cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
 	c := setupBeaconChain(t, beaconDB)
-	c.store.SetFinalizedCheckpt(cp)
+	c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
 	c.originBlockRoot = genesisRoot
 	assert.DeepEqual(t, c.originBlockRoot[:], c.FinalizedCheckpt().Root)
 }
@@ -73,7 +73,7 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
 	c := setupBeaconChain(t, beaconDB)
 	assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
 	cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
-	c.store.SetJustifiedCheckpt(cp)
+	c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
 	assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
 }
@@ -83,7 +83,7 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
 	c := setupBeaconChain(t, beaconDB)
 	genesisRoot := [32]byte{'B'}
 	cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
-	c.store.SetJustifiedCheckpt(cp)
+	c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
 	c.originBlockRoot = genesisRoot
 	assert.DeepEqual(t, c.originBlockRoot[:], c.CurrentJustifiedCheckpt().Root)
 }


@@ -11,8 +11,6 @@ var (
 	errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
 	// errInvalidNilSummary is returned when a nil summary is returned from the DB.
 	errInvalidNilSummary = errors.New("nil summary returned from the DB")
-	// errNilParentInDB is returned when a nil parent block is returned from the DB.
-	errNilParentInDB = errors.New("nil parent block in DB")
 	// errWrongBlockCount is returned when the wrong number of blocks or
 	// block roots is used
 	errWrongBlockCount = errors.New("wrong number of blocks or block roots")


@@ -31,11 +31,9 @@
 // notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
 type notifyForkchoiceUpdateArg struct {
-	headState     state.BeaconState
-	headRoot      [32]byte
-	headBlock     interfaces.BeaconBlock
-	finalizedRoot [32]byte
-	justifiedRoot [32]byte
+	headState state.BeaconState
+	headRoot  [32]byte
+	headBlock interfaces.BeaconBlock
 }

 // notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -61,18 +59,12 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
 	if err != nil {
 		return nil, errors.Wrap(err, "could not get execution payload")
 	}
-	finalizedHash, err := s.getPayloadHash(ctx, arg.finalizedRoot)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not get finalized block hash")
-	}
-	justifiedHash, err := s.getPayloadHash(ctx, arg.justifiedRoot)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not get justified block hash")
-	}
+	finalizedHash := s.store.FinalizedPayloadBlockHash()
+	justifiedHash := s.store.JustifiedPayloadBlockHash()
 	fcs := &enginev1.ForkchoiceState{
 		HeadBlockHash:      headPayload.BlockHash,
-		SafeBlockHash:      justifiedHash,
-		FinalizedBlockHash: finalizedHash,
+		SafeBlockHash:      justifiedHash[:],
+		FinalizedBlockHash: finalizedHash[:],
 	}

 	nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
@@ -89,10 +81,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
 		log.WithFields(logrus.Fields{
 			"headSlot":                  headBlk.Slot(),
 			"headPayloadBlockHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
-			"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
+			"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
 		}).Info("Called fork choice updated with optimistic block")
 		return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
 	case powchain.ErrInvalidPayloadStatus:
+		newPayloadInvalidNodeCount.Inc()
 		headRoot := arg.headRoot
 		invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
 		if err != nil {
@@ -101,12 +94,35 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
 		if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
 			return nil, err
 		}
+		r, err := s.updateHead(ctx, s.justifiedBalances.balances)
+		if err != nil {
+			return nil, err
+		}
+		b, err := s.getBlock(ctx, r)
+		if err != nil {
+			return nil, err
+		}
+		st, err := s.cfg.StateGen.StateByRoot(ctx, r)
+		if err != nil {
+			return nil, err
+		}
+		pid, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
+			headState: st,
+			headRoot:  r,
+			headBlock: b.Block(),
+		})
+		if err != nil {
+			return nil, err
+		}
 		log.WithFields(logrus.Fields{
 			"slot":         headBlk.Slot(),
 			"blockRoot":    fmt.Sprintf("%#x", headRoot),
 			"invalidCount": len(invalidRoots),
 		}).Warn("Pruned invalid blocks")
-		return nil, ErrInvalidPayload
+		return pid, ErrInvalidPayload
 	default:
 		return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
 	}
@@ -125,19 +141,19 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
 // getPayloadHash returns the payload hash given the block root.
 // if the block is before bellatrix fork epoch, it returns the zero hash.
-func (s *Service) getPayloadHash(ctx context.Context, root [32]byte) ([]byte, error) {
-	finalizedBlock, err := s.getBlock(ctx, s.ensureRootNotZeros(root))
+func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
+	blk, err := s.getBlock(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(root)))
 	if err != nil {
-		return nil, err
+		return [32]byte{}, err
 	}
-	if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
-		return params.BeaconConfig().ZeroHash[:], nil
+	if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
+		return params.BeaconConfig().ZeroHash, nil
 	}
-	payload, err := finalizedBlock.Block().Body().ExecutionPayload()
+	payload, err := blk.Block().Body().ExecutionPayload()
 	if err != nil {
-		return nil, errors.Wrap(err, "could not get execution payload")
+		return [32]byte{}, errors.Wrap(err, "could not get execution payload")
 	}
-	return payload.BlockHash, nil
+	return bytesutil.ToBytes32(payload.BlockHash), nil
 }

 // notifyForkchoiceUpdate signals execution engine on a new payload.
@@ -219,14 +235,10 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk interfaces.B
 	if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
 		return nil
 	}
-	parent, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
+	parent, err := s.getBlock(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
 	if err != nil {
 		return err
 	}
-	if parent == nil || parent.IsNil() {
-		return errNilParentInDB
-	}
 	parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
 	if err != nil {
 		return err
@@ -266,7 +278,10 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
 			logrus.WithFields(logrus.Fields{
 				"validatorIndex": proposerID,
 				"burnAddress":    fieldparams.EthBurnAddressHex,
-			}).Error("Fee recipient not set. Using burn address")
+			}).Warn("Fee recipient is currently using the burn address, " +
+				"you will not be rewarded transaction fees on this setting. " +
+				"Please set a different eth address as the fee recipient. " +
+				"Please refer to our documentation for instructions")
 		}
 	case err != nil:
 		return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
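The forkchoice-update path above no longer resolves the justified and finalized payload hashes through per-call block lookups; it reads them from the checkpoint store that the SetJustifiedCheckptAndPayloadHash/SetFinalizedCheckptAndPayloadHash setters elsewhere in this diff keep current. A self-contained sketch of the shape of that change, using toy stand-ins for Prysm's store and for enginev1.ForkchoiceState:

package main

import "fmt"

// forkchoiceState mirrors the three fields of enginev1.ForkchoiceState used
// above; store is a toy stand-in for the blockchain checkpoint store.
type forkchoiceState struct {
	HeadBlockHash      []byte
	SafeBlockHash      []byte
	FinalizedBlockHash []byte
}

type store struct {
	justifiedPayloadHash [32]byte
	finalizedPayloadHash [32]byte
}

func (s *store) JustifiedPayloadBlockHash() [32]byte { return s.justifiedPayloadHash }
func (s *store) FinalizedPayloadBlockHash() [32]byte { return s.finalizedPayloadHash }

// buildForkchoiceState shows the after-state of the diff: the safe and
// finalized hashes come straight from the cache rather than from
// getPayloadHash block lookups that could each fail.
func buildForkchoiceState(s *store, headPayloadHash []byte) *forkchoiceState {
	justified := s.JustifiedPayloadBlockHash()
	finalized := s.FinalizedPayloadBlockHash()
	return &forkchoiceState{
		HeadBlockHash:      headPayloadHash,
		SafeBlockHash:      justified[:],
		FinalizedBlockHash: finalized[:],
	}
}

func main() {
	s := &store{justifiedPayloadHash: [32]byte{'j'}, finalizedPayloadHash: [32]byte{'f'}}
	fmt.Printf("%x\n", buildForkchoiceState(s, []byte{'h'}).FinalizedBlockHash)
}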


@@ -9,6 +9,7 @@ import (
 	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
 	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
+	doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
 	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
 	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
 	mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
@@ -174,12 +175,15 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
 			st, _ := util.DeterministicGenesisState(t, 1)
+			require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
+			require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
+			fc := &ethpb.Checkpoint{Epoch: 1, Root: tt.finalizedRoot[:]}
+			service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
+			service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
 			arg := &notifyForkchoiceUpdateArg{
-				headState:     st,
-				headRoot:      tt.headRoot,
-				headBlock:     tt.blk,
-				finalizedRoot: tt.finalizedRoot,
-				justifiedRoot: tt.justifiedRoot,
+				headState: st,
+				headRoot:  tt.headRoot,
+				headBlock: tt.blk,
 			}
 			_, err := service.notifyForkchoiceUpdate(ctx, arg)
 			if tt.errString != "" {
@@ -191,6 +195,147 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
}
//
//  A <- B <- C <- D
//       \
//         ---------- E <- F
//                     \
//                       ------ G
//
// D is the current head, attestations for F and G come late, both are invalid.
// We switch recursively to F then G and finally to D.
//
// We test:
// 1. forkchoice removes blocks F and G from the forkchoice implementation
// 2. forkchoice removes the weights of these blocks
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
ba.Block.Body.ExecutionPayload.BlockNumber = 1
wba, err := wrapper.WrappedSignedBeaconBlock(ba)
require.NoError(t, err)
bra, err := wba.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wba))
bb := util.NewBeaconBlockBellatrix()
bb.Block.Body.ExecutionPayload.BlockNumber = 2
wbb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
brb, err := wbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbb))
bc := util.NewBeaconBlockBellatrix()
bc.Block.Body.ExecutionPayload.BlockNumber = 3
wbc, err := wrapper.WrappedSignedBeaconBlock(bc)
require.NoError(t, err)
brc, err := wbc.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbc))
bd := util.NewBeaconBlockBellatrix()
pd := [32]byte{'D'}
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
bd.Block.Body.ExecutionPayload.BlockNumber = 4
wbd, err := wrapper.WrappedSignedBeaconBlock(bd)
require.NoError(t, err)
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbd))
be := util.NewBeaconBlockBellatrix()
pe := [32]byte{'E'}
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
be.Block.Body.ExecutionPayload.BlockNumber = 5
wbe, err := wrapper.WrappedSignedBeaconBlock(be)
require.NoError(t, err)
bre, err := wbe.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbe))
bf := util.NewBeaconBlockBellatrix()
pf := [32]byte{'F'}
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
bf.Block.Body.ExecutionPayload.BlockNumber = 6
bf.Block.ParentRoot = bre[:]
wbf, err := wrapper.WrappedSignedBeaconBlock(bf)
require.NoError(t, err)
brf, err := wbf.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbf))
bg := util.NewBeaconBlockBellatrix()
bg.Block.Body.ExecutionPayload.BlockNumber = 7
pg := [32]byte{'G'}
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
bg.Block.ParentRoot = bre[:]
wbg, err := wrapper.WrappedSignedBeaconBlock(bg)
require.NoError(t, err)
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))
// Insert blocks into forkchoice
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0))
// Insert Attestations to D, F and G so that they have higher weight than D
// Ensure G is head
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
headRoot, err := fcs.Head(ctx, 0, bra, []uint64{50, 100, 200}, 0)
require.NoError(t, err)
require.Equal(t, brg, headRoot)
// Prepare Engine Mock to return invalid unless head is D, LVH = E
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
fc := &ethpb.Checkpoint{Epoch: 0, Root: bra[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
a := &notifyForkchoiceUpdateArg{
headState: st,
headBlock: wbg.Block(),
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.ErrorIs(t, ErrInvalidPayload, err)
// Ensure Head is D
headRoot, err = fcs.Head(ctx, 0, bra, service.justifiedBalances.balances, 0)
require.NoError(t, err)
require.Equal(t, brd, headRoot)
// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
@@ -537,11 +682,11 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
 			jRoot, err := tt.justified.Block().HashTreeRoot()
 			require.NoError(t, err)
 			require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
-			service.store.SetJustifiedCheckpt(
+			service.store.SetJustifiedCheckptAndPayloadHash(
 				&ethpb.Checkpoint{
 					Root:  jRoot[:],
 					Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
-				})
+				}, [32]byte{'a'})
 			require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))

 			err = service.optimisticCandidateBlock(ctx, tt.blk)
@@ -625,7 +770,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
 	require.Equal(t, true, hasPayload)
 	require.Equal(t, suggestedVid, vId)
 	require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
-	require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
+	require.LogsContain(t, hook, "Fee recipient is currently using the burn address")

 	// Cache hit, advance state, has fee recipient
 	suggestedAddr := common.HexToAddress("123")
@@ -803,7 +948,7 @@ func TestService_getPayloadHash(t *testing.T) {
 	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)

-	_, err = service.getPayloadHash(ctx, [32]byte{})
+	_, err = service.getPayloadHash(ctx, []byte{})
 	require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)

 	b := util.NewBeaconBlock()
@@ -813,20 +958,20 @@
 	require.NoError(t, err)
 	service.saveInitSyncBlock(r, wsb)

-	h, err := service.getPayloadHash(ctx, r)
+	h, err := service.getPayloadHash(ctx, r[:])
 	require.NoError(t, err)
-	require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], h)
+	require.DeepEqual(t, params.BeaconConfig().ZeroHash, h)

 	bb := util.NewBeaconBlockBellatrix()
-	h = []byte{'a'}
-	bb.Block.Body.ExecutionPayload.BlockHash = h
+	h = [32]byte{'a'}
+	bb.Block.Body.ExecutionPayload.BlockHash = h[:]
 	r, err = b.Block.HashTreeRoot()
 	require.NoError(t, err)
 	wsb, err = wrapper.WrappedSignedBeaconBlock(bb)
 	require.NoError(t, err)
 	service.saveInitSyncBlock(r, wsb)

-	h, err = service.getPayloadHash(ctx, r)
+	h, err = service.getPayloadHash(ctx, r[:])
 	require.NoError(t, err)
-	require.DeepEqual(t, []byte{'a'}, h)
+	require.DeepEqual(t, [32]byte{'a'}, h)
 }


@@ -40,7 +40,7 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
 	if err != nil {
 		return errors.Wrap(err, "could not update head")
 	}
-	headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
+	headBlock, err := s.getBlock(ctx, headRoot)
 	if err != nil {
 		return err
 	}
@@ -86,7 +86,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
 	// re-initiate fork choice store using the latest justified info.
 	// This recovers a fatal condition and should not happen in run time.
 	if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
-		jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
+		jb, err := s.getBlock(ctx, headStartRoot)
 		if err != nil {
 			return [32]byte{}, err
 		}
@@ -355,7 +355,7 @@ func (s *Service) notifyNewHeadEvent(
 // attestation pool. It also filters out the attestations that is one epoch older as a
 // defense so invalid attestations don't flow into the attestation pool.
 func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
-	orphanedBlk, err := s.cfg.BeaconDB.Block(ctx, orphanedRoot)
+	orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
 	if err != nil {
 		return err
 	}
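Each of these call sites previously read from s.cfg.BeaconDB.Block and nil-checked the result by hand. The repeated swap to s.getBlock, together with the removal of errNilParentInDB and the errBlockNotFoundInCacheOrDB assertion in the engine tests above, suggests getBlock centralizes an initial-sync-cache-then-DB lookup behind a single not-found error. A rough sketch under that assumption; every type here is a toy stand-in, not Prysm's implementation:

package main

import (
	"errors"
	"fmt"
)

var errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")

type block struct{ slot uint64 }

// service stands in for the blockchain Service: an init-sync block cache
// consulted first, and the beacon DB consulted second.
type service struct {
	initSync map[[32]byte]*block
	db       map[[32]byte]*block
}

// getBlock centralizes the lookup and the nil check, which is what lets the
// diff delete ad-hoc checks like errNilParentInDB at every call site.
func (s *service) getBlock(root [32]byte) (*block, error) {
	if b, ok := s.initSync[root]; ok && b != nil {
		return b, nil
	}
	if b, ok := s.db[root]; ok && b != nil {
		return b, nil
	}
	return nil, errBlockNotFoundInCacheOrDB
}

func main() {
	s := &service{initSync: map[[32]byte]*block{}, db: map[[32]byte]*block{}}
	_, err := s.getBlock([32]byte{'a'})
	fmt.Println(errors.Is(err, errBlockNotFoundInCacheOrDB)) // true
}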


@@ -154,8 +154,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
 	r, err := b.Block.HashTreeRoot()
 	require.NoError(t, err)

-	service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
+	service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
 	service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
 	headRoot, err := service.updateHead(context.Background(), []uint64{})
 	require.NoError(t, err)
@@ -298,8 +298,8 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
 		Root:  bellatrixBlkRoot[:],
 		Epoch: 1,
 	}
-	service.store.SetFinalizedCheckpt(fcp)
-	service.store.SetJustifiedCheckpt(fcp)
+	service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
+	service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
 	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))

 	bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)


@@ -21,7 +21,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
 	}
 }

-// warning: only use these opts when you are certain there are no db calls
+// WARNING: only use these opts when you are certain there are no db calls
 // in your code path. this is a lightweight way to satisfy the stategen/beacondb
 // initialization requirements w/o the overhead of db init.
 func testServiceOptsNoDB() []Option {


@@ -64,7 +64,11 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
 			return err
 		}
 		if bytes.Equal(r, f.Root) {
-			s.store.SetJustifiedCheckpt(bj)
+			h, err := s.getPayloadHash(ctx, bj.Root)
+			if err != nil {
+				return err
+			}
+			s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
 		}
 	}
 	return nil
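Taken with the store changes throughout this diff, the pattern is that a checkpoint and the payload hash of its checkpoint block are always written through a single setter, so readers like notifyForkchoiceUpdate never see a checkpoint paired with a stale hash. A minimal sketch of that invariant; the store type and its fields here are stand-ins, not Prysm's actual store:

package main

import (
	"fmt"
	"sync"
)

type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

// store pairs each checkpoint with its payload block hash; one setter updates
// both under one lock, so a reader can never observe a checkpoint whose
// cached payload hash belongs to a previous checkpoint.
type store struct {
	sync.RWMutex
	justified            *checkpoint
	justifiedPayloadHash [32]byte
}

func (s *store) SetJustifiedCheckptAndPayloadHash(cp *checkpoint, h [32]byte) {
	s.Lock()
	defer s.Unlock()
	s.justified = cp
	s.justifiedPayloadHash = h
}

func (s *store) JustifiedPayloadBlockHash() [32]byte {
	s.RLock()
	defer s.RUnlock()
	return s.justifiedPayloadHash
}

func main() {
	s := &store{}
	s.SetJustifiedCheckptAndPayloadHash(&checkpoint{Epoch: 1}, [32]byte{'j'})
	fmt.Printf("%c\n", s.JustifiedPayloadBlockHash()[0]) // j
}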


@@ -324,9 +324,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
 	r := [32]byte{'g'}
 	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))

-	service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
+	service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
 	service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
 	service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})

 	r = bytesutil.ToBytes32([]byte{'A'})
@@ -358,9 +358,9 @@
 	assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")

 	require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
-	service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
+	service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
 	service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
 	service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
 	cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
 	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
@@ -500,7 +500,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
 	r32, err := b32.Block.HashTreeRoot()
 	require.NoError(t, err)

-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
 	b33 := util.NewBeaconBlock()
 	b33.Block.Slot = 33
 	b33.Block.ParentRoot = r32[:]
@@ -535,7 +535,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
 	r32, err := b32.Block.HashTreeRoot()
 	require.NoError(t, err)

-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
 	b33 := util.NewBeaconBlock()
 	b33.Block.Slot = 33
 	b33.Block.ParentRoot = r32[:]
@@ -564,7 +564,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
 	r32, err := b32.Block.HashTreeRoot()
 	require.NoError(t, err)

-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
 	b33 := util.NewBeaconBlock()
 	b33.Block.Slot = 33
@@ -591,7 +591,7 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
 	r32, err := b32.Block.HashTreeRoot()
 	require.NoError(t, err)

-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
 	b33 := util.NewBeaconBlock()
 	b33.Block.Slot = 33


@@ -123,11 +123,13 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
 			return err
 		}
 	}
+	if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
+		return err
+	}
 	if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
 		return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
 	}
-	s.insertSlashingsToForkChoiceStore(ctx, signed.Block())
+	s.insertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
 	if isValidPayload {
 		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
 			return errors.Wrap(err, "could not set optimistic block to valid")
@@ -146,9 +148,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
 		return err
 	}

-	if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
-		return err
-	}

 	// If slasher is configured, forward the attestations in the block via
 	// an event feed for processing.
 	if features.Get().EnableSlasher {
@@ -195,9 +194,19 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
 	newFinalized := postState.FinalizedCheckpointEpoch() > finalized.Epoch
 	if newFinalized {
 		s.store.SetPrevFinalizedCheckpt(finalized)
-		s.store.SetFinalizedCheckpt(postState.FinalizedCheckpoint())
+		cp := postState.FinalizedCheckpoint()
+		h, err := s.getPayloadHash(ctx, cp.Root)
+		if err != nil {
+			return err
+		}
+		s.store.SetFinalizedCheckptAndPayloadHash(cp, h)
 		s.store.SetPrevJustifiedCheckpt(justified)
-		s.store.SetJustifiedCheckpt(postState.CurrentJustifiedCheckpoint())
+		cp = postState.CurrentJustifiedCheckpoint()
+		h, err = s.getPayloadHash(ctx, cp.Root)
+		if err != nil {
+			return err
+		}
+		s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
 	}

 	balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
@@ -413,6 +422,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
 			}
 		}
 		s.saveInitSyncBlock(blockRoots[i], b)
+		if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
+			tracing.AnnotateError(span, err)
+			return nil, nil, err
+		}
 	}

 	for r, st := range boundaries {
@@ -426,14 +439,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
 	if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
 		return nil, nil, err
 	}
-	f := fCheckpoints[len(fCheckpoints)-1]
-	j := jCheckpoints[len(jCheckpoints)-1]
 	arg := &notifyForkchoiceUpdateArg{
-		headState:     preState,
-		headRoot:      lastBR,
-		headBlock:     lastB.Block(),
-		finalizedRoot: bytesutil.ToBytes32(f.Root),
-		justifiedRoot: bytesutil.ToBytes32(j.Root),
+		headState: preState,
+		headRoot:  lastBR,
+		headBlock: lastB.Block(),
 	}
 	if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
 		return nil, nil, err
@@ -484,7 +493,11 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
 			return err
 		}
 		s.store.SetPrevFinalizedCheckpt(finalized)
-		s.store.SetFinalizedCheckpt(fCheckpoint)
+		h, err := s.getPayloadHash(ctx, fCheckpoint.Root)
+		if err != nil {
+			return err
+		}
+		s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
 	}

 	return nil
 }
@@ -495,15 +508,15 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
 	defer span.End()
 	if postState.Slot()+1 == s.nextEpochBoundarySlot {
-		// Update caches for the next epoch at epoch boundary slot - 1.
-		if err := helpers.UpdateCommitteeCache(postState, coreTime.NextEpoch(postState)); err != nil {
-			return err
-		}
 		copied := postState.Copy()
 		copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
 		if err != nil {
 			return err
 		}
+		// Update caches for the next epoch at epoch boundary slot - 1.
+		if err := helpers.UpdateCommitteeCache(copied, coreTime.CurrentEpoch(copied)); err != nil {
+			return err
+		}
 		if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
 			return err
 		}
@@ -576,8 +589,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfac
 // Inserts attester slashing indices to fork choice store.
 // To call this function, it's caller's responsibility to ensure the slashing object is valid.
-func (s *Service) insertSlashingsToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock) {
-	slashings := blk.Body().AttesterSlashings()
+func (s *Service) insertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
 	for _, slashing := range slashings {
 		indices := blocks.SlashableAttesterIndices(slashing)
 		for _, index := range indices {
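insertSlashingsToForkChoiceStore now takes the slashings slice directly instead of a whole block, which lets attester slashings that arrive over gossip be fed into forkchoice through the same function as slashings unpacked from block bodies (the "Insert over-the-wire slashing to forkchoice" commit in this range mentions a ReceiveAttesterSlashing entry point). A toy sketch of why the signature change matters; none of these types are Prysm's:

package main

import "fmt"

// attesterSlashing is a stand-in for ethpb.AttesterSlashing; forkchoice is a
// stand-in for the forkchoice store.
type attesterSlashing struct{ indices []uint64 }

type forkchoice struct{ slashed map[uint64]bool }

// insertSlashings accepts a plain slice, so both block processing and a
// gossip handler can call it without first wrapping slashings in a block.
func (f *forkchoice) insertSlashings(slashings []*attesterSlashing) {
	for _, sl := range slashings {
		for _, idx := range sl.indices {
			f.slashed[idx] = true // mark the equivocating validator
		}
	}
}

func main() {
	f := &forkchoice{slashed: map[uint64]bool{}}
	// From a block body:
	f.insertSlashings([]*attesterSlashing{{indices: []uint64{1, 2}}})
	// From a gossip message, same entry point:
	f.insertSlashings([]*attesterSlashing{{indices: []uint64{7}}})
	fmt.Println(f.slashed[7]) // true
}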


@@ -97,13 +97,10 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
 		return errNilFinalizedInStore
 	}
 	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
-	finalizedBlkSigned, err := s.cfg.BeaconDB.Block(ctx, fRoot)
+	finalizedBlkSigned, err := s.getBlock(ctx, fRoot)
 	if err != nil {
 		return err
 	}
-	if finalizedBlkSigned == nil || finalizedBlkSigned.IsNil() || finalizedBlkSigned.Block().IsNil() {
-		return errors.New("nil finalized block")
-	}
 	finalizedBlk := finalizedBlkSigned.Block()

 	bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot())
 	if err != nil {
@@ -208,7 +205,11 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
 			return errNilJustifiedInStore
 		}
 		s.store.SetPrevJustifiedCheckpt(justified)
-		s.store.SetJustifiedCheckpt(cpt)
+		h, err := s.getPayloadHash(ctx, cpt.Root)
+		if err != nil {
+			return err
+		}
+		s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
 	}

 	return nil
@@ -227,7 +228,11 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
 	if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
 		return err
 	}
-	s.store.SetJustifiedCheckpt(cp)
+	h, err := s.getPayloadHash(ctx, cp.Root)
+	if err != nil {
+		return err
+	}
+	s.store.SetJustifiedCheckptAndPayloadHash(cp, h)

 	return nil
 }
@@ -350,7 +355,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
 	higherThanFinalized := slot > fSlot
 	// As long as parent node is not in fork choice store, and parent node is in DB.
 	for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
-		b, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
+		b, err := s.getBlock(ctx, parentRoot)
 		if err != nil {
 			return err
 		}


@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"math/big"
 	"strconv"
+	"sync"
 	"testing"
 	"time"
@@ -39,6 +40,7 @@ import (
 	"github.com/prysmaticlabs/prysm/testing/require"
 	"github.com/prysmaticlabs/prysm/testing/util"
 	prysmTime "github.com/prysmaticlabs/prysm/time"
+	logTest "github.com/sirupsen/logrus/hooks/test"
 )

 func TestStore_OnBlock_ProtoArray(t *testing.T) {
@@ -129,9 +131,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
+			service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
 			service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
-			service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
+			service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
 			service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})

 			root, err := tt.blk.Block.HashTreeRoot()
@@ -232,9 +234,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
+			service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
 			service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
-			service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
+			service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
 			service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})

 			root, err := tt.blk.Block.HashTreeRoot()
@@ -289,7 +291,8 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
 	assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
 	gRoot, err := genesis.Block.HashTreeRoot()
 	require.NoError(t, err)
-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
+	service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
 	service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})

 	wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -353,7 +356,8 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
 	assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
 	gRoot, err := genesis.Block.HashTreeRoot()
 	require.NoError(t, err)
-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
+	service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
 	service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)

 	wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -415,7 +419,9 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
 	assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
 	gRoot, err := genesis.Block.HashTreeRoot()
 	require.NoError(t, err)
-	service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
+	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
+	service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
 	service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
+	service.saveInitSyncBlock(gRoot, wsb)
 	st, keys := util.DeterministicGenesisState(t, 64)
@@ -484,7 +490,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
 	diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
 	service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
-	service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
+	service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
 	update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
 	require.NoError(t, err)
 	assert.Equal(t, true, update, "Should be able to update justified")
@@ -516,7 +522,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
 	diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
 	service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
-	service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
+	service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})

 	update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
 	require.NoError(t, err)
@@ -549,7 +555,7 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
 	diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
 	service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -577,7 +583,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -614,7 +620,7 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -648,7 +654,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -656,7 +662,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 1
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
wb, err := wrapper.WrappedBeaconBlock(b.Block)
require.NoError(t, err)
err = service.verifyBlkPreState(ctx, wb)
@@ -691,7 +697,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: []byte{'A'}}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
st, err := util.NewBeaconState()
require.NoError(t, err)
@@ -723,7 +729,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -768,7 +774,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -814,7 +820,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -863,7 +869,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -913,7 +919,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -974,7 +980,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1321,7 +1327,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
args: args{
finalizedRoot: [32]byte{'a'},
},
wantedErr: "nil finalized block",
wantedErr: "block not found in cache or db",
},
{
name: "could not get finalized block root in DB",
@@ -1350,7 +1356,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
for _, tt := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]}, [32]byte{})
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
@@ -1378,7 +1384,7 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.originBlockRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
service.store.SetJustifiedCheckpt(currentCp)
service.store.SetJustifiedCheckptAndPayloadHash(currentCp, [32]byte{'a'})
newCp := &ethpb.Checkpoint{Epoch: 2, Root: gRoot[:]}
require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))
@@ -1439,7 +1445,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1493,7 +1499,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1524,7 +1530,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
@@ -1563,7 +1569,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(6))
@@ -1885,5 +1891,84 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
b.Block.Body.AttesterSlashings = slashings
wb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.insertSlashingsToForkChoiceStore(ctx, wb.Block())
service.insertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
}
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
blk1, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
wsb1, err := wrapper.WrappedSignedBeaconBlock(blk1)
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
wsb2, err := wrapper.WrappedSignedBeaconBlock(blk2)
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
wsb3, err := wrapper.WrappedSignedBeaconBlock(blk3)
require.NoError(t, err)
blk4, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 4)
require.NoError(t, err)
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
wsb4, err := wrapper.WrappedSignedBeaconBlock(blk4)
require.NoError(t, err)
logHook := logTest.NewGlobal()
for i := 0; i < 10; i++ {
var wg sync.WaitGroup
wg.Add(4)
go func() {
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
require.LogsDoNotContain(t, logHook, "New head does not exist in DB. Do nothing")
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r1))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'a'})
}
}


@@ -117,7 +117,7 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
log.Warn("Genesis time received, now available to process attestations")
}
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot/2)
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
@@ -128,42 +128,55 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
return
}
// Continue when there's no fork choice attestation, there's nothing to process and update head.
// This covers the condition when the node is still initial syncing to the head of the chain.
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
continue
if err := s.UpdateHead(s.ctx); err != nil {
log.WithError(err).Error("Could not process attestations and update head")
return
}
s.processAttestations(s.ctx)
justified := s.store.JustifiedCheckpt()
if justified == nil {
log.WithError(errNilJustifiedInStore).Error("Could not get justified checkpoint")
continue
}
balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(justified.Root))
if err != nil {
log.WithError(err).Errorf("Unable to get justified balances for root %v", justified.Root)
continue
}
newHeadRoot, err := s.updateHead(s.ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.notifyEngineIfChangedHead(s.ctx, newHeadRoot)
}
}
}()
}
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// It requires no external inputs.
func (s *Service) UpdateHead(ctx context.Context) error {
// Return early when there are no fork choice attestations; there is nothing to process and no head to update.
// This covers the case where the node is still initial-syncing to the head of the chain.
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
return nil
}
// Only one process can process attestations and update head at a time.
s.processAttestationsLock.Lock()
defer s.processAttestationsLock.Unlock()
s.processAttestations(ctx)
justified := s.store.JustifiedCheckpt()
if justified == nil {
return errNilJustifiedInStore
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
if err != nil {
return err
}
newHeadRoot, err := s.updateHead(ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
return nil
}
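UpdateHead is now reachable from both the slot-ticker goroutine and external callers, so the new processAttestationsLock serializes the process-attestations-then-update-head sequence. A minimal model of the guard; only the locking shape is taken from the diff, and all names are stand-ins.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type headUpdater struct {
	processAttestationsLock sync.Mutex
	pending                 atomic.Int64 // stand-in for AttPool.ForkchoiceAttestationCount()
	headUpdates             int
}

func (u *headUpdater) UpdateHead() error {
	// Early return when there is nothing to process; the real pool guards its
	// own count, modeled here with an atomic.
	if u.pending.Load() == 0 {
		return nil
	}
	// Only one caller may process attestations and update head at a time.
	u.processAttestationsLock.Lock()
	defer u.processAttestationsLock.Unlock()
	u.pending.Store(0) // stand-in for processAttestations draining the pool
	u.headUpdates++    // stand-in for updateHead + notifyEngineIfChangedHead
	return nil
}

func main() {
	u := &headUpdater{}
	u.pending.Store(3)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ { // the slot ticker and proposer paths racing
		wg.Add(1)
		go func() { defer wg.Done(); _ = u.UpdateHead() }()
	}
	wg.Wait()
	fmt.Println("serialized head updates:", u.headUpdates)
}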
// This calls notifyForkchoiceUpdate in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
if s.headRoot() == newHeadRoot {
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
return
}
@@ -172,12 +185,6 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return // We don't have the block, don't notify the engine and update head.
}
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get new head block")
@@ -189,11 +196,9 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return
}
arg := &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
finalizedRoot: bytesutil.ToBytes32(finalized.Root),
justifiedRoot: bytesutil.ToBytes32(s.store.JustifiedCheckpt().Root),
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
}
_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
if err != nil {


@@ -137,14 +137,14 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.notifyEngineIfChangedHead(ctx, service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
require.LogsDoNotContain(t, hook, finalizedErr)
invalidStateErr := "Could not get state from db"
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
service.saveInitSyncBlock([32]byte{'a'}, gb)
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.LogsContain(t, hook, finalizedErr)
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
service.head = &head{
@@ -169,9 +169,9 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
// Block in DB
@@ -191,12 +191,51 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
service.notifyEngineIfChangedHead(ctx, [32]byte{})
require.Equal(t, service.headRoot(), headRoot)
}
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()), WithStateNotifier(&mockBeaconNode{}))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: tRoot[:]}))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
b := util.NewBeaconBlock()
wb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
service.head.root = r // Old head
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.NoError(t, service.UpdateHead(ctx))
require.Equal(t, tRoot, service.head.root) // Validate head is the new one
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
}


@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
@@ -24,9 +25,14 @@ type BlockReceiver interface {
HasBlock(ctx context.Context, root [32]byte) bool
}
// ReceiveBlock is a function that defines the the operations (minus pubsub)
// that are performed on blocks that is received from regular sync service. The operations consists of:
// 1. Validate block, apply state transition and update check points
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
}
// ReceiveBlock is a function that defines the operations (minus pubsub)
// that are performed on a received block. The operations consist of:
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
@@ -79,7 +85,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
defer span.End()
// Apply state transition on the incoming newly received block batches, one by one.
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
@@ -88,10 +94,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
for i, b := range blocks {
blockCopy := b.Copy()
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
@@ -133,6 +135,11 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
return s.hasBlockInInitSyncOrDB(ctx, root)
}
// ReceiveAttesterSlashing receives an attester slashing and inserts it into forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.insertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
}
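ReceiveAttesterSlashing is a thin adapter: the gossip pipeline hands over one slashing at a time, and the method wraps it in a slice for the forkchoice insertion refactored at the top of this compare, while the SlashingReceiver interface keeps the sync package's dependency surface narrow. A stand-alone sketch with illustrative types.

package main

import (
	"context"
	"fmt"
)

// Stand-in slashing type; illustrative only.
type AttesterSlashing struct{ ID int }

// SlashingReceiver mirrors the narrow interface added in this hunk: consumers
// depend on this one method, not on the whole chain Service.
type SlashingReceiver interface {
	ReceiveAttesterSlashing(ctx context.Context, slashing *AttesterSlashing)
}

type Service struct{ received []int }

// ReceiveAttesterSlashing adapts the single-item wire API to the slice-based
// forkchoice insertion.
func (s *Service) ReceiveAttesterSlashing(_ context.Context, slashing *AttesterSlashing) {
	s.insertSlashingsToForkChoiceStore([]*AttesterSlashing{slashing})
}

func (s *Service) insertSlashingsToForkChoiceStore(slashings []*AttesterSlashing) {
	for _, sl := range slashings {
		s.received = append(s.received, sl.ID)
	}
}

func main() {
	var r SlashingReceiver = &Service{}
	r.ReceiveAttesterSlashing(context.Background(), &AttesterSlashing{ID: 42})
	fmt.Println("slashing handed to forkchoice")
}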
func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
// Delete the processed block attestations from attestation pool.
if err := s.deletePoolAtts(b.Body().Attestations()); err != nil {


@@ -141,7 +141,8 @@ func TestService_ReceiveBlock(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
h := [32]byte{'a'}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, h)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -181,7 +182,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wg := sync.WaitGroup{}
@@ -262,7 +263,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -312,7 +313,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
@@ -323,7 +324,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
@@ -336,7 +337,7 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 10000000})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 10000000}, [32]byte{})
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))


@@ -50,21 +50,22 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
store *store.Store
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
store *store.Store
processAttestationsLock sync.Mutex
}
// config options for the service.
@@ -143,6 +144,7 @@ func (s *Service) Stop() error {
defer s.cancel()
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
// Save the last finalized state so that starting up in the following run will be much faster.
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
return err
}
@@ -199,13 +201,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
forkChoicer = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
}
s.cfg.ForkChoiceStore = forkChoicer
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
fb, err := s.getBlock(s.ctx, s.ensureRootNotZeros(fRoot))
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
if fb == nil || fb.IsNil() {
return errNilFinalizedInStore
}
payloadHash, err := getBlockPayloadHash(fb.Block())
if err != nil {
return errors.Wrap(err, "could not get execution payload hash")
@@ -337,14 +336,13 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
finalizedState.Slot(), flags.HeadSync.Name)
}
}
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
if finalizedState == nil || finalizedState.IsNil() {
return errors.New("finalized state can't be nil")
}
if finalizedState == nil || finalizedState.IsNil() || finalizedBlock == nil || finalizedBlock.IsNil() {
return errors.New("finalized state and block can't be nil")
finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
s.setHead(finalizedRoot, finalizedBlock, finalizedState)


@@ -31,6 +31,7 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -86,9 +87,11 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
require.NoError(t, err)
err = beaconDB.SavePowchainData(ctx, &ethpb.ETH1ChainData{
BeaconState: pbState,
Trie: &ethpb.SparseMerkleTrie{},
Trie: mockTrie.ToProto(),
CurrentEth1Data: &ethpb.LatestETH1Data{
BlockHash: make([]byte, 32),
},
@@ -221,7 +224,8 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
require.NoError(t, err)
trie, _, err := util.DepositTrieFromDeposits(deposits)
require.NoError(t, err)
hashTreeRoot := trie.HashTreeRoot()
hashTreeRoot, err := trie.HashTreeRoot()
require.NoError(t, err)
genState, err := transition.EmptyGenesisState()
require.NoError(t, err)
err = genState.SetEth1Data(&ethpb.Eth1Data{
@@ -500,7 +504,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -521,7 +525,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -594,7 +598,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
@@ -617,7 +621,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)


@@ -37,7 +37,7 @@ func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
// WARNING: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
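The comment promoted to WARNING documents a deliberate split: update mutates the cache without taking the lock and relies on get, its sole caller, to serialize all access. A compact model of that contract; the fields and values are stand-ins.

package main

import (
	"fmt"
	"sync"
)

type stateBalanceCache struct {
	sync.Mutex
	root     [32]byte
	balances []uint64
}

// update is NOT thread-safe on its own (the WARNING in the hunk above).
func (c *stateBalanceCache) update(root [32]byte) []uint64 {
	c.root = root
	c.balances = []uint64{32, 32, 31} // stand-in for reading the justified state
	return c.balances
}

// get serializes every access, so update can stay lock-free.
func (c *stateBalanceCache) get(root [32]byte) []uint64 {
	c.Lock()
	defer c.Unlock()
	if root == c.root && c.balances != nil {
		return c.balances // hit: the cached justified root matches
	}
	return c.update(root) // miss: refresh under the same lock
}

func main() {
	c := &stateBalanceCache{}
	fmt.Println(len(c.get([32]byte{'j'})), "balances (miss, then cached)")
	fmt.Println(len(c.get([32]byte{'j'})), "balances (hit)")
}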


@@ -23,6 +23,13 @@ func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
return s.justifiedCheckpt
}
// JustifiedPayloadBlockHash returns the payload block hash corresponding to the justified checkpoint.
func (s *Store) JustifiedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.justifiedPayloadBlockHash
}
// PrevFinalizedCheckpt returns the previous finalized checkpoint in the Store.
func (s *Store) PrevFinalizedCheckpt() *ethpb.Checkpoint {
s.RLock()
@@ -37,6 +44,13 @@ func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
return s.finalizedCheckpt
}
// FinalizedPayloadBlockHash returns the payload block hash corresponding to the finalized checkpoint.
func (s *Store) FinalizedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.finalizedPayloadBlockHash
}
// SetPrevJustifiedCheckpt sets the previous justified checkpoint in the Store.
func (s *Store) SetPrevJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.Lock()
@@ -51,18 +65,20 @@ func (s *Store) SetBestJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.bestJustifiedCheckpt = cp
}
// SetJustifiedCheckpt sets the justified checkpoint in the Store.
func (s *Store) SetJustifiedCheckpt(cp *ethpb.Checkpoint) {
// SetJustifiedCheckptAndPayloadHash sets the justified checkpoint and its payload block hash in the Store.
func (s *Store) SetJustifiedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.justifiedCheckpt = cp
s.justifiedPayloadBlockHash = h
}
// SetFinalizedCheckpt sets the finalized checkpoint in the Store.
func (s *Store) SetFinalizedCheckpt(cp *ethpb.Checkpoint) {
// SetFinalizedCheckptAndPayloadHash sets the finalized checkpoint and its payload block hash in the Store.
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.finalizedCheckpt = cp
s.finalizedPayloadBlockHash = h
}
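The getters above and the combined setters here keep each checkpoint and its payload hash under one RWMutex and write them together, so a single Set call can never be observed half-applied. A reduced model of the locking shape, finalized pair only, with stand-in types.

package main

import (
	"fmt"
	"sync"
)

type Checkpoint struct{ Epoch uint64 }

type Store struct {
	sync.RWMutex
	finalizedCheckpt          *Checkpoint
	finalizedPayloadBlockHash [32]byte
}

// Writers update the pair in one critical section.
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *Checkpoint, h [32]byte) {
	s.Lock()
	defer s.Unlock()
	s.finalizedCheckpt = cp
	s.finalizedPayloadBlockHash = h
}

// Readers take the read lock, allowing concurrent reads between writes.
func (s *Store) FinalizedCheckpt() *Checkpoint {
	s.RLock()
	defer s.RUnlock()
	return s.finalizedCheckpt
}

func (s *Store) FinalizedPayloadBlockHash() [32]byte {
	s.RLock()
	defer s.RUnlock()
	return s.finalizedPayloadBlockHash
}

func main() {
	s := &Store{}
	s.SetFinalizedCheckptAndPayloadHash(&Checkpoint{Epoch: 1}, [32]byte{'h'})
	fmt.Printf("epoch %d, hash %c\n", s.FinalizedCheckpt().Epoch, s.FinalizedPayloadBlockHash()[0])
}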
// SetPrevFinalizedCheckpt sets the previous finalized checkpoint in the Store.


@@ -30,8 +30,10 @@ func Test_store_JustifiedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.JustifiedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetJustifiedCheckpt(cp)
h := [32]byte{'b'}
s.SetJustifiedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.JustifiedCheckpt())
require.Equal(t, h, s.JustifiedPayloadBlockHash())
}
func Test_store_FinalizedCheckpt(t *testing.T) {
@@ -39,8 +41,10 @@ func Test_store_FinalizedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.FinalizedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetFinalizedCheckpt(cp)
h := [32]byte{'b'}
s.SetFinalizedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.FinalizedCheckpt())
require.Equal(t, h, s.FinalizedPayloadBlockHash())
}
func Test_store_PrevFinalizedCheckpt(t *testing.T) {


@@ -17,9 +17,11 @@ import (
// best_justified_checkpoint: Checkpoint
// proposerBoostRoot: Root
type Store struct {
justifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
justifiedCheckpt *ethpb.Checkpoint
justifiedPayloadBlockHash [32]byte
finalizedCheckpt *ethpb.Checkpoint
finalizedPayloadBlockHash [32]byte
bestJustifiedCheckpt *ethpb.Checkpoint
sync.RWMutex
// These are not part of the consensus spec, but we do use them to return gRPC API requests.
// TODO(10094): Consider removing in v3.


@@ -450,3 +450,9 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool, error) {
return s.Optimistic, nil
}
// UpdateHead mocks the same method in the chain service.
func (s *ChainService) UpdateHead(_ context.Context) error { return nil }
// ReceiveAttesterSlashing mocks the same method in the chain service.
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}


@@ -79,7 +79,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
store: &store.Store{},
wsVerifier: wv,
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch}, [32]byte{})
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.store.FinalizedCheckpt().Epoch)
if tt.wantErr == nil {
require.NoError(t, err)


@@ -430,7 +430,11 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
rootA, err := trie.HashTreeRoot()
require.NoError(t, err)
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, rootA, rootB)
}
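Several hunks in this compare follow the same mechanical change: the trie's HashTreeRoot now returns (root, error) rather than only the root, so each call site gains an explicit error check. A minimal sketch with a stand-in trie; the condition inside it is illustrative.

package main

import (
	"errors"
	"fmt"
)

type depositTrie struct{ items int }

// HashTreeRoot models the new two-value signature.
func (t *depositTrie) HashTreeRoot() ([32]byte, error) {
	if t.items < 0 {
		return [32]byte{}, errors.New("malformed trie")
	}
	return [32]byte{byte(t.items)}, nil
}

func main() {
	trie := &depositTrie{items: 3}
	// Before: root := trie.HashTreeRoot()
	root, err := trie.HashTreeRoot() // after: two-value form with an error check
	if err != nil {
		fmt.Println("could not compute deposit root:", err)
		return
	}
	fmt.Printf("deposit root starts with 0x%02x\n", root[0])
}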
func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
@@ -488,7 +492,11 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
rootA, err := trie.HashTreeRoot()
require.NoError(t, err)
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, rootA, rootB)
}
func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {


@@ -16,6 +16,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/altair",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/endtoend/evaluators:__subpackages__",
"//testing/spectest:__subpackages__",
"//testing/util:__pkg__",
"//validator/client:__pkg__",


@@ -143,7 +143,8 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
beaconState, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: registry,
Balances: balances,
@@ -202,7 +203,8 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
dep[0].Data.Signature = make([]byte, 96)
trie, _, err := util.DepositTrieFromDeposits(dep)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,


@@ -173,7 +173,8 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
@@ -233,7 +234,8 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
dep[0].Data.Signature = make([]byte, 96)
trie, _, err := util.DepositTrieFromDeposits(dep)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
@@ -289,7 +291,9 @@ func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
require.NoError(t, err)
dep[i].Proof = proof
}
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
@@ -376,7 +380,9 @@ func TestProcessDeposit_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Balances: balances,


@@ -106,7 +106,7 @@ func weighJustificationAndFinalization(state state.BeaconState,
if err := state.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: prevEpoch, Root: blockRoot}); err != nil {
return nil, err
}
newBits := state.JustificationBits()
newBits = state.JustificationBits()
newBits.SetBitAt(1, true)
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
@@ -122,7 +122,7 @@ func weighJustificationAndFinalization(state state.BeaconState,
if err := state.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: currentEpoch, Root: blockRoot}); err != nil {
return nil, err
}
newBits := state.JustificationBits()
newBits = state.JustificationBits()
newBits.SetBitAt(0, true)
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
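The two hunks above change := to =, removing a redeclaration that shadowed the newBits declared earlier in the function. Whether or not the shadowed copy changed behavior at this call site, the pattern is a classic Go hazard; a minimal illustration with stand-in names.

package main

import "fmt"

func justificationBits() []bool { return []bool{false, false} }

func main() {
	newBits := justificationBits()
	prevEpochJustified := true
	if prevEpochJustified {
		// Buggy form:
		//   newBits := justificationBits() // := declares a NEW variable scoped
		//                                  // to this block, shadowing the outer one
		// Fixed form, assigning to the outer variable:
		newBits = justificationBits()
		newBits[1] = true // stand-in for newBits.SetBitAt(1, true)
	}
	fmt.Println(newBits) // [false true]: the outer slice reflects the update
}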


@@ -41,7 +41,10 @@ func UpdateGenesisEth1Data(state state.BeaconState, deposits []*ethpb.Deposit, e
}
}
depositRoot := t.HashTreeRoot()
depositRoot, err := t.HashTreeRoot()
if err != nil {
return nil, err
}
eth1Data.DepositRoot = depositRoot[:]
err = state.SetEth1Data(eth1Data)
if err != nil {


@@ -40,6 +40,7 @@ go_library(
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//beacon-chain/state/v3:go_default_library",


@@ -83,7 +83,7 @@ func TestEnsureEmbeddedGenesis(t *testing.T) {
// Embedded Genesis works with Mainnet config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.ConfigName = params.ConfigNames[params.Mainnet]
cfg.ConfigName = params.MainnetName
params.OverrideBeaconConfig(cfg)
ctx := context.Background()


@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/genesis"
state_native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
@@ -204,7 +205,13 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
switch rawType := states[i].InnerStateUnsafe().(type) {
case *ethpb.BeaconState:
pbState, err := v1.ProtobufBeaconState(rawType)
var pbState *ethpb.BeaconState
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStatePhase0(rawType)
} else {
pbState, err = v1.ProtobufBeaconState(rawType)
}
if err != nil {
return err
}
@@ -225,7 +232,13 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
return err
}
case *ethpb.BeaconStateAltair:
pbState, err := v2.ProtobufBeaconState(rawType)
var pbState *ethpb.BeaconStateAltair
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStateAltair(rawType)
} else {
pbState, err = v2.ProtobufBeaconState(rawType)
}
if err != nil {
return err
}
@@ -247,7 +260,13 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
return err
}
case *ethpb.BeaconStateBellatrix:
pbState, err := v3.ProtobufBeaconState(rawType)
var pbState *ethpb.BeaconStateBellatrix
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStateBellatrix(rawType)
} else {
pbState, err = v3.ProtobufBeaconState(rawType)
}
if err != nil {
return err
}
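Each case of the switch now branches on a feature flag to pick the conversion path. A reduced sketch of the repeated pattern, with a boolean standing in for features.Get().EnableNativeState and no-op stand-ins for the converters.

package main

import "fmt"

type beaconState struct{ slot uint64 }

var enableNativeState = true // stand-in for features.Get().EnableNativeState

func nativeStateConvert(raw *beaconState) (*beaconState, error) { return raw, nil }
func protoStateConvert(raw *beaconState) (*beaconState, error)  { return raw, nil }

// saveState mirrors the shape of each switch case: declare the result and
// error up front, branch on the flag, then share one error check and save path.
func saveState(raw *beaconState) error {
	var pbState *beaconState
	var err error
	if enableNativeState {
		pbState, err = nativeStateConvert(raw)
	} else {
		pbState, err = protoStateConvert(raw)
	}
	if err != nil {
		return err
	}
	fmt.Println("persisting state at slot", pbState.slot)
	return nil
}

func main() {
	if err := saveState(&beaconState{slot: 42}); err != nil {
		fmt.Println(err)
	}
}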
@@ -274,28 +293,7 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
}
// store the validator entries separately to save space.
valBkt := tx.Bucket(stateValidatorsBucket)
for hashStr, validatorEntry := range validatorsEntries {
key := []byte(hashStr)
// if the entry is not in the cache and not in the DB,
// then insert it in the DB and add to the cache.
if _, ok := s.validatorEntryCache.Get(key); !ok {
validatorEntryCacheMiss.Inc()
if valEntry := valBkt.Get(key); valEntry == nil {
valBytes, encodeErr := encode(ctx, validatorEntry)
if encodeErr != nil {
return encodeErr
}
if putErr := valBkt.Put(key, valBytes); putErr != nil {
return putErr
}
s.validatorEntryCache.Set(key, validatorEntry, int64(len(valBytes)))
}
} else {
validatorEntryCacheHit.Inc()
}
}
return nil
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
}); err != nil {
return err
}
@@ -303,6 +301,31 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
return nil
}
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
valBkt := tx.Bucket(stateValidatorsBucket)
for hashStr, validatorEntry := range validatorsEntries {
key := []byte(hashStr)
// if the entry is not in the cache and not in the DB,
// then insert it in the DB and add to the cache.
if _, ok := s.validatorEntryCache.Get(key); !ok {
validatorEntryCacheMiss.Inc()
if valEntry := valBkt.Get(key); valEntry == nil {
valBytes, encodeErr := encode(ctx, validatorEntry)
if encodeErr != nil {
return encodeErr
}
if putErr := valBkt.Put(key, valBytes); putErr != nil {
return putErr
}
s.validatorEntryCache.Set(key, validatorEntry, int64(len(valBytes)))
}
} else {
validatorEntryCacheHit.Inc()
}
}
return nil
}
// HasState checks if a state by root exists in the db.
func (s *Store) HasState(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.HasState")


@@ -15,13 +15,13 @@ func TestSaveOrigin(t *testing.T) {
// Embedded Genesis works with Mainnet config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.ConfigName = params.ConfigNames[params.Mainnet]
cfg.ConfigName = params.MainnetName
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
db := setupDB(t)
st, err := genesis.State(params.Mainnet.String())
st, err := genesis.State(params.MainnetName)
require.NoError(t, err)
sb, err := st.MarshalSSZ()


@@ -81,7 +81,7 @@ func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error)
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
node.parent.children = children[:len(children)-2]
node.parent.children = children[:len(children)-1]
break
}
}
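The removeNode fix above is the classic swap-remove off-by-one: after moving the last child into the vacated slot, the slice must shrink by exactly one element. Truncating by two, as the old len(children)-2 did, silently dropped an extra child. Reduced to the bare idiom, with ints standing in for child nodes.

package main

import "fmt"

// removeAt deletes children[i] by swapping in the last element, then
// truncating by ONE.
func removeAt(children []int, i int) []int {
	if i != len(children)-1 {
		children[i] = children[len(children)-1] // move the last element into the hole
	}
	return children[:len(children)-1] // was children[:len(children)-2]
}

func main() {
	children := []int{10, 20, 30, 40}
	fmt.Println(removeAt(children, 1)) // [10 40 30]: 20 removed, nothing else lost
}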


@@ -203,3 +203,26 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}
// This is a regression test (10565)
//      ----- C
//     /
// A <- B
//     \
//      ---------- D
// D is invalid
func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, 1, 1))
_, err := f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
}


@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -111,35 +111,37 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
// Insert a second block at slot 3 into the tree and boost its score.
// Insert a second block at slot 4 into the tree and boost its score.
// 0
// |
// 1
// |
// 2
// / \
// 3 4 <- HEAD
slot = types.Slot(3)
// 3 |
// 4 <- HEAD
slot = types.Slot(4)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(3)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,
CurrentSlot: clockSlot,
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
@@ -166,17 +168,27 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 34)
// (A: 54) -> (B: 44) -> (C: 10)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
// middle instead of the normal progression of (54 -> 44 -> 24).
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(54))
node2 := f.store.nodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(44))
node3 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node3.weight, uint64(24))
node3 := f.store.nodeByRoot[indexToHash(3)]
require.Equal(t, node3.weight, uint64(10))
node4 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node4.weight, uint64(24))
// Regression: process attestations for C and check that it
// becomes head; we need two attestations so that C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)


@@ -24,6 +24,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL.
//
// WARNING: This method returns an error if the root is not found in forkchoice
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.Lock()


@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -111,29 +111,30 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
// Insert a second block at slot 3 into the tree and boost its score.
// Insert a second block at slot 4 into the tree and boost its score.
// 0
// |
// 1
// |
// 2
// / \
// 3 4 <- HEAD
slot = types.Slot(3)
// 3 |
// 4 <- HEAD
slot = types.Slot(4)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(3)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,
@@ -166,14 +167,22 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 24)
// \_->(D: 10)
// (A: 54) -> (B: 44) -> (C: 10)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
// middle instead of the normal progression of (54 -> 44 -> 24).
require.Equal(t, f.store.nodes[1].weight, uint64(54))
require.Equal(t, f.store.nodes[2].weight, uint64(44))
require.Equal(t, f.store.nodes[3].weight, uint64(34))
require.Equal(t, f.store.nodes[3].weight, uint64(10))
require.Equal(t, f.store.nodes[4].weight, uint64(24))
// Regression: process attestations for C and check that it
// becomes head; we need two attestations so that C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)


@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
@@ -117,7 +118,18 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
if !common.IsHexAddress(ha) {
return fmt.Errorf("%s is not a valid fee recipient address", ha)
}
c.DefaultFeeRecipient = common.HexToAddress(ha)
mixedcaseAddress, err := common.NewMixedcaseAddressFromString(ha)
if err != nil {
return errors.Wrapf(err, "could not decode fee recipient %s", ha)
}
checksumAddress := common.HexToAddress(ha)
if !mixedcaseAddress.ValidChecksum() {
log.Warnf("Fee recipient %s is not a checksum Ethereum address. "+
"The checksummed address is %s and will be used as the fee recipient. "+
"We recommend using a mixed-case address (checksum) "+
"to prevent spelling mistakes in your fee recipient Ethereum address", ha, checksumAddress.Hex())
}
c.DefaultFeeRecipient = checksumAddress
params.OverrideBeaconConfig(c)
return nil
}
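The same EIP-55 checksum check, sketched outside the CLI plumbing. It assumes go-ethereum's common package, whose NewMixedcaseAddressFromString and ValidChecksum the diff itself uses; the address is illustrative.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// All-lowercase input: valid hex, but carries no EIP-55 checksum information.
	in := "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	if !common.IsHexAddress(in) {
		panic("not a valid hex address")
	}
	mixed, err := common.NewMixedcaseAddressFromString(in)
	if err != nil {
		panic(err)
	}
	checksummed := common.HexToAddress(in)
	if !mixed.ValidChecksum() {
		// Same path as the warning above: fall back to the EIP-55 form.
		fmt.Println("not checksummed, using", checksummed.Hex())
	}
}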


@@ -87,6 +87,7 @@ func TestConfigureProofOfWork(t *testing.T) {
func TestConfigureExecutionSetting(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
@@ -102,11 +103,15 @@ func TestConfigureExecutionSetting(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
assert.LogsContain(t, hook,
"is not a checksum Ethereum address",
)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
assert.Equal(t, common.HexToAddress("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"), params.BeaconConfig().DefaultFeeRecipient)
}
func TestConfigureNetwork(t *testing.T) {


@@ -779,6 +779,7 @@ func (b *BeaconNode) registerRPCService() error {
PeerManager: p2pService,
MetadataProvider: p2pService,
ChainInfoFetcher: chainService,
HeadUpdater: chainService,
HeadFetcher: chainService,
CanonicalFetcher: chainService,
ForkFetcher: chainService,
@@ -787,6 +788,7 @@ func (b *BeaconNode) registerRPCService() error {
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,


@@ -54,7 +54,9 @@ func init() {
for k, v := range gossipTopicMappings {
GossipTypeMapping[reflect.TypeOf(v)] = k
}
// Specially handle Altair Objects.
// Specially handle Altair objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
// Specially handle Bellatrix objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBlindedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
}
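A minimal sketch of the reverse lookup this init block builds: keying a map by reflect.Type lets a publisher recover the gossip topic format from a concrete message type. The message type and topic string below are stand-ins for the real protobufs and p2p constants.

package main

import (
	"fmt"
	"reflect"
)

// Stand-in for a concrete protobuf message type.
type SignedBeaconBlock struct{}

// Stand-in topic format; the real constant lives in the p2p package.
var gossipTypeMapping = map[reflect.Type]string{
	reflect.TypeOf(&SignedBeaconBlock{}): "/eth2/%x/beacon_block",
}

func main() {
	var msg interface{} = &SignedBeaconBlock{}
	fmt.Println(gossipTypeMapping[reflect.TypeOf(msg)]) // /eth2/%x/beacon_block
}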


@@ -34,7 +34,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native/v1:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/features:go_default_library",


@@ -22,7 +22,7 @@ const repeatedSearches = 2 * searchThreshold
// BlockExists returns true if the block exists, its height and any possible error encountered.
func (s *Service) BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockExists")
ctx, span := trace.StartSpan(ctx, "powchain.BlockExists")
defer span.End()
if exists, hdrInfo, err := s.headerCache.HeaderInfoByHash(hash); exists || err != nil {
@@ -45,24 +45,9 @@ func (s *Service) BlockExists(ctx context.Context, hash common.Hash) (bool, *big
return true, new(big.Int).Set(header.Number), nil
}
// BlockExistsWithCache returns true if the block exists in cache, its height and any possible error encountered.
func (s *Service) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
_, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockExistsWithCache")
defer span.End()
if exists, hdrInfo, err := s.headerCache.HeaderInfoByHash(hash); exists || err != nil {
if err != nil {
return false, nil, err
}
span.AddAttributes(trace.BoolAttribute("blockCacheHit", true))
return true, hdrInfo.Number, nil
}
span.AddAttributes(trace.BoolAttribute("blockCacheHit", false))
return false, nil, nil
}
// BlockHashByHeight returns the block hash of the block at the given height.
func (s *Service) BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockHashByHeight")
ctx, span := trace.StartSpan(ctx, "powchain.BlockHashByHeight")
defer span.End()
if exists, hInfo, err := s.headerCache.HeaderInfoByHeight(height); exists || err != nil {
@@ -90,9 +75,9 @@ func (s *Service) BlockHashByHeight(ctx context.Context, height *big.Int) (commo
return header.Hash(), nil
}
// BlockTimeByHeight fetches an eth1.0 block timestamp by its height.
// BlockTimeByHeight fetches an eth1 block timestamp by its height.
func (s *Service) BlockTimeByHeight(ctx context.Context, height *big.Int) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockTimeByHeight")
ctx, span := trace.StartSpan(ctx, "powchain.BlockTimeByHeight")
defer span.End()
if s.eth1DataFetcher == nil {
err := errors.New("nil eth1DataFetcher")
@@ -111,7 +96,7 @@ func (s *Service) BlockTimeByHeight(ctx context.Context, height *big.Int) (uint6
// This is an optimized version with the worst case being O(2*repeatedSearches) number of calls
// while in best case search for the block is performed in O(1).
func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockByTimestamp")
ctx, span := trace.StartSpan(ctx, "powchain.BlockByTimestamp")
defer span.End()
s.latestEth1DataLock.RLock()
@@ -122,15 +107,14 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
if time > latestBlkTime {
return nil, errors.Errorf("provided time is later than the current eth1 head. %d > %d", time, latestBlkTime)
}
// Initialize a pointer to eth1 chain's history to start our search
// from.
// Initialize a pointer to eth1 chain's history to start our search from.
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorTime := latestBlkTime
numOfBlocks := uint64(0)
estimatedBlk := cursorNum.Uint64()
maxTimeBuffer := searchThreshold * params.BeaconConfig().SecondsPerETH1Block
// Terminate if we cant find an acceptable block after
// Terminate if we can't find an acceptable block after
// repeated searches.
for i := 0; i < repeatedSearches; i++ {
if ctx.Err() != nil {
@@ -157,12 +141,12 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
// time - buffer <= head.time <= time + buffer
break
}
hinfo, err := s.retrieveHeaderInfo(ctx, estimatedBlk)
hInfo, err := s.retrieveHeaderInfo(ctx, estimatedBlk)
if err != nil {
return nil, err
}
cursorNum = hinfo.Number
cursorTime = hinfo.Time
cursorNum = hInfo.Number
cursorTime = hInfo.Time
}
// Exit early if we get the desired block.
@@ -170,15 +154,15 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
}
if cursorTime > time {
return s.findLessTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMaxTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
}
return s.findMoreTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMinTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
}
// Performs a search to find a target eth1 block which is earlier than or equal to the
// target time. This method is used when head.time > targetTime
func (s *Service) findLessTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := startBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := upperBoundBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -194,8 +178,8 @@ func (s *Service) findLessTargetEth1Block(ctx context.Context, startBlk *big.Int
// Performs a search to find a target eth1 block which is just earlier than or equal to the
// target time. This method is used when head.time < targetTime
func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := startBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := lowerBoundBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -203,8 +187,7 @@ func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int
if err != nil {
return nil, err
}
// Return the last block before we hit the threshold
// time.
// Return the last block before we hit the threshold time.
if info.Time > targetTime {
return s.retrieveHeaderInfo(ctx, info.Number.Uint64()-1)
}
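A hedged sketch of the downward walk in findMaxTargetEth1Block: step back one block at a time until the header time drops to or below the target. Header lookups are faked with a closure, and block times are assumed evenly spaced, which the real chain is not.

package main

import "fmt"

// findMaxTargetBlock walks down from an upper bound until the block time is
// at or below the target, mirroring the loop shape in the diff.
func findMaxTargetBlock(timeAt func(uint64) uint64, upperBound, targetTime uint64) uint64 {
	for bn := upperBound; ; bn-- {
		if timeAt(bn) <= targetTime {
			return bn
		}
	}
}

func main() {
	genesis := uint64(1_600_000_000)
	timeAt := func(n uint64) uint64 { return genesis + n*14 } // assume ~14s blocks
	fmt.Println(findMaxTargetBlock(timeAt, 100, genesis+1000)) // 71
}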


@@ -61,7 +61,7 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
require.NoError(t, err)
tickerChan := make(chan time.Time)
web3Service.headTicker = &time.Ticker{C: tickerChan}
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
tickerChan <- time.Now()
web3Service.cancel()
exitRoutine <- true
@@ -207,51 +207,6 @@ func TestBlockExists_UsesCachedBlockInfo(t *testing.T) {
require.Equal(t, 0, height.Cmp(header.Number))
}
func TestBlockExistsWithCache_UsesCachedHeaderInfo(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
server, endpoint, err := mockPOW.SetupRPCServer()
require.NoError(t, err)
t.Cleanup(func() {
server.Stop()
})
web3Service, err := NewService(context.Background(),
WithHttpEndpoints([]string{endpoint}),
WithDatabase(beaconDB),
)
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
header := &gethTypes.Header{
Number: big.NewInt(0),
}
err = web3Service.headerCache.AddHeader(header)
require.NoError(t, err)
exists, height, err := web3Service.BlockExistsWithCache(context.Background(), header.Hash())
require.NoError(t, err, "Could not get block hash with given height")
require.Equal(t, true, exists)
require.Equal(t, 0, height.Cmp(header.Number))
}
func TestBlockExistsWithCache_HeaderNotCached(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
server, endpoint, err := mockPOW.SetupRPCServer()
require.NoError(t, err)
t.Cleanup(func() {
server.Stop()
})
web3Service, err := NewService(context.Background(),
WithHttpEndpoints([]string{endpoint}),
WithDatabase(beaconDB),
)
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
exists, height, err := web3Service.BlockExistsWithCache(context.Background(), common.BytesToHash([]byte("hash")))
require.NoError(t, err, "Could not get block hash with given height")
require.Equal(t, false, exists)
require.Equal(t, (*big.Int)(nil), height)
}
func TestService_BlockNumberByTimestamp(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
testAcc, err := mock.Setup()
@@ -313,11 +268,11 @@ func TestService_BlockNumberByTimestampLessTargetTime(t *testing.T) {
defer cancel()
// Provide an unattainable target time
_, err = web3Service.findLessTargetEth1Block(ctx, hd.Number, hd.Time/2)
_, err = web3Service.findMaxTargetEth1Block(ctx, hd.Number, hd.Time/2)
require.ErrorContains(t, context.DeadlineExceeded.Error(), err)
// Provide an attainable target time
blk, err := web3Service.findLessTargetEth1Block(context.Background(), hd.Number, hd.Time-5)
blk, err := web3Service.findMaxTargetEth1Block(context.Background(), hd.Number, hd.Time-5)
require.NoError(t, err)
require.NotEqual(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not less than the head")
}
@@ -351,11 +306,11 @@ func TestService_BlockNumberByTimestampMoreTargetTime(t *testing.T) {
defer cancel()
// Provide an unattainable target time with respect to head
_, err = web3Service.findMoreTargetEth1Block(ctx, big.NewInt(0).Div(hd.Number, big.NewInt(2)), hd.Time)
_, err = web3Service.findMinTargetEth1Block(ctx, big.NewInt(0).Div(hd.Number, big.NewInt(2)), hd.Time)
require.ErrorContains(t, context.DeadlineExceeded.Error(), err)
// Provide an attainable target time with respect to head
blk, err := web3Service.findMoreTargetEth1Block(context.Background(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time)
blk, err := web3Service.findMinTargetEth1Block(context.Background(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time)
require.NoError(t, err)
require.Equal(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not equal to the head")
}


@@ -138,7 +138,8 @@ func TestProcessDeposit_InvalidPublicKey(t *testing.T) {
deposits[0].Proof, err = trie.MerkleProof(0)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
@@ -178,7 +179,8 @@ func TestProcessDeposit_InvalidSignature(t *testing.T) {
trie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
@@ -213,7 +215,8 @@ func TestProcessDeposit_UnableToVerify(t *testing.T) {
trie, _, err := util.DepositTrieFromDeposits(deposits)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: root[:],
@@ -265,7 +268,8 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {
trie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: root[:],
@@ -281,7 +285,8 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {
for i := 0; i < int(factor-1); i++ {
assert.NoError(t, trie.Insert(dataRoot[:], i))
trieRoot := trie.HashTreeRoot()
trieRoot, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data.DepositRoot = trieRoot[:]
eth1Data.DepositCount = uint64(i + 1)
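These hunks all apply the same migration: HashTreeRoot now returns (root, error), so callers must check the error instead of assuming hashing cannot fail. A sketch of the new calling convention on a stand-in trie type:

package main

import (
	"errors"
	"fmt"
)

// sparseTrie stands in for the real trie type.
type sparseTrie struct{ leaves int }

// HashTreeRoot now surfaces hashing failures instead of hiding them.
func (t *sparseTrie) HashTreeRoot() ([32]byte, error) {
	if t.leaves < 0 {
		return [32]byte{}, errors.New("corrupt trie")
	}
	return [32]byte{0x01}, nil
}

func main() {
	trie := &sparseTrie{leaves: 1}
	root, err := trie.HashTreeRoot()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#x\n", root[:4]) // 0x01000000
}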


@@ -33,6 +33,10 @@ const (
ExecutionBlockByHashMethod = "eth_getBlockByHash"
// ExecutionBlockByNumberMethod request string for JSON-RPC.
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
// Defines the seconds to wait before timing out engine endpoints with block execution semantics (newPayload, forkchoiceUpdated).
payloadAndForkchoiceUpdatedTimeout = 8 * time.Second
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)
// ForkchoiceUpdatedResponse is the response kind received by the
@@ -65,7 +69,9 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
defer func() {
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
d := time.Now().Add(payloadAndForkchoiceUpdatedTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.PayloadStatus{}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payload)
if err != nil {
@@ -75,8 +81,6 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
switch result.Status {
case pb.PayloadStatus_INVALID_BLOCK_HASH:
return nil, fmt.Errorf("could not validate block hash: %v", result.ValidationError)
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, fmt.Errorf("could not satisfy terminal block condition: %v", result.ValidationError)
case pb.PayloadStatus_ACCEPTED, pb.PayloadStatus_SYNCING:
return nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:
@@ -99,6 +103,9 @@ func (s *Service) ForkchoiceUpdated(
forkchoiceUpdatedLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
d := time.Now().Add(payloadAndForkchoiceUpdatedTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &ForkchoiceUpdatedResponse{}
err := s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, attrs)
if err != nil {
@@ -110,8 +117,6 @@ func (s *Service) ForkchoiceUpdated(
}
resp := result.Status
switch resp.Status {
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, nil, fmt.Errorf("could not satisfy terminal block condition: %v", resp.ValidationError)
case pb.PayloadStatus_SYNCING:
return nil, nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:
@@ -132,6 +137,9 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte) (*pb.Execut
getPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
d := time.Now().Add(defaultEngineTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.ExecutionPayload{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
return result, handleRPCError(err)
@@ -147,10 +155,14 @@ func (s *Service) ExchangeTransitionConfiguration(
// We set terminal block number to 0 as the parameter is not set on the consensus layer.
zeroBigNum := big.NewInt(0)
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
d := time.Now().Add(defaultEngineTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.TransitionConfiguration{}
if err := s.rpcClient.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
return handleRPCError(err)
}
// We surface an error to the user if local configuration settings mismatch
// according to the response from the execution node.
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
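Each engine call above now runs under a bounded child context. A minimal, self-contained sketch of that deadline pattern (the RPC is simulated; the 8s and 1s values are the real constants from the diff):

package main

import (
	"context"
	"fmt"
	"time"
)

// callWithTimeout derives a bounded child context and always cancels it,
// the same shape the diff adds to the engine methods.
func callWithTimeout(ctx context.Context, timeout time.Duration, call func(context.Context) error) error {
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout))
	defer cancel()
	return call(ctx)
}

func main() {
	err := callWithTimeout(context.Background(), time.Second, func(ctx context.Context) error {
		select {
		case <-time.After(2 * time.Second): // simulate a slow engine
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	fmt.Println(err) // context deadline exceeded
}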


@@ -217,27 +217,6 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(ForkchoiceUpdatedMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedInvalidTerminalBlockResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(NewPayloadMethod+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
@@ -274,18 +253,6 @@ func TestClient_HTTP(t *testing.T) {
require.ErrorContains(t, "could not validate block hash", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["InvalidTerminalBlockStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadSetup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
@@ -819,13 +786,6 @@ func fixtures() map[string]interface{} {
},
PayloadId: &id,
}
forkChoiceInvalidTerminalBlockResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
},
PayloadId: &id,
}
forkChoiceAcceptedResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
@@ -856,10 +816,6 @@ func fixtures() map[string]interface{} {
Status: pb.PayloadStatus_INVALID_BLOCK_HASH,
LatestValidHash: nil,
}
inValidTerminalBlockStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
}
acceptedStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
LatestValidHash: nil,
@@ -877,21 +833,19 @@ func fixtures() map[string]interface{} {
LatestValidHash: foo[:],
}
return map[string]interface{}{
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"InvalidTerminalBlockStatus": inValidTerminalBlockStatus,
"AcceptedStatus": acceptedStatus,
"SyncingStatus": syncingStatus,
"InvalidStatus": invalidStatus,
"UnknownStatus": unknownStatus,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedInvalidTerminalBlockResponse": forkChoiceInvalidTerminalBlockResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"AcceptedStatus": acceptedStatus,
"SyncingStatus": syncingStatus,
"InvalidStatus": invalidStatus,
"UnknownStatus": unknownStatus,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
}
}


@@ -19,8 +19,6 @@ var (
ErrUnknownPayload = errors.New("payload does not exist or is not available")
// ErrUnknownPayloadStatus when the payload status is unknown.
ErrUnknownPayloadStatus = errors.New("unknown payload status")
// ErrUnsupportedScheme for unsupported URL schemes.
ErrUnsupportedScheme = errors.New("unsupported url scheme, only http(s) and ipc are supported")
// ErrConfigMismatch when the execution node's terminal total difficulty or
// terminal block hash received via the API mismatches Prysm's configuration value.
ErrConfigMismatch = errors.New("execution client configuration mismatch")


@@ -5,6 +5,7 @@ import (
"encoding/binary"
"fmt"
"math/big"
"strings"
"time"
"github.com/ethereum/go-ethereum"
@@ -16,7 +17,9 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
coreState "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
state_native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
"github.com/prysmaticlabs/prysm/crypto/hash"
@@ -33,22 +36,28 @@ var (
const eth1DataSavingInterval = 1000
const maxTolerableDifference = 50
const defaultEth1HeaderReqLimit = uint64(1000)
const depositlogRequestLimit = 10000
const depositLogRequestLimit = 10000
const additiveFactorMultiplier = 0.10
const multiplicativeDecreaseDivisor = 2
var errTimedOut = errors.New("net/http: request canceled")
func tooMuchDataRequestedError(err error) bool {
// This error is Infura-specific (other providers might return different error messages).
return err.Error() == "query returned more than 10000 results"
}
func clientTimedOutError(err error) bool {
return strings.Contains(err.Error(), errTimedOut.Error())
}
// Eth2GenesisPowchainInfo retrieves the genesis time and eth1 block number of the beacon chain
// from the deposit contract.
func (s *Service) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
return s.chainStartData.GenesisTime, big.NewInt(int64(s.chainStartData.GenesisBlock))
}
// ProcessETH1Block processes the logs from the provided eth1Block.
// ProcessETH1Block processes logs from the provided eth1 block.
func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
query := ethereum.FilterQuery{
Addresses: []common.Address{
@@ -71,7 +80,7 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
}
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, blkNum); err != nil {
if err := s.processChainStartFromBlockNum(ctx, blkNum); err != nil {
return err
}
}
@@ -79,7 +88,7 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
}
// ProcessLog is the main method which handles the processing of all
// logs from the deposit contract on the ETH1.0 chain.
// logs from the deposit contract on the eth1 chain.
func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) error {
s.processingLock.RLock()
defer s.processingLock.RUnlock()
@@ -98,7 +107,7 @@ func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) erro
}
// ProcessDepositLog processes the log which had been received from
// the ETH1.0 chain by trying to ascertain which participant deposited
// the eth1 chain by trying to ascertain which participant deposited
// in the contract.
func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Log) error {
pubkey, withdrawalCredentials, amount, signature, merkleTreeIndex, err := contracts.UnpackDepositLogData(depositLog.Data)
@@ -131,7 +140,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
depositHash, err := depositData.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "Unable to determine hashed value of deposit")
return errors.Wrap(err, "unable to determine hashed value of deposit")
}
// Defensive check to validate incoming index.
@@ -149,20 +158,27 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
if !s.chainStartData.Chainstarted {
proof, err := s.depositTrie.MerkleProof(int(index))
if err != nil {
return errors.Wrap(err, "Unable to generate merkle proof for deposit")
return errors.Wrap(err, "unable to generate merkle proof for deposit")
}
deposit.Proof = proof
}
// We always store all historical deposits in the DB.
err = s.cfg.depositCache.InsertDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.HashTreeRoot())
root, err := s.depositTrie.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "unable to determine root of deposit trie")
}
err = s.cfg.depositCache.InsertDeposit(ctx, deposit, depositLog.BlockNumber, index, root)
if err != nil {
return errors.Wrap(err, "unable to insert deposit into cache")
}
validData := true
if !s.chainStartData.Chainstarted {
s.chainStartData.ChainstartDeposits = append(s.chainStartData.ChainstartDeposits, deposit)
root := s.depositTrie.HashTreeRoot()
root, err := s.depositTrie.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "unable to determine root of deposit trie")
}
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)),
@@ -172,7 +188,11 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
validData = false
}
} else {
s.cfg.depositCache.InsertPendingDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.HashTreeRoot())
root, err := s.depositTrie.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "unable to determine root of deposit trie")
}
s.cfg.depositCache.InsertPendingDeposit(ctx, deposit, depositLog.BlockNumber, index, root)
}
if validData {
log.WithFields(logrus.Fields{
@@ -206,7 +226,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
}
// ProcessChainStart processes the log which had been received from
// the ETH1.0 chain by trying to determine when to start the beacon chain.
// the eth1 chain by trying to determine when to start the beacon chain.
func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte, blockNumber *big.Int) {
s.chainStartData.Chainstarted = true
s.chainStartData.GenesisBlock = blockNumber.Uint64()
@@ -216,12 +236,16 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
for i := range s.chainStartData.ChainstartDeposits {
proof, err := s.depositTrie.MerkleProof(i)
if err != nil {
log.Errorf("Unable to generate deposit proof %v", err)
log.Errorf("unable to generate deposit proof %v", err)
}
s.chainStartData.ChainstartDeposits[i].Proof = proof
}
root := s.depositTrie.HashTreeRoot()
root, err := s.depositTrie.HashTreeRoot()
if err != nil { // This should never happen.
log.WithError(err).Error("unable to determine root of deposit trie, aborting chain start")
return
}
s.chainStartData.Eth1Data = &ethpb.Eth1Data{
DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)),
DepositRoot: root[:],
@@ -238,15 +262,15 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
},
})
if err := s.savePowchainData(s.ctx); err != nil {
// continue on, if the save fails as this will get re-saved
// continue on if the save fails as this will get re-saved
// in the next interval.
log.Error(err)
}
}
// createGenesisTime adds in the genesis delay to the eth1 block time
// on which it was triggered.
func createGenesisTime(timeStamp uint64) uint64 {
// adds in the genesis delay to the eth1 block time
// on which it was triggered.
return timeStamp + params.BeaconConfig().GenesisDelay
}
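A one-line worked example of the formula above, assuming mainnet's GENESIS_DELAY of 604800 seconds (7 days); the diff reads the delay from params.BeaconConfig().GenesisDelay.

package main

import "fmt"

// Assumed mainnet value; illustrative only.
const genesisDelay = 604800 // 7 days in seconds

func createGenesisTime(timeStamp uint64) uint64 {
	return timeStamp + genesisDelay
}

func main() {
	fmt.Println(createGenesisTime(1606224000)) // trigger block time + 7 days = 1606828800
}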
@@ -283,7 +307,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
}
return nil
}
latestFollowHeight, err := s.followBlockHeight(ctx)
latestFollowHeight, err := s.followedBlockHeight(ctx)
if err != nil {
return err
}
@@ -309,7 +333,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
// reset our query and end block in this case.
withinLimit := remainingLogs < depositlogRequestLimit
withinLimit := remainingLogs < depositLogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
@@ -404,9 +428,9 @@ func (s *Service) processPastLogs(ctx context.Context) error {
// logs from the period last polled to now.
func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
// We request for the nth block behind the current head, in order to have
// stabilized logs when we retrieve it from the 1.0 chain.
// stabilized logs when we retrieve it from the eth1 chain.
requestedBlock, err := s.followBlockHeight(ctx)
requestedBlock, err := s.followedBlockHeight(ctx)
if err != nil {
return err
}
@@ -448,18 +472,17 @@ func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int)
return bHash, timeStamp, nil
}
// checkBlockNumberForChainStart checks the given block number for if chainstart has occurred.
func (s *Service) checkBlockNumberForChainStart(ctx context.Context, blkNum *big.Int) error {
func (s *Service) processChainStartFromBlockNum(ctx context.Context, blkNum *big.Int) error {
bHash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
if err != nil {
return err
}
s.checkForChainstart(ctx, bHash, blkNum, timeStamp)
s.processChainStartIfReady(ctx, bHash, blkNum, timeStamp)
return nil
}
func (s *Service) checkHeaderForChainstart(ctx context.Context, header *gethTypes.Header) {
s.checkForChainstart(ctx, header.Hash(), header.Number, header.Time)
func (s *Service) processChainStartFromHeader(ctx context.Context, header *gethTypes.Header) {
s.processChainStartIfReady(ctx, header.Hash(), header.Number, header.Time)
}
func (s *Service) checkHeaderRange(ctx context.Context, start, end uint64, headersMap map[uint64]*gethTypes.Header,
@@ -475,7 +498,7 @@ func (s *Service) checkHeaderRange(ctx context.Context, start, end uint64, heade
i--
continue
}
s.checkHeaderForChainstart(ctx, h)
s.processChainStartFromHeader(ctx, h)
}
}
return nil
@@ -495,7 +518,7 @@ func (s *Service) currentCountAndTime(ctx context.Context, blockTime uint64) (ui
return valCount, createGenesisTime(blockTime)
}
func (s *Service) checkForChainstart(ctx context.Context, blockHash [32]byte, blockNumber *big.Int, blockTime uint64) {
func (s *Service) processChainStartIfReady(ctx context.Context, blockHash [32]byte, blockNumber *big.Int, blockTime uint64) {
valCount, genesisTime := s.currentCountAndTime(ctx, blockTime)
if valCount == 0 {
return
@@ -507,9 +530,15 @@ func (s *Service) checkForChainstart(ctx context.Context, blockHash [32]byte, bl
}
}
// save all powchain related metadata to disk.
// savePowchainData saves all powchain related metadata to disk.
func (s *Service) savePowchainData(ctx context.Context) error {
pbState, err := v1.ProtobufBeaconState(s.preGenesisState.InnerStateUnsafe())
var pbState *ethpb.BeaconState
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStatePhase0(s.preGenesisState.InnerStateUnsafe())
} else {
pbState, err = v1.ProtobufBeaconState(s.preGenesisState.InnerStateUnsafe())
}
if err != nil {
return err
}


@@ -589,7 +589,7 @@ func TestCheckForChainstart_NoValidator(t *testing.T) {
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := testDB.SetupDB(t)
s := newPowchainService(t, testAcc, beaconDB)
s.checkForChainstart(context.Background(), [32]byte{}, nil, 0)
s.processChainStartIfReady(context.Background(), [32]byte{}, nil, 0)
require.LogsDoNotContain(t, hook, "Could not determine active validator count from pre genesis state")
}


@@ -13,7 +13,6 @@ import (
"sync"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -30,7 +29,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
nativev1 "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/v1"
native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
@@ -98,7 +97,6 @@ type POWBlockFetcher interface {
BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error)
BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error)
BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
}
// Chain defines a standard interface for the powchain service in Prysm.
@@ -114,7 +112,6 @@ type RPCDataFetcher interface {
Close()
HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
}
// RPCClient defines the rpc methods required to interact with the eth1 node.
@@ -139,10 +136,10 @@ type config struct {
}
// Service fetches important information about the canonical
// Ethereum ETH1.0 chain via a web3 endpoint using an ethclient. The Random
// Beacon Chain requires synchronization with the ETH1.0 chain's current
// blockhash, block number, and access to logs within the
// Validator Registration Contract on the ETH1.0 chain to kick off the beacon
// eth1 chain via a web3 endpoint using an ethclient.
// The beacon chain requires synchronization with the eth1 chain's current
// block hash, block number, and access to logs within the
// Validator Registration Contract on the eth1 chain to kick off the beacon
// chain's validator registration process.
type Service struct {
connectedETH1 bool
@@ -152,7 +149,7 @@ type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
headTicker *time.Ticker
eth1HeadTicker *time.Ticker
httpLogger bind.ContractFilterer
eth1DataFetcher RPCDataFetcher
rpcClient RPCClient
@@ -173,11 +170,11 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
cancel()
return nil, errors.Wrap(err, "could not setup deposit trie")
return nil, errors.Wrap(err, "could not set up deposit trie")
}
genState, err := transition.EmptyGenesisState()
if err != nil {
return nil, errors.Wrap(err, "could not setup genesis state")
return nil, errors.Wrap(err, "could not set up genesis state")
}
s := &Service{
@@ -201,7 +198,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
},
lastReceivedMerkleIndex: -1,
preGenesisState: genState,
headTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
eth1HeadTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
}
for _, opt := range opts {
@@ -225,7 +222,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
return s, nil
}
// Start a web3 service's main event loop.
// Start the powchain service's main event loop.
func (s *Service) Start() {
if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
log.WithError(err).Error("Could not connect to execution endpoint")
@@ -273,7 +270,7 @@ func (s *Service) Stop() error {
func (s *Service) ClearPreGenesisData() {
s.chainStartData.ChainstartDeposits = []*ethpb.Deposit{}
if features.Get().EnableNativeState {
s.preGenesisState = &nativev1.BeaconState{}
s.preGenesisState = &native.BeaconState{}
} else {
s.preGenesisState = &v1.BeaconState{}
}
@@ -372,7 +369,7 @@ func (s *Service) ETH1ConnectionErrors() []error {
// refers to the latest eth1 block which follows the condition: eth1_timestamp +
// SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time
func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
func (s *Service) followedBlockHeight(_ context.Context) (uint64, error) {
latestValidBlock := uint64(0)
if s.latestEth1Data.BlockHeight > params.BeaconConfig().Eth1FollowDistance {
latestValidBlock = s.latestEth1Data.BlockHeight - params.BeaconConfig().Eth1FollowDistance
@@ -386,8 +383,7 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
}
s.cfg.depositCache.InsertDepositContainers(ctx, ctrs)
if !s.chainStartData.Chainstarted {
// do not add to pending cache
// if no genesis state exists.
// Do not add to pending cache if no genesis state exists.
validDepositsCount.Add(float64(s.preGenesisState.Eth1DepositIndex()))
return nil
}
@@ -395,7 +391,7 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
if err != nil {
return err
}
// Default to all deposits post-genesis deposits in
// Default to all post-genesis deposits in
// the event we cannot find a finalized state.
currIndex := genesisState.Eth1DepositIndex()
chkPt, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
@@ -411,17 +407,17 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
// Set deposit index to the one in the current archived state.
currIndex = fState.Eth1DepositIndex()
// when a node pauses for some time and starts again, the deposits to finalize
// accumulates. we finalize them here before we are ready to receive a block.
// When a node pauses for some time and starts again, the deposits to finalize
// accumulates. We finalize them here before we are ready to receive a block.
// Otherwise, the first few blocks will be slower to compute as we will
// hold the lock and be busy finalizing the deposits.
// The deposit index in the state is always the index of the next deposit
// to be included(rather than the last one to be processed). This was most likely
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
actualIndex := int64(currIndex) - 1 // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex)
// Deposit proofs are only used during state transition and can be safely removed to save space.
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.depositCache.PruneProofs(ctx, actualIndex); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
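A worked example of the off-by-one handled above: a state deposit index of 7 means deposits 0 through 6 are already included, so the last finalizable index is 6.

package main

import "fmt"

func main() {
	currIndex := uint64(7)              // state's Eth1DepositIndex: next deposit to include
	actualIndex := int64(currIndex) - 1 // last deposit actually processed
	fmt.Println(actualIndex)            // 6
}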
@@ -498,8 +494,7 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
return headers, nil
}
// safelyHandleHeader will recover and log any panic that occurs from the
// block
// safelyHandlePanic will recover and log any panic that occurs while handling a block.
func safelyHandlePanic() {
if r := recover(); r != nil {
log.WithFields(logrus.Fields{
@@ -522,7 +517,7 @@ func (s *Service) handleETH1FollowDistance() {
log.Warn("Execution client is not syncing")
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
if err := s.processChainStartFromBlockNum(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
s.runError = err
log.Error(err)
return
@@ -530,9 +525,8 @@ func (s *Service) handleETH1FollowDistance() {
}
// If the last requested block has not changed,
// we do not request batched logs as this means there are no new
// logs for the powchain service to process. Also is a potential
// failure condition as would mean we have not respected the protocol
// threshold.
// logs for the powchain service to process. It is also a potential
// failure condition, as it would mean we have not respected the protocol threshold.
if s.latestEth1Data.LastRequestedBlock == s.latestEth1Data.BlockHeight {
log.Error("Beacon node is not respecting the follow distance")
return
@@ -595,7 +589,7 @@ func (s *Service) initPOWService() {
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 {
genHash := common.BytesToHash(s.chainStartData.Eth1Data.BlockHash)
genBlock := s.chainStartData.GenesisBlock
// In the event our provided chainstart data references a non-existent blockhash
// In the event our provided chainstart data references a non-existent block hash,
// we assume the genesis block to be 0.
if genHash != [32]byte{} {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
@@ -618,7 +612,7 @@ func (s *Service) initPOWService() {
}
}
// run subscribes to all the services for the ETH1.0 chain.
// run subscribes to all the services for the eth1 chain.
func (s *Service) run(done <-chan struct{}) {
s.runError = nil
@@ -636,7 +630,7 @@ func (s *Service) run(done <-chan struct{}) {
s.updateConnectedETH1(false)
log.Debug("Context closed, exiting goroutine")
return
case <-s.headTicker.C:
case <-s.eth1HeadTicker.C:
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
s.pollConnectionStatus(s.ctx)
@@ -692,10 +686,10 @@ func (s *Service) logTillChainStart(ctx context.Context) {
}
// cacheHeadersForEth1DataVote makes sure that voting for eth1data after startup utilizes cached headers
// instead of making multiple RPC requests to the ETH1 endpoint.
// instead of making multiple RPC requests to the eth1 endpoint.
func (s *Service) cacheHeadersForEth1DataVote(ctx context.Context) error {
// Find the end block to request from.
end, err := s.followBlockHeight(ctx)
end, err := s.followedBlockHeight(ctx)
if err != nil {
return err
}
@@ -703,20 +697,48 @@ func (s *Service) cacheHeadersForEth1DataVote(ctx context.Context) error {
if err != nil {
return err
}
// We call batchRequestHeaders for its header caching side-effect, so we don't need the return value.
_, err = s.batchRequestHeaders(start, end)
if err != nil {
return err
return s.cacheBlockHeaders(start, end)
}
// Caches block headers from the desired range.
func (s *Service) cacheBlockHeaders(start, end uint64) error {
batchSize := s.cfg.eth1HeaderReqLimit
for i := start; i < end; i += batchSize {
startReq := i
endReq := i + batchSize
if endReq > end {
endReq = end
}
// We call batchRequestHeaders for its header caching side-effect, so we don't need the return value.
_, err := s.batchRequestHeaders(startReq, endReq)
if err != nil {
if clientTimedOutError(err) {
// Reduce batch size as eth1 node is
// unable to respond to the request in time.
batchSize /= 2
// Always have it greater than 0.
if batchSize == 0 {
batchSize += 1
}
// Reset request value
if i > batchSize {
i -= batchSize
}
continue
}
return err
}
}
return nil
}
// determines the earliest voting block from which to start caching all our previous headers from.
// Determines the earliest voting block from which to start caching all our previous headers from.
func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock uint64) (uint64, error) {
genesisTime := s.chainStartData.GenesisTime
currSlot := slots.CurrentSlot(genesisTime)
// In the event genesis has not occurred yet, we just request go back follow_distance blocks.
// In the event genesis has not occurred yet, we just request to go back follow_distance blocks.
if genesisTime == 0 || currSlot == 0 {
earliestBlk := uint64(0)
if followBlock > params.BeaconConfig().Eth1FollowDistance {
@@ -745,9 +767,12 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
if eth1DataInDB == nil {
return nil
}
s.depositTrie = trie.CreateTrieFromProto(eth1DataInDB.Trie)
s.chainStartData = eth1DataInDB.ChainstartData
var err error
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
if err != nil {
return err
}
s.chainStartData = eth1DataInDB.ChainstartData
if !reflect.ValueOf(eth1DataInDB.BeaconState).IsZero() {
s.preGenesisState, err = v1.InitializeFromProto(eth1DataInDB.BeaconState)
if err != nil {
@@ -763,7 +788,7 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
return nil
}
// validates that all deposit containers are valid and have their relevant indices
// Validates that all deposit containers are valid and have their relevant indices
// in order.
func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
ctrLen := len(ctrs)
@@ -786,7 +811,7 @@ func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
return true
}
// validates the current powchain data saved and makes sure that any
// Validates the current powchain data is saved and makes sure that any
// embedded genesis state is correctly accounted for.
func (s *Service) ensureValidPowchainData(ctx context.Context) error {
genState, err := s.cfg.beaconDB.GenesisState(ctx)
@@ -802,7 +827,13 @@ func (s *Service) ensureValidPowchainData(ctx context.Context) error {
return errors.Wrap(err, "unable to retrieve eth1 data")
}
if eth1Data == nil || !eth1Data.ChainstartData.Chainstarted || !validateDepositContainers(eth1Data.DepositContainers) {
pbState, err := v1.ProtobufBeaconState(s.preGenesisState.InnerStateUnsafe())
var pbState *ethpb.BeaconState
var err error
if features.Get().EnableNativeState {
pbState, err = native.ProtobufBeaconStatePhase0(s.preGenesisState.InnerStateUnsafe())
} else {
pbState, err = v1.ProtobufBeaconState(s.preGenesisState.InnerStateUnsafe())
}
if err != nil {
return err
}


@@ -12,7 +12,9 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
gethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
@@ -265,7 +267,7 @@ func TestFollowBlock_OK(t *testing.T) {
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
h, err := web3Service.followBlockHeight(context.Background())
h, err := web3Service.followedBlockHeight(context.Background())
require.NoError(t, err)
assert.Equal(t, baseHeight, h, "Unexpected block height")
numToForward := uint64(2)
@@ -278,7 +280,7 @@ func TestFollowBlock_OK(t *testing.T) {
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
h, err = web3Service.followBlockHeight(context.Background())
h, err = web3Service.followedBlockHeight(context.Background())
require.NoError(t, err)
assert.Equal(t, expectedHeight, h, "Unexpected block height")
}
@@ -843,3 +845,50 @@ func TestETH1Endpoints(t *testing.T) {
// Check endpoints are all present.
assert.DeepSSZEqual(t, endpoints, s1.ETH1Endpoints(), "Unexpected http endpoint slice")
}
func TestService_CacheBlockHeaders(t *testing.T) {
rClient := &slowRPCClient{limit: 1000}
s := &Service{
cfg: &config{eth1HeaderReqLimit: 1000},
rpcClient: rClient,
headerCache: newHeaderCache(),
}
assert.NoError(t, s.cacheBlockHeaders(1, 1000))
assert.Equal(t, 1, rClient.numOfCalls)
// Reset the call counter.
rClient.numOfCalls = 0
assert.NoError(t, s.cacheBlockHeaders(1000, 3000))
// 1000 - 2000 would be 1001 headers, which is higher than our request limit; the
// batch size is then reduced to 500 and the request retried.
assert.Equal(t, 5, rClient.numOfCalls)
}
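The 5-call assertion falls out of the retry arithmetic: the first batch over [1000, 2000] asks for 1001 headers (the range is inclusive), times out, the batch size halves to 500 and the cursor rewinds, and the remaining range then succeeds in four batches. A standalone simulation of that loop:

package main

import "fmt"

func main() {
	start, end, batchSize, limit := uint64(1000), uint64(3000), uint64(1000), uint64(1000)
	calls := 0
	for i := start; i < end; i += batchSize {
		startReq, endReq := i, i+batchSize
		if endReq > end {
			endReq = end
		}
		calls++
		if endReq-startReq+1 > limit { // inclusive header range too large: "timeout"
			batchSize /= 2
			if batchSize == 0 {
				batchSize = 1
			}
			if i > batchSize {
				i -= batchSize // rewind so the failed range is retried
			}
			continue
		}
	}
	fmt.Println(calls) // 5
}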
type slowRPCClient struct {
limit int
numOfCalls int
}
func (s *slowRPCClient) Close() {
panic("implement me")
}
func (s *slowRPCClient) BatchCall(b []rpc.BatchElem) error {
s.numOfCalls++
if len(b) > s.limit {
return errTimedOut
}
for _, e := range b {
num, err := hexutil.DecodeBig(e.Args[0].(string))
if err != nil {
return err
}
h := &gethTypes.Header{Number: num}
*e.Result.(*gethTypes.Header) = *h
}
return nil
}
func (s *slowRPCClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
panic("implement me")
}


@@ -28,6 +28,7 @@ type EngineClient struct {
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
TerminalBlockHash []byte
TerminalBlockHashExists bool
OverrideValidHash [32]byte
}
// NewPayload --
@@ -37,8 +38,11 @@ func (e *EngineClient) NewPayload(_ context.Context, _ *pb.ExecutionPayload) ([]
// ForkchoiceUpdated --
func (e *EngineClient) ForkchoiceUpdated(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
_ context.Context, fcs *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error) {
if e.OverrideValidHash != [32]byte{} && bytesutil.ToBytes32(fcs.HeadBlockHash) == e.OverrideValidHash {
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, nil
}
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, e.ErrForkchoiceUpdated
}


@@ -71,8 +71,3 @@ func (_ *FaultyMockPOWChain) ClearPreGenesisData() {
func (_ *FaultyMockPOWChain) IsConnectedToETH1() bool {
return true
}
// BlockExistsWithCache --
func (f *FaultyMockPOWChain) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
return f.BlockExists(ctx, hash)
}


@@ -179,11 +179,6 @@ func (m *POWChain) InsertBlock(height int, time uint64, hash []byte) *POWChain {
return m
}
// BlockExistsWithCache --
func (m *POWChain) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
return m.BlockExists(ctx, hash)
}
func SetupRPCServer() (*rpc.Server, string, error) {
srv := rpc.NewServer()
if err := srv.RegisterName("eth", &testETHRPC{}); err != nil {


@@ -16,44 +16,78 @@ import (
"github.com/r3labs/sse"
)
const (
versionHeader = "Eth-Consensus-Version"
grpcVersionHeader = "Grpc-metadata-Eth-Consensus-Version"
)
type sszConfig struct {
sszPath string
fileName string
responseJson sszResponseJson
responseJson sszResponse
}
func handleGetBeaconStateSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v1/debug/beacon/states/{state_id}/ssz",
fileName: "beacon_state.ssz",
responseJson: &beaconStateSSZResponseJson{},
responseJson: &sszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleGetBeaconBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v1/beacon/blocks/{block_id}/ssz",
fileName: "beacon_block.ssz",
responseJson: &blockSSZResponseJson{},
responseJson: &sszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleGetBeaconStateSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v2/debug/beacon/states/{state_id}/ssz",
fileName: "beacon_state.ssz",
responseJson: &beaconStateSSZResponseV2Json{},
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleGetBeaconBlockSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v2/beacon/blocks/{block_id}/ssz",
fileName: "beacon_block.ssz",
responseJson: &blockSSZResponseV2Json{},
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleSubmitBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
return handlePostSSZ(m, endpoint, w, req, sszConfig{})
}
func handleSubmitBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
return handlePostSSZ(m, endpoint, w, req, sszConfig{})
}
func handleProduceBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "produce_beacon_block.ssz",
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleProduceBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
config := sszConfig{
fileName: "produce_blinded_beacon_block.ssz",
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
@@ -69,7 +103,7 @@ func handleGetSSZ(
return false
}
if errJson := prepareSSZRequestForProxying(m, endpoint, req, config.sszPath); errJson != nil {
if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
@@ -112,6 +146,53 @@ func handleGetSSZ(
return true
}
func handlePostSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
config sszConfig,
) (handled bool) {
if !sszPosted(req) {
return false
}
if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
prepareCustomHeaders(req)
if errJson := preparePostedSSZData(req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponse, errJson := m.ProxyRequest(req)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponseBody, errJson := apimiddleware.ReadGrpcResponseBody(grpcResponse.Body)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
// The error response has already been written, so report the request as handled.
return true
}
if respHasError {
return true
}
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
return true
}
func sszRequested(req *http.Request) bool {
accept, ok := req.Header["Accept"]
if !ok {
@@ -125,24 +206,57 @@ func sszRequested(req *http.Request) bool {
return false
}
func prepareSSZRequestForProxying(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
req *http.Request, sszPath string,
) apimiddleware.ErrorJson {
func sszPosted(req *http.Request) bool {
ct, ok := req.Header["Content-Type"]
if !ok {
return false
}
if len(ct) != 1 {
return false
}
return ct[0] == "application/octet-stream"
}
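For reference, here is a minimal sketch of the request shape that sszPosted and prepareCustomHeaders together expect from a REST caller. The URL, port, and payload are placeholders, not values taken from this changeset:

package clientutil

import (
	"bytes"
	"net/http"
)

// postSSZBlock is illustrative only: the octet-stream Content-Type is what
// makes sszPosted return true, and prepareCustomHeaders later moves the
// Eth-Consensus-Version header into the gRPC metadata header.
func postSSZBlock(sszBlock []byte) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodPost, "http://localhost:3500/eth/v1/beacon/blocks", bytes.NewReader(sszBlock))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("Eth-Consensus-Version", "bellatrix")
	return http.DefaultClient.Do(req)
}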
func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
req.URL.Scheme = "http"
req.URL.Host = m.GatewayAddress
req.RequestURI = ""
req.URL.Path = sszPath
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, []string{}); errJson != nil {
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, endpoint.RequestURLLiterals); errJson != nil {
return errJson
}
// We have to add the prefix after handling parameters because adding the prefix changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path
if errJson := apimiddleware.HandleQueryParameters(req, endpoint.RequestQueryParams); errJson != nil {
return errJson
}
// We have to add new segments after handling parameters because it changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path + "/ssz"
return nil
}
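The net effect of the refactored prepareSSZRequestForProxying is that the internal SSZ path is derived from the endpoint's own path instead of a hard-coded sszPath argument. A toy sketch of the final rewrite step, with an illustrative block ID:

package clientutil

import "fmt"

// rewriteForSSZ mirrors, in simplified form, the last lines of
// prepareSSZRequestForProxying: URL parameters are resolved first, and only
// then are new segments added, since inserting segments earlier would shift
// the segment indexes that parameter handling relies on.
func rewriteForSSZ(resolvedPath string) string {
	return "/internal" + resolvedPath + "/ssz"
}

func exampleRewrite() {
	fmt.Println(rewriteForSSZ("/eth/v2/beacon/blocks/head"))
	// Prints: /internal/eth/v2/beacon/blocks/head/ssz
}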
func serializeMiddlewareResponseIntoSSZ(respJson sszResponseJson) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
func prepareCustomHeaders(req *http.Request) {
ver := req.Header.Get(versionHeader)
if ver != "" {
req.Header.Del(versionHeader)
req.Header.Add(grpcVersionHeader, ver)
}
}
func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
buf, err := io.ReadAll(req.Body)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
}
j := sszRequestJson{Data: base64.StdEncoding.EncodeToString(buf)}
data, err := json.Marshal(j)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not prepare POST data")
}
req.Body = io.NopCloser(bytes.NewBuffer(data))
req.ContentLength = int64(len(data))
req.Header.Set("Content-Type", "application/json")
return nil
}
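To make the body transformation concrete, this sketch reproduces what preparePostedSSZData does to the payload; the 4-byte input matches the one used in TestPreparePostedSszData further down:

package clientutil

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// wrapSSZ shows the rewrite applied to POST bodies: raw SSZ bytes are
// base64-encoded and wrapped in the single-field JSON object (sszRequestJson)
// that the gRPC gateway expects.
func wrapSSZ(raw []byte) ([]byte, error) {
	return json.Marshal(struct {
		Data string `json:"data"`
	}{Data: base64.StdEncoding.EncodeToString(raw)})
}

func exampleWrap() {
	out, _ := wrapSSZ([]byte("body"))
	fmt.Println(string(out), len(out))
	// Prints: {"data":"Ym9keQ=="} 19, the content length asserted in the test.
}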
func serializeMiddlewareResponseIntoSSZ(respJson sszResponse) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
// Serialize the SSZ part of the deserialized value.
data, err := base64.StdEncoding.DecodeString(respJson.SSZData())
if err != nil {
@@ -168,7 +282,7 @@ func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWrite
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
w.Header().Set("Eth-Consensus-Version", respVersion)
w.Header().Set(versionHeader, respVersion)
if statusCodeHeader != "" {
code, err := strconv.Atoi(statusCodeHeader)
if err != nil {

View File

@@ -70,11 +70,21 @@ func TestPrepareSSZRequestForProxying(t *testing.T) {
var body bytes.Buffer
request := httptest.NewRequest("GET", "http://foo.example", &body)
errJson := prepareSSZRequestForProxying(middleware, endpoint, request, "/ssz")
errJson := prepareSSZRequestForProxying(middleware, endpoint, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "/internal/ssz", request.URL.Path)
}
func TestPreparePostedSszData(t *testing.T) {
var body bytes.Buffer
body.Write([]byte("body"))
request := httptest.NewRequest("POST", "http://foo.example", &body)
preparePostedSSZData(request)
assert.Equal(t, int64(19), request.ContentLength)
assert.Equal(t, "application/json", request.Header.Get("Content-Type"))
}
func TestSerializeMiddlewareResponseIntoSSZ(t *testing.T) {
t.Run("ok", func(t *testing.T) {
j := testSSZResponseJson{
@@ -133,7 +143,7 @@ func TestWriteSSZResponseHeaderAndBody(t *testing.T) {
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "attachment; filename=test.ssz", v[0])
v, ok = writer.Header()["Eth-Consensus-Version"]
v, ok = writer.Header()[versionHeader]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "version", v[0])

View File

@@ -110,11 +110,13 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlockPostRequest,
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlockSSZ}
case "/eth/v1/beacon/blinded_blocks":
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlindedBlockPostRequest,
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlindedBlockSSZ}
case "/eth/v1/beacon/blocks/{block_id}":
endpoint.GetResponse = &blockResponseJson{}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZ}
@@ -221,6 +223,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedV2Block,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlockSSZ}
case "/eth/v1/validator/blinded_blocks/{slot}":
endpoint.GetResponse = &produceBlindedBlockResponseJson{}
endpoint.RequestURLLiterals = []string{"slot"}
@@ -228,6 +231,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
case "/eth/v1/validator/attestation_data":
endpoint.GetResponse = &produceAttestationDataResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}

View File

@@ -7,6 +7,10 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
)
//----------------
// Requests and responses.
//----------------
// genesisResponseJson is used in /beacon/genesis API endpoint.
type genesisResponseJson struct {
Data *genesisResponse_GenesisJson `json:"data"`
@@ -457,7 +461,7 @@ type blindedBeaconBlockBodyBellatrixJson struct {
Deposits []*depositJson `json:"deposits"`
VoluntaryExits []*signedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *syncAggregateJson `json:"sync_aggregate"`
ExecutionPayloadHeader *executionPayloadHeaderJson `json:"execution_payload"`
ExecutionPayloadHeader *executionPayloadHeaderJson `json:"execution_payload_header"`
}
type executionPayloadJson struct {
@@ -489,7 +493,7 @@ type executionPayloadHeaderJson struct {
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
BlockHash string `json:"block_hash" hex:"true"`
TransactionsRoot string `json:"transactions_root" hex:"true"`
}
@@ -833,63 +837,38 @@ type syncCommitteeContributionJson struct {
// SSZ
// ---------------
// sszResponseJson is a common abstraction over all SSZ responses.
type sszResponseJson interface {
type sszRequestJson struct {
Data string `json:"data"`
}
// sszResponse is a common abstraction over all SSZ responses.
type sszResponse interface {
SSZVersion() string
SSZData() string
}
// blockSSZResponseJson is used in /beacon/blocks/{block_id} API endpoint.
type blockSSZResponseJson struct {
type sszResponseJson struct {
Data string `json:"data"`
}
func (ssz *blockSSZResponseJson) SSZData() string {
func (ssz *sszResponseJson) SSZData() string {
return ssz.Data
}
func (*blockSSZResponseJson) SSZVersion() string {
func (*sszResponseJson) SSZVersion() string {
return strings.ToLower(ethpbv2.Version_PHASE0.String())
}
// blockSSZResponseV2Json is used in /v2/beacon/blocks/{block_id} API endpoint.
type blockSSZResponseV2Json struct {
type versionedSSZResponseJson struct {
Version string `json:"version"`
Data string `json:"data"`
}
func (ssz *blockSSZResponseV2Json) SSZData() string {
func (ssz *versionedSSZResponseJson) SSZData() string {
return ssz.Data
}
func (ssz *blockSSZResponseV2Json) SSZVersion() string {
return ssz.Version
}
// beaconStateSSZResponseJson is used in /debug/beacon/states/{state_id} API endpoint.
type beaconStateSSZResponseJson struct {
Data string `json:"data"`
}
func (ssz *beaconStateSSZResponseJson) SSZData() string {
return ssz.Data
}
func (*beaconStateSSZResponseJson) SSZVersion() string {
return strings.ToLower(ethpbv2.Version_PHASE0.String())
}
// beaconStateSSZResponseV2Json is used in /v2/debug/beacon/states/{state_id} API endpoint.
type beaconStateSSZResponseV2Json struct {
Version string `json:"version"`
Data string `json:"data"`
}
func (ssz *beaconStateSSZResponseV2Json) SSZData() string {
return ssz.Data
}
func (ssz *beaconStateSSZResponseV2Json) SSZVersion() string {
func (ssz *versionedSSZResponseJson) SSZVersion() string {
return ssz.Version
}
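Since the four per-endpoint response structs collapse into two, it is worth noting that both satisfy the new sszResponse interface. A compile-time assertion in the usual Go style (not part of this changeset) would be:

var (
	_ sszResponse = (*sszResponseJson)(nil)
	_ sszResponse = (*versionedSSZResponseJson)(nil)
)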

View File

@@ -44,6 +44,7 @@ go_library(
"//consensus-types/wrapper:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
@@ -56,6 +57,7 @@ go_library(
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
@@ -114,6 +116,7 @@ go_test(
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],

View File

@@ -18,16 +18,21 @@ import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/network/forks"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
"github.com/prysmaticlabs/prysm/proto/migration"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
const versionHeader = "eth-consensus-version"
// blockIdParseError represents an error scenario where a block ID could not be parsed.
type blockIdParseError struct {
message string
@@ -109,7 +114,7 @@ func (bs *Server) GetBlockHeader(ctx context.Context, req *ethpbv1.BlockRequest)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoot)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -175,7 +180,7 @@ func (bs *Server) ListBlockHeaders(ctx context.Context, req *ethpbv1.BlockHeader
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
if !isOptimistic {
isOptimistic, err = bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
isOptimistic, err = bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -223,6 +228,45 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
return &emptypb.Empty{}, nil
}
// SubmitBlockSSZ instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be
// included in the beacon chain. The beacon node is not required to validate the signed BeaconBlock, and a successful
// response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the
// new block into its state, and therefore validate the block internally; however, blocks that fail validation are
// still broadcast, but a different status code is returned (202).
//
// The provided block must be SSZ-serialized.
func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlockSSZ")
defer span.End()
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
ver := md.Get(versionHeader)
if len(ver) == 0 {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
forkVer, err := schedule.VersionForName(ver[0])
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
}
unmarshaler, err := detect.FromForkVersion(forkVer)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
}
block, err := unmarshaler.UnmarshalBeaconBlock(req.Data)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
}
root, err := block.Block().HashTreeRoot()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
}
return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
}
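SubmitBlockSSZ reads the consensus version from incoming gRPC metadata rather than from a request field. A sketch of how a direct gRPC caller might attach it; REST callers get this via prepareCustomHeaders, and the helper name here is ours:

package clientutil

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// attachConsensusVersion returns a context carrying the eth-consensus-version
// metadata entry that SubmitBlockSSZ and SubmitBlindedBlockSSZ look up.
func attachConsensusVersion(ctx context.Context, version string) context.Context {
	return metadata.NewOutgoingContext(ctx, metadata.Pairs("eth-consensus-version", version))
}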
// SubmitBlindedBlock instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
// and publish a `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
@@ -259,6 +303,48 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
return &emptypb.Empty{}, nil
}
// SubmitBlindedBlockSSZ instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
// and publish a `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
// to be included in the beacon chain. The beacon node is not required to validate the signed
// `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
// successful. The beacon node is expected to integrate the new block into its state, and
// therefore validate the block internally; however, blocks that fail validation are still
// broadcast, but a different status code is returned (202).
//
// The provided block must be SSZ-serialized.
func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlockSSZ")
defer span.End()
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+versionHeader+" header")
}
ver := md.Get(versionHeader)
if len(ver) == 0 {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+versionHeader+" header")
}
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
forkVer, err := schedule.VersionForName(ver[0])
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
}
unmarshaler, err := detect.FromForkVersion(forkVer)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
}
block, err := unmarshaler.UnmarshalBlindedBeaconBlock(req.Data)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
}
root, err := block.Block().HashTreeRoot()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
}
return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
}
// GetBlock retrieves block details for given block ID.
func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlock")
@@ -371,7 +457,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -393,7 +479,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
}
// GetBlockSSZV2 returns the SSZ-serialized version of the beacon block for given block ID.
func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.BlockSSZResponseV2, error) {
func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockSSZV2")
defer span.End()
@@ -413,7 +499,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
}
return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_PHASE0, Data: sszBlock}, nil
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_PHASE0, Data: sszBlock}, nil
}
// ErrUnsupportedPhase0Block means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedPhase0Block) {
@@ -437,7 +523,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
}
return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_ALTAIR, Data: sszData}, nil
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_ALTAIR, Data: sszData}, nil
}
// ErrUnsupportedAltairBlock means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedAltairBlock) {
@@ -461,7 +547,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
}
return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_BELLATRIX, Data: sszData}, nil
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_BELLATRIX, Data: sszData}, nil
}
// ErrUnsupportedBellatrixBlock means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedBellatrixBlock) {
@@ -543,7 +629,7 @@ func (bs *Server) GetBlockRoot(ctx context.Context, req *ethpbv1.BlockRequest) (
}
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -616,7 +702,7 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
@@ -21,6 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"google.golang.org/grpc/metadata"
)
func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (*ethpbalpha.SignedBeaconBlock, []*ethpbalpha.BeaconBlockContainer) {
@@ -187,9 +189,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
tests := []struct {
@@ -287,9 +290,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
header, err := bs.GetBlockHeader(ctx, &ethpbv1.BlockRequest{BlockId: []byte("head")})
require.NoError(t, err)
@@ -312,9 +316,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
b2 := util.NewBeaconBlock()
@@ -416,9 +421,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
slot := types.Slot(30)
headers, err := bs.ListBlockHeaders(ctx, &ethpbv1.BlockHeadersRequest{
@@ -557,6 +563,289 @@ func TestServer_SubmitBlock_OK(t *testing.T) {
})
}
func TestServer_SubmitBlockSSZ_OK(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "phase0")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
req.Block.ParentRoot = bsRoot[:]
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "altair")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Bellatrix", func(t *testing.T) {
// INFO: This code block can be removed once Bellatrix
// fork epoch is set to a value other than math.MaxUint64
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
params.OverrideBeaconConfig(cfg)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlockBellatrix()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
req.Block.ParentRoot = bsRoot[:]
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
}
func TestServer_SubmitBlindedBlockSSZ_OK(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "phase0")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
req.Block.ParentRoot = bsRoot[:]
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "altair")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Bellatrix", func(t *testing.T) {
// INFO: This code block can be removed once Bellatrix
// fork epoch is set to a value other than math.MaxUint64
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
params.OverrideBeaconConfig(cfg)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBlindedBeaconBlockBellatrix()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
req.Block.ParentRoot = bsRoot[:]
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
}
func TestSubmitBlindedBlock(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
@@ -836,9 +1125,10 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
@@ -955,9 +1245,10 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
@@ -1074,9 +1365,10 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
@@ -1194,9 +1486,10 @@ func TestServer_GetBlockV2(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
blk, err := bs.GetBlockV2(ctx, &ethpbv2.BlockRequestV2{
@@ -1395,9 +1688,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
root, err := genBlk.Block.HashTreeRoot()
@@ -1485,9 +1779,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),
@@ -1513,9 +1808,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
@@ -1615,9 +1911,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
@@ -1717,9 +2014,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
@@ -1820,9 +2118,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
resp, err := bs.ListBlockAttestations(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),

View File

@@ -34,7 +34,9 @@ type Server struct {
StateGenService stategen.StateManager
StateFetcher statefetcher.Fetcher
HeadFetcher blockchain.HeadFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
V1Alpha1ValidatorServer *v1alpha1validator.Server
SyncChecker sync.Checker
CanonicalHistory *stategen.CanonicalHistory
HeadUpdater blockchain.HeadUpdater
}

View File

@@ -75,7 +75,7 @@ func (bs *Server) GetStateRoot(ctx context.Context, req *ethpb.StateRequest) (*e
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -103,7 +103,7 @@ func (bs *Server) GetStateFork(ctx context.Context, req *ethpb.StateRequest) (*e
return nil, helpers.PrepareStateFetchGRPCError(err)
}
fork := st.Fork()
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -133,7 +133,7 @@ func (bs *Server) GetFinalityCheckpoints(ctx context.Context, req *ethpb.StateRe
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

View File

@@ -80,13 +80,15 @@ func TestGetStateRoot(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &chainMock.ChainService{}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconStateRoot: stateRoot[:],
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
@@ -107,13 +109,15 @@ func TestGetStateRoot(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconStateRoot: stateRoot[:],
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
@@ -138,12 +142,14 @@ func TestGetStateFork(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &chainMock.ChainService{}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateFork(ctx, &eth.StateRequest{
@@ -167,12 +173,14 @@ func TestGetStateFork(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateFork(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
@@ -204,12 +212,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &chainMock.ChainService{}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetFinalityCheckpoints(ctx, &eth.StateRequest{
@@ -235,12 +245,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetFinalityCheckpoints(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),

View File

@@ -91,7 +91,7 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
return nil, status.Errorf(codes.Internal, "Could not extract sync subcommittees: %v", err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

View File

@@ -162,6 +162,7 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &mock.ChainService{}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -169,8 +170,9 @@ func TestListSyncCommittees(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &mock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
req := &ethpbv2.StateSyncCommitteesRequest{StateId: stRoot[:]}
resp, err := s.ListSyncCommittees(ctx, req)
@@ -205,6 +207,7 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &mock.ChainService{Optimistic: true}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -212,8 +215,9 @@ func TestListSyncCommittees(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &mock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListSyncCommittees(ctx, req)
require.NoError(t, err)
@@ -261,6 +265,7 @@ func TestListSyncCommitteesFuture(t *testing.T) {
}))
db := dbTest.SetupDB(t)
chainService := &mock.ChainService{}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -268,8 +273,9 @@ func TestListSyncCommitteesFuture(t *testing.T) {
StateFetcher: &futureSyncMockFetcher{
BeaconState: st,
},
HeadFetcher: &mock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
req := &ethpbv2.StateSyncCommitteesRequest{}
epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod

View File

@@ -57,7 +57,7 @@ func (bs *Server) GetValidator(ctx context.Context, req *ethpb.StateValidatorReq
return nil, status.Error(codes.NotFound, "Could not find validator")
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -80,7 +80,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
return nil, handleValContainerErr(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -143,7 +143,7 @@ func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.Validato
}
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -202,7 +202,7 @@ func (bs *Server) ListCommittees(ctx context.Context, req *ethpb.StateCommittees
}
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

View File

@@ -33,12 +33,14 @@ func TestGetValidator(t *testing.T) {
st, _ = util.DeterministicGenesisState(t, 8192)
t.Run("Head Get Validator by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.GetValidator(ctx, &ethpb.StateValidatorRequest{
@@ -50,12 +52,14 @@ func TestGetValidator(t *testing.T) {
})
t.Run("Head Get Validator by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
pubKey := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -93,12 +97,14 @@ func TestGetValidator(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.GetValidator(ctx, &ethpb.StateValidatorRequest{
StateId: []byte("head"),
@@ -117,12 +123,14 @@ func TestListValidators(t *testing.T) {
st, _ = util.DeterministicGenesisState(t, 8192)
t.Run("Head List All Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -136,12 +144,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Head List Validators by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -158,12 +168,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Head List Validators by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 66, 90, 100}
pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -184,12 +196,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Head List Validators by both index and pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 90, 170, 129}
@@ -212,12 +226,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Unknown public key is ignored", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
existingKey := st.PubkeyAtIndex(types.ValidatorIndex(1))
@@ -232,12 +248,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Unknown index is ignored", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("1"), []byte("99999")}
@@ -261,12 +279,14 @@ func TestListValidators(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
StateId: []byte("head"),
@@ -349,12 +369,14 @@ func TestListValidators_Status(t *testing.T) {
}
t.Run("Head List All ACTIVE Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -384,12 +406,14 @@ func TestListValidators_Status(t *testing.T) {
})
t.Run("Head List All ACTIVE_ONGOING Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -418,12 +442,14 @@ func TestListValidators_Status(t *testing.T) {
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*35))
t.Run("Head List All EXITED Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -451,12 +477,14 @@ func TestListValidators_Status(t *testing.T) {
})
t.Run("Head List All PENDING_INITIALIZED and EXITED_UNSLASHED Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -484,12 +512,14 @@ func TestListValidators_Status(t *testing.T) {
})
t.Run("Head List All PENDING and EXITED Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -532,12 +562,14 @@ func TestListValidatorBalances(t *testing.T) {
require.NoError(t, st.SetBalances(balances))
t.Run("Head List Validators Balance by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -554,12 +586,14 @@ func TestListValidatorBalances(t *testing.T) {
})
t.Run("Head List Validators Balance by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 66, 90, 100}
pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -579,12 +613,14 @@ func TestListValidatorBalances(t *testing.T) {
})
t.Run("Head List Validators Balance by both index and pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 90, 170, 129}
@@ -613,12 +649,14 @@ func TestListValidatorBalances(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -640,12 +678,14 @@ func TestListCommittees(t *testing.T) {
epoch := slots.ToEpoch(st.Slot())
t.Run("Head All Committees", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
@@ -660,12 +700,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Epoch 10", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
epoch := types.Epoch(10)
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
@@ -679,12 +721,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Slot 4", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
slot := types.Slot(4)
@@ -704,12 +748,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Index 1", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
index := types.CommitteeIndex(1)
@@ -729,12 +775,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Slot 2, Index 1", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
- HeadFetcher: &chainMock.ChainService{},
- BeaconDB: db,
+ HeadFetcher: chainService,
+ OptimisticModeFetcher: chainService,
+ BeaconDB: db,
}
index := types.CommitteeIndex(1)
@@ -764,12 +812,14 @@ func TestListCommittees(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
- HeadFetcher: &chainMock.ChainService{Optimistic: true},
- BeaconDB: db,
+ HeadFetcher: chainService,
+ OptimisticModeFetcher: chainService,
+ BeaconDB: db,
}
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{


@@ -39,7 +39,7 @@ func (ds *Server) GetBeaconState(ctx context.Context, req *ethpbv1.StateRequest)
}
// GetBeaconStateSSZ returns the SSZ-serialized version of the full beacon state object for given state ID.
- func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv1.BeaconStateSSZResponse, error) {
+ func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZ")
defer span.End()
@@ -53,7 +53,7 @@ func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateReque
return nil, status.Errorf(codes.Internal, "Could not marshal state into SSZ: %v", err)
}
- return &ethpbv1.BeaconStateSSZResponse{Data: sszState}, nil
+ return &ethpbv2.SSZContainer{Data: sszState}, nil
}
// GetBeaconStateV2 returns the full beacon state for a given state ID.
@@ -65,7 +65,7 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
- isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.HeadFetcher)
+ isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -121,21 +121,32 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
}
// GetBeaconStateSSZV2 returns the SSZ-serialized version of the full beacon state object for given state ID.
- func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.StateRequestV2) (*ethpbv2.BeaconStateSSZResponseV2, error) {
+ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.StateRequestV2) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZV2")
defer span.End()
- state, err := ds.StateFetcher.State(ctx, req.StateId)
+ st, err := ds.StateFetcher.State(ctx, req.StateId)
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
- sszState, err := state.MarshalSSZ()
+ sszState, err := st.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal state into SSZ: %v", err)
}
var ver ethpbv2.Version
switch st.Version() {
case version.Phase0:
ver = ethpbv2.Version_PHASE0
case version.Altair:
ver = ethpbv2.Version_ALTAIR
case version.Bellatrix:
ver = ethpbv2.Version_BELLATRIX
default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}
- return &ethpbv2.BeaconStateSSZResponseV2{Data: sszState}, nil
+ return &ethpbv2.SSZContainer{Data: sszState, Version: ver}, nil
}
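The version switch above maps the internal state version constants onto the ethpbv2.Version enum carried by SSZContainer. If more forks adopt this endpoint, the mapping could live in one hypothetical helper rather than being repeated per handler; a sketch, not code from this changeset:

// Hypothetical helper; the version.* constants and ethpbv2.Version_* values
// are exactly the ones used in the switch above.
func sszVersion(v int) (ethpbv2.Version, error) {
	switch v {
	case version.Phase0:
		return ethpbv2.Version_PHASE0, nil
	case version.Altair:
		return ethpbv2.Version_ALTAIR, nil
	case version.Bellatrix:
		return ethpbv2.Version_BELLATRIX, nil
	default:
		return 0, errors.New("unsupported state version")
	}
}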
// ListForkChoiceHeads retrieves the leaves of the current fork choice tree.
@@ -167,7 +178,7 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
Data: make([]*ethpbv2.ForkChoiceHead, len(headRoots)),
}
for i := range headRoots {
- isOptimistic, err := ds.HeadFetcher.IsOptimisticForRoot(ctx, headRoots[i])
+ isOptimistic, err := ds.OptimisticModeFetcher.IsOptimisticForRoot(ctx, headRoots[i])
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if head is optimistic: %v", err)
}


@@ -44,8 +44,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
+ OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -60,8 +61,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
+ OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -76,8 +78,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
+ OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -102,8 +105,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
- HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
+ HeadFetcher: &blockchainmock.ChainService{},
+ OptimisticModeFetcher: &blockchainmock.ChainService{Optimistic: true},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -153,6 +157,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
})
t.Run("Altair", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateAltair(t, 1)
@@ -171,6 +176,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
})
t.Run("Bellatrix", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateBellatrix(t, 1)
@@ -189,6 +195,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
})
}
@@ -238,8 +245,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
}}
chainService := &blockchainmock.ChainService{}
server := &Server{
- HeadFetcher: &blockchainmock.ChainService{},
+ HeadFetcher: chainService,
+ OptimisticModeFetcher: chainService,
}
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
require.NoError(t, err)
@@ -257,8 +266,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
}
t.Run("optimistic head", func(t *testing.T) {
chainService := &blockchainmock.ChainService{Optimistic: true}
server := &Server{
- HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
+ HeadFetcher: chainService,
+ OptimisticModeFetcher: chainService,
}
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
require.NoError(t, err)


@@ -12,7 +12,8 @@ import (
// Server defines a server implementation of the gRPC Beacon Chain service,
// providing RPC endpoints to access data relevant to the Ethereum Beacon Chain.
type Server struct {
BeaconDB db.ReadOnlyDatabase
HeadFetcher blockchain.HeadFetcher
StateFetcher statefetcher.Fetcher
+ OptimisticModeFetcher blockchain.OptimisticModeFetcher
}


@@ -39,7 +39,7 @@ func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blo
}
// IsOptimistic checks whether the latest block header of the passed in beacon state is the header of an optimistic block.
- func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockchain.HeadFetcher) (bool, error) {
+ func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticSyncFetcher blockchain.OptimisticModeFetcher) (bool, error) {
root, err := st.HashTreeRoot(ctx)
if err != nil {
return false, errors.Wrap(err, "could not get state root")
@@ -50,7 +50,7 @@ func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockch
if err != nil {
return false, errors.Wrap(err, "could not get header root")
}
- isOptimistic, err := headFetcher.IsOptimisticForRoot(ctx, headRoot)
+ isOptimistic, err := optimisticSyncFetcher.IsOptimisticForRoot(ctx, headRoot)
if err != nil {
return false, errors.Wrap(err, "could not check if block is optimistic")
}
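After the rename, IsOptimistic depends only on the optimistic-status methods rather than the whole head-tracking surface. A typical call site, mirroring the handlers updated elsewhere in this changeset (the response field name is illustrative, not from this diff):

// Any handler holding an OptimisticModeFetcher can gate on optimistic status.
isOptimistic, err := helpers.IsOptimistic(ctx, st, s.OptimisticModeFetcher)
if err != nil {
	return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
resp.ExecutionOptimistic = isOptimistic // illustrative field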


@@ -58,14 +58,14 @@ func TestIsOptimistic(t *testing.T) {
require.NoError(t, err)
t.Run("optimistic", func(t *testing.T) {
- mockHeadFetcher := &chainmock.ChainService{Optimistic: true}
- o, err := IsOptimistic(ctx, st, mockHeadFetcher)
+ mockOptSyncFetcher := &chainmock.ChainService{Optimistic: true}
+ o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("not optimistic", func(t *testing.T) {
- mockHeadFetcher := &chainmock.ChainService{Optimistic: false}
- o, err := IsOptimistic(ctx, st, mockHeadFetcher)
+ mockOptSyncFetcher := &chainmock.ChainService{Optimistic: false}
+ o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.Equal(t, false, o)
})


@@ -13,13 +13,15 @@ import (
// Server defines a server implementation of the gRPC Validator service,
// providing RPC endpoints intended for validator clients.
type Server struct {
HeadFetcher blockchain.HeadFetcher
+ HeadUpdater blockchain.HeadUpdater
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
Broadcaster p2p.Broadcaster
StateFetcher statefetcher.Fetcher
+ OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncCommitteePool synccommittee.Pool
V1Alpha1Server *v1alpha1validator.Server
}
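Judging from the call sites in this changeset (IsOptimistic in optimisticStatus, IsOptimisticForRoot in the fork choice and helper code), the narrowed interface presumably looks roughly like the sketch below; the real definition lives in the blockchain package:

// Sketch inferred from usage in this diff, not the authoritative definition.
type OptimisticModeFetcher interface {
	IsOptimistic(ctx context.Context) (bool, error)
	IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

Keeping it separate from HeadFetcher means each server declares only the capability it actually uses.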


@@ -58,7 +58,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
- isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
+ isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -142,7 +142,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
- isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
+ isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -258,7 +258,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
return nil, status.Errorf(codes.Internal, "Could not get duties: %v", err)
}
- isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.HeadFetcher)
+ isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -348,6 +348,76 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}
// ProduceBlockV2SSZ requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
//
// The produced block is in SSZ form.
func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_PHASE0,
Data: sszBlock,
}, nil
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_ALTAIR,
Data: sszBlock,
}, nil
}
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
if ok {
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Bellatrix)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_BELLATRIX,
Data: sszBlock,
}, nil
}
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}
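The three branches above differ only in the concrete block type, the migration function, and the version tag. A hypothetical refactor (not part of this diff) could collapse the marshal-and-wrap tail that each branch repeats:

// Anything with a fastssz-style MarshalSSZ fits; the interface is local.
type sszMarshaler interface {
	MarshalSSZ() ([]byte, error)
}

func sszContainer(ver ethpbv2.Version, m sszMarshaler) (*ethpbv2.SSZContainer, error) {
	data, err := m.MarshalSSZ()
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
	}
	return &ethpbv2.SSZContainer{Version: ver, Data: data}, nil
}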
// ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
@@ -413,6 +483,79 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}
// ProduceBlindedBlockSSZ requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
// The produced block is in SSZ form.
//
// Pre-Bellatrix, this endpoint will return a regular block.
func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlockSSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_PHASE0,
Data: sszBlock,
}, nil
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_ALTAIR,
Data: sszBlock,
}, nil
}
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
if ok {
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2Blinded(bellatrixBlock.Bellatrix)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_BELLATRIX,
Data: sszBlock,
}, nil
}
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}
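On the consuming side, a client would branch on Version before decoding, since the payload bytes alone do not identify the fork. A rough sketch under the assumption that the generated block types expose fastssz UnmarshalSSZ; the type names are assumptions based on the migration targets above (the Bellatrix case uses the blinded type, matching the endpoint's comment):

// Hedged client-side sketch; type names and UnmarshalSSZ are assumed.
type sszUnmarshaler interface {
	UnmarshalSSZ([]byte) error
}

func decodeBlock(c *ethpbv2.SSZContainer) (sszUnmarshaler, error) {
	var blk sszUnmarshaler
	switch c.Version {
	case ethpbv2.Version_PHASE0:
		blk = &ethpbv1.BeaconBlock{}
	case ethpbv2.Version_ALTAIR:
		blk = &ethpbv2.BeaconBlockAltair{}
	case ethpbv2.Version_BELLATRIX:
		blk = &ethpbv2.BlindedBeaconBlockBellatrix{}
	default:
		return nil, fmt.Errorf("unknown version %v", c.Version)
	}
	return blk, blk.UnmarshalSSZ(c.Data)
}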
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
func (vs *Server) PrepareBeaconProposer(
ctx context.Context, request *ethpbv1.PrepareBeaconProposerRequest,

File diff suppressed because it is too large.


@@ -46,4 +46,5 @@ type Server struct {
StateGen stategen.StateManager
SyncChecker sync.Checker
ReplayerBuilder stategen.ReplayerBuilder
HeadUpdater blockchain.HeadUpdater
}


@@ -499,7 +499,7 @@ func (bs *Server) GetValidatorParticipation(
}
// Use the last slot of requested epoch to obtain current and previous epoch attestations.
// This ensures that we don't miss previous attestations when input requested epochs.
- startSlot, err := slots.EpochEnd(requestedEpoch)
+ endSlot, err := slots.EpochEnd(requestedEpoch)
if err != nil {
return nil, err
}
@@ -507,14 +507,14 @@ func (bs *Server) GetValidatorParticipation(
// The above check ensures a future *epoch* isn't requested, but the end slot of the requested epoch could still
// be past the current slot. In that case, use the current slot as the best approximation of the requested epoch.
// Replayer will make sure the slot ultimately used is canonical.
- if startSlot > currentSlot {
- startSlot = currentSlot
+ if endSlot > currentSlot {
+ endSlot = currentSlot
}
// ReplayerBuilder ensures that a canonical chain is followed to the slot
- beaconState, err := bs.ReplayerBuilder.ReplayerForSlot(startSlot).ReplayBlocks(ctx)
+ beaconState, err := bs.ReplayerBuilder.ReplayerForSlot(endSlot).ReplayBlocks(ctx)
if err != nil {
- return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", startSlot, err))
+ return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", endSlot, err))
}
var v []*precompute.Validator
var b *precompute.Balance
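The rename from startSlot to endSlot reflects what slots.EpochEnd actually returns: the last slot of the requested epoch, which is then clamped so an in-progress epoch resolves to the current slot. Worked through with 32 slots per epoch: epoch 5 ends at slot 5*32 + 31 = 191, so if the chain is only at slot 180, the replayer targets slot 180 instead:

// Clamping sketch, assuming SlotsPerEpoch = 32.
endSlot, err := slots.EpochEnd(requestedEpoch) // epoch 5 -> slot 191
if err != nil {
	return nil, err
}
if endSlot > currentSlot { // e.g. currentSlot = 180
	endSlot = currentSlot // replay to 180, the best available approximation
}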


@@ -178,9 +178,10 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
params.OverrideBeaconConfig(cfg)
as := &Server{
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
- HeadFetcher: &mock.ChainService{Optimistic: true},
+ HeadFetcher: &mock.ChainService{},
+ OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
}
_, err := as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
s, ok := status.FromError(err)
@@ -191,10 +192,11 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
as = &Server{
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
+ OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
AttestationCache: cache.NewAttestationCache(),
}
_, err = as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
require.NoError(t, err)


@@ -197,10 +197,16 @@ func (vs *Server) rebuildDepositTrie(ctx context.Context, canonicalEth1Data *eth
// validate that the provided deposit trie matches up with the canonical eth1 data provided.
func validateDepositTrie(trie *trie.SparseMerkleTrie, canonicalEth1Data *ethpb.Eth1Data) (bool, error) {
if trie == nil || canonicalEth1Data == nil {
return false, errors.New("nil trie or eth1data provided")
}
if trie.NumOfItems() != int(canonicalEth1Data.DepositCount) {
return false, errors.Errorf("wanted the canonical count of %d but received %d", canonicalEth1Data.DepositCount, trie.NumOfItems())
}
- rt := trie.HashTreeRoot()
+ rt, err := trie.HashTreeRoot()
+ if err != nil {
+ return false, err
+ }
if !bytes.Equal(rt[:], canonicalEth1Data.DepositRoot) {
return false, errors.Errorf("wanted the canonical deposit root of %#x but received %#x", canonicalEth1Data.DepositRoot, rt)
}
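HashTreeRoot on the sparse merkle trie now returns an error (a follow-on from the trie fuzzing change listed in the commits above), so every call site migrates from a bare assignment to a two-step check:

// Before: rt := trie.HashTreeRoot()
rt, err := trie.HashTreeRoot()
if err != nil {
	return false, err // propagate rather than compare a bogus root
}
// rt is now safe to compare against canonicalEth1Data.DepositRoot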


@@ -120,18 +120,20 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
burnAddr := bytesutil.PadTo([]byte{}, fieldparams.FeeRecipientLength)
switch err == nil {
case true:
feeRecipient = recipient
case errors.As(err, kv.ErrNotFoundFeeRecipient):
// If fee recipient is not found in DB and not set from beacon node CLI,
// use the burn address.
- if bytes.Equal(feeRecipient.Bytes(), burnAddr) {
+ if feeRecipient.String() == fieldparams.EthBurnAddressHex {
logrus.WithFields(logrus.Fields{
"validatorIndex": vIdx,
- "burnAddress": burnAddr,
- }).Error("Fee recipient not set. Using burn address")
+ "burnAddress": fieldparams.EthBurnAddressHex,
+ }).Warn("Fee recipient is currently using the burn address, " +
+ "you will not be rewarded transaction fees on this setting. " +
+ "Please set a different eth address as the fee recipient. " +
+ "Please refer to our documentation for instructions")
}
default:
return nil, errors.Wrap(err, "could not get fee recipient in db")


@@ -82,6 +82,10 @@ func (vs *Server) buildPhase0BlockData(ctx context.Context, req *ethpb.BlockRequ
return nil, fmt.Errorf("syncing to latest head, not ready to respond")
}
if err := vs.HeadUpdater.UpdateHead(ctx); err != nil {
log.WithError(err).Error("Could not process attestations and update head")
}
// Retrieve the parent block as the current head of the canonical chain.
parentRoot, err := vs.HeadFetcher.HeadRoot(ctx)
if err != nil {


@@ -72,6 +72,7 @@ func TestProposer_GetBlock_OK(t *testing.T) {
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -158,6 +159,7 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
HeadUpdater: &mock.ChainService{},
MockEth1Votes: true,
SlashingsPool: slashings.NewPool(),
AttPool: attestations.NewPool(),
@@ -503,10 +505,14 @@ func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
- assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
}
for _, dp := range recentDeposits {
- depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
}
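The compute-root-then-insert triple now recurs in every loop of these proposer tests. A hypothetical test helper (mustRoot is not in this diff) would cut the repetition while keeping failures attributed to the caller:

// Hypothetical helper; t.Helper() keeps assertion failures pointing at the call site.
func mustRoot(t *testing.T, tr *trie.SparseMerkleTrie) [32]byte {
	t.Helper()
	root, err := tr.HashTreeRoot()
	require.NoError(t, err)
	return root
}

// usage: depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, mustRoot(t, depositTrie))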
blk := util.NewBeaconBlock()
@@ -638,10 +644,14 @@ func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
- assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
}
for _, dp := range recentDeposits {
- depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
}
bs := &Server{
@@ -737,10 +747,14 @@ func TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testin
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
- assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root))
}
for _, dp := range recentDeposits {
- depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot())
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root)
}
bs := &Server{
@@ -833,10 +847,14 @@ func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
- assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, root))
}
for _, dp := range recentDeposits {
- depositCache.InsertPendingDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, depositTrie.HashTreeRoot())
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ depositCache.InsertPendingDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, root)
}
bs := &Server{
@@ -927,10 +945,14 @@ func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
- assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root))
}
for _, dp := range recentDeposits {
- depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot())
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root)
}
bs := &Server{
@@ -1036,10 +1058,14 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
- assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
}
for _, dp := range recentDeposits {
- depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
}
bs := &Server{
@@ -1055,8 +1081,10 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
trie, err := bs.depositTrie(ctx, &ethpb.Eth1Data{}, big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)))
require.NoError(t, err)
- actualRoot := trie.HashTreeRoot()
- expectedRoot := depositTrie.HashTreeRoot()
+ actualRoot, err := trie.HashTreeRoot()
+ require.NoError(t, err)
+ expectedRoot, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
assert.Equal(t, expectedRoot, actualRoot, "Incorrect deposit trie root")
}
@@ -1146,10 +1174,14 @@ func TestProposer_DepositTrie_RebuildTrie(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
- assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
}
for _, dp := range recentDeposits {
- depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
}
d := depositCache.AllDepositContainers(ctx)
origDeposit, ok := proto.Clone(d[0].Deposit).(*ethpb.Deposit)
@@ -1177,8 +1209,10 @@ func TestProposer_DepositTrie_RebuildTrie(t *testing.T) {
trie, err := bs.depositTrie(ctx, &ethpb.Eth1Data{}, big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)))
require.NoError(t, err)
- expectedRoot := depositTrie.HashTreeRoot()
- actualRoot := trie.HashTreeRoot()
+ expectedRoot, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ actualRoot, err := trie.HashTreeRoot()
+ require.NoError(t, err)
assert.Equal(t, expectedRoot, actualRoot, "Incorrect deposit trie root")
}
@@ -1230,7 +1264,8 @@ func TestProposer_ValidateDepositTrie(t *testing.T) {
assert.NoError(t, trie.Insert([]byte{'a'}, 0))
assert.NoError(t, trie.Insert([]byte{'b'}, 1))
assert.NoError(t, trie.Insert([]byte{'c'}, 2))
- rt := trie.HashTreeRoot()
+ rt, err := trie.HashTreeRoot()
+ require.NoError(t, err)
return &ethpb.Eth1Data{DepositRoot: rt[:], DepositCount: 3, BlockHash: []byte{}}
},
trieCreator: func() *trie.SparseMerkleTrie {
@@ -1274,7 +1309,9 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
require.NoError(t, err)
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(context.Background(), dc.Deposit, dc.Eth1BlockHeight, dc.Index, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(context.Background(), dc.Deposit, dc.Eth1BlockHeight, dc.Index, root))
t.Run("choose highest count", func(t *testing.T) {
t.Skip()
@@ -1976,10 +2013,14 @@ func TestProposer_Deposits_ReturnsEmptyList_IfLatestEth1DataEqGenesisEth1Block(t
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
- assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root))
}
for _, dp := range recentDeposits {
- depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot())
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root)
}
bs := &Server{
@@ -2066,6 +2107,7 @@ func TestProposer_GetBeaconBlock_PreForkEpoch(t *testing.T) {
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -2178,6 +2220,7 @@ func TestProposer_GetBeaconBlock_PostForkEpoch(t *testing.T) {
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -2331,6 +2374,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: c,
@@ -2369,7 +2413,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
assert.DeepEqual(t, randaoReveal, bellatrixBlk.Bellatrix.Body.RandaoReveal, "Expected block to have correct randao reveal")
assert.DeepEqual(t, req.Graffiti, bellatrixBlk.Bellatrix.Body.Graffiti, "Expected block to have correct Graffiti")
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
require.DeepEqual(t, payload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should equal.
// Operator sets default fee recipient to not be burned through beacon node cli.
@@ -2380,7 +2424,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
params.OverrideBeaconConfig(cfg)
_, err = proposerServer.GetBeaconBlock(ctx, req)
require.NoError(t, err)
- require.LogsDoNotContain(t, newHook, "Fee recipient not set. Using burn address")
+ require.LogsDoNotContain(t, newHook, "Fee recipient is currently using the burn address")
}
func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
@@ -2393,7 +2437,7 @@ func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
- proposerServer := &Server{HeadFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
+ proposerServer := &Server{OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
req := &ethpb.BlockRequest{
Slot: bellatrixSlot + 1,
}


@@ -43,6 +43,7 @@ type Server struct {
AttestationCache *cache.AttestationCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
HeadFetcher blockchain.HeadFetcher
HeadUpdater blockchain.HeadUpdater
ForkFetcher blockchain.ForkFetcher
FinalizationFetcher blockchain.FinalizationFetcher
TimeFetcher blockchain.TimeFetcher
@@ -50,6 +51,7 @@ type Server struct {
DepositFetcher depositcache.DepositFetcher
ChainStartFetcher powchain.ChainStartFetcher
Eth1InfoFetcher powchain.ChainInfoFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncChecker sync.Checker
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier


@@ -137,7 +137,9 @@ func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 10 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 10 /*blockNum*/, 0, root))
trie, err := v1.InitializeFromProtoUnsafe(beaconState)
require.NoError(t, err)
vs := &Server{


@@ -255,7 +255,7 @@ func (vs *Server) optimisticStatus(ctx context.Context) error {
if slots.ToEpoch(vs.TimeFetcher.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return nil
}
- optimistic, err := vs.HeadFetcher.IsOptimistic(ctx)
+ optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return status.Errorf(codes.Internal, "Could not determine if the node is a optimistic node: %v", err)
}
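optimisticStatus is a no-op before the Bellatrix fork epoch and otherwise defers to the new fetcher, so handlers can gate with a single call; the tests elsewhere in this changeset confirm the error surfaces as codes.Unavailable. A caller sketch:

// Refuse to serve duties or block production while the node is optimistic.
if err := vs.optimisticStatus(ctx); err != nil {
	return nil, err // already a gRPC status error (codes.Unavailable)
}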


@@ -41,7 +41,9 @@ func TestValidatorStatus_DepositedEth1(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -84,7 +86,9 @@ func TestValidatorStatus_Deposited(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -134,7 +138,9 @@ func TestValidatorStatus_PartiallyDeposited(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -202,7 +208,9 @@ func TestValidatorStatus_Pending(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
@@ -247,7 +255,9 @@ func TestValidatorStatus_Active(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
// Active because activation epoch <= current epoch < exit epoch.
activeEpoch := helpers.ActivationExitEpoch(0)
@@ -334,7 +344,9 @@ func TestValidatorStatus_Exiting(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -391,7 +403,9 @@ func TestValidatorStatus_Slashing(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -449,7 +463,9 @@ func TestValidatorStatus_Exited(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -532,11 +548,15 @@ func TestActivationStatus_OK(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, root))
dep = deposits[2]
assert.NoError(t, depositTrie.Insert(dep.Data.Signature, 15))
- assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, depositTrie.HashTreeRoot()))
+ root, err = depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, root))
vs := &Server{
Ctx: context.Background(),
@@ -583,7 +603,7 @@ func TestActivationStatus_OK(t *testing.T) {
}
func TestOptimisticStatus(t *testing.T) {
- server := &Server{HeadFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}}
+ server := &Server{OptimisticModeFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}}
err := server.optimisticStatus(context.Background())
require.NoError(t, err)
@@ -592,14 +612,14 @@ func TestOptimisticStatus(t *testing.T) {
cfg.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(cfg)
- server = &Server{HeadFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}}
+ server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}}
err = server.optimisticStatus(context.Background())
s, ok := status.FromError(err)
require.Equal(t, true, ok)
require.DeepEqual(t, codes.Unavailable, s.Code())
require.ErrorContains(t, errOptimisticMode.Error(), err)
- server = &Server{HeadFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}}
+ server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}}
err = server.optimisticStatus(context.Background())
require.NoError(t, err)
}
@@ -677,7 +697,9 @@ func TestValidatorStatus_CorrectActivationQueue(t *testing.T) {
deposit := &ethpb.Deposit{
Data: depData,
}
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, int64(i), depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, int64(i), root))
}
@@ -758,10 +780,14 @@ func TestMultipleValidatorStatus_Pubkeys(t *testing.T) {
require.NoError(t, err)
dep := deposits[0]
- assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, root))
dep = deposits[2]
assert.NoError(t, depositTrie.Insert(dep.Data.Signature, 15))
- assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, depositTrie.HashTreeRoot()))
+ root, err = depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, root))
vs := &Server{
Ctx: context.Background(),
@@ -918,7 +944,9 @@ func TestValidatorStatus_Invalid(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
- assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+ root, err := depositTrie.HashTreeRoot()
+ require.NoError(t, err)
+ assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{


@@ -42,8 +42,9 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
params.OverrideBeaconConfig(cfg)
server := &Server{
- HeadFetcher: &mock.ChainService{Optimistic: true},
+ HeadFetcher: &mock.ChainService{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
+ OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
}
_, err := server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
s, ok := status.FromError(err)
@@ -52,8 +53,9 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
require.ErrorContains(t, errOptimisticMode.Error(), err)
server = &Server{
- HeadFetcher: &mock.ChainService{Optimistic: false},
+ HeadFetcher: &mock.ChainService{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
+ OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
}
_, err = server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
require.NoError(t, err)


@@ -78,6 +78,7 @@ type Config struct {
BeaconMonitoringPort int
BeaconDB db.HeadAccessDatabase
ChainInfoFetcher blockchain.ChainInfoFetcher
HeadUpdater blockchain.HeadUpdater
HeadFetcher blockchain.HeadFetcher
CanonicalFetcher blockchain.CanonicalFetcher
ForkFetcher blockchain.ForkFetcher
@@ -110,6 +111,7 @@ type Config struct {
MaxMsgSize int
ExecutionEngineCaller powchain.EngineCaller
ProposerIdsCache *cache.ProposerPayloadIDsCache
OptimisticModeFetcher blockchain.OptimisticModeFetcher
}
// NewService instantiates a new RPC service instance that will
@@ -189,6 +191,7 @@ func (s *Service) Start() {
AttPool: s.cfg.AttestationsPool,
ExitPool: s.cfg.ExitPool,
HeadFetcher: s.cfg.HeadFetcher,
HeadUpdater: s.cfg.HeadUpdater,
ForkFetcher: s.cfg.ForkFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
@@ -196,6 +199,7 @@ func (s *Service) Start() {
DepositFetcher: s.cfg.DepositFetcher,
ChainStartFetcher: s.cfg.ChainStartFetcher,
Eth1InfoFetcher: s.cfg.POWChainService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
SyncChecker: s.cfg.SyncService,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
@@ -215,6 +219,7 @@ func (s *Service) Start() {
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
HeadUpdater: s.cfg.HeadUpdater,
TimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
AttestationsPool: s.cfg.AttestationsPool,
@@ -228,7 +233,8 @@ func (s *Service) Start() {
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
},
- SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
+ OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
+ SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
}
nodeServer := &nodev1alpha1.Server{
@@ -261,6 +267,7 @@ func (s *Service) Start() {
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
HeadUpdater: s.cfg.HeadUpdater,
HeadFetcher: s.cfg.HeadFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
CanonicalFetcher: s.cfg.CanonicalFetcher,
@@ -297,6 +304,7 @@ func (s *Service) Start() {
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
},
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
VoluntaryExitsPool: s.cfg.ExitPool,
V1Alpha1ValidatorServer: validatorServer,
@@ -335,6 +343,7 @@ func (s *Service) Start() {
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
},
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}
ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
ethpbservice.RegisterBeaconDebugServer(s.grpcServer, debugServerV1)
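The new Config fields are filled in by whoever constructs this service, presumably the beacon node's registration code (not shown in this diff). A hedged sketch of that wiring, exploiting the fact that a single blockchain service value can satisfy every fetcher interface; the constructor call is an assumption:

// Hedged sketch; field names match Config above, NewService's signature is assumed.
rpcService := rpc.NewService(ctx, &rpc.Config{
	HeadFetcher:           chainService,
	HeadUpdater:           chainService,
	OptimisticModeFetcher: chainService,
	// ...remaining fields unchanged...
})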


@@ -10,6 +10,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/state-native/types:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//crypto/hash:go_default_library",


@@ -16,7 +16,7 @@ type FieldTrie struct {
*sync.RWMutex
reference *stateutil.Reference
fieldLayers [][]*[32]byte
- field types.FieldIndex
+ field types.BeaconStateField
dataType types.DataType
length uint64
numOfElems int
@@ -25,7 +25,7 @@ type FieldTrie struct {
// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
// trie according to the given parameters. Depending on whether the field is a basic/composite array
// which is either fixed/variable length, it will appropriately determine the trie.
- func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) (*FieldTrie, error) {
+ func NewFieldTrie(field types.BeaconStateField, dataType types.DataType, elements interface{}, length uint64) (*FieldTrie, error) {
if elements == nil {
return &FieldTrie{
field: field,
@@ -36,10 +36,19 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
numOfElems: 0,
}, nil
}
- fieldRoots, err := fieldConverters(field, []uint64{}, elements, true)
+ var fieldRoots [][32]byte
+ var err error
+ if field.Native() {
+ fieldRoots, err = fieldConvertersNative(field, []uint64{}, elements, true)
+ } else {
+ fieldRoots, err = fieldConverters(field, []uint64{}, elements, true)
+ }
if err != nil {
return nil, err
}
if err := validateElements(field, dataType, elements, length); err != nil {
return nil, err
}
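NewFieldTrie now dispatches on field.Native(), so the BeaconStateField abstraction has to expose at least the methods this file calls. An inferred shape, not the authoritative definition (which lives in the state types package):

// Sketch inferred from usage: Native() here, ElemsInChunk() in validateElements.
type BeaconStateField interface {
	Native() bool                  // true for state-native field indices
	ElemsInChunk() (uint64, error) // chunk sizing for CompressedArray fields
}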
@@ -84,10 +93,18 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
if len(indices) == 0 {
return f.TrieRoot()
}
- fieldRoots, err := fieldConverters(f.field, indices, elements, false)
+ var fieldRoots [][32]byte
+ var err error
+ if f.field.Native() {
+ fieldRoots, err = fieldConvertersNative(f.field, indices, elements, false)
+ } else {
+ fieldRoots, err = fieldConverters(f.field, indices, elements, false)
+ }
if err != nil {
return [32]byte{}, err
}
if err := f.validateIndices(indices); err != nil {
return [32]byte{}, err
}


@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
customtypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/custom-types"
nativetypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/crypto/hash"
@@ -18,7 +19,7 @@ import (
)
// ProofFromMerkleLayers creates a proof starting at the leaf index of the state Merkle layers.
- func ProofFromMerkleLayers(layers [][][]byte, startingLeafIndex types.FieldIndex) [][]byte {
+ func ProofFromMerkleLayers(layers [][][]byte, startingLeafIndex int) [][]byte {
// The merkle tree structure looks as follows:
// [[r1, r2, r3, r4], [parent1, parent2], [root]]
proof := make([][]byte, 0)
@@ -49,7 +50,7 @@ func (f *FieldTrie) validateIndices(idxs []uint64) error {
return nil
}
- func validateElements(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) error {
+ func validateElements(field types.BeaconStateField, dataType types.DataType, elements interface{}, length uint64) error {
if dataType == types.CompressedArray {
comLength, err := field.ElemsInChunk()
if err != nil {
@@ -65,68 +66,118 @@ func validateElements(field types.FieldIndex, dataType types.DataType, elements
}
// fieldConverters converts the corresponding field and the provided elements to the appropriate roots.
- func fieldConverters(field types.FieldIndex, indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
+ func fieldConverters(field types.BeaconStateField, indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
switch field {
case types.BlockRoots:
- switch val := elements.(type) {
- case [][]byte:
- return handleByteArrays(val, indices, convertAll)
- case *customtypes.BlockRoots:
- return handle32ByteArrays(val[:], indices, convertAll)
- default:
- return nil, errors.Errorf("Incorrect type used for block roots")
- }
+ return convertBlockRoots(indices, elements, convertAll)
case types.StateRoots:
- switch val := elements.(type) {
- case [][]byte:
- return handleByteArrays(val, indices, convertAll)
- case *customtypes.StateRoots:
- return handle32ByteArrays(val[:], indices, convertAll)
- default:
- return nil, errors.Errorf("Incorrect type used for state roots")
- }
+ return convertStateRoots(indices, elements, convertAll)
case types.RandaoMixes:
- switch val := elements.(type) {
- case [][]byte:
- return handleByteArrays(val, indices, convertAll)
- case *customtypes.RandaoMixes:
- return handle32ByteArrays(val[:], indices, convertAll)
- default:
- return nil, errors.Errorf("Incorrect type used for randao mixes")
- }
+ return convertRandaoMixes(indices, elements, convertAll)
case types.Eth1DataVotes:
- val, ok := elements.([]*ethpb.Eth1Data)
- if !ok {
- return nil, errors.Errorf("Wanted type of %v but got %v",
- reflect.TypeOf([]*ethpb.Eth1Data{}).Name(), reflect.TypeOf(elements).Name())
- }
- return handleEth1DataSlice(val, indices, convertAll)
+ return convertEth1DataVotes(indices, elements, convertAll)
case types.Validators:
- val, ok := elements.([]*ethpb.Validator)
- if !ok {
- return nil, errors.Errorf("Wanted type of %v but got %v",
- reflect.TypeOf([]*ethpb.Validator{}).Name(), reflect.TypeOf(elements).Name())
- }
- return handleValidatorSlice(val, indices, convertAll)
+ return convertValidators(indices, elements, convertAll)
case types.PreviousEpochAttestations, types.CurrentEpochAttestations:
- val, ok := elements.([]*ethpb.PendingAttestation)
- if !ok {
- return nil, errors.Errorf("Wanted type of %v but got %v",
- reflect.TypeOf([]*ethpb.PendingAttestation{}).Name(), reflect.TypeOf(elements).Name())
- }
- return handlePendingAttestationSlice(val, indices, convertAll)
+ return convertAttestations(indices, elements, convertAll)
case types.Balances:
- val, ok := elements.([]uint64)
- if !ok {
- return nil, errors.Errorf("Wanted type of %v but got %v",
- reflect.TypeOf([]uint64{}).Name(), reflect.TypeOf(elements).Name())
- }
- return handleBalanceSlice(val, indices, convertAll)
+ return convertBalances(indices, elements, convertAll)
default:
return [][32]byte{}, errors.Errorf("got unsupported type of %v", reflect.TypeOf(elements).Name())
}
}
// fieldConvertersNative converts the corresponding field and the provided elements to the appropriate roots.
func fieldConvertersNative(field types.BeaconStateField, indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
switch field {
case nativetypes.BlockRoots:
return convertBlockRoots(indices, elements, convertAll)
case nativetypes.StateRoots:
return convertStateRoots(indices, elements, convertAll)
case nativetypes.RandaoMixes:
return convertRandaoMixes(indices, elements, convertAll)
case nativetypes.Eth1DataVotes:
return convertEth1DataVotes(indices, elements, convertAll)
case nativetypes.Validators:
return convertValidators(indices, elements, convertAll)
case nativetypes.PreviousEpochAttestations, nativetypes.CurrentEpochAttestations:
return convertAttestations(indices, elements, convertAll)
case nativetypes.Balances:
return convertBalances(indices, elements, convertAll)
default:
return [][32]byte{}, errors.Errorf("got unsupported type of %v", reflect.TypeOf(elements).Name())
}
}
func convertBlockRoots(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
switch val := elements.(type) {
case [][]byte:
return handleByteArrays(val, indices, convertAll)
case *customtypes.BlockRoots:
return handle32ByteArrays(val[:], indices, convertAll)
default:
return nil, errors.Errorf("Incorrect type used for block roots")
}
}
func convertStateRoots(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
switch val := elements.(type) {
case [][]byte:
return handleByteArrays(val, indices, convertAll)
case *customtypes.StateRoots:
return handle32ByteArrays(val[:], indices, convertAll)
default:
return nil, errors.Errorf("Incorrect type used for state roots")
}
}
func convertRandaoMixes(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
switch val := elements.(type) {
case [][]byte:
return handleByteArrays(val, indices, convertAll)
case *customtypes.RandaoMixes:
return handle32ByteArrays(val[:], indices, convertAll)
default:
return nil, errors.Errorf("Incorrect type used for randao mixes")
}
}
func convertEth1DataVotes(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]*ethpb.Eth1Data)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Eth1Data{}).Name(), reflect.TypeOf(elements).Name())
}
return handleEth1DataSlice(val, indices, convertAll)
}
func convertValidators(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]*ethpb.Validator)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Validator{}).Name(), reflect.TypeOf(elements).Name())
}
return handleValidatorSlice(val, indices, convertAll)
}
func convertAttestations(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]*ethpb.PendingAttestation)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.PendingAttestation{}).Name(), reflect.TypeOf(elements).Name())
}
return handlePendingAttestationSlice(val, indices, convertAll)
}
func convertBalances(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]uint64)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]uint64{}).Name(), reflect.TypeOf(elements).Name())
}
return handleBalanceSlice(val, indices, convertAll)
}
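Extracting the convert* helpers leaves both switches (fieldConverters and fieldConvertersNative) as thin dispatch tables over shared conversion logic, and makes the accepted input shapes explicit: each roots helper takes either the raw [][]byte form or the corresponding fixed-size custom type. An illustrative call, assuming 32-byte entries:

// Either representation hits the same handler underneath; fragment only.
raw := [][]byte{bytesutil.PadTo([]byte("root"), 32)}
roots, err := convertBlockRoots([]uint64{}, raw, true /* convertAll */)
// presumably roots[0] is the 32-byte form of raw[0]; err is nil here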
// handleByteArrays computes and returns byte arrays in a slice of root format.
func handleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)

Some files were not shown because too many files have changed in this diff.