Compare commits

...

44 Commits

Author SHA1 Message Date
terencechain
2fa646e7c5 Merge branch 'develop' into new-slot-before-propose 2022-05-20 06:54:01 -07:00
Nishant Das
a984605064 Scenario End To End Testing (#10696)
* add new changes

* fix it all

* add nicer scenario

* some more cleanup

* restructure tests

* godoc

* skip one scenario

* space

* fix test

* clean up

* fix conflicts

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-20 09:34:10 +00:00
Nishant Das
f28b47bd87 Raise Soft File Descriptor Limit Up To The Hard Limit (#10650)
* add changes

* comment

* kasey's review

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-20 08:12:52 +00:00
kasey
588dea83b7 Config registry (#10683)
* test coverage and updates to config twiddlers

* LoadChainConfigFile error if SetActive conflicts

* lint

* wip working around test issues

* more fixes, mass test updates

* lint

* linting

* thanks deepsource!

* fix undeclared vars

* fixing more undefined

* fix a bug, make a bug, repeat

* gaz

* use stock mainnet in case fork schedule matters

* remove unused KnownConfigs

* post-merge cleanup

* eliminating OverrideBeaconConfig outside tests

* more cleanup of OverrideBeaconConfig outside tests

* config for interop w/ genesis gen support

* improve var name

* API on package instead of exported value

* cleanup remainders of "registry" naming

* Nishant feedback

* add ropsten to configset

* lint

* lint #2

* ✂️

* revert accidental commented line

* check if active is nil (replace called on empty)

* Nishant feedback

* replace OverrideBeaconConfig call

* update interop instructions w/ new flag

* don't let interop replace config set via cli flags

Co-authored-by: kasey <kasey@users.noreply.github.com>
2022-05-20 07:16:53 +00:00
terence tsao
90df2e4d00 Add new slot lock 2022-05-19 13:41:39 -07:00
terence tsao
c43ad9d561 Call NewSlot before propose block 2022-05-19 11:37:56 -07:00
Nishant Das
1012ec1915 Drop Down Expected Sync Participation During Fork (#10708)
* reduce

* skip
2022-05-19 12:28:56 +00:00
Raul Jordan
b74947aa75 Fix Slasher E2E Flakiness (#10715)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-19 08:51:31 +00:00
kasey
e3f69e4fad run e2e for 12 epochs (#10717)
* run for the original number of epochs

* to avoid race, wait 1/2 epoch after 1st ready node

* Update testing/endtoend/minimal_e2e_test.go

* Update testing/endtoend/minimal_e2e_test.go

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-05-19 06:45:58 +00:00
terencechain
092e9e1d19 Clean up various warnings (#10710)
* Clean up various warnings

* Update beacon-chain/rpc/prysm/v1alpha1/debug/state_test.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Fix redundant casting genState

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-05-19 04:38:04 +00:00
Sammy Rosso
1c51f6d1be Fix Tests in sync/validate_beacon_blocks.go to Pass for the Right Reasons (#10711)
* test: better InvalidSignature validation

Added digest to topic so that the test would not fail at decodePubsubMessage.
Generated a valid signature from an invalid key so that it would be considered invalid.
Added a check for the correct error: ErrSigFailedToVerify.
Related to #9791.

* test: better BlockAlreadyPresentInDB validation

Added digest to topic so that the test would not fail at decodePubsubMessage.
Added a check that the block is ignored.
Related to #9791.

* test: better Syncing validation

Replaced error silencing with NoError assertion.
Added check for the expected validation decision: ValidationIgnore.
Related to #9791.

* test: better IgnoreAndQueueBlocksFromNearFuture validation

Rename test to better describe its function.
Replace error silencing with a check for the expected error.
Related to #9791.

* test: better RejectBlocksFromFuture validation

Added digest to topic so that the test would not fail at decodePubsubMessage.
Replaced error silencing with NoError assertion.
Added check for the expected validation decision: ValidationIgnore.
Related to #9791.

* test: better RejectBlocksFromThePast validation

Added digest to topic so that the test would not fail at decodePubsubMessage.
Replaced error silencing with a check for the expected error.
Added check for the expected validation decision: ValidationIgnore.
Related to #9791.

* test: better SeenProposerSlot validation

Added digest to topic so that the test would not fail at decodePubsubMessage.
Replaced error silencing with NoError assertion.
Added check for the expected validation decision: ValidationIgnore.
Related to #9791.

* test: fix topic and silenced errors

* set chain service slot

* test: better RejectBlocksFromBadParent validation

Set bad parent block.
Fixed expected errors and validation decision.

* revert: correct log msg for unknown parent block

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-19 03:45:45 +00:00
Preston Van Loon
ee6aa4d4ec rpc: Better connection handling (#10714) 2022-05-19 02:53:24 +00:00
terencechain
0d2696ed4e Add ropsten config and cli flag (#10700)
* Add ropsten config and cli flag

* Rename : )

* Update genesis time

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-18 19:29:24 +00:00
mick
03d44d7bfe Weak subjectivity warning copy update, warn -> info (#10699)
* warn -> info

* wrap

* lintfix

* mick-needs-a-better-IDE

* gofmt

* empty commit to trigger cicd

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-18 16:20:35 +00:00
james-prysm
b38e4ddc3e Web3signer: Bellatrix Block (#10590)
* initial commit

* fixing bazel

* removing extra space

* adding blindedbeaconblock

* adding unit tests

* fixing bazel build

* switching to switch statement

* fixing function parameters

* fixing ineffectual err error

* deleting unused files

* updating web3signer version to support bellatrix

* adding metrics

* testing longer run

* changing log level to help find errors

* changing logging back to all to better understand the issue

* testing better way to get public keys for web3signer on validator

* fixing bazel

* rolling back changes

* missed rolling back 1 thing

* adding flag back in

* adding comments back in

* testing lower participation

* adding logs to see web3signer keys for comparison

* fix bazel

* adding more logs temporarily

* switching hex utility function

* web3signer doesn't support deposits, so changing this

* rolling back unintended unit test

* rolling back some changes for debugging

* Update validator/keymanager/remote-web3signer/v1/web3signer_types.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/components/web3remotesigner.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/endtoend_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/endtoend_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fixing merge conflict

* Update validator/keymanager/remote-web3signer/v1/requests.go

* Update testing/endtoend/endtoend_test.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/keymanager/remote-web3signer/v1/requests.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* prysm doesn't currently support deposits for web3signer

* reverting back to old implementation

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: terencechain <terence@prysmaticlabs.com>
2022-05-18 15:20:20 +00:00
Mike Neuder
9fab9df61e Refactor validator accounts delete to remove cli context dependency (#10686)
* add functional options accounts delete

* bazel run //:gazelle -- fix

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-17 23:13:36 +00:00
Radosław Kapka
eedafac822 SSZ responses for block production (#10697)
* Simplify SSZ handling

* fix tests

* add version to SSZ proto and to GetBeaconStateSSZV2

* TestProduceBlockV2SSZ

* rest of tests

* middleware

* use constants

* some middleware changes

* test fix

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-17 20:13:06 +00:00
terencechain
e9ad7aeff8 Fix a bad forkchoice proposer boost test (#10705)
* Fix a bad forkchoice proposer boost test

* Improve tests

* Add fork regression

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-05-17 16:26:56 -03:00
terencechain
3cfef20938 Add better debugging logs for validate block p2p pipeline (#10698) 2022-05-17 17:27:52 +00:00
Potuz
e90284bc00 Add Error checks for nil inner state (#10701) 2022-05-17 17:38:35 +08:00
terencechain
01e15a033f Improve beacon node doesn't have a parent in db logs (#10689)
* Improve beacon node doesn't have a parent in db logs

* Update round_robin.go

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-16 18:51:27 +00:00
Radosław Kapka
f09b06d6f6 Allow SSZ-serialized blocks in publishBlindedBlock (#10679)
* SubmitBlockSSZ grpc

* SubmitBlockSSZ middleware

* test fixes

* use VersionedUnmarshaller

* use VersionedUnmarshaller

(cherry picked from commit 7388eeb963)

* tests

* fuzz: Add fuzz tests for sparse merkle trie (#10662)

* Add fuzz tests for sparse merkle trie and change HTR signature to return an error

* fix capitalization of error message

* Add engine timeout values (#10645)

* Add timeout values

* Update engine_client.go

* Update engine_client.go

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Cleanup of `stategen` package (#10607)

* powchain and stategen

* revert powchain changes

* rename field to blockRootsOfSavedStates

* rename params to blockRoot

* review feedback

* fix loop

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Process atts and update head before proposing (#10653)

* Process atts and update head

* Fix ctx

* New test and old tests

* Update validator_test.go

* Update validator_test.go

* Update service.go

* Rename to UpdateHead

* Update receive_attestation.go

* Update receive_attestation.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Add link to e2e docs in `README` (#10672)

* Improve `ReceiveBlock`'s comment (#10671)

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Call fcu on invalid payload (#10565)

* Starting

* remove finalized root

* Just call fcu

* Review feedbacks

* fix one test

* Fix conflicts

* Update execution_engine_test.go

* Add a test for invalid recursive call

* Add comprehensive recursive test

* disallow override empty hash

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* Cache and use justified and finalized payload block hash (#10657)

* Cache and use justified and finalized payload block hash

* Fix tests

* Use real byte

* Fix conflicts

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* do not export slotFromBlock

* simplify tests

* grpc

* middleware

* extract package-level consts

* Simplify SSZ handling

* fix tests

* test fixes

* test hack

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-05-16 17:59:43 +00:00
james-prysm
16e66ee1b8 fee-recipient: update error log to warn log (#10684)
* initial commit

* updating fee recipient on beacon node

* updating logs

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-16 14:40:13 +00:00
terencechain
3d3890205f Remove invalid terminal block error code (#10646)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-15 03:14:44 +00:00
terencechain
a90335b15e Blockchain: Use get_block to retrieve block (#10688)
* Use get block

* Remove unused errNilParentInDB

* Fix TestVerifyBlkDescendant

* Update beacon-chain/blockchain/service.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-05-15 02:23:01 +00:00
Radosław Kapka
8cd43d216f Changes to SSZ handling (#10687)
* Simplify SSZ handling

* fix tests

* add version to SSZ proto and to GetBeaconStateSSZV2

* remove unwanted methods

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-13 20:29:18 +00:00
Radosław Kapka
d25c0ec1a5 Fix bugs in /eth/v1/beacon/blinded_blocks (#10673)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-05-13 18:20:39 +00:00
Radosław Kapka
e1c4427ea5 Allow SSZ-serialized blocks in publishBlock (#10663)
* SubmitBlockSSZ grpc

* SubmitBlockSSZ middleware

* test fixes

* use VersionedUnmarshaller

(cherry picked from commit 7388eeb963)

* tests

* do not export slotFromBlock

* simplify tests

* extract package-level consts

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-13 10:54:45 +00:00
Preston Van Loon
7042791e31 fuzz: Fix off by one error in sparse merkle trie item insertion (#10668)
* fuzz: Fix off by one error in sparse merkle trie item insertion

* remove new line

* Move validation to the proto constructor

* fix build

* Add a unit test because @nisdas is going to ask for it

* fix up

* gaz

* Update container/trie/sparse_merkle.go

* Update container/trie/sparse_merkle_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: nisdas <nishdas93@gmail.com>
2022-05-13 07:02:43 +00:00
terencechain
e771585b77 Fix errors.Is ordering (#10685)
* Fix errors.Is ordering for ErrUndefinedExecutionEngineError

* Update BUILD.bazel
2022-05-12 22:43:34 +00:00
Radosław Kapka
98622a052f Extract OptimisticSyncFetcher interface (#10654)
* Extract `OptimisticSyncFetcher` interface

* extract IsOptimistic

* fix tests

* more test fixes

* even more test fixes

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 17:23:45 +00:00
Potuz
61033ebea1 handle failure to update head (#10651)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 16:36:46 +00:00
Potuz
e808025b17 regression test off-by-one (#10675)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 15:49:37 +00:00
Radosław Kapka
7db0435ee0 Unify WARNING comments (#10678)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 15:25:44 +00:00
Nishant Das
1f086e4333 add more fuzz targets (#10682) 2022-05-12 10:47:29 -04:00
james-prysm
184e5be9de Fee recipient: checksum log (#10664)
* adding checksum check at validator client and beacon node

* adding validation and logs on validator client startup

* moving the log and validation

* fixing unit tests

* adding test for bad checksum on validator client

* fixing bazel

* addressing comments

* fixing log display

* Update beacon-chain/node/config.go

* Apply suggestions from code review

* breaking up lines

* fixing unit test

* ugh another fix to unit test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-11 19:36:57 +00:00
terencechain
e33850bf51 Don't return nil with new head (#10680) 2022-05-11 11:33:10 -07:00
Preston Van Loon
cc643ac4cc native-state: Simplify MarshalSSZ (#10677)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-11 14:25:11 +00:00
Nishant Das
abefe1e9d5 Add Sync Block Topic Fuzz Target (#10676)
* add new target

* remove

* Update beacon-chain/sync/BUILD.bazel
2022-05-11 13:18:12 +00:00
Nishant Das
b4e89fb28b Add in State Fuzzing (#10375)
* add it in

* build tags

* gaz

* remove dependency on fast-ssz for now

* copy and comment

* add in native state

* fix build

* no assert on invalid input, just return

* Add failing case

* fix it up

Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-05-11 12:47:54 +00:00
terencechain
4ad1c4df01 Cache and use justified and finalized payload block hash (#10657)
* Cache and use justified and finalized payload block hash

* Fix tests

* Use real byte

* Fix conflicts

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 21:20:28 +00:00
terencechain
6a197b47d9 Call fcu on invalid payload (#10565)
* Starting

* remove finalized root

* Just call fcu

* Review feedbacks

* fix one test

* Fix conflicts

* Update execution_engine_test.go

* Add a test for invalid recursive call

* Add comprehensive recursive test

* disallow override empty hash

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 20:02:00 +00:00
Radosław Kapka
d102421a25 Improve ReceiveBlock's comment (#10671)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 19:13:28 +00:00
Radosław Kapka
7b1490429c Add link to e2e docs in README (#10672) 2022-05-10 14:16:40 +00:00
314 changed files with 9198 additions and 3367 deletions

View File

@@ -26,15 +26,18 @@ You can use `bazel run //tools/genesis-state-gen` to create a deterministic gene
### Usage
- **--genesis-time** uint: Unix timestamp used as the genesis time in the generated genesis state (defaults to now)
- **--mainnet-config** bool: Select whether genesis state should be generated with mainnet or minimal (default) params
- **--num-validators** int: Number of validators to deterministically include in the generated genesis state
- **--output-ssz** string: Output filename of the SSZ marshaling of the generated genesis state
- **--config-name=interop** string: Name of the beacon chain config to use when generating the state, e.g. mainnet|minimal|interop
**deprecated flag: use --config-name instead**
- **--mainnet-config** bool: Select whether genesis state should be generated with mainnet or minimal (default) params
The example below creates 64 validator keys, instantiates a genesis state with those 64 validators and with genesis unix timestamp 1567542540,
and finally writes an SSZ-encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section.
and finally writes an SSZ-encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section. When using the `--interop-*` flags, the beacon node will assume the `interop` config should be used, unless a different config is specified on the command line.
```
bazel run //tools/genesis-state-gen -- --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
bazel run //tools/genesis-state-gen -- --config-name interop --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
```
## Launching a Beacon Node + Validator Client
@@ -46,8 +49,10 @@ Open up two terminal windows, run:
```
bazel run //beacon-chain -- \
--bootstrap-node= \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
--datadir=/tmp/beacon-chain-interop \
--force-clear-db \
--min-sync-peers=0 \
--interop-num-validators 64 \
--interop-eth1data-votes
```
@@ -69,8 +74,10 @@ Assuming you generated a `genesis.ssz` file with 64 validators, open up two term
```
bazel run //beacon-chain -- \
--bootstrap-node= \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
--datadir=/tmp/beacon-chain-interop \
--force-clear-db \
--min-sync-peers=0 \
--interop-genesis-state /path/to/genesis.ssz \
--interop-eth1data-votes
```

View File

@@ -46,10 +46,8 @@ const (
type StateOrBlockId string
const (
IdFinalized StateOrBlockId = "finalized"
IdGenesis StateOrBlockId = "genesis"
IdHead StateOrBlockId = "head"
IdJustified StateOrBlockId = "justified"
IdGenesis StateOrBlockId = "genesis"
IdHead StateOrBlockId = "head"
)
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
@@ -60,7 +58,7 @@ func IdFromRoot(r [32]byte) StateOrBlockId {
return StateOrBlockId(fmt.Sprintf("%#x", r))
}
// IdFromRoot encodes a Slot in the format expected by the API in places where a slot can be used to identify
// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromSlot(s types.Slot) StateOrBlockId {
return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
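
The two helpers above fix a copy-pasted doc comment (`IdFromRoot` → `IdFromSlot`) and pin down the encoding convention for `StateOrBlockId`: roots render as 0x-prefixed hex, slots as decimal strings. A minimal self-contained sketch of that convention, substituting `uint64` for `types.Slot` (an alias of it); the lowercase names are local stand-ins:

```go
package main

import (
	"fmt"
	"strconv"
)

// StateOrBlockId mirrors the type shown in the diff above.
type StateOrBlockId string

// idFromRoot encodes a 32-byte root as 0x-prefixed hex, as IdFromRoot does.
func idFromRoot(r [32]byte) StateOrBlockId {
	return StateOrBlockId(fmt.Sprintf("%#x", r))
}

// idFromSlot encodes a slot as a decimal string, as IdFromSlot does.
func idFromSlot(s uint64) StateOrBlockId {
	return StateOrBlockId(strconv.FormatUint(s, 10))
}

func main() {
	fmt.Println(idFromRoot([32]byte{0xab})) // 0xab followed by 62 zero hex digits
	fmt.Println(idFromSlot(12345))          // 12345
}
```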

View File

@@ -137,6 +137,7 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
@@ -188,6 +189,7 @@ go_test(
"//beacon-chain/powchain/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -20,7 +20,7 @@ import (
)
// ChainInfoFetcher defines a common interface for methods in blockchain service which
// directly retrieves chain info related data.
// directly retrieve chain info related data.
type ChainInfoFetcher interface {
HeadFetcher
FinalizationFetcher
@@ -32,8 +32,9 @@ type ChainInfoFetcher interface {
}
// HeadUpdater defines a common interface for methods in blockchain service
// which allow to update the head info
// which allow updating the head info, the proposer boost score, and the justified checkpoint via on_tick.
type HeadUpdater interface {
NewSlot(ctx context.Context, slot types.Slot) error
UpdateHead(context.Context) error
}
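
Read together with the "Call NewSlot before propose block" and "Add new slot lock" commits in this range, the expanded `HeadUpdater` interface suggests the proposer-side call order: per-slot `on_tick` processing first, then head recomputation. A hedged sketch of such a caller; `prepareToPropose`, `Slot`, and `noopUpdater` are illustrative, not Prysm API:

```go
package main

import (
	"context"
	"fmt"
)

// Slot stands in for types.Slot (a uint64 alias in Prysm).
type Slot uint64

// HeadUpdater mirrors the interface in the diff above.
type HeadUpdater interface {
	NewSlot(ctx context.Context, slot Slot) error
	UpdateHead(ctx context.Context) error
}

// prepareToPropose is a hypothetical caller: run the per-slot on_tick
// processing first (proposer boost reset, possible justified-checkpoint
// promotion), then recompute the head to build on.
func prepareToPropose(ctx context.Context, hu HeadUpdater, slot Slot) error {
	if err := hu.NewSlot(ctx, slot); err != nil {
		return err
	}
	return hu.UpdateHead(ctx)
}

// noopUpdater satisfies the interface for the example.
type noopUpdater struct{}

func (noopUpdater) NewSlot(context.Context, Slot) error { return nil }
func (noopUpdater) UpdateHead(context.Context) error    { return nil }

func main() {
	err := prepareToPropose(context.Background(), noopUpdater{}, 42)
	fmt.Println("prepare to propose:", err)
}
```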
@@ -49,7 +50,7 @@ type GenesisFetcher interface {
}
// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieves head related data.
// directly retrieve head related data.
type HeadFetcher interface {
HeadSlot() types.Slot
HeadRoot(ctx context.Context) ([]byte, error)
@@ -61,8 +62,6 @@ type HeadFetcher interface {
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ChainHeads() ([][32]byte, []types.Slot)
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
HeadSyncCommitteeFetcher
HeadDomainFetcher
}
@@ -79,7 +78,7 @@ type CanonicalFetcher interface {
}
// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieves finalization and justification related data.
// directly retrieve finalization and justification related data.
type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
@@ -87,6 +86,12 @@ type FinalizationFetcher interface {
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}
// OptimisticModeFetcher retrieves information about optimistic status of the node.
type OptimisticModeFetcher interface {
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}
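
This hunk extracts `IsOptimistic`/`IsOptimisticForRoot` out of `HeadFetcher` into the narrow `OptimisticModeFetcher` (see the "Extract OptimisticSyncFetcher interface" commit above), so callers that only need optimistic status no longer depend on the full head API. A hedged sketch with a hypothetical consumer and mock:

```go
package main

import (
	"context"
	"fmt"
)

// OptimisticModeFetcher mirrors the interface in the diff above.
type OptimisticModeFetcher interface {
	IsOptimistic(ctx context.Context) (bool, error)
	IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

// syncStatus is a hypothetical consumer that needs only optimistic status.
func syncStatus(ctx context.Context, f OptimisticModeFetcher) (string, error) {
	opt, err := f.IsOptimistic(ctx)
	if err != nil {
		return "", err
	}
	if opt {
		return "optimistic: execution payloads not yet fully validated", nil
	}
	return "verified", nil
}

// mock satisfies the interface for the example.
type mock struct{ optimistic bool }

func (m mock) IsOptimistic(context.Context) (bool, error) { return m.optimistic, nil }
func (m mock) IsOptimisticForRoot(context.Context, [32]byte) (bool, error) {
	return m.optimistic, nil
}

func main() {
	s, _ := syncStatus(context.Background(), mock{optimistic: true})
	fmt.Println(s)
}
```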
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
cp := s.store.FinalizedCheckpt()
@@ -238,7 +243,7 @@ func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}
// GenesisValidatorsRoot returns the genesis validator
// GenesisValidatorsRoot returns the genesis validators
// root of the chain.
func (s *Service) GenesisValidatorsRoot() [32]byte {
s.headLock.RLock()
@@ -305,7 +310,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
return v.PublicKey(), nil
}
// ForkChoicer returns the forkchoice interface
// ForkChoicer returns the forkchoice interface.
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
@@ -321,7 +326,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
return s.IsOptimisticForRoot(ctx, s.head.root)
}
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
@@ -351,7 +356,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, nil
}
// checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
// Checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
if err != nil {
return false, err
@@ -369,7 +374,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, err
}
// historical non-canonical blocks here are returned as optimistic for safety.
// Historical non-canonical blocks here are returned as optimistic for safety.
return !isCanonical, nil
}
@@ -378,7 +383,7 @@ func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}
// ForkChoiceStore returns the fork choice store in the service
// ForkChoiceStore returns the fork choice store in the service.
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}

View File

@@ -51,7 +51,7 @@ func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckpt(cp)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
}
@@ -62,7 +62,7 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckpt(cp)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
c.originBlockRoot = genesisRoot
assert.DeepEqual(t, c.originBlockRoot[:], c.FinalizedCheckpt().Root)
}
@@ -73,7 +73,7 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
c.store.SetJustifiedCheckpt(cp)
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
}
@@ -83,7 +83,7 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
c := setupBeaconChain(t, beaconDB)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c.store.SetJustifiedCheckpt(cp)
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
c.originBlockRoot = genesisRoot
assert.DeepEqual(t, c.originBlockRoot[:], c.CurrentJustifiedCheckpt().Root)
}

View File

@@ -11,8 +11,6 @@ var (
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errNilParentInDB is returned when a nil parent block is returned from the DB.
errNilParentInDB = errors.New("nil parent block in DB")
// errWrongBlockCount is returned when the wrong number of blocks or
// block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")

View File

@@ -31,11 +31,9 @@ var (
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
type notifyForkchoiceUpdateArg struct {
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.BeaconBlock
finalizedRoot [32]byte
justifiedRoot [32]byte
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.BeaconBlock
}
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -61,18 +59,12 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedHash, err := s.getPayloadHash(ctx, arg.finalizedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block hash")
}
justifiedHash, err := s.getPayloadHash(ctx, arg.justifiedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get justified block hash")
}
finalizedHash := s.store.FinalizedPayloadBlockHash()
justifiedHash := s.store.JustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: justifiedHash,
FinalizedBlockHash: finalizedHash,
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
@@ -89,10 +81,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
case powchain.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
headRoot := arg.headRoot
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
@@ -101,12 +94,35 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return nil, err
}
r, err := s.updateHead(ctx, s.justifiedBalances.balances)
if err != nil {
return nil, err
}
b, err := s.getBlock(ctx, r)
if err != nil {
return nil, err
}
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
return nil, err
}
pid, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: st,
headRoot: r,
headBlock: b.Block(),
})
if err != nil {
return nil, err
}
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", headRoot),
"invalidCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return nil, ErrInvalidPayload
return pid, ErrInvalidPayload
default:
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
@@ -125,19 +141,19 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
// getPayloadHash returns the payload hash given the block root.
// if the block is before bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root [32]byte) ([]byte, error) {
finalizedBlock, err := s.getBlock(ctx, s.ensureRootNotZeros(root))
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
blk, err := s.getBlock(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(root)))
if err != nil {
return nil, err
return [32]byte{}, err
}
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
return params.BeaconConfig().ZeroHash[:], nil
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
payload, err := blk.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
}
return payload.BlockHash, nil
return bytesutil.ToBytes32(payload.BlockHash), nil
}
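
The refactored `getPayloadHash` takes the root as a byte slice and returns a fixed-size `[32]byte`, so callers get a value type with no nil case to handle. A minimal sketch of the `bytesutil.ToBytes32`-style conversion the new return path relies on; the local `toBytes32` is a stand-in for the real helper:

```go
package main

import "fmt"

// toBytes32 copies up to 32 bytes into a fixed-size array; shorter inputs
// are zero-padded, longer ones truncated. This mirrors what the refactored
// getPayloadHash relies on when converting payload.BlockHash.
func toBytes32(b []byte) [32]byte {
	var out [32]byte
	copy(out[:], b)
	return out
}

func main() {
	h := toBytes32([]byte{'a'})
	fmt.Printf("%#x\n", h[:4]) // 0x61000000
}
```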
// notifyNewPayload signals execution engine on a new payload.
@@ -219,14 +235,10 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk interfaces.B
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
return nil
}
parent, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
parent, err := s.getBlock(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
if err != nil {
return err
}
if parent == nil || parent.IsNil() {
return errNilParentInDB
}
parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
if err != nil {
return err
@@ -266,7 +278,10 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": fieldparams.EthBurnAddressHex,
}).Error("Fee recipient not set. Using burn address")
}).Warn("Fee recipient is currently using the burn address, " +
"you will not be rewarded transaction fees on this setting. " +
"Please set a different eth address as the fee recipient. " +
"Please refer to our documentation for instructions")
}
case err != nil:
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
@@ -174,12 +175,15 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
fc := &ethpb.Checkpoint{Epoch: 1, Root: tt.finalizedRoot[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
arg := &notifyForkchoiceUpdateArg{
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
finalizedRoot: tt.finalizedRoot,
justifiedRoot: tt.justifiedRoot,
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
}
_, err := service.notifyForkchoiceUpdate(ctx, arg)
if tt.errString != "" {
@@ -191,6 +195,147 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
}
//
//
//  A <- B <- C <- D
//        \
//          ---------- E <- F
//                      \
//                        ------ G
//
// D is the current head; attestations for F and G come late, and both F and
// G are invalid. We switch recursively from G to F and finally to D.
//
// We test:
// 1. forkchoice removes blocks F and G from the forkchoice implementation
// 2. forkchoice removes the weights of these blocks
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
ba.Block.Body.ExecutionPayload.BlockNumber = 1
wba, err := wrapper.WrappedSignedBeaconBlock(ba)
require.NoError(t, err)
bra, err := wba.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wba))
bb := util.NewBeaconBlockBellatrix()
bb.Block.Body.ExecutionPayload.BlockNumber = 2
wbb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
brb, err := wbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbb))
bc := util.NewBeaconBlockBellatrix()
bc.Block.Body.ExecutionPayload.BlockNumber = 3
wbc, err := wrapper.WrappedSignedBeaconBlock(bc)
require.NoError(t, err)
brc, err := wbc.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbc))
bd := util.NewBeaconBlockBellatrix()
pd := [32]byte{'D'}
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
bd.Block.Body.ExecutionPayload.BlockNumber = 4
wbd, err := wrapper.WrappedSignedBeaconBlock(bd)
require.NoError(t, err)
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbd))
be := util.NewBeaconBlockBellatrix()
pe := [32]byte{'E'}
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
be.Block.Body.ExecutionPayload.BlockNumber = 5
wbe, err := wrapper.WrappedSignedBeaconBlock(be)
require.NoError(t, err)
bre, err := wbe.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbe))
bf := util.NewBeaconBlockBellatrix()
pf := [32]byte{'F'}
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
bf.Block.Body.ExecutionPayload.BlockNumber = 6
bf.Block.ParentRoot = bre[:]
wbf, err := wrapper.WrappedSignedBeaconBlock(bf)
require.NoError(t, err)
brf, err := wbf.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbf))
bg := util.NewBeaconBlockBellatrix()
bg.Block.Body.ExecutionPayload.BlockNumber = 7
pg := [32]byte{'G'}
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
bg.Block.ParentRoot = bre[:]
wbg, err := wrapper.WrappedSignedBeaconBlock(bg)
require.NoError(t, err)
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))
// Insert blocks into forkchoice
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0))
// Insert attestations for D, F and G so that F and G have higher weight than D
// Ensure G is head
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
headRoot, err := fcs.Head(ctx, 0, bra, []uint64{50, 100, 200}, 0)
require.NoError(t, err)
require.Equal(t, brg, headRoot)
// Prepare Engine Mock to return invalid unless head is D, LVH = E
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
fc := &ethpb.Checkpoint{Epoch: 0, Root: bra[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
a := &notifyForkchoiceUpdateArg{
headState: st,
headBlock: wbg.Block(),
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.ErrorIs(t, ErrInvalidPayload, err)
// Ensure Head is D
headRoot, err = fcs.Head(ctx, 0, bra, service.justifiedBalances.balances, 0)
require.NoError(t, err)
require.Equal(t, brd, headRoot)
// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
@@ -537,11 +682,11 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
jRoot, err := tt.justified.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
service.store.SetJustifiedCheckpt(
service.store.SetJustifiedCheckptAndPayloadHash(
&ethpb.Checkpoint{
Root: jRoot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
}, [32]byte{'a'})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
err = service.optimisticCandidateBlock(ctx, tt.blk)
@@ -625,7 +770,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
@@ -803,7 +948,7 @@ func TestService_getPayloadHash(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err = service.getPayloadHash(ctx, [32]byte{})
_, err = service.getPayloadHash(ctx, []byte{})
require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)
b := util.NewBeaconBlock()
@@ -813,20 +958,20 @@ func TestService_getPayloadHash(t *testing.T) {
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)
h, err := service.getPayloadHash(ctx, r)
h, err := service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], h)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, h)
bb := util.NewBeaconBlockBellatrix()
h = []byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h
h = [32]byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h[:]
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)
h, err = service.getPayloadHash(ctx, r)
h, err = service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, []byte{'a'}, h)
require.DeepEqual(t, [32]byte{'a'}, h)
}

View File

@@ -40,7 +40,7 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not update head")
}
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
headBlock, err := s.getBlock(ctx, headRoot)
if err != nil {
return err
}
@@ -86,7 +86,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
// re-initiate fork choice store using the latest justified info.
// This recovers a fatal condition and should not happen in run time.
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
jb, err := s.getBlock(ctx, headStartRoot)
if err != nil {
return [32]byte{}, err
}
@@ -355,7 +355,7 @@ func (s *Service) notifyNewHeadEvent(
// attestation pool. It also filters out the attestations that is one epoch older as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
orphanedBlk, err := s.cfg.BeaconDB.Block(ctx, orphanedRoot)
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}

View File

@@ -154,8 +154,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
headRoot, err := service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
@@ -298,8 +298,8 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
Root: bellatrixBlkRoot[:],
Epoch: 1,
}
service.store.SetFinalizedCheckpt(fcp)
service.store.SetJustifiedCheckpt(fcp)
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)

View File

@@ -6,7 +6,7 @@ import (
func init() {
// Override network name so that hardcoded genesis files are not loaded.
cfg := params.BeaconConfig()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
}

View File

@@ -21,7 +21,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
}
}
// warning: only use these opts when you are certain there are no db calls
// WARNING: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {

View File

@@ -30,6 +30,8 @@ import (
// if ancestor_at_finalized_slot == store.finalized_checkpoint.root:
// store.justified_checkpoint = store.best_justified_checkpoint
func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
s.newSlotLock.Lock()
defer s.newSlotLock.Unlock()
// Reset proposer boost root in fork choice.
if err := s.cfg.ForkChoiceStore.ResetBoostedProposerRoot(ctx); err != nil {
@@ -64,7 +66,11 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
return err
}
if bytes.Equal(r, f.Root) {
s.store.SetJustifiedCheckpt(bj)
h, err := s.getPayloadHash(ctx, bj.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
}
}
return nil
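
The `newSlotLock` introduced above serializes `NewSlot` so that the slot ticker and a proposer invoking it right before block production cannot interleave the proposer-boost reset with the justified-checkpoint promotion. A minimal sketch of the pattern under that assumption; all names besides the lock idea are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

// service is a stand-in for the blockchain service; only the lock pattern
// is taken from the diff above.
type service struct {
	newSlotLock sync.Mutex
	boostedRoot [32]byte
	currentSlot uint64
}

// newSlot performs per-slot fork-choice housekeeping atomically: the lock
// keeps the proposer-boost reset and any checkpoint promotion from
// interleaving with a concurrent caller.
func (s *service) newSlot(slot uint64) {
	s.newSlotLock.Lock()
	defer s.newSlotLock.Unlock()
	s.boostedRoot = [32]byte{} // reset proposer boost root
	s.currentSlot = slot
}

func main() {
	s := &service{}
	var wg sync.WaitGroup
	for i := uint64(0); i < 4; i++ {
		wg.Add(1)
		go func(slot uint64) {
			defer wg.Done()
			s.newSlot(slot)
		}(i)
	}
	wg.Wait()
	fmt.Println("last slot processed:", s.currentSlot)
}
```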

View File

@@ -324,9 +324,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r := [32]byte{'g'}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
r = bytesutil.ToBytes32([]byte{'A'})
@@ -358,9 +358,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
@@ -500,7 +500,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -535,7 +535,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -564,7 +564,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
@@ -591,7 +591,7 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33

View File

@@ -123,7 +123,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
@@ -146,9 +148,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -195,9 +194,19 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
newFinalized := postState.FinalizedCheckpointEpoch() > finalized.Epoch
if newFinalized {
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(postState.FinalizedCheckpoint())
cp := postState.FinalizedCheckpoint()
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(cp, h)
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(postState.CurrentJustifiedCheckpoint())
cp = postState.CurrentJustifiedCheckpoint()
h, err = s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
@@ -413,6 +422,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
}
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, nil, err
}
}
for r, st := range boundaries {
@@ -426,14 +439,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
}
f := fCheckpoints[len(fCheckpoints)-1]
j := jCheckpoints[len(jCheckpoints)-1]
arg := &notifyForkchoiceUpdateArg{
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
finalizedRoot: bytesutil.ToBytes32(f.Root),
justifiedRoot: bytesutil.ToBytes32(j.Root),
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return nil, nil, err
@@ -484,7 +493,11 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
return err
}
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(fCheckpoint)
h, err := s.getPayloadHash(ctx, fCheckpoint.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
}
return nil
}
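
Throughout these hunks the store setters change from `SetFinalizedCheckpt`/`SetJustifiedCheckpt` to `Set…CheckptAndPayloadHash`, caching the execution payload block hash next to each checkpoint so `notifyForkchoiceUpdate` can read both via `FinalizedPayloadBlockHash`/`JustifiedPayloadBlockHash` without a database round trip (see the "Cache and use justified and finalized payload block hash" commit above). A hedged sketch of that store shape; field names and locking are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

// Checkpoint is a pared-down stand-in for ethpb.Checkpoint.
type Checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

// store keeps the execution payload block hash alongside each checkpoint,
// so both can be read together without a database lookup.
type store struct {
	mu            sync.RWMutex
	finalized     *Checkpoint
	finalizedHash [32]byte
}

func (s *store) SetFinalizedCheckptAndPayloadHash(cp *Checkpoint, h [32]byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.finalized, s.finalizedHash = cp, h
}

func (s *store) FinalizedPayloadBlockHash() [32]byte {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.finalizedHash
}

func main() {
	st := &store{}
	st.SetFinalizedCheckptAndPayloadHash(&Checkpoint{Epoch: 1}, [32]byte{'a'})
	h := st.FinalizedPayloadBlockHash()
	fmt.Printf("%#x\n", h[:4]) // 0x61000000
}
```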

View File

@@ -97,13 +97,10 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
return errNilFinalizedInStore
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
finalizedBlkSigned, err := s.cfg.BeaconDB.Block(ctx, fRoot)
finalizedBlkSigned, err := s.getBlock(ctx, fRoot)
if err != nil {
return err
}
if finalizedBlkSigned == nil || finalizedBlkSigned.IsNil() || finalizedBlkSigned.Block().IsNil() {
return errors.New("nil finalized block")
}
finalizedBlk := finalizedBlkSigned.Block()
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot())
if err != nil {
@@ -208,7 +205,11 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
return errNilJustifiedInStore
}
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(cpt)
h, err := s.getPayloadHash(ctx, cpt.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
}
return nil
@@ -227,7 +228,11 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
return err
}
s.store.SetJustifiedCheckpt(cp)
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)
return nil
}
@@ -350,7 +355,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
higherThanFinalized := slot > fSlot
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
b, err := s.getBlock(ctx, parentRoot)
if err != nil {
return err
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"strconv"
"sync"
"testing"
"time"
@@ -39,6 +40,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/time"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestStore_OnBlock_ProtoArray(t *testing.T) {
@@ -129,9 +131,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
root, err := tt.blk.Block.HashTreeRoot()
@@ -232,9 +234,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
root, err := tt.blk.Block.HashTreeRoot()
@@ -289,7 +291,8 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -353,7 +356,8 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -415,7 +419,9 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
@@ -484,7 +490,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
assert.Equal(t, true, update, "Should be able to update justified")
@@ -516,7 +522,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -549,7 +555,7 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -577,7 +583,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -614,7 +620,7 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -648,7 +654,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -656,7 +662,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 1
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
wb, err := wrapper.WrappedBeaconBlock(b.Block)
require.NoError(t, err)
err = service.verifyBlkPreState(ctx, wb)
@@ -691,7 +697,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: []byte{'A'}}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
st, err := util.NewBeaconState()
require.NoError(t, err)
@@ -723,7 +729,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -768,7 +774,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -814,7 +820,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -863,7 +869,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -913,7 +919,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -974,7 +980,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1321,7 +1327,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
args: args{
finalizedRoot: [32]byte{'a'},
},
wantedErr: "nil finalized block",
wantedErr: "block not found in cache or db",
},
{
name: "could not get finalized block root in DB",
@@ -1350,7 +1356,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
for _, tt := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]}, [32]byte{})
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
@@ -1378,7 +1384,7 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.originBlockRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
service.store.SetJustifiedCheckpt(currentCp)
service.store.SetJustifiedCheckptAndPayloadHash(currentCp, [32]byte{'a'})
newCp := &ethpb.Checkpoint{Epoch: 2, Root: gRoot[:]}
require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))
@@ -1439,7 +1445,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1493,7 +1499,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1524,7 +1530,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
@@ -1563,7 +1569,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(6))
@@ -1887,3 +1893,82 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
require.NoError(t, err)
service.insertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
}
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
blk1, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
wsb1, err := wrapper.WrappedSignedBeaconBlock(blk1)
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
wsb2, err := wrapper.WrappedSignedBeaconBlock(blk2)
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
wsb3, err := wrapper.WrappedSignedBeaconBlock(blk3)
require.NoError(t, err)
blk4, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 4)
require.NoError(t, err)
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
wsb4, err := wrapper.WrappedSignedBeaconBlock(blk4)
require.NoError(t, err)
logHook := logTest.NewGlobal()
for i := 0; i < 10; i++ {
var wg sync.WaitGroup
wg.Add(4)
go func() {
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
require.LogsDoNotContain(t, logHook, "New head does not exist in DB. Do nothing")
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r1))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'a'})
}
}
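
Stripped of the test specifics, the loop above is the standard fan-out/join shape: one goroutine per block, a WaitGroup join, then assertions against shared state. As a sketch, with jobs standing in for the four onBlock closures:

var wg sync.WaitGroup
for _, job := range jobs { // jobs: one func() error per block (illustrative)
    wg.Add(1)
    go func(run func() error) {
        defer wg.Done()
        _ = run() // the test asserts each error inside its goroutine instead
    }(job)
}
wg.Wait() // all four blocks processed; safe to inspect logs and the DB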

View File

@@ -176,7 +176,7 @@ func (s *Service) UpdateHead(ctx context.Context) error {
// This calls notify Forkchoice Update in the event that the head has changed
// The above refers to notifyForkchoiceUpdate, invoked in the event that the head has changed.
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
if s.headRoot() == newHeadRoot {
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
return
}
@@ -185,12 +185,6 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return // We don't have the block, don't notify the engine and update head.
}
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get new head block")
@@ -202,11 +196,9 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return
}
arg := &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
finalizedRoot: bytesutil.ToBytes32(finalized.Root),
justifiedRoot: bytesutil.ToBytes32(s.store.JustifiedCheckpt().Root),
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
}
_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
if err != nil {

View File

@@ -137,14 +137,14 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.notifyEngineIfChangedHead(ctx, service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
require.LogsDoNotContain(t, hook, finalizedErr)
invalidStateErr := "Could not get state from db"
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
service.saveInitSyncBlock([32]byte{'a'}, gb)
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.LogsContain(t, hook, finalizedErr)
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
service.head = &head{
@@ -169,9 +169,9 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
// Block in DB
@@ -191,14 +191,19 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
service.notifyEngineIfChangedHead(ctx, [32]byte{})
require.Equal(t, service.headRoot(), headRoot)
}
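
The new tail exercises the zero-root guard added to notifyEngineIfChangedHead earlier in this set. Reduced to its essentials, the guard is:

// skip the engine notification for an empty candidate root or an unchanged head
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
    return
}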
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {

View File

@@ -30,9 +30,9 @@ type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
}
// ReceiveBlock is a function that defines the the operations (minus pubsub)
// that are performed on blocks that is received from regular sync service. The operations consists of:
// 1. Validate block, apply state transition and update check points
// ReceiveBlock is a function that defines the operations (minus pubsub)
// that are performed on a received block. The operations consist of:
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
@@ -85,7 +85,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
defer span.End()
// Apply state transition on the incoming newly received block batches, one by one.
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
@@ -94,10 +94,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
for i, b := range blocks {
blockCopy := b.Copy()
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,

View File

@@ -34,7 +34,7 @@ func TestService_ReceiveBlock(t *testing.T) {
return blk
}
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc := params.BeaconConfig().Copy()
bc.ShardCommitteePeriod = 0 // Required for voluntary exits test in reasonable time.
params.OverrideBeaconConfig(bc)
@@ -141,7 +141,8 @@ func TestService_ReceiveBlock(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
h := [32]byte{'a'}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, h)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -181,7 +182,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wg := sync.WaitGroup{}
@@ -262,7 +263,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -312,7 +313,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
@@ -323,7 +324,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
@@ -336,7 +337,7 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 10000000})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))

View File

@@ -66,6 +66,7 @@ type Service struct {
wsVerifier *WeakSubjectivityVerifier
store *store.Store
processAttestationsLock sync.Mutex
newSlotLock sync.Mutex
}
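
The diff adds the lock but not the method body, so the following is a hypothetical sketch of how NewSlot would use it, serializing per-slot fork-choice bookkeeping so concurrent callers cannot interleave:

func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
    s.newSlotLock.Lock()
    defer s.newSlotLock.Unlock()
    // per-slot checkpoint/fork-choice updates run here one caller at a time
    return nil
}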
// config options for the service.
@@ -201,13 +202,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
forkChoicer = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
}
s.cfg.ForkChoiceStore = forkChoicer
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
fb, err := s.getBlock(s.ctx, s.ensureRootNotZeros(fRoot))
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
if fb == nil || fb.IsNil() {
return errNilFinalizedInStore
}
payloadHash, err := getBlockPayloadHash(fb.Block())
if err != nil {
return errors.Wrap(err, "could not get execution payload hash")
@@ -339,14 +337,13 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
finalizedState.Slot(), flags.HeadSync.Name)
}
}
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
if finalizedState == nil || finalizedState.IsNil() {
return errors.New("finalized state can't be nil")
}
if finalizedState == nil || finalizedState.IsNil() || finalizedBlock == nil || finalizedBlock.IsNil() {
return errors.New("finalized state and block can't be nil")
finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
s.setHead(finalizedRoot, finalizedBlock, finalizedState)

View File

@@ -31,6 +31,7 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -86,9 +87,11 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
require.NoError(t, err)
err = beaconDB.SavePowchainData(ctx, &ethpb.ETH1ChainData{
BeaconState: pbState,
Trie: &ethpb.SparseMerkleTrie{},
Trie: mockTrie.ToProto(),
CurrentEth1Data: &ethpb.LatestETH1Data{
BlockHash: make([]byte, 32),
},
@@ -501,7 +504,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -522,7 +525,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -595,7 +598,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
@@ -618,7 +621,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)

View File

@@ -37,7 +37,7 @@ func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
// WARNING: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)

View File

@@ -23,6 +23,13 @@ func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
return s.justifiedCheckpt
}
// JustifiedPayloadBlockHash returns the justified payload block hash reflecting the justified checkpoint.
func (s *Store) JustifiedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.justifiedPayloadBlockHash
}
// PrevFinalizedCheckpt returns the previous finalized checkpoint in the Store.
func (s *Store) PrevFinalizedCheckpt() *ethpb.Checkpoint {
s.RLock()
@@ -37,6 +44,13 @@ func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
return s.finalizedCheckpt
}
// FinalizedPayloadBlockHash returns the finalized payload block hash reflecting the finalized checkpoint.
func (s *Store) FinalizedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.finalizedPayloadBlockHash
}
// SetPrevJustifiedCheckpt sets the previous justified checkpoint in the Store.
func (s *Store) SetPrevJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.Lock()
@@ -51,18 +65,20 @@ func (s *Store) SetBestJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.bestJustifiedCheckpt = cp
}
// SetJustifiedCheckpt sets the justified checkpoint in the Store.
func (s *Store) SetJustifiedCheckpt(cp *ethpb.Checkpoint) {
// SetJustifiedCheckptAndPayloadHash sets the justified checkpoint and payload block hash in the Store.
func (s *Store) SetJustifiedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.justifiedCheckpt = cp
s.justifiedPayloadBlockHash = h
}
// SetFinalizedCheckpt sets the finalized checkpoint in the Store.
func (s *Store) SetFinalizedCheckpt(cp *ethpb.Checkpoint) {
// SetFinalizedCheckptAndPayloadHash sets the finalized checkpoint and payload block hash in the Store.
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.finalizedCheckpt = cp
s.finalizedPayloadBlockHash = h
}
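
A design note on the combined setters: checkpoint and payload hash are written inside one critical section, so the Store never holds a new checkpoint next to a stale hash. Reading them back still takes two independent read locks, so a pair of reads is not atomic with respect to writers. A small usage sketch with illustrative values:

cp := &ethpb.Checkpoint{Epoch: 1, Root: root[:]}
s.SetJustifiedCheckptAndPayloadHash(cp, payloadHash) // one lock, both fields
// each getter locks on its own; don't assume these two reads pair up
_ = s.JustifiedCheckpt()
_ = s.JustifiedPayloadBlockHash()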
// SetPrevFinalizedCheckpt sets the previous finalized checkpoint in the Store.

View File

@@ -30,8 +30,10 @@ func Test_store_JustifiedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.JustifiedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetJustifiedCheckpt(cp)
h := [32]byte{'b'}
s.SetJustifiedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.JustifiedCheckpt())
require.Equal(t, h, s.JustifiedPayloadBlockHash())
}
func Test_store_FinalizedCheckpt(t *testing.T) {
@@ -39,8 +41,10 @@ func Test_store_FinalizedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.FinalizedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetFinalizedCheckpt(cp)
h := [32]byte{'b'}
s.SetFinalizedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.FinalizedCheckpt())
require.Equal(t, h, s.FinalizedPayloadBlockHash())
}
func Test_store_PrevFinalizedCheckpt(t *testing.T) {

View File

@@ -17,9 +17,11 @@ import (
// best_justified_checkpoint: Checkpoint
// proposerBoostRoot: Root
type Store struct {
justifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
justifiedCheckpt *ethpb.Checkpoint
justifiedPayloadBlockHash [32]byte
finalizedCheckpt *ethpb.Checkpoint
finalizedPayloadBlockHash [32]byte
bestJustifiedCheckpt *ethpb.Checkpoint
sync.RWMutex
// These are not part of the consensus spec, but we do use them to return gRPC API requests.
// TODO(10094): Consider removing in v3.

View File

@@ -376,7 +376,7 @@ func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}
}
// VerifyBlkDescendant mocks VerifyBlkDescendant and always returns nil.
// VerifyFinalizedBlkDescendant mocks the same method in the chain service and always returns nil.
func (s *ChainService) VerifyFinalizedBlkDescendant(_ context.Context, _ [32]byte) error {
return s.VerifyBlkDescendantErr
}
@@ -451,8 +451,13 @@ func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool,
return s.Optimistic, nil
}
// ProcessAttestationsAndUpdateHead mocks the same method in the chain service.
// UpdateHead mocks the same method in the chain service.
func (s *ChainService) UpdateHead(_ context.Context) error { return nil }
// ReceiveAttesterSlashing mocks the same method in the chain service.
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
// NewSlot mocks the same method in the chain service.
func (s *ChainService) NewSlot(context.Context, types.Slot) error {
return nil
}

View File

@@ -30,10 +30,11 @@ type WeakSubjectivityVerifier struct {
db weakSubjectivityDB
}
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
log.Warn("No valid weak subjectivity checkpoint specified, running without weak subjectivity verification")
log.Info("No checkpoint for syncing provided, node will begin syncing from genesis. Checkpoint Sync is an optional feature that allows your node to sync from a more recent checkpoint, " +
"which enhances the security of your local beacon node and the broader network. See https://docs.prylabs.network/docs/next/prysm-usage/checkpoint-sync/ to learn how to configure Checkpoint Sync.")
return &WeakSubjectivityVerifier{
enabled: false,
}, nil
@@ -58,7 +59,6 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
if v.verified || !v.enabled {
return nil
}
// Two conditions are described in the specs:
// IF epoch_number > store.finalized_checkpoint.epoch,
// then ASSERT during block sync that block with root block_root
@@ -92,6 +92,5 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
return nil
}
}
return errors.Wrap(errWSBlockNotFoundInEpoch, fmt.Sprintf("root=%#x, epoch=%d", v.root, v.epoch))
}
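
Condensed, the two spec conditions referenced above amount to the following hedged sketch; the parameter names are illustrative and the real verifier carries more state:

func checkWS(finalizedEpoch, wsEpoch types.Epoch, wsBlockSaved bool) error {
    if wsEpoch > finalizedEpoch {
        // condition 1: not yet finalized past the checkpoint, so the block
        // with the configured root must be observed during block sync
        return nil
    }
    // condition 2: finality passed the checkpoint, so the block must already
    // be in the database at the expected epoch
    if !wsBlockSaved {
        return errWSBlockNotFoundInEpoch
    }
    return nil
}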

View File

@@ -79,7 +79,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
store: &store.Store{},
wsVerifier: wv,
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch}, [32]byte{})
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.store.FinalizedCheckpt().Epoch)
if tt.wantErr == nil {
require.NoError(t, err)

View File

@@ -110,7 +110,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
sort.Slice(k, func(i, j int) bool {
return k[i].(string) < k[j].(string)
})
wanted := end - int(maxCommitteesCacheSize)
wanted := end - maxCommitteesCacheSize
s := bytesutil.ToBytes32([]byte(strconv.Itoa(wanted)))
assert.Equal(t, key(s), k[0], "incorrect key received for slot 190")

View File

@@ -20,7 +20,9 @@ import (
var runAmount = 25
func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
benchmark.SetBenchmarkConfig()
undo, err := benchmark.SetBenchmarkConfig()
require.NoError(b, err)
defer undo()
beaconState, err := benchmark.PreGenState1Epoch()
require.NoError(b, err)
cleanStates := clonedStates(beaconState)
@@ -37,7 +39,9 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
}
func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
benchmark.SetBenchmarkConfig()
undo, err := benchmark.SetBenchmarkConfig()
require.NoError(b, err)
defer undo()
beaconState, err := benchmark.PreGenState1Epoch()
require.NoError(b, err)
@@ -67,7 +71,9 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
}
func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
benchmark.SetBenchmarkConfig()
undo, err := benchmark.SetBenchmarkConfig()
require.NoError(b, err)
defer undo()
beaconState, err := benchmark.PreGenstateFullEpochs()
require.NoError(b, err)

View File

@@ -579,10 +579,10 @@ func TestProcessSlots_OnlyAltairEpoch(t *testing.T) {
func TestProcessSlots_OnlyBellatrixEpoch(t *testing.T) {
transition.SkipSlotCache.Disable()
conf := params.BeaconConfig()
params.SetupTestConfigCleanup(t)
conf := params.BeaconConfig().Copy()
conf.BellatrixForkEpoch = 5
params.OverrideBeaconConfig(conf)
defer params.UseMainnetConfig()
st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*6))

View File

@@ -14,7 +14,7 @@ var ErrNotFoundState = kv.ErrNotFoundState
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis

View File

@@ -16,7 +16,7 @@ var ErrNotFoundOriginBlockRoot = errors.Wrap(ErrNotFound, "OriginBlockRoot")
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = errors.Wrap(ErrNotFound, "OriginGenesisRoot")
// ErrNotFoundOriginBlockRoot is an error specifically for the origin block root getter
// ErrNotFoundBackfillBlockRoot is an error specifically for the backfill block root getter
var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
@@ -28,41 +29,70 @@ func testGenesisDataSaved(t *testing.T, db iface.Database) {
ctx := context.Background()
gb, err := db.GenesisBlock(ctx)
assert.NoError(t, err)
assert.NotNil(t, gb)
require.NoError(t, err)
require.NotNil(t, gb)
gbHTR, err := gb.Block().HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, err)
gss, err := db.StateSummary(ctx, gbHTR)
assert.NoError(t, err)
assert.NotNil(t, gss)
require.NoError(t, err)
require.NotNil(t, gss)
head, err := db.HeadBlock(ctx)
assert.NoError(t, err)
assert.NotNil(t, head)
require.NoError(t, err)
require.NotNil(t, head)
headHTR, err := head.Block().HashTreeRoot()
assert.NoError(t, err)
assert.Equal(t, gbHTR, headHTR, "head block does not match genesis block")
require.NoError(t, err)
require.Equal(t, gbHTR, headHTR, "head block does not match genesis block")
}
func TestLoadGenesisFromFile(t *testing.T) {
// for this test to work, we need the active config to have these properties:
// - fork version schedule that matches mainnet.genesis.ssz
// - name that does not match params.MainnetName - otherwise we'll trigger the codepath that loads the state
// from the compiled binary.
// to do that, first we need to rewrite the mainnet fork schedule so it won't conflict with a renamed config that
// uses the mainnet fork schedule. construct the differently named mainnet config and set it active.
// finally, revert all this at the end of the test.
// first get the real mainnet out of the way by overwriting its schedule.
cfg, err := params.ByName(params.MainnetName)
require.NoError(t, err)
cfg = cfg.Copy()
reversioned := cfg.Copy()
params.FillTestVersions(reversioned, 127)
undo, err := params.SetActiveWithUndo(reversioned)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()
// then set up a new config, which uses the real mainnet schedule, and activate it
cfg.ConfigName = "genesis-test"
undo2, err := params.SetActiveWithUndo(cfg)
require.NoError(t, err)
defer func() {
require.NoError(t, undo2())
}()
fp := "testdata/mainnet.genesis.ssz"
rfp, err := bazel.Runfile(fp)
if err == nil {
fp = rfp
}
sb, err := os.ReadFile(fp)
assert.NoError(t, err)
require.NoError(t, err)
db := setupDB(t)
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
require.NoError(t, db.LoadGenesis(context.Background(), sb))
testGenesisDataSaved(t, db)
// Loading the same genesis again should not throw an error
assert.NoError(t, err)
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
require.NoError(t, err)
require.NoError(t, db.LoadGenesis(context.Background(), sb))
testGenesisDataSaved(t, db)
}
func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
@@ -80,11 +110,15 @@ func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
}
func TestEnsureEmbeddedGenesis(t *testing.T) {
// Embedded Genesis works with Mainnet config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.ConfigName = params.MainnetName
params.OverrideBeaconConfig(cfg)
// Embedded Genesis works with Mainnet config
cfg := params.MainnetConfig().Copy()
cfg.SecondsPerSlot = 1
undo, err := params.SetActiveWithUndo(cfg)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()
ctx := context.Background()
db := setupDB(t)

View File

@@ -6,7 +6,7 @@ import (
func init() {
// Override network name so that hardcoded genesis files are not loaded.
cfg := params.BeaconConfig()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
}

View File

@@ -12,11 +12,9 @@ import (
)
func TestSaveOrigin(t *testing.T) {
// Embedded Genesis works with Mainnet config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.ConfigName = params.MainnetName
params.OverrideBeaconConfig(cfg)
// Embedded Genesis works with Mainnet config
params.OverrideBeaconConfig(params.MainnetConfig().Copy())
ctx := context.Background()
db := setupDB(t)

View File

@@ -81,7 +81,7 @@ func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error)
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
node.parent.children = children[:len(children)-2]
node.parent.children = children[:len(children)-1]
break
}
}
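
The one-character fix above is the whole bug: the loop moves the last child into the removed slot, so the slice must shrink by exactly one. The idiom in isolation:

// swap-and-truncate removal: overwrite index i with the last element, then
// drop one slot; truncating by two (the old code) silently lost a child
func removeAt(children []*Node, i int) []*Node {
    children[i] = children[len(children)-1]
    return children[:len(children)-1]
}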

View File

@@ -203,3 +203,26 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}
// This is a regression test (10565)
// ----- C
// /
// A <- B
// \
// ----------D
// D is invalid
func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, 1, 1))
_, err := f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
}

View File

@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -111,35 +111,37 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
// Insert a second block at slot 3 into the tree and boost its score.
// Insert a second block at slot 4 into the tree and boost its score.
// 0
// |
// 1
// |
// 2
// / \
// 3 4 <- HEAD
slot = types.Slot(3)
// 3 |
// 4 <- HEAD
slot = types.Slot(4)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(3)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,
CurrentSlot: clockSlot,
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
@@ -166,17 +168,27 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 34)
// (A: 54) -> (B: 44) -> (C: 10)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
// middle instead of the normal progression of (54 -> 44 -> 24).
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(54))
node2 := f.store.nodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(44))
node3 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node3.weight, uint64(24))
node3 := f.store.nodeByRoot[indexToHash(3)]
require.Equal(t, node3.weight, uint64(10))
node4 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node4.weight, uint64(24))
// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)

View File

@@ -24,6 +24,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL.
//
// WARNING: This method returns an error if the root is not found in forkchoice
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.Lock()

View File

@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -111,29 +111,30 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
// Insert a second block at slot 3 into the tree and boost its score.
// Insert a second block at slot 4 into the tree and boost its score.
// 0
// |
// 1
// |
// 2
// / \
// 3 4 <- HEAD
slot = types.Slot(3)
// 3 |
// 4 <- HEAD
slot = types.Slot(4)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(3)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,
@@ -166,14 +167,22 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 24)
// \_->(D: 10)
// (A: 54) -> (B: 44) -> (C: 10)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
// middle instead of the normal progression of (54 -> 44 -> 24).
require.Equal(t, f.store.nodes[1].weight, uint64(54))
require.Equal(t, f.store.nodes[2].weight, uint64(44))
require.Equal(t, f.store.nodes[3].weight, uint64(34))
require.Equal(t, f.store.nodes[3].weight, uint64(10))
require.Equal(t, f.store.nodes[4].weight, uint64(24))
// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)

View File

@@ -130,7 +130,7 @@ func TestUpdateSyncCommitteeTrackedVals(t *testing.T) {
func TestNewService(t *testing.T) {
config := &ValidatorMonitorConfig{}
tracked := []types.ValidatorIndex{}
var tracked []types.ValidatorIndex
ctx := context.Background()
_, err := NewService(ctx, config, tracked)
require.NoError(t, err)

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
@@ -22,19 +23,22 @@ func configureTracing(cliCtx *cli.Context) error {
)
}
func configureChainConfig(cliCtx *cli.Context) {
func configureChainConfig(cliCtx *cli.Context) error {
if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
chainConfigFileName := cliCtx.String(cmd.ChainConfigFileFlag.Name)
params.LoadChainConfigFile(chainConfigFileName, nil)
return params.LoadChainConfigFile(chainConfigFileName, nil)
}
return nil
}
func configureHistoricalSlasher(cliCtx *cli.Context) {
func configureHistoricalSlasher(cliCtx *cli.Context) error {
if cliCtx.Bool(flags.HistoricalSlasherNode.Name) {
c := params.BeaconConfig()
c := params.BeaconConfig().Copy()
// Save a state every 4 epochs.
c.SlotsPerArchivedPoint = params.BeaconConfig().SlotsPerEpoch * 4
params.OverrideBeaconConfig(c)
if err := params.SetActive(c); err != nil {
return err
}
cmdConfig := cmd.Get()
// Allow up to 4096 attestations at a time to be requested from the beacon node.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)) // lint:ignore uintcast -- Page size should not exceed int64 with these constants.
@@ -45,40 +49,52 @@ func configureHistoricalSlasher(cliCtx *cli.Context) {
cmdConfig.MaxRPCPageSize,
)
}
return nil
}
func configureSafeSlotsToImportOptimistically(cliCtx *cli.Context) {
func configureSafeSlotsToImportOptimistically(cliCtx *cli.Context) error {
if cliCtx.IsSet(flags.SafeSlotsToImportOptimistically.Name) {
c := params.BeaconConfig()
c := params.BeaconConfig().Copy()
c.SafeSlotsToImportOptimistically = types.Slot(cliCtx.Int(flags.SafeSlotsToImportOptimistically.Name))
params.OverrideBeaconConfig(c)
if err := params.SetActive(c); err != nil {
return err
}
}
return nil
}
func configureSlotsPerArchivedPoint(cliCtx *cli.Context) {
func configureSlotsPerArchivedPoint(cliCtx *cli.Context) error {
if cliCtx.IsSet(flags.SlotsPerArchivedPoint.Name) {
c := params.BeaconConfig()
c := params.BeaconConfig().Copy()
c.SlotsPerArchivedPoint = types.Slot(cliCtx.Int(flags.SlotsPerArchivedPoint.Name))
params.OverrideBeaconConfig(c)
if err := params.SetActive(c); err != nil {
return err
}
}
return nil
}
func configureEth1Config(cliCtx *cli.Context) {
func configureEth1Config(cliCtx *cli.Context) error {
c := params.BeaconConfig().Copy()
if cliCtx.IsSet(flags.ChainID.Name) {
c := params.BeaconConfig()
c.DepositChainID = cliCtx.Uint64(flags.ChainID.Name)
params.OverrideBeaconConfig(c)
if err := params.SetActive(c); err != nil {
return err
}
}
if cliCtx.IsSet(flags.NetworkID.Name) {
c := params.BeaconConfig()
c.DepositNetworkID = cliCtx.Uint64(flags.NetworkID.Name)
params.OverrideBeaconConfig(c)
if err := params.SetActive(c); err != nil {
return err
}
}
if cliCtx.IsSet(flags.DepositContractFlag.Name) {
c := params.BeaconConfig()
c.DepositContractAddress = cliCtx.String(flags.DepositContractFlag.Name)
params.OverrideBeaconConfig(c)
if err := params.SetActive(c); err != nil {
return err
}
}
return nil
}
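
Every setter in this file now follows the same shape: copy the active config, mutate the copy, and activate it through params.SetActive, so the shared config is never mutated in place and activation can fail loudly. The shape in isolation, with an example field:

c := params.BeaconConfig().Copy()
c.SlotsPerArchivedPoint = 2048 // illustrative override; any field works the same way
if err := params.SetActive(c); err != nil {
    return err
}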
func configureNetwork(cliCtx *cli.Context) {
@@ -94,17 +110,22 @@ func configureNetwork(cliCtx *cli.Context) {
}
}
func configureInteropConfig(cliCtx *cli.Context) {
func configureInteropConfig(cliCtx *cli.Context) error {
// an explicit chain config was specified, don't mess with it
if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return nil
}
genStateIsSet := cliCtx.IsSet(flags.InteropGenesisStateFlag.Name)
genTimeIsSet := cliCtx.IsSet(flags.InteropGenesisTimeFlag.Name)
numValsIsSet := cliCtx.IsSet(flags.InteropNumValidatorsFlag.Name)
votesIsSet := cliCtx.IsSet(flags.InteropMockEth1DataVotesFlag.Name)
if genStateIsSet || genTimeIsSet || numValsIsSet || votesIsSet {
bCfg := params.BeaconConfig()
bCfg.ConfigName = "interop"
params.OverrideBeaconConfig(bCfg)
if err := params.SetActive(params.InteropConfig().Copy()); err != nil {
return err
}
}
return nil
}
func configureExecutionSetting(cliCtx *cli.Context) error {
@@ -112,12 +133,22 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
return nil
}
c := params.BeaconConfig()
c := params.BeaconConfig().Copy()
ha := cliCtx.String(flags.SuggestedFeeRecipient.Name)
if !common.IsHexAddress(ha) {
return fmt.Errorf("%s is not a valid fee recipient address", ha)
}
c.DefaultFeeRecipient = common.HexToAddress(ha)
params.OverrideBeaconConfig(c)
return nil
mixedcaseAddress, err := common.NewMixedcaseAddressFromString(ha)
if err != nil {
return errors.Wrapf(err, "could not decode fee recipient %s", ha)
}
checksumAddress := common.HexToAddress(ha)
if !mixedcaseAddress.ValidChecksum() {
log.Warnf("Fee recipient %s is not a checksum Ethereum address. "+
"The checksummed address is %s and will be used as the fee recipient. "+
"We recommend using a mixed-case address (checksum) "+
"to prevent spelling mistakes in your fee recipient Ethereum address", ha, checksumAddress.Hex())
}
c.DefaultFeeRecipient = checksumAddress
return params.SetActive(c)
}
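
The checksum handling above can be exercised on its own with go-ethereum's common package; a small sketch (the address literal is arbitrary):

ha := "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" // lower case: valid hex, fails EIP-55
ma, err := common.NewMixedcaseAddressFromString(ha)
if err != nil {
    return errors.Wrapf(err, "could not decode fee recipient %s", ha)
}
if !ma.ValidChecksum() {
    // warn, then fall back to the canonical checksummed form
    ha = common.HexToAddress(ha).Hex()
}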

View File

@@ -26,7 +26,7 @@ func TestConfigureHistoricalSlasher(t *testing.T) {
set.Bool(flags.HistoricalSlasherNode.Name, true, "")
cliCtx := cli.NewContext(&app, set, nil)
configureHistoricalSlasher(cliCtx)
require.NoError(t, configureHistoricalSlasher(cliCtx))
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch*4, params.BeaconConfig().SlotsPerArchivedPoint)
assert.LogsContain(t, hook,
@@ -46,7 +46,7 @@ func TestConfigureSafeSlotsToImportOptimistically(t *testing.T) {
require.NoError(t, set.Set(flags.SafeSlotsToImportOptimistically.Name, strconv.Itoa(128)))
cliCtx := cli.NewContext(&app, set, nil)
configureSafeSlotsToImportOptimistically(cliCtx)
require.NoError(t, configureSafeSlotsToImportOptimistically(cliCtx))
assert.Equal(t, types.Slot(128), params.BeaconConfig().SafeSlotsToImportOptimistically)
}
@@ -60,7 +60,7 @@ func TestConfigureSlotsPerArchivedPoint(t *testing.T) {
require.NoError(t, set.Set(flags.SlotsPerArchivedPoint.Name, strconv.Itoa(100)))
cliCtx := cli.NewContext(&app, set, nil)
configureSlotsPerArchivedPoint(cliCtx)
require.NoError(t, configureSlotsPerArchivedPoint(cliCtx))
assert.Equal(t, types.Slot(100), params.BeaconConfig().SlotsPerArchivedPoint)
}
@@ -78,7 +78,7 @@ func TestConfigureProofOfWork(t *testing.T) {
require.NoError(t, set.Set(flags.DepositContractFlag.Name, "deposit-contract"))
cliCtx := cli.NewContext(&app, set, nil)
configureEth1Config(cliCtx)
require.NoError(t, configureEth1Config(cliCtx))
assert.Equal(t, uint64(100), params.BeaconConfig().DepositChainID)
assert.Equal(t, uint64(200), params.BeaconConfig().DepositNetworkID)
@@ -87,6 +87,7 @@ func TestConfigureProofOfWork(t *testing.T) {
func TestConfigureExecutionSetting(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
@@ -102,11 +103,15 @@ func TestConfigureExecutionSetting(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
assert.LogsContain(t, hook,
"is not a checksum Ethereum address",
)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
assert.Equal(t, common.HexToAddress("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"), params.BeaconConfig().DefaultFeeRecipient)
}
func TestConfigureNetwork(t *testing.T) {
@@ -193,7 +198,7 @@ func TestConfigureInterop(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
configureInteropConfig(tt.flagSetter())
require.NoError(t, configureInteropConfig(tt.flagSetter()))
assert.DeepEqual(t, tt.configName, params.BeaconConfig().ConfigName)
})
}

View File

@@ -118,16 +118,32 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
features.ConfigureBeaconChain(cliCtx)
cmd.ConfigureBeaconChain(cliCtx)
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
flags.ConfigureGlobalFlags(cliCtx)
configureChainConfig(cliCtx)
configureHistoricalSlasher(cliCtx)
configureSafeSlotsToImportOptimistically(cliCtx)
configureSlotsPerArchivedPoint(cliCtx)
configureEth1Config(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return nil, err
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return nil, err
}
if err := configureSafeSlotsToImportOptimistically(cliCtx); err != nil {
return nil, err
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return nil, err
}
if err := configureEth1Config(cliCtx); err != nil {
return nil, err
}
configureNetwork(cliCtx)
configureInteropConfig(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return nil, err
}
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
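
Each configure* call in New now short-circuits on error. Since the signatures are now uniform, the chain above could equally be driven by a loop; a hypothetical condensation (runConfigurators is not part of the PR, and configureNetwork stays outside because it still returns nothing):

// Assumes the configure* functions from this package, all now func(*cli.Context) error.
func runConfigurators(cliCtx *cli.Context) error {
	for _, f := range []func(*cli.Context) error{
		configureChainConfig,
		configureHistoricalSlasher,
		configureSafeSlotsToImportOptimistically,
		configureSlotsPerArchivedPoint,
		configureEth1Config,
		configureInteropConfig,
		configureExecutionSetting,
	} {
		if err := f(cliCtx); err != nil {
			return err
		}
	}
	return nil
}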
@@ -788,6 +804,7 @@ func (b *BeaconNode) registerRPCService() error {
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,

View File

@@ -410,7 +410,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
assert.NoError(t, err)
// Update params
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
@@ -441,7 +441,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
assert.NoError(t, err)
// Update params
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
@@ -471,7 +471,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
assert.NoError(t, err)
// Update params
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
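
The recurring params.BeaconConfig() → params.BeaconConfig().Copy() change in this and the following files exists because BeaconConfig returns the live active configuration, so mutating it in place leaks overrides across tests. The safe pattern, collected into one illustrative helper (overrideAltairEpoch is hypothetical; it could live in any _test.go file in the repo):

import (
	"testing"

	"github.com/prysmaticlabs/prysm/config/params"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)

func overrideAltairEpoch(t *testing.T, epoch types.Epoch) {
	params.SetupTestConfigCleanup(t)    // restores the previous active config at test end
	cfg := params.BeaconConfig().Copy() // deep copy: the mutation below stays local
	cfg.AltairForkEpoch = epoch
	params.OverrideBeaconConfig(cfg)
	params.BeaconConfig().InitializeForkSchedule()
}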

View File

@@ -114,6 +114,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
}
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
logrus.SetLevel(logrus.TraceLevel)
port := 2000
@@ -136,14 +137,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
UDPPort: uint(port),
}
params.SetupTestConfigCleanup(t)
var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port = 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
c := params.BeaconConfig()
c := params.BeaconConfig().Copy()
nextForkEpoch := types.Epoch(i)
c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch
params.OverrideBeaconConfig(c)
@@ -209,7 +209,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig()
c := params.BeaconConfig().Copy()
c.ForkVersionSchedule = map[[4]byte]types.Epoch{
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): 0,
{0, 0, 0, 1}: 1,
@@ -264,7 +264,7 @@ func TestAddForkEntry_Genesis(t *testing.T) {
db, err := enode.OpenDB("")
require.NoError(t, err)
bCfg := params.BeaconConfig()
bCfg := params.MainnetConfig().Copy()
bCfg.ForkVersionSchedule = map[[4]byte]types.Epoch{}
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] = bCfg.GenesisEpoch
params.OverrideBeaconConfig(bCfg)

View File

@@ -15,7 +15,7 @@ import (
func TestCorrect_ActiveValidatorsCount(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.MainnetConfig()
cfg := params.MainnetConfig().Copy()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)

View File

@@ -54,7 +54,9 @@ func init() {
for k, v := range gossipTopicMappings {
GossipTypeMapping[reflect.TypeOf(v)] = k
}
// Specially handle Altair Objects.
// Specially handle Altair objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
// Specially handle Bellatrix objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBlindedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
}
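
All three special-cased block flavors above resolve to the block subnet topic. A quick sanity test of the reverse lookup, assuming it sits in the same package as GossipTypeMapping (the test itself is illustrative, not part of the PR):

import (
	"reflect"
	"testing"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

func TestBellatrixBlocksMapToBlockTopic(t *testing.T) {
	for _, msg := range []interface{}{
		&ethpb.SignedBeaconBlockAltair{},
		&ethpb.SignedBeaconBlockBellatrix{},
		&ethpb.SignedBlindedBeaconBlockBellatrix{},
	} {
		if GossipTypeMapping[reflect.TypeOf(msg)] != BlockSubnetTopicFormat {
			t.Errorf("%T is not mapped to the block subnet topic", msg)
		}
	}
}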

View File

@@ -12,6 +12,7 @@ import (
)
func TestMappingHasNoDuplicates(t *testing.T) {
params.SetupTestConfigCleanup(t)
m := make(map[reflect.Type]bool)
for _, v := range gossipTopicMappings {
if _, ok := m[reflect.TypeOf(v)]; ok {
@@ -23,7 +24,7 @@ func TestMappingHasNoDuplicates(t *testing.T) {
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
params.SetupTestConfigCleanup(t)
bCfg := params.BeaconConfig()
bCfg := params.BeaconConfig().Copy()
altairForkEpoch := eth2types.Epoch(100)
BellatrixForkEpoch := eth2types.Epoch(200)

View File

@@ -17,6 +17,7 @@ import (
)
func TestMsgID_HashesCorrectly(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
assert.NoError(t, err)
@@ -36,6 +37,7 @@ func TestMsgID_HashesCorrectly(t *testing.T) {
}
func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genesisValidatorsRoot)
assert.NoError(t, err)
@@ -65,6 +67,7 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
}
func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot)
assert.NoError(t, err)
@@ -94,6 +97,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
}
func TestMsgID_WithNilTopic(t *testing.T) {
params.SetupTestConfigCleanup(t)
msg := &pubsubpb.Message{
Data: make([]byte, 32),
Topic: nil,

View File

@@ -17,6 +17,7 @@ import (
)
func TestPrivateKeyLoading(t *testing.T) {
params.SetupTestConfigCleanup(t)
file, err := os.CreateTemp(t.TempDir(), "key")
require.NoError(t, err)
key, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
@@ -44,6 +45,7 @@ func TestPrivateKeyLoading(t *testing.T) {
}
func TestIPV6Support(t *testing.T) {
params.SetupTestConfigCleanup(t)
key, err := gethCrypto.GenerateKey()
require.NoError(t, err)
db, err := enode.OpenDB("")

View File

@@ -4,10 +4,12 @@ import (
"testing"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
)
func TestOverlayParameters(t *testing.T) {
params.SetupTestConfigCleanup(t)
pms := pubsubGossipParam()
assert.Equal(t, gossipSubD, pms.D, "gossipSubD")
assert.Equal(t, gossipSubDlo, pms.Dlo, "gossipSubDlo")
@@ -15,6 +17,7 @@ func TestOverlayParameters(t *testing.T) {
}
func TestGossipParameters(t *testing.T) {
params.SetupTestConfigCleanup(t)
setPubSubParameters()
pms := pubsubGossipParam()
assert.Equal(t, gossipSubMcacheLen, pms.HistoryLength, "gossipSubMcacheLen")
@@ -23,6 +26,7 @@ func TestGossipParameters(t *testing.T) {
}
func TestFanoutParameters(t *testing.T) {
params.SetupTestConfigCleanup(t)
pms := pubsubGossipParam()
if pms.FanoutTTL != gossipSubFanoutTTL {
t.Errorf("gossipSubFanoutTTL, wanted: %d, got: %d", gossipSubFanoutTTL, pms.FanoutTTL)
@@ -30,6 +34,7 @@ func TestFanoutParameters(t *testing.T) {
}
func TestHeartbeatParameters(t *testing.T) {
params.SetupTestConfigCleanup(t)
pms := pubsubGossipParam()
if pms.HeartbeatInterval != gossipSubHeartbeatInterval {
t.Errorf("gossipSubHeartbeatInterval, wanted: %d, got: %d", gossipSubHeartbeatInterval, pms.HeartbeatInterval)
@@ -37,6 +42,7 @@ func TestHeartbeatParameters(t *testing.T) {
}
func TestMiscParameters(t *testing.T) {
params.SetupTestConfigCleanup(t)
setPubSubParameters()
assert.Equal(t, randomSubD, pubsub.RandomSubD, "randomSubD")
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/network/forks"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -21,6 +22,7 @@ import (
)
func TestService_CanSubscribe(t *testing.T) {
params.SetupTestConfigCleanup(t)
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
@@ -115,11 +117,13 @@ func TestService_CanSubscribe(t *testing.T) {
}
func TestService_CanSubscribe_uninitialized(t *testing.T) {
params.SetupTestConfigCleanup(t)
s := &Service{}
require.Equal(t, false, s.CanSubscribe("foo"))
}
func Test_scanfcheck(t *testing.T) {
params.SetupTestConfigCleanup(t)
type args struct {
input string
format string
@@ -191,6 +195,7 @@ func Test_scanfcheck(t *testing.T) {
}
func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testing.T) {
params.SetupTestConfigCleanup(t)
// scanfcheck only supports integer based substitutions at the moment. Any others will
// inaccurately fail validation.
for _, topic := range AllTopics() {
@@ -208,6 +213,7 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
}
func TestService_FilterIncomingSubscriptions(t *testing.T) {
params.SetupTestConfigCleanup(t)
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
valRoot := [32]byte{}
@@ -328,6 +334,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
}
func TestService_MonitorsStateForkUpdates(t *testing.T) {
params.SetupTestConfigCleanup(t)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
notifier := &mock.MockStateNotifier{}

View File

@@ -15,6 +15,7 @@ import (
)
func TestVerifyRPCMappings(t *testing.T) {
params.SetupTestConfigCleanup(t)
assert.NoError(t, VerifyTopicMapping(RPCStatusTopicV1, &pb.Status{}), "Failed to verify status rpc topic")
assert.NotNil(t, VerifyTopicMapping(RPCStatusTopicV1, new([]byte)), "Incorrect message type verified for status rpc topic")
@@ -25,6 +26,7 @@ func TestVerifyRPCMappings(t *testing.T) {
}
func TestTopicDeconstructor(t *testing.T) {
params.SetupTestConfigCleanup(t)
tt := []struct {
name string
topic string
@@ -81,7 +83,7 @@ func TestTopicDeconstructor(t *testing.T) {
func TestTopicFromMessage_CorrectType(t *testing.T) {
params.SetupTestConfigCleanup(t)
bCfg := params.BeaconConfig()
bCfg := params.BeaconConfig().Copy()
forkEpoch := eth2types.Epoch(100)
bCfg.AltairForkEpoch = forkEpoch
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = eth2types.Epoch(100)

View File

@@ -6,17 +6,18 @@ import (
"testing"
"time"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/libp2p/go-libp2p-core/network"
testp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"google.golang.org/protobuf/proto"
)
func TestService_Send(t *testing.T) {
params.SetupTestConfigCleanup(t)
p1 := testp2p.NewTestP2P(t)
p2 := testp2p.NewTestP2P(t)
p1.Connect(p2)

View File

@@ -22,6 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/network/forks"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -81,6 +82,7 @@ func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) {
}
func TestService_Stop_SetsStartedToFalse(t *testing.T) {
params.SetupTestConfigCleanup(t)
s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
require.NoError(t, err)
s.started = true
@@ -90,12 +92,14 @@ func TestService_Stop_SetsStartedToFalse(t *testing.T) {
}
func TestService_Stop_DontPanicIfDv5ListenerIsNotInited(t *testing.T) {
params.SetupTestConfigCleanup(t)
s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
require.NoError(t, err)
assert.NoError(t, s.Stop())
}
func TestService_Start_OnlyStartsOnce(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
cfg := &Config{
@@ -131,12 +135,14 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
}
func TestService_Status_NotRunning(t *testing.T) {
params.SetupTestConfigCleanup(t)
s := &Service{started: false}
s.dv5Listener = &mockListener{}
assert.ErrorContains(t, "not running", s.Status(), "Status returned wrong error")
}
func TestService_Status_NoGenesisTimeSet(t *testing.T) {
params.SetupTestConfigCleanup(t)
s := &Service{started: true}
s.dv5Listener = &mockListener{}
assert.ErrorContains(t, "no genesis time set", s.Status(), "Status returned wrong error")
@@ -147,6 +153,7 @@ func TestService_Status_NoGenesisTimeSet(t *testing.T) {
}
func TestListenForNewNodes(t *testing.T) {
params.SetupTestConfigCleanup(t)
// Setup bootnode.
notifier := &mock.MockStateNotifier{}
cfg := &Config{StateNotifier: notifier}
@@ -241,6 +248,7 @@ func TestListenForNewNodes(t *testing.T) {
}
func TestPeer_Disconnect(t *testing.T) {
params.SetupTestConfigCleanup(t)
h1, _, _ := createHost(t, 5000)
defer func() {
if err := h1.Close(); err != nil {
@@ -271,6 +279,7 @@ func TestPeer_Disconnect(t *testing.T) {
}
func TestService_JoinLeaveTopic(t *testing.T) {
params.SetupTestConfigCleanup(t)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}})
@@ -329,6 +338,7 @@ func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.
}
func TestService_connectWithPeer(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
peers *peers.Status

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -24,6 +25,7 @@ import (
)
func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
// This test needs to be entirely rewritten and should be done in a follow up PR from #7885.
t.Skip("This test is now failing after PR 7885 due to false positive")
gFlags := new(flags.GlobalFlags)
@@ -146,6 +148,7 @@ func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
}
func Test_AttSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
record func(t *testing.T) *enr.Record
@@ -330,6 +333,7 @@ func Test_AttSubnets(t *testing.T) {
}
func Test_SyncSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
record func(t *testing.T) *enr.Record

View File

@@ -24,7 +24,7 @@ func TestInitializeDataMaps(t *testing.T) {
{
name: "fork version changes",
action: func() {
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.GenesisForkVersion = []byte{0x01, 0x02, 0x00, 0x00}
params.OverrideBeaconConfig(cfg)
},
@@ -33,7 +33,7 @@ func TestInitializeDataMaps(t *testing.T) {
{
name: "fork version changes with reset",
action: func() {
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.GenesisForkVersion = []byte{0x01, 0x02, 0x00, 0x00}
params.OverrideBeaconConfig(cfg)
InitializeDataMaps()

View File

@@ -6,6 +6,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
@@ -14,6 +15,7 @@ import (
// Test `verifyConnectivity` function by trying to connect to google.com (successfully)
// and then by connecting to an unreachable IP and ensuring that a log is emitted
func TestVerifyConnectivity(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
cases := []struct {
address string
@@ -39,6 +41,7 @@ func TestVerifyConnectivity(t *testing.T) {
}
func TestSerializeENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
t.Run("Ok", func(t *testing.T) {
key, err := crypto.GenerateKey()
require.NoError(t, err)

View File

@@ -26,7 +26,7 @@ const pubKeyErr = "could not convert bytes to public key"
func TestDepositContractAddress_EmptyAddress(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config := params.BeaconConfig().Copy()
config.DepositContractAddress = ""
params.OverrideBeaconConfig(config)
@@ -36,7 +36,7 @@ func TestDepositContractAddress_EmptyAddress(t *testing.T) {
func TestDepositContractAddress_NotHexAddress(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config := params.BeaconConfig().Copy()
config.DepositContractAddress = "abc?!"
params.OverrideBeaconConfig(config)

View File

@@ -81,8 +81,6 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
switch result.Status {
case pb.PayloadStatus_INVALID_BLOCK_HASH:
return nil, fmt.Errorf("could not validate block hash: %v", result.ValidationError)
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, fmt.Errorf("could not satisfy terminal block condition: %v", result.ValidationError)
case pb.PayloadStatus_ACCEPTED, pb.PayloadStatus_SYNCING:
return nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:
@@ -119,8 +117,6 @@ func (s *Service) ForkchoiceUpdated(
}
resp := result.Status
switch resp.Status {
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, nil, fmt.Errorf("could not satisfy terminal block condition: %v", resp.ValidationError)
case pb.PayloadStatus_SYNCING:
return nil, nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:

View File

@@ -217,27 +217,6 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(ForkchoiceUpdatedMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedInvalidTerminalBlockResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(NewPayloadMethod+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
@@ -274,18 +253,6 @@ func TestClient_HTTP(t *testing.T) {
require.ErrorContains(t, "could not validate block hash", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["InvalidTerminalBlockStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadSetup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
@@ -521,7 +488,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.TerminalTotalDifficulty = tt.paramsTd
params.OverrideBeaconConfig(cfg)
var m map[[32]byte]*pb.ExecutionBlock
@@ -819,13 +786,6 @@ func fixtures() map[string]interface{} {
},
PayloadId: &id,
}
forkChoiceInvalidTerminalBlockResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
},
PayloadId: &id,
}
forkChoiceAcceptedResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
@@ -856,10 +816,6 @@ func fixtures() map[string]interface{} {
Status: pb.PayloadStatus_INVALID_BLOCK_HASH,
LatestValidHash: nil,
}
inValidTerminalBlockStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
}
acceptedStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
LatestValidHash: nil,
@@ -877,21 +833,19 @@ func fixtures() map[string]interface{} {
LatestValidHash: foo[:],
}
return map[string]interface{}{
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"InvalidTerminalBlockStatus": inValidTerminalBlockStatus,
"AcceptedStatus": acceptedStatus,
"SyncingStatus": syncingStatus,
"InvalidStatus": invalidStatus,
"UnknownStatus": unknownStatus,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedInvalidTerminalBlockResponse": forkChoiceInvalidTerminalBlockResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"AcceptedStatus": acceptedStatus,
"SyncingStatus": syncingStatus,
"InvalidStatus": invalidStatus,
"UnknownStatus": unknownStatus,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
}
}

View File

@@ -6,7 +6,7 @@ import (
func init() {
// Override network name so that hardcoded genesis files are not loaded.
cfg := params.BeaconConfig()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
}

View File

@@ -236,7 +236,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) {
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
bConfig := params.MinimalSpecConfig()
bConfig := params.MinimalSpecConfig().Copy()
bConfig.MinGenesisTime = 0
params.OverrideBeaconConfig(bConfig)
@@ -284,7 +284,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) {
func TestProcessETH2GenesisLog(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.GenesisDelay = 0
params.OverrideBeaconConfig(cfg)
hook := logTest.NewGlobal()
@@ -310,7 +310,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
web3Service.depositContractCaller, err = contracts.NewDepositContractCaller(testAcc.ContractAddr, testAcc.Backend)
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
bConfig := params.MinimalSpecConfig()
bConfig := params.MinimalSpecConfig().Copy()
bConfig.MinGenesisTime = 0
params.OverrideBeaconConfig(bConfig)
@@ -378,6 +378,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
}
func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
testAcc, err := mock.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
@@ -406,8 +407,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
web3Service.latestEth1Data.LastRequestedBlock = 0
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
params.SetupTestConfigCleanup(t)
bConfig := params.MinimalSpecConfig()
bConfig := params.MinimalSpecConfig().Copy()
bConfig.MinGenesisTime = 0
bConfig.SecondsPerETH1Block = 10
params.OverrideBeaconConfig(bConfig)
@@ -476,6 +476,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
}
func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
testAcc, err := mock.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
@@ -504,8 +505,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
web3Service.latestEth1Data.LastRequestedBlock = 0
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
params.SetupTestConfigCleanup(t)
bConfig := params.MinimalSpecConfig()
bConfig := params.MinimalSpecConfig().Copy()
bConfig.SecondsPerETH1Block = 10
params.OverrideBeaconConfig(bConfig)
nConfig := params.BeaconNetworkConfig()
@@ -552,7 +552,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
// Set the genesis time 500 blocks ahead of the last
// deposit log.
bConfig = params.MinimalSpecConfig()
bConfig = params.MinimalSpecConfig().Copy()
bConfig.MinGenesisTime = wantedGenesisTime - 10
params.OverrideBeaconConfig(bConfig)
@@ -616,7 +616,7 @@ func newPowchainService(t *testing.T, eth1Backend *mock.TestAccount, beaconDB db
web3Service.eth1DataFetcher = &goodFetcher{backend: eth1Backend.Backend}
web3Service.httpLogger = &goodLogger{backend: eth1Backend.Backend}
params.SetupTestConfigCleanup(t)
bConfig := params.MinimalSpecConfig()
bConfig := params.MinimalSpecConfig().Copy()
bConfig.MinGenesisTime = 0
params.OverrideBeaconConfig(bConfig)
return web3Service

View File

@@ -70,7 +70,9 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
continue
}
// Close previous client, if connection was successful.
currClient.Close()
if currClient != nil {
currClient.Close()
}
log.Infof("Connected to new endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
return
case <-s.ctx.Done():
@@ -92,7 +94,9 @@ func (s *Service) retryExecutionClientConnection(ctx context.Context, err error)
return
}
// Close previous client, if connection was successful.
currClient.Close()
if currClient != nil {
currClient.Close()
}
// Reset run error in the event of a successful connection.
s.runError = nil
}
@@ -114,7 +118,9 @@ func (s *Service) checkDefaultEndpoint(ctx context.Context) {
return
}
// Close previous client, if connection was successful.
currClient.Close()
if currClient != nil {
currClient.Close()
}
s.updateCurrHttpEndpoint(primaryEndpoint)
}
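
The three guards above are deliberately written against the concrete client pointer. Hiding the check behind an interface-typed helper would silently break, because a typed-nil pointer stored in an interface compares non-nil; a runnable demonstration of the pitfall (toy client type, unrelated to the PR's code):

package main

import "fmt"

type client struct{}

func (c *client) Close() { fmt.Println("closed") }

func main() {
	var c *client // nil concrete pointer, as after a failed dial
	if c != nil { // the guard used above: correctly skipped
		c.Close()
	}
	var i interface{ Close() } = c
	fmt.Println(i != nil) // prints true: a typed-nil inside an interface is non-nil
}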

View File

@@ -767,9 +767,12 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
if eth1DataInDB == nil {
return nil
}
s.depositTrie = trie.CreateTrieFromProto(eth1DataInDB.Trie)
s.chainStartData = eth1DataInDB.ChainstartData
var err error
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
if err != nil {
return err
}
s.chainStartData = eth1DataInDB.ChainstartData
if !reflect.ValueOf(eth1DataInDB.BeaconState).IsZero() {
s.preGenesisState, err = v1.InitializeFromProto(eth1DataInDB.BeaconState)
if err != nil {

View File

@@ -252,7 +252,7 @@ func TestFollowBlock_OK(t *testing.T) {
// simulated backend sets eth1 block
// time as 10 seconds
params.SetupTestConfigCleanup(t)
conf := params.BeaconConfig()
conf := params.BeaconConfig().Copy()
conf.SecondsPerETH1Block = 10
params.OverrideBeaconConfig(conf)
@@ -340,7 +340,7 @@ func TestLogTillGenesis_OK(t *testing.T) {
}()
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.Eth1FollowDistance = 5
params.OverrideBeaconConfig(cfg)
@@ -506,7 +506,7 @@ func TestNewService_EarliestVotingBlock(t *testing.T) {
// simulated backend sets eth1 block
// time as 10 seconds
params.SetupTestConfigCleanup(t)
conf := params.BeaconConfig()
conf := params.BeaconConfig().Copy()
conf.SecondsPerETH1Block = 10
conf.Eth1FollowDistance = 50
params.OverrideBeaconConfig(conf)

View File

@@ -28,6 +28,7 @@ type EngineClient struct {
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
TerminalBlockHash []byte
TerminalBlockHashExists bool
OverrideValidHash [32]byte
}
// NewPayload --
@@ -37,8 +38,11 @@ func (e *EngineClient) NewPayload(_ context.Context, _ *pb.ExecutionPayload) ([]
// ForkchoiceUpdated --
func (e *EngineClient) ForkchoiceUpdated(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
_ context.Context, fcs *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error) {
if e.OverrideValidHash != [32]byte{} && bytesutil.ToBytes32(fcs.HeadBlockHash) == e.OverrideValidHash {
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, nil
}
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, e.ErrForkchoiceUpdated
}
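
The new OverrideValidHash field lets a test force the mock to report success for one specific head while every other forkchoice call still returns the stubbed error. A hypothetical usage sketch (the engine proto import path is a guess based on the pb alias this package uses):

import (
	"context"
	"errors"
	"testing"

	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)

func TestForkchoiceUpdated_OverrideValidHash(t *testing.T) {
	e := &EngineClient{
		ErrForkchoiceUpdated: errors.New("stubbed failure"),
		OverrideValidHash:    bytesutil.ToBytes32([]byte("known-valid-head")),
	}
	fcs := &pb.ForkchoiceState{HeadBlockHash: []byte("known-valid-head")}
	if _, _, err := e.ForkchoiceUpdated(context.Background(), fcs, nil); err != nil {
		t.Fatalf("expected the override to suppress the stubbed error, got: %v", err)
	}
}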

View File

@@ -16,44 +16,78 @@ import (
"github.com/r3labs/sse"
)
const (
versionHeader = "Eth-Consensus-Version"
grpcVersionHeader = "Grpc-metadata-Eth-Consensus-Version"
)
type sszConfig struct {
sszPath string
fileName string
responseJson sszResponseJson
responseJson sszResponse
}
func handleGetBeaconStateSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v1/debug/beacon/states/{state_id}/ssz",
fileName: "beacon_state.ssz",
responseJson: &beaconStateSSZResponseJson{},
responseJson: &sszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleGetBeaconBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v1/beacon/blocks/{block_id}/ssz",
fileName: "beacon_block.ssz",
responseJson: &blockSSZResponseJson{},
responseJson: &sszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleGetBeaconStateSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v2/debug/beacon/states/{state_id}/ssz",
fileName: "beacon_state.ssz",
responseJson: &beaconStateSSZResponseV2Json{},
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleGetBeaconBlockSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v2/beacon/blocks/{block_id}/ssz",
fileName: "beacon_block.ssz",
responseJson: &blockSSZResponseV2Json{},
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleSubmitBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
return handlePostSSZ(m, endpoint, w, req, sszConfig{})
}
func handleSubmitBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
return handlePostSSZ(m, endpoint, w, req, sszConfig{})
}
func handleProduceBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "produce_beacon_block.ssz",
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleProduceBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
config := sszConfig{
fileName: "produce_blinded_beacon_block.ssz",
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
@@ -69,7 +103,7 @@ func handleGetSSZ(
return false
}
if errJson := prepareSSZRequestForProxying(m, endpoint, req, config.sszPath); errJson != nil {
if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
@@ -112,6 +146,53 @@ func handleGetSSZ(
return true
}
func handlePostSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
config sszConfig,
) (handled bool) {
if !sszPosted(req) {
return false
}
if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
prepareCustomHeaders(req)
if errJson := preparePostedSSZData(req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponse, errJson := m.ProxyRequest(req)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponseBody, errJson := apimiddleware.ReadGrpcResponseBody(grpcResponse.Body)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
if respHasError {
return true
}
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
return true
}
func sszRequested(req *http.Request) bool {
accept, ok := req.Header["Accept"]
if !ok {
@@ -125,24 +206,57 @@ func sszRequested(req *http.Request) bool {
return false
}
func prepareSSZRequestForProxying(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
req *http.Request, sszPath string,
) apimiddleware.ErrorJson {
func sszPosted(req *http.Request) bool {
ct, ok := req.Header["Content-Type"]
if !ok {
return false
}
if len(ct) != 1 {
return false
}
return ct[0] == "application/octet-stream"
}
func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
req.URL.Scheme = "http"
req.URL.Host = m.GatewayAddress
req.RequestURI = ""
req.URL.Path = sszPath
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, []string{}); errJson != nil {
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, endpoint.RequestURLLiterals); errJson != nil {
return errJson
}
// We have to add the prefix after handling parameters because adding the prefix changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path
if errJson := apimiddleware.HandleQueryParameters(req, endpoint.RequestQueryParams); errJson != nil {
return errJson
}
// We have to add new segments after handling parameters because it changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path + "/ssz"
return nil
}
func serializeMiddlewareResponseIntoSSZ(respJson sszResponseJson) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
func prepareCustomHeaders(req *http.Request) {
ver := req.Header.Get(versionHeader)
if ver != "" {
req.Header.Del(versionHeader)
req.Header.Add(grpcVersionHeader, ver)
}
}
func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
buf, err := io.ReadAll(req.Body)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
}
j := sszRequestJson{Data: base64.StdEncoding.EncodeToString(buf)}
data, err := json.Marshal(j)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not prepare POST data")
}
req.Body = io.NopCloser(bytes.NewBuffer(data))
req.ContentLength = int64(len(data))
req.Header.Set("Content-Type", "application/json")
return nil
}
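
Standalone illustration of the POST transformation above: raw SSZ bytes from an application/octet-stream body become a {"data":"<base64>"} envelope for the JSON gateway. For the 4-byte body "body" this yields exactly 19 bytes, which is the ContentLength the accompanying middleware test asserts (stdlib only):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte("body")
	j, _ := json.Marshal(struct {
		Data string `json:"data"`
	}{Data: base64.StdEncoding.EncodeToString(raw)})
	fmt.Println(string(j), len(j)) // {"data":"Ym9keQ=="} 19
}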
func serializeMiddlewareResponseIntoSSZ(respJson sszResponse) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
// Serialize the SSZ part of the deserialized value.
data, err := base64.StdEncoding.DecodeString(respJson.SSZData())
if err != nil {
@@ -168,7 +282,7 @@ func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWrite
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
w.Header().Set("Eth-Consensus-Version", respVersion)
w.Header().Set(versionHeader, respVersion)
if statusCodeHeader != "" {
code, err := strconv.Atoi(statusCodeHeader)
if err != nil {

View File

@@ -70,11 +70,21 @@ func TestPrepareSSZRequestForProxying(t *testing.T) {
var body bytes.Buffer
request := httptest.NewRequest("GET", "http://foo.example", &body)
errJson := prepareSSZRequestForProxying(middleware, endpoint, request, "/ssz")
errJson := prepareSSZRequestForProxying(middleware, endpoint, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "/internal/ssz", request.URL.Path)
}
func TestPreparePostedSszData(t *testing.T) {
var body bytes.Buffer
body.Write([]byte("body"))
request := httptest.NewRequest("POST", "http://foo.example", &body)
preparePostedSSZData(request)
assert.Equal(t, int64(19), request.ContentLength)
assert.Equal(t, "application/json", request.Header.Get("Content-Type"))
}
func TestSerializeMiddlewareResponseIntoSSZ(t *testing.T) {
t.Run("ok", func(t *testing.T) {
j := testSSZResponseJson{
@@ -133,7 +143,7 @@ func TestWriteSSZResponseHeaderAndBody(t *testing.T) {
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "attachment; filename=test.ssz", v[0])
v, ok = writer.Header()["Eth-Consensus-Version"]
v, ok = writer.Header()[versionHeader]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "version", v[0])

View File

@@ -364,7 +364,7 @@ func TestWrapSignedContributionAndProofsArray(t *testing.T) {
func TestSetInitialPublishBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
params.OverrideBeaconConfig(cfg)
@@ -470,7 +470,7 @@ func TestPreparePublishedBlock(t *testing.T) {
func TestSetInitialPublishBlindedBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg := params.BeaconConfig().Copy()
cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
params.OverrideBeaconConfig(cfg)

View File

@@ -110,11 +110,13 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlockPostRequest,
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlockSSZ}
case "/eth/v1/beacon/blinded_blocks":
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlindedBlockPostRequest,
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlindedBlockSSZ}
case "/eth/v1/beacon/blocks/{block_id}":
endpoint.GetResponse = &blockResponseJson{}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZ}
@@ -221,6 +223,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedV2Block,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlockSSZ}
case "/eth/v1/validator/blinded_blocks/{slot}":
endpoint.GetResponse = &produceBlindedBlockResponseJson{}
endpoint.RequestURLLiterals = []string{"slot"}
@@ -228,6 +231,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
case "/eth/v1/validator/attestation_data":
endpoint.GetResponse = &produceAttestationDataResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}

View File

@@ -7,6 +7,10 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
)
//----------------
// Requests and responses.
//----------------
// genesisResponseJson is used in /beacon/genesis API endpoint.
type genesisResponseJson struct {
Data *genesisResponse_GenesisJson `json:"data"`
@@ -457,7 +461,7 @@ type blindedBeaconBlockBodyBellatrixJson struct {
Deposits []*depositJson `json:"deposits"`
VoluntaryExits []*signedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *syncAggregateJson `json:"sync_aggregate"`
ExecutionPayloadHeader *executionPayloadHeaderJson `json:"execution_payload"`
ExecutionPayloadHeader *executionPayloadHeaderJson `json:"execution_payload_header"`
}
type executionPayloadJson struct {
@@ -489,7 +493,7 @@ type executionPayloadHeaderJson struct {
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
BlockHash string `json:"block_hash" hex:"true"`
TransactionsRoot string `json:"transactions_root" hex:"true"`
}
@@ -833,63 +837,38 @@ type syncCommitteeContributionJson struct {
// SSZ
// ---------------
// sszResponseJson is a common abstraction over all SSZ responses.
type sszResponseJson interface {
type sszRequestJson struct {
Data string `json:"data"`
}
// sszResponse is a common abstraction over all SSZ responses.
type sszResponse interface {
SSZVersion() string
SSZData() string
}
// blockSSZResponseJson is used in /beacon/blocks/{block_id} API endpoint.
type blockSSZResponseJson struct {
type sszResponseJson struct {
Data string `json:"data"`
}
func (ssz *blockSSZResponseJson) SSZData() string {
func (ssz *sszResponseJson) SSZData() string {
return ssz.Data
}
func (*blockSSZResponseJson) SSZVersion() string {
func (*sszResponseJson) SSZVersion() string {
return strings.ToLower(ethpbv2.Version_PHASE0.String())
}
// blockSSZResponseV2Json is used in /v2/beacon/blocks/{block_id} API endpoint.
type blockSSZResponseV2Json struct {
type versionedSSZResponseJson struct {
Version string `json:"version"`
Data string `json:"data"`
}
func (ssz *blockSSZResponseV2Json) SSZData() string {
func (ssz *versionedSSZResponseJson) SSZData() string {
return ssz.Data
}
func (ssz *blockSSZResponseV2Json) SSZVersion() string {
return ssz.Version
}
// beaconStateSSZResponseJson is used in /debug/beacon/states/{state_id} API endpoint.
type beaconStateSSZResponseJson struct {
Data string `json:"data"`
}
func (ssz *beaconStateSSZResponseJson) SSZData() string {
return ssz.Data
}
func (*beaconStateSSZResponseJson) SSZVersion() string {
return strings.ToLower(ethpbv2.Version_PHASE0.String())
}
// beaconStateSSZResponseV2Json is used in /v2/debug/beacon/states/{state_id} API endpoint.
type beaconStateSSZResponseV2Json struct {
Version string `json:"version"`
Data string `json:"data"`
}
func (ssz *beaconStateSSZResponseV2Json) SSZData() string {
return ssz.Data
}
func (ssz *beaconStateSSZResponseV2Json) SSZVersion() string {
func (ssz *versionedSSZResponseJson) SSZVersion() string {
return ssz.Version
}
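
With the four response types collapsed into two, callers can depend on the sszResponse interface alone; a sketch of such a consumer (describe is illustrative, not part of the PR):

import "encoding/base64"

// describe works for both sszResponseJson and versionedSSZResponseJson,
// since it touches only the sszResponse interface defined above.
func describe(r sszResponse) (version string, sszLen int, err error) {
	ssz, err := base64.StdEncoding.DecodeString(r.SSZData())
	if err != nil {
		return "", 0, err
	}
	return r.SSZVersion(), len(ssz), nil
}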

View File

@@ -44,6 +44,7 @@ go_library(
"//consensus-types/wrapper:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
@@ -56,6 +57,7 @@ go_library(
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
@@ -114,6 +116,7 @@ go_test(
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],

View File

@@ -18,16 +18,21 @@ import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/network/forks"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
"github.com/prysmaticlabs/prysm/proto/migration"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
const versionHeader = "eth-consensus-version"
// blockIdParseError represents an error scenario where a block ID could not be parsed.
type blockIdParseError struct {
message string
@@ -109,7 +114,7 @@ func (bs *Server) GetBlockHeader(ctx context.Context, req *ethpbv1.BlockRequest)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoot)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -175,7 +180,7 @@ func (bs *Server) ListBlockHeaders(ctx context.Context, req *ethpbv1.BlockHeader
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
if !isOptimistic {
isOptimistic, err = bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
isOptimistic, err = bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -223,6 +228,45 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
return &emptypb.Empty{}, nil
}
// SubmitBlockSSZ instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be
// included in the beacon chain. The beacon node is not required to validate the signed BeaconBlock, and a successful
// response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the
// new block into its state, and therefore validate the block internally, however blocks which fail the validation are
// still broadcast but a different status code is returned (202).
//
// The provided block must be SSZ-serialized.
func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlockSSZ")
defer span.End()
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
ver := md.Get(versionHeader)
if len(ver) == 0 {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
forkVer, err := schedule.VersionForName(ver[0])
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
}
unmarshaler, err := detect.FromForkVersion(forkVer)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
}
block, err := unmarshaler.UnmarshalBeaconBlock(req.Data)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
}
root, err := block.Block().HashTreeRoot()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
}
return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
}
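
A sketch of the caller side of SubmitBlockSSZ: the fork name travels in outgoing gRPC metadata under the lower-case version header read above, and the block travels as raw SSZ. The submitter shim stands in for the generated client, whose exact interface name is an assumption:

import (
	"context"

	ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
	"google.golang.org/grpc/metadata"
	"google.golang.org/protobuf/types/known/emptypb"
)

// submitter abstracts the generated gRPC client (hypothetical local shim).
type submitter interface {
	SubmitBlockSSZ(ctx context.Context, in *ethpbv2.SSZContainer) (*emptypb.Empty, error)
}

func submitPhase0Block(ctx context.Context, c submitter, ssz []byte) error {
	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("eth-consensus-version", "phase0"))
	_, err := c.SubmitBlockSSZ(ctx, &ethpbv2.SSZContainer{Data: ssz})
	return err
}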
// SubmitBlindedBlock instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
// and publish a `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
@@ -259,6 +303,48 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
return &emptypb.Empty{}, nil
}
// SubmitBlindedBlockSSZ instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
// and publish a `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
// to be included in the beacon chain. The beacon node is not required to validate the signed
// `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
// successful. The beacon node is expected to integrate the new block into its state, and
// therefore validate the block internally, however blocks which fail the validation are still
// broadcast but a different status code is returned (202).
//
// The provided block must be SSZ-serialized.
func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlockSSZ")
defer span.End()
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
ver := md.Get(versionHeader)
if len(ver) == 0 {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
forkVer, err := schedule.VersionForName(ver[0])
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
}
unmarshaler, err := detect.FromForkVersion(forkVer)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
}
block, err := unmarshaler.UnmarshalBlindedBeaconBlock(req.Data)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
}
root, err := block.Block().HashTreeRoot()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
}
return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
}
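
Both SSZ submit endpoints share the same decode pipeline: fork name from the version header, fork version from the ordered schedule, a versioned unmarshaler from detect, then the block. Collected as one hedged sketch (decodeFullBlock is hypothetical; the return type is the block interface these handlers call Block() on):

import (
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
	"github.com/prysmaticlabs/prysm/network/forks"
)

func decodeFullBlock(forkName string, ssz []byte) (interfaces.SignedBeaconBlock, error) {
	ver, err := forks.NewOrderedSchedule(params.BeaconConfig()).VersionForName(forkName)
	if err != nil {
		return nil, err
	}
	vu, err := detect.FromForkVersion(ver)
	if err != nil {
		return nil, err
	}
	return vu.UnmarshalBeaconBlock(ssz)
}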
// GetBlock retrieves block details for given block ID.
func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlock")
@@ -371,7 +457,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -393,7 +479,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
}
// GetBlockSSZV2 returns the SSZ-serialized version of the beacon block for given block ID.
func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.BlockSSZResponseV2, error) {
func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockSSZV2")
defer span.End()
@@ -413,7 +499,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
}
return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_PHASE0, Data: sszBlock}, nil
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_PHASE0, Data: sszBlock}, nil
}
// ErrUnsupportedPhase0Block means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedPhase0Block) {
@@ -437,7 +523,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
}
return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_ALTAIR, Data: sszData}, nil
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_ALTAIR, Data: sszData}, nil
}
// ErrUnsupportedAltairBlock means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedAltairBlock) {
@@ -461,7 +547,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
}
return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_BELLATRIX, Data: sszData}, nil
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_BELLATRIX, Data: sszData}, nil
}
// ErrUnsupportedBellatrixBlock means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedBellatrixBlock) {
@@ -543,7 +629,7 @@ func (bs *Server) GetBlockRoot(ctx context.Context, req *ethpbv1.BlockRequest) (
}
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -616,7 +702,7 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
@@ -21,6 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"google.golang.org/grpc/metadata"
)
func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (*ethpbalpha.SignedBeaconBlock, []*ethpbalpha.BeaconBlockContainer) {
@@ -187,9 +189,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
tests := []struct {
@@ -287,9 +290,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
header, err := bs.GetBlockHeader(ctx, &ethpbv1.BlockRequest{BlockId: []byte("head")})
require.NoError(t, err)
@@ -312,9 +316,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
b2 := util.NewBeaconBlock()
@@ -416,9 +421,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
slot := types.Slot(30)
headers, err := bs.ListBlockHeaders(ctx, &ethpbv1.BlockHeadersRequest{
@@ -557,6 +563,289 @@ func TestServer_SubmitBlock_OK(t *testing.T) {
})
}
func TestServer_SubmitBlockSSZ_OK(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "phase0")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
req.Block.ParentRoot = bsRoot[:]
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "altair")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Bellatrix", func(t *testing.T) {
// INFO: This code block can be removed once Bellatrix
// fork epoch is set to a value other than math.MaxUint64
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
params.OverrideBeaconConfig(cfg)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlockBellatrix()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
req.Block.ParentRoot = bsRoot[:]
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
}
func TestServer_SubmitBlindedBlockSSZ_OK(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "phase0")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
req.Block.ParentRoot = bsRoot[:]
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "altair")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
t.Run("Bellatrix", func(t *testing.T) {
// INFO: This code block can be removed once Bellatrix
// fork epoch is set to a value other than math.MaxUint64
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
params.OverrideBeaconConfig(cfg)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
req := util.NewBlindedBeaconBlockBellatrix()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
req.Block.ParentRoot = bsRoot[:]
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockSsz, err := req.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: blockSsz,
}
md := metadata.MD{}
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})
}
func TestSubmitBlindedBlock(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
@@ -836,9 +1125,10 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
@@ -955,9 +1245,10 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
@@ -1074,9 +1365,10 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
@@ -1194,9 +1486,10 @@ func TestServer_GetBlockV2(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
blk, err := bs.GetBlockV2(ctx, &ethpbv2.BlockRequestV2{
@@ -1395,9 +1688,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
root, err := genBlk.Block.HashTreeRoot()
@@ -1485,9 +1779,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),
@@ -1513,9 +1808,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
@@ -1615,9 +1911,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
@@ -1717,9 +2014,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
@@ -1820,9 +2118,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
resp, err := bs.ListBlockAttestations(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),
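
These tests drive the SSZ submission endpoints by placing the fork name in gRPC metadata under versionHeader, which the server reads to pick the right block type. A small, runnable sketch of that metadata round trip; the header value "eth-consensus-version" is an assumption for illustration, while the metadata calls are the real google.golang.org/grpc/metadata API imported above.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// versionHeader's value is assumed here; the tests above use a constant of the same name.
const versionHeader = "eth-consensus-version"

func main() {
	// Client/test side: attach the fork version to the context.
	md := metadata.MD{}
	md.Set(versionHeader, "bellatrix")
	ctx := metadata.NewIncomingContext(context.Background(), md)

	// Server side: read it back to decide how to decode the SSZ payload.
	in, ok := metadata.FromIncomingContext(ctx)
	if !ok || len(in.Get(versionHeader)) == 0 {
		fmt.Println("missing version header")
		return
	}
	fmt.Println(in.Get(versionHeader)[0]) // bellatrix
}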


@@ -16,7 +16,7 @@ import (
func TestGetSpec(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config := params.BeaconConfig().Copy()
config.ConfigName = "ConfigName"
config.PresetBase = "PresetBase"
@@ -353,7 +353,7 @@ func TestGetDepositContract(t *testing.T) {
const chainId = 99
const address = "0x0000000000000000000000000000000000000009"
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config := params.BeaconConfig().Copy()
config.DepositChainID = chainId
config.DepositContractAddress = address
params.OverrideBeaconConfig(config)
@@ -372,7 +372,7 @@ func TestForkSchedule_Ok(t *testing.T) {
thirdForkVersion, thirdForkEpoch := []byte("Thir"), types.Epoch(300)
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config := params.BeaconConfig().Copy()
config.GenesisForkVersion = genesisForkVersion
// Create fork schedule adding keys in non-sorted order.
schedule := make(map[[4]byte]types.Epoch, 3)
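
The .Copy() added throughout these tests matters because the config accessor hands back a pointer to the shared active config; mutating it directly would leak into every other test in the process. A toy version of the pattern, with simplified stand-in types rather than the real params package:

package main

import "fmt"

// Config is a simplified stand-in for the beacon chain config.
type Config struct{ DepositChainID uint64 }

var active = &Config{DepositChainID: 1}

// BeaconConfig returns the shared active config, as the real accessor does.
func BeaconConfig() *Config { return active }

// Copy returns an independent value that is safe to mutate.
func (c *Config) Copy() *Config {
	dup := *c
	return &dup
}

func main() {
	cfg := BeaconConfig().Copy() // without Copy(), the write below would hit the global
	cfg.DepositChainID = 99
	fmt.Println(BeaconConfig().DepositChainID, cfg.DepositChainID) // 1 99
}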


@@ -6,7 +6,7 @@ import (
func init() {
// Override network name so that hardcoded genesis files are not loaded.
cfg := params.BeaconConfig()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
}
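
params.SetActive replaces the older pattern of renaming the global config in place: the test config is registered as the active one wholesale, and activation can fail loudly instead of silently mutating shared state. A toy registry in the same spirit; all names below are illustrative.

package main

import (
	"errors"
	"fmt"
)

type config struct{ ConfigName string }

var active *config

// setActive swaps in a whole config rather than mutating the current one.
func setActive(c *config) error {
	if c == nil {
		return errors.New("cannot set nil config active")
	}
	active = c
	return nil
}

// mainnetTestConfig mirrors a mainnet-like config carrying a test name, so
// hardcoded genesis files keyed on the network name are not loaded.
func mainnetTestConfig() *config { return &config{ConfigName: "test"} }

func main() {
	if err := setActive(mainnetTestConfig()); err != nil {
		panic(err)
	}
	fmt.Println(active.ConfigName) // test
}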


@@ -703,7 +703,7 @@ func TestSubmitVoluntaryExit_InvalidExit(t *testing.T) {
func TestServer_SubmitAttestations_Ok(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig()
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)
@@ -809,7 +809,7 @@ func TestServer_SubmitAttestations_ValidAttestationSubmitted(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig()
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)
@@ -909,7 +909,7 @@ func TestServer_SubmitAttestations_InvalidAttestationGRPCHeader(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig()
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)


@@ -34,6 +34,7 @@ type Server struct {
StateGenService stategen.StateManager
StateFetcher statefetcher.Fetcher
HeadFetcher blockchain.HeadFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
V1Alpha1ValidatorServer *v1alpha1validator.Server
SyncChecker sync.Checker
CanonicalHistory *stategen.CanonicalHistory


@@ -75,7 +75,7 @@ func (bs *Server) GetStateRoot(ctx context.Context, req *ethpb.StateRequest) (*e
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -103,7 +103,7 @@ func (bs *Server) GetStateFork(ctx context.Context, req *ethpb.StateRequest) (*e
return nil, helpers.PrepareStateFetchGRPCError(err)
}
fork := st.Fork()
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -133,7 +133,7 @@ func (bs *Server) GetFinalityCheckpoints(ctx context.Context, req *ethpb.StateRe
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}


@@ -22,7 +22,7 @@ import (
func TestGetGenesis(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config := params.BeaconConfig().Copy()
config.GenesisForkVersion = []byte("genesis")
params.OverrideBeaconConfig(config)
genesis := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
@@ -80,13 +80,15 @@ func TestGetStateRoot(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &chainMock.ChainService{}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconStateRoot: stateRoot[:],
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
@@ -107,13 +109,15 @@ func TestGetStateRoot(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconStateRoot: stateRoot[:],
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
@@ -138,12 +142,14 @@ func TestGetStateFork(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &chainMock.ChainService{}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateFork(ctx, &eth.StateRequest{
@@ -167,12 +173,14 @@ func TestGetStateFork(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateFork(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
@@ -204,12 +212,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &chainMock.ChainService{}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetFinalityCheckpoints(ctx, &eth.StateRequest{
@@ -235,12 +245,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetFinalityCheckpoints(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),


@@ -91,7 +91,7 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
return nil, status.Errorf(codes.Internal, "Could not extract sync subcommittees: %v", err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}


@@ -162,6 +162,7 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &mock.ChainService{}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -169,8 +170,9 @@ func TestListSyncCommittees(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &mock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
req := &ethpbv2.StateSyncCommitteesRequest{StateId: stRoot[:]}
resp, err := s.ListSyncCommittees(ctx, req)
@@ -205,6 +207,7 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &mock.ChainService{Optimistic: true}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -212,8 +215,9 @@ func TestListSyncCommittees(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &mock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListSyncCommittees(ctx, req)
require.NoError(t, err)
@@ -261,6 +265,7 @@ func TestListSyncCommitteesFuture(t *testing.T) {
}))
db := dbTest.SetupDB(t)
chainService := &mock.ChainService{}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -268,8 +273,9 @@ func TestListSyncCommitteesFuture(t *testing.T) {
StateFetcher: &futureSyncMockFetcher{
BeaconState: st,
},
HeadFetcher: &mock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
req := &ethpbv2.StateSyncCommitteesRequest{}
epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod


@@ -57,7 +57,7 @@ func (bs *Server) GetValidator(ctx context.Context, req *ethpb.StateValidatorReq
return nil, status.Error(codes.NotFound, "Could not find validator")
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -80,7 +80,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
return nil, handleValContainerErr(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -143,7 +143,7 @@ func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.Validato
}
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -202,7 +202,7 @@ func (bs *Server) ListCommittees(ctx context.Context, req *ethpb.StateCommittees
}
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}


@@ -33,12 +33,14 @@ func TestGetValidator(t *testing.T) {
st, _ = util.DeterministicGenesisState(t, 8192)
t.Run("Head Get Validator by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.GetValidator(ctx, &ethpb.StateValidatorRequest{
@@ -50,12 +52,14 @@ func TestGetValidator(t *testing.T) {
})
t.Run("Head Get Validator by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
pubKey := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -93,12 +97,14 @@ func TestGetValidator(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.GetValidator(ctx, &ethpb.StateValidatorRequest{
StateId: []byte("head"),
@@ -117,12 +123,14 @@ func TestListValidators(t *testing.T) {
st, _ = util.DeterministicGenesisState(t, 8192)
t.Run("Head List All Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -136,12 +144,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Head List Validators by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -158,12 +168,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Head List Validators by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 66, 90, 100}
pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -184,12 +196,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Head List Validators by both index and pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 90, 170, 129}
@@ -212,12 +226,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Unknown public key is ignored", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
existingKey := st.PubkeyAtIndex(types.ValidatorIndex(1))
@@ -232,12 +248,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Unknown index is ignored", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("1"), []byte("99999")}
@@ -261,12 +279,14 @@ func TestListValidators(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
StateId: []byte("head"),
@@ -349,12 +369,14 @@ func TestListValidators_Status(t *testing.T) {
}
t.Run("Head List All ACTIVE Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -384,12 +406,14 @@ func TestListValidators_Status(t *testing.T) {
})
t.Run("Head List All ACTIVE_ONGOING Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -418,12 +442,14 @@ func TestListValidators_Status(t *testing.T) {
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*35))
t.Run("Head List All EXITED Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -451,12 +477,14 @@ func TestListValidators_Status(t *testing.T) {
})
t.Run("Head List All PENDING_INITIALIZED and EXITED_UNSLASHED Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -484,12 +512,14 @@ func TestListValidators_Status(t *testing.T) {
})
t.Run("Head List All PENDING and EXITED Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -532,12 +562,14 @@ func TestListValidatorBalances(t *testing.T) {
require.NoError(t, st.SetBalances(balances))
t.Run("Head List Validators Balance by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -554,12 +586,14 @@ func TestListValidatorBalances(t *testing.T) {
})
t.Run("Head List Validators Balance by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 66, 90, 100}
pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -579,12 +613,14 @@ func TestListValidatorBalances(t *testing.T) {
})
t.Run("Head List Validators Balance by both index and pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 90, 170, 129}
@@ -613,12 +649,14 @@ func TestListValidatorBalances(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -640,12 +678,14 @@ func TestListCommittees(t *testing.T) {
epoch := slots.ToEpoch(st.Slot())
t.Run("Head All Committees", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
@@ -660,12 +700,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Epoch 10", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
epoch := types.Epoch(10)
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
@@ -679,12 +721,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Slot 4", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
slot := types.Slot(4)
@@ -704,12 +748,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Index 1", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
index := types.CommitteeIndex(1)
@@ -729,12 +775,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Slot 2, Index 1", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
index := types.CommitteeIndex(1)
@@ -764,12 +812,14 @@ func TestListCommittees(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{


@@ -39,7 +39,7 @@ func (ds *Server) GetBeaconState(ctx context.Context, req *ethpbv1.StateRequest)
}
// GetBeaconStateSSZ returns the SSZ-serialized version of the full beacon state object for a given state ID.
func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv1.BeaconStateSSZResponse, error) {
func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZ")
defer span.End()
@@ -53,7 +53,7 @@ func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateReque
return nil, status.Errorf(codes.Internal, "Could not marshal state into SSZ: %v", err)
}
return &ethpbv1.BeaconStateSSZResponse{Data: sszState}, nil
return &ethpbv2.SSZContainer{Data: sszState}, nil
}
// GetBeaconStateV2 returns the full beacon state for a given state ID.
@@ -65,7 +65,7 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -121,21 +121,32 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
}
// GetBeaconStateSSZV2 returns the SSZ-serialized version of the full beacon state object for a given state ID.
func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.StateRequestV2) (*ethpbv2.BeaconStateSSZResponseV2, error) {
func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.StateRequestV2) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZV2")
defer span.End()
state, err := ds.StateFetcher.State(ctx, req.StateId)
st, err := ds.StateFetcher.State(ctx, req.StateId)
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
sszState, err := state.MarshalSSZ()
sszState, err := st.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal state into SSZ: %v", err)
}
var ver ethpbv2.Version
switch st.Version() {
case version.Phase0:
ver = ethpbv2.Version_PHASE0
case version.Altair:
ver = ethpbv2.Version_ALTAIR
case version.Bellatrix:
ver = ethpbv2.Version_BELLATRIX
default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}
return &ethpbv2.BeaconStateSSZResponseV2{Data: sszState}, nil
return &ethpbv2.SSZContainer{Data: sszState, Version: ver}, nil
}
// ListForkChoiceHeads retrieves the leaves of the current fork choice tree.
@@ -167,7 +178,7 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
Data: make([]*ethpbv2.ForkChoiceHead, len(headRoots)),
}
for i := range headRoots {
isOptimistic, err := ds.HeadFetcher.IsOptimisticForRoot(ctx, headRoots[i])
isOptimistic, err := ds.OptimisticModeFetcher.IsOptimisticForRoot(ctx, headRoots[i])
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if head is optimistic: %v", err)
}
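
GetBeaconStateSSZV2 now tags the raw SSZ bytes with the fork version because the bytes alone do not say which schema to decode with. A sketch of the consuming side under stand-in types; the Version constants mirror the ethpbv2 enum in spirit, and the decode bodies are placeholders rather than real unmarshaling.

package main

import "fmt"

// Version and SSZContainer are stand-ins for the ethpbv2 types.
type Version int32

const (
	Version_PHASE0 Version = iota
	Version_ALTAIR
	Version_BELLATRIX
)

type SSZContainer struct {
	Version Version
	Data    []byte
}

// decodeState picks the SSZ schema from the container's version; without the
// version field a caller could not tell the fork states apart.
func decodeState(c *SSZContainer) (string, error) {
	switch c.Version {
	case Version_PHASE0:
		return "unmarshal Data as a phase0 BeaconState", nil
	case Version_ALTAIR:
		return "unmarshal Data as an Altair BeaconStateAltair", nil
	case Version_BELLATRIX:
		return "unmarshal Data as a Bellatrix BeaconStateBellatrix", nil
	default:
		return "", fmt.Errorf("unsupported state version %d", c.Version)
	}
}

func main() {
	out, err := decodeState(&SSZContainer{Version: Version_ALTAIR})
	fmt.Println(out, err)
}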


@@ -44,8 +44,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -60,8 +61,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -76,8 +78,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -102,8 +105,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{Optimistic: true},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -153,6 +157,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
})
t.Run("Altair", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateAltair(t, 1)
@@ -171,6 +176,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
})
t.Run("Bellatrix", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateBellatrix(t, 1)
@@ -189,6 +195,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
})
}
@@ -238,8 +245,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
}}
chainService := &blockchainmock.ChainService{}
server := &Server{
HeadFetcher: &blockchainmock.ChainService{},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
}
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
require.NoError(t, err)
@@ -257,8 +266,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
}
t.Run("optimistic head", func(t *testing.T) {
chainService := &blockchainmock.ChainService{Optimistic: true}
server := &Server{
HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
}
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
require.NoError(t, err)


@@ -12,7 +12,8 @@ import (
// Server defines a server implementation of the gRPC Beacon Chain service,
// providing RPC endpoints to access data relevant to the Ethereum Beacon Chain.
type Server struct {
BeaconDB db.ReadOnlyDatabase
HeadFetcher blockchain.HeadFetcher
StateFetcher statefetcher.Fetcher
BeaconDB db.ReadOnlyDatabase
HeadFetcher blockchain.HeadFetcher
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
}


@@ -39,7 +39,7 @@ func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blo
}
// IsOptimistic checks whether the latest block header of the passed-in beacon state is the header of an optimistic block.
func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockchain.HeadFetcher) (bool, error) {
func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticSyncFetcher blockchain.OptimisticModeFetcher) (bool, error) {
root, err := st.HashTreeRoot(ctx)
if err != nil {
return false, errors.Wrap(err, "could not get state root")
@@ -50,7 +50,7 @@ func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockch
if err != nil {
return false, errors.Wrap(err, "could not get header root")
}
isOptimistic, err := headFetcher.IsOptimisticForRoot(ctx, headRoot)
isOptimistic, err := optimisticSyncFetcher.IsOptimisticForRoot(ctx, headRoot)
if err != nil {
return false, errors.Wrap(err, "could not check if block is optimistic")
}
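
The helper's control flow, reconstructed as a standalone sketch: derive the head root from the state's latest block header, then ask the fetcher whether that root is optimistic. The interfaces below are minimal stand-ins for state.BeaconState and blockchain.OptimisticModeFetcher, and the hashing step is faked for brevity.

package main

import (
	"context"
	"fmt"
)

type optimisticModeFetcher interface {
	IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

type beaconState interface {
	LatestHeaderRoot() ([32]byte, error) // stands in for hashing the latest block header
}

// isOptimistic mirrors the helper above: compute the header root from the
// state, then delegate the optimistic check to the fetcher.
func isOptimistic(ctx context.Context, st beaconState, f optimisticModeFetcher) (bool, error) {
	root, err := st.LatestHeaderRoot()
	if err != nil {
		return false, fmt.Errorf("could not get header root: %w", err)
	}
	return f.IsOptimisticForRoot(ctx, root)
}

type fakeState struct{}

func (fakeState) LatestHeaderRoot() ([32]byte, error) { return [32]byte{1}, nil }

type fakeFetcher struct{ optimistic bool }

func (f fakeFetcher) IsOptimisticForRoot(context.Context, [32]byte) (bool, error) {
	return f.optimistic, nil
}

func main() {
	got, err := isOptimistic(context.Background(), fakeState{}, fakeFetcher{optimistic: true})
	fmt.Println(got, err) // true <nil>
}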


@@ -58,14 +58,14 @@ func TestIsOptimistic(t *testing.T) {
require.NoError(t, err)
t.Run("optimistic", func(t *testing.T) {
mockHeadFetcher := &chainmock.ChainService{Optimistic: true}
o, err := IsOptimistic(ctx, st, mockHeadFetcher)
mockOptSyncFetcher := &chainmock.ChainService{Optimistic: true}
o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("not optimistic", func(t *testing.T) {
mockHeadFetcher := &chainmock.ChainService{Optimistic: false}
o, err := IsOptimistic(ctx, st, mockHeadFetcher)
mockOptSyncFetcher := &chainmock.ChainService{Optimistic: false}
o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.Equal(t, false, o)
})


@@ -13,14 +13,15 @@ import (
// Server defines a server implementation of the gRPC Validator service,
// providing RPC endpoints intended for validator clients.
type Server struct {
HeadFetcher blockchain.HeadFetcher
HeadUpdater blockchain.HeadUpdater
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
Broadcaster p2p.Broadcaster
StateFetcher statefetcher.Fetcher
SyncCommitteePool synccommittee.Pool
V1Alpha1Server *v1alpha1validator.Server
HeadFetcher blockchain.HeadFetcher
HeadUpdater blockchain.HeadUpdater
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
Broadcaster p2p.Broadcaster
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncCommitteePool synccommittee.Pool
V1Alpha1Server *v1alpha1validator.Server
}


@@ -58,7 +58,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -142,7 +142,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -258,7 +258,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
return nil, status.Errorf(codes.Internal, "Could not get duties: %v", err)
}
isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.HeadFetcher)
isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -348,6 +348,76 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}
// ProduceBlockV2SSZ requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
//
// The produced block is in SSZ form.
func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_PHASE0,
Data: sszBlock,
}, nil
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_ALTAIR,
Data: sszBlock,
}, nil
}
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
if ok {
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Bellatrix)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_BELLATRIX,
Data: sszBlock,
}, nil
}
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}
// ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
@@ -413,6 +483,79 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}
// ProduceBlindedBlockSSZ requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
// The produced block is in SSZ form.
//
// Pre-Bellatrix, this endpoint will return a regular block.
func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlockSSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_PHASE0,
Data: sszBlock,
}, nil
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_ALTAIR,
Data: sszBlock,
}, nil
}
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
if ok {
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2Blinded(bellatrixBlock.Bellatrix)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
sszBlock, err := block.MarshalSSZ()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_BELLATRIX,
Data: sszBlock,
}, nil
}
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
func (vs *Server) PrepareBeaconProposer(
ctx context.Context, request *ethpbv1.PrepareBeaconProposerRequest,

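Both ProduceBlockV2SSZ and ProduceBlindedBlockSSZ repeat the same three steps per fork: convert the v1alpha1 block, marshal it to SSZ, and wrap the bytes with a version tag. A hedged sketch of that shared shape; the helper and types below are illustrative only, not part of the actual API.

package main

import "fmt"

// sszMarshaler is the one capability each fork's block type must provide.
type sszMarshaler interface {
	MarshalSSZ() ([]byte, error)
}

type sszContainer struct {
	Version string
	Data    []byte
}

// toContainer captures the per-fork pattern above: marshal, then pair the
// bytes with the fork version so the caller can decode them later.
func toContainer(version string, blk sszMarshaler) (*sszContainer, error) {
	data, err := blk.MarshalSSZ()
	if err != nil {
		return nil, fmt.Errorf("could not marshal block into SSZ format: %w", err)
	}
	return &sszContainer{Version: version, Data: data}, nil
}

// fakeBlock stands in for a converted beacon block in this sketch.
type fakeBlock struct{}

func (fakeBlock) MarshalSSZ() ([]byte, error) { return []byte{0x01, 0x02}, nil }

func main() {
	c, err := toContainer("altair", fakeBlock{})
	fmt.Println(c.Version, len(c.Data), err) // altair 2 <nil>
}
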
File diff suppressed because it is too large


@@ -740,7 +740,8 @@ func TestServer_StreamBlocks_ContextCanceled(t *testing.T) {
func TestServer_StreamBlocks_OnHeadUpdated(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.UseMainnetConfig()
params.OverrideBeaconConfig(params.MainnetConfig())
ctx := context.Background()
beaconState, privs := util.DeterministicGenesisState(t, 32)
b, err := util.GenerateFullBlock(beaconState, privs, util.DefaultBlockGenConfig(), 1)
@@ -778,7 +779,8 @@ func TestServer_StreamBlocks_OnHeadUpdated(t *testing.T) {
func TestServer_StreamBlocksVerified_OnHeadUpdated(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.UseMainnetConfig()
params.OverrideBeaconConfig(params.MainnetConfig())
db := dbTest.SetupDB(t)
ctx := context.Background()
beaconState, privs := util.DeterministicGenesisState(t, 32)


@@ -6,7 +6,7 @@ import (
func init() {
// Override network name so that hardcoded genesis files are not loaded.
cfg := params.BeaconConfig()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
}


@@ -1028,6 +1028,7 @@ func TestServer_ListValidators_DefaultPageSize(t *testing.T) {
}
func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
transition.SkipSlotCache.Disable()

Some files were not shown because too many files have changed in this diff