Compare commits

...

94 Commits

Author SHA1 Message Date
terence tsao
30ca6360d5 Fix test 2022-03-07 16:14:18 -08:00
Kasey Kirkham
85c0e0cecf fix lint issues 2022-03-07 17:22:14 -06:00
Kasey Kirkham
7c871f3123 checkint in WIP to ask for help 2022-03-07 17:15:18 -06:00
Kasey Kirkham
1d3dea325f WIP test fixes; rm isCanonicalSlot from val rpc
the replayer code already ensures that the block chosen for replay is
canonical, so searching backwards for the canonical block in the rpc
package is redundant.
2022-03-07 11:05:12 -06:00
Kasey Kirkham
9c91e35367 better guards for nil stategen/caches in tests 2022-03-04 08:31:35 -06:00
Kasey Kirkham
7f2ec23827 tests to ensure state cache is used 2022-03-03 14:23:08 -06:00
Kasey Kirkham
94b147d340 remove unused mockCanonicalChainer 2022-03-03 12:45:16 -06:00
Kasey Kirkham
e4037b3d23 fmt/lint/import fixes 2022-03-03 12:42:47 -06:00
Kasey Kirkham
3bf3ea3ab1 more comment formatting feedback from Radek 2022-03-03 11:17:42 -06:00
Kasey Kirkham
ea0b9c66ff taking Radek's PR feedback suggestions 2022-03-03 11:14:51 -06:00
Kasey Kirkham
1e946e4c3d use CombinedCache w/ ReplayerBuilder in rpc/api 2022-03-03 11:07:29 -06:00
Kasey Kirkham
1e922633ae add CachedGetter interface and CombinedCache impl 2022-03-03 11:03:35 -06:00
Kasey Kirkham
e65f7b6974 fix some lingering bad state comparisons 2022-03-03 10:54:37 -06:00
Kasey Kirkham
a726224397 add ReplayToSlot test and fix the bug it found 2022-03-02 17:36:23 -06:00
Kasey Kirkham
92d66dfc49 + ReplayBlocks test; fix test bugs due to mock bug 2022-03-02 17:01:02 -06:00
Kasey Kirkham
703161a1ac fixes to mock chain generation 2022-03-02 17:00:41 -06:00
Kasey Kirkham
5aed95bbb5 base happy path ReplayBlocks test 2022-03-02 14:08:24 -06:00
Kasey Kirkham
2c800d2853 add missing build file for mock block package 2022-03-02 13:09:19 -06:00
Kasey Kirkham
95acd101d3 pr feedback: fix typo, check for ctx cancellation 2022-03-02 13:00:18 -06:00
Kasey Kirkham
4f554369c5 this error doesn't need to be exported 2022-03-02 12:24:28 -06:00
Kasey Kirkham
267a0c8e1e cover ErrNotFoundState for StateOrError 2022-03-02 12:22:50 -06:00
terence tsao
b66be8c3a6 Merge branch 'develop' of github.com:prysmaticlabs/prysm into new-block-replayer 2022-03-01 14:00:23 -08:00
Raul Jordan
958dd9d783 Check Engine API Transition Configuration in Background (#10250)
* transition proto

* gen pb

* builds

* impl transition config

* begin tests

* transition config messed up

* amend proto

* use str

* passing

* gaz

* config

* client test

* pb

* set to 0

* rem log

* gaz

* check transition config

* check config differences

* check transition config in background

* gaz

* pass

* redundant

* fix up error handling and healthz

* simplify status

* gazelle

* build

* err config check

* test

* gaz

* Fix run time

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-01 21:38:51 +00:00
Preston Van Loon
a3f8ccd924 validator: Fix flaky test TestServer_RefreshJWTSecretOnFileChange (#10296) 2022-03-01 20:34:05 +00:00
Kasey Kirkham
39265ddc99 lint/imports/etc 2022-03-01 14:12:14 -06:00
Kasey Kirkham
f692b041bc moving mock block to its own package 2022-03-01 14:04:21 -06:00
Kasey Kirkham
1383161ced attempt to clarify this tricky error message 2022-03-01 13:55:29 -06:00
Kasey Kirkham
238c5eb70c more comment improvements per Radek 2022-03-01 13:51:07 -06:00
Kasey Kirkham
1317e6e1e0 Radek's suggestions for better comment/error/log 2022-03-01 13:48:28 -06:00
Kasey Kirkham
497e1f4c4a use safe slot subtraction 2022-03-01 13:05:42 -06:00
Kasey Kirkham
ffa772f221 rename ambiguous func param 2022-03-01 12:57:22 -06:00
Kasey Kirkham
3518d0c61d test coverage for reverseChain 2022-03-01 12:48:36 -06:00
terence tsao
d64f6cb7a8 Add engine methods to block processing (#10285)
* Add notify newPayload and forkchoiceUpdate

* Tests

* Raul's feedback

* Update optimistic_sync.go

* Simplify

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-03-01 16:43:06 +00:00
terence tsao
a9a75e0004 Release lock before return validatedTips (#10289)
* Release lock before return

* add test

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-03-01 15:36:47 +00:00
Nishant Das
339540274b Integration of Vectorized Sha256 In Prysm (#10166)
* add changes

* fix for vectorize

* fix bug

* add new bench

* use new algorithms

* add latest updates

* save progress

* hack even more

* add more changes

* change library

* go mod

* fix deps

* fix dumb bug

* add flag and remove redundant code

* clean up better

* remove those ones

* clean up benches

* clean up benches

* cleanup

* gaz

* revert change

* potuz's review

* potuz's review

* potuz's review

* gaz

* potuz's review

* remove cyclical import

* revert ide changes

* potuz's review

* return
2022-02-28 21:56:12 +08:00
Raul Jordan
12ba8f3645 Renaming Random in ExecutionPayloads to PrevRandao (#10283)
* rename proto

* p header

* regen

* regen ssz

* fix randao

* random name changes

* bazel builds

* bt

* incorrect prev randao

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-26 03:47:16 +00:00
Raul Jordan
f3a7f399c0 Engine API Client Authentication for the Merge via HTTP (#10236)
* round tripper with claims

* auth

* edit auth

* test out jwt

* passing

* jwt flag

* comment

* passing

* commentary

* fix up jwt parsing

* gaz

* update jwt libs

* tidy

* gaz

* lint

* tidy up

* comment too long

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2022-02-25 19:08:43 +00:00
Kasey Kirkham
ba4e255b02 export CanonicalBuilder w/ NewCanonicalBuilder 2022-02-25 11:17:24 -06:00
Kasey Kirkham
00017fb367 deep soooooource! 2022-02-25 11:17:24 -06:00
Kasey Kirkham
b36bd645cf remove unused param vars 2022-02-25 11:17:24 -06:00
Kasey Kirkham
4c70c7a5e1 remove unused receivers 2022-02-25 11:17:24 -06:00
Kasey Kirkham
9501d627d5 fix bad change from github suggestion ui 2022-02-25 11:17:24 -06:00
Kasey Kirkham
a617eb8844 appease deep source 2022-02-25 11:17:24 -06:00
Kasey Kirkham
85bf11e879 trying to clarify StateBySlot comment 2022-02-25 11:17:24 -06:00
kasey
f7ef135cdc use ReplayToSlot when advancing to the beginning of the next slot
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-02-25 11:17:24 -06:00
kasey
3bc76457e9 comment formatting
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-02-25 11:17:24 -06:00
kasey
5195f665d7 comment capitalization
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-02-25 11:17:24 -06:00
Kasey Kirkham
4ca1046025 goimports 2022-02-25 11:17:24 -06:00
Kasey Kirkham
f9f1894be1 goimports 2022-02-25 11:17:24 -06:00
Kasey Kirkham
324cffe1ad improvements to test coverage 2022-02-25 11:17:24 -06:00
Kasey Kirkham
e7f071e719 lint fix 2022-02-25 11:17:24 -06:00
Kasey Kirkham
e70c60e4ee deepsource nits 2022-02-25 11:17:24 -06:00
Kasey Kirkham
fe91399dbc cleanup in response to PR comments 2022-02-25 11:17:24 -06:00
Kasey Kirkham
f8c43aa29f all state by slot calls return post-state
- new stategen.StateReplayer/ReplayerBuilder to give more fine-grained
  control of replaying state+block history
- LOTs of changes to tests
2022-02-25 11:17:24 -06:00
james-prysm
6163e091a7 web3signer: fixes for e2e (#10281)
* fixing logs and caught bug in mappers

* Fix schema

* improving logging

* Update validator/keymanager/remote-web3signer/internal/client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* adding logurus dependency

Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-02-25 02:42:26 +00:00
terence tsao
2fb4ddcbe7 Engine API: add payload status handling and tests (#10282)
* Add status handling and tests

* Update client.go

* Fmt

* Update mock_engine_test.go

* Update client_test.go
2022-02-24 19:35:01 +00:00
Radosław Kapka
1c2e463a30 --api-timeout flag (#10260)
* `--api-timeout` flag

* simplify code

* review feedback

* better error handling

* better docs

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-24 18:01:37 +00:00
james-prysm
01e9125761 web3signer: url parsing bug (#10278)
* adding in fixes for url

* fixing gazelle

* fixing wrong keymanager kind

* adding required scheme to urls

* fixing another unit test

* removing unused file

* adding new commit to retrigger deepsource ci
2022-02-24 10:24:11 -06:00
terence tsao
02a088d93c Add validate_merge_block (#10273)
* Add

* Update pow_block.go

* Update BUILD.bazel

* Update beacon-chain/blockchain/pow_block.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* feedbacks

* Feedbacks

* Fmt

* Update BUILD.bazel

* Update BUILD.bazel

* Update pow_block_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-23 22:41:11 +00:00
terence tsao
8cadb2ac6f Update IsOptimistic to always false (#10276)
* Update  to always false

* Use epoch

* Update chain_info_test.go

* Update chain_info_test.go
2022-02-23 20:16:45 +00:00
Potuz
be722604f7 Fix logarithm of 2 (#10275)
* Fix logarithm of 2

* add regression test

* Update encoding/ssz/merkleize.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-02-23 12:44:04 +00:00
Raul Jordan
3bb2acfc7d Update Web UI to v1.0.3 (#10264)
* update web UI version

* fixing format

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: James He <james@prysmaticlabs.com>
2022-02-22 18:26:15 +00:00
terence tsao
7719356b69 Add opt sync bool to IsOptimistic (#10270)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-22 16:46:42 +00:00
Raul Jordan
75b9bdba7c Small Comment Fix in Exchanging Transition Config (#10271) 2022-02-22 16:16:02 +00:00
Nishant Das
525c818672 Remove SSZ Cache (#10256)
* remove ssz cache

* gaz

* lint

* analyze more

* fix
2022-02-22 17:27:51 +08:00
terence tsao
a55fdf8949 Use type string for total_difficulty (#10265)
* Use string for difficulty

* fix go

* fix test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-02-21 14:03:12 +00:00
Preston Van Loon
7f41b69281 Wrapper: Update block interface and reorganize fork logic (#10267)
* Add ssz.HashRoot interface composition to BeaconBlock interface, move fork specific logic into it's own files

* Remove needless underscore
2022-02-20 20:53:05 +00:00
Preston Van Loon
c5189a6862 Deduplicate TestProposer_ProposeBlock_OK (#10266)
* Deduplicate tests for TestProposer_ProposeBlock

* remove erroneous save of block

* ensure block root is returned
2022-02-19 22:52:03 +00:00
Leo Lara
b4b976c28b Experimental prototype of Apple M1 processor support (#10192)
* Experimental prototype of Apple M1 processor support

* Enable Apple M1 compilation of herumi MCL by adding a precompiled library

* Renable nogo

* Fix by gazelle

* Update go.mod to reflect go 1.17.6 changes in WORKSPACE

* go mod tidy

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-02-19 18:17:08 +00:00
Nishant Das
ced24892a5 Cleanup Powchain Service (#10259)
* add cleanup

* gaz

* Fix test build
2022-02-18 14:13:31 +00:00
terence tsao
49f989e342 Fix base fee endianness (#10253) 2022-02-16 17:48:55 +00:00
Leo Lara
3003f08770 Remove unnecessary target of third_party/herumi/bls.BUILD (#10252)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-02-16 16:35:19 +00:00
terence tsao
0232b5f8f5 Regen pbs (#10227)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-02-16 10:06:41 -06:00
terence tsao
90a15b2fbe Handle PayloadStatus UnmarshalJSON nil cases (#10249)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-16 01:24:50 +00:00
Raul Jordan
0f58c9a925 Exchange Transition Configuration for Engine API (#10248)
* transition proto

* gen pb

* builds

* impl transition config

* begin tests

* transition config messed up

* amend proto

* use str

* passing

* gaz

* config

* client test

* pb

* set to 0

* rem log

* gaz
2022-02-16 00:56:23 +00:00
terence tsao
6311cfd8ab Fix payloadStatusJSON fields (#10246)
* Use pointers

* Use pointers

* Use pointers

* Update json_marshal_unmarshal.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-15 23:26:58 +00:00
Radosław Kapka
6dcf47675b E2E BellatrixE2EForkEpoch constant (#10240)
* Add `BellatrixE2EForkEpoch` constant

* change epoch

* Config param

* revert e2e update

* disable linter

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-15 21:06:14 +00:00
terence tsao
72aa782849 PayloadStatus reserve 0 enum (#10247)
* Leave 0 as UNKNOWN

* Leave 0 as UNKNOWN
2022-02-15 20:32:39 +00:00
terence tsao
cc637bad4a Fix ForkchoiceUpdatedResponse json tag (#10243) 2022-02-15 15:49:36 +00:00
Potuz
f5719f8c8e Fix updateCanonicalNodes (#10241)
* Fix updateCanonicalNodes

* fix logic

* terence's review
2022-02-14 21:30:26 +00:00
Rootul Patel
f550a964f5 Add path to Keystore (#10152)
* Add failing test

* Add path to keystore

* Assert encoded contains "path"

* Fix lint

Ran `bazel run //:gazelle -- fix`

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2022-02-14 09:57:15 -06:00
Radosław Kapka
c5c039fd6b Unify GenesisValidator(s)Root throughout the codebase (#10230)
* Unify `GenesisValidator(s)Root` throughout the codebase

* comments and literals

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-02-14 13:34:38 +00:00
Radosław Kapka
4d9947543f Native state feature flag (#10232)
* add flag

* working version

* rename flag

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-14 12:33:02 +00:00
Michael Neuder
7370c42bae Adding nil error check to connection status for ETH1 nodes (#10186)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-02-14 10:21:45 +00:00
Radosław Kapka
3c76cc3af5 Return state interface from native state constructors (#10208)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-14 17:51:22 +08:00
terence tsao
28af5bc601 Fix eth_getBlockByHash call (#10239)
* Update client.go

* Update client_test.go
2022-02-13 16:33:11 +00:00
terence tsao
c9f299b50a Clean up fork choice (#10226)
* Clean up fork choice

* Update beacon-chain/forkchoice/protoarray/proposer_boost_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/forkchoice/protoarray/store_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/forkchoice/protoarray/store_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update store.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-02-12 03:33:46 +00:00
terence tsao
9dfb385160 Pad fields to correct length (#10237)
* Pad fields to correct length

* Use constants

* builds

* Fix test

* Update BUILD.bazel

* tests

* passing

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-12 01:58:09 +00:00
Raul Jordan
b1774efeb7 Fix JSON Marshal/Unmarshal for Execution Block (#10238) 2022-02-12 01:06:28 +00:00
Potuz
4b3a723166 Do not stop on error loading synced tips (#10235)
* Do not stop on error loading synced tips

* Terence's review

* Update beacon-chain/blockchain/optimistic_sync.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-11 21:46:53 +00:00
terence tsao
50f253619e Fix engine client method names (#10234)
* Fix engine client method names

* Typo
2022-02-11 16:12:11 -05:00
james-prysm
753afb4fb2 small fix for web3signer (#10223) 2022-02-11 09:41:31 -05:00
Leo Lara
1835f54197 Refactor for DRY state getters_validator_test and getters_test (#10117)
* Refactor for DRY state getters_validator_test and getters_test

* Add BUILD.bazel to beacon-chain/state/testing

* Handle type assertion error in beacon-chain/state tests

* Fix with gazelle

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-02-11 14:48:29 +08:00
terence tsao
26a2311c82 Clean up outdated bellatrix code (#10225)
* Rm outdated merge code

* Update config_test.go

* Native

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-02-11 04:40:12 +00:00
374 changed files with 8381 additions and 4764 deletions

View File

@@ -183,7 +183,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.16.4",
go_version = "1.17.6",
nogo = "@//:nogo",
)
@@ -349,9 +349,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "f196fe4367c2d2d01d36565c0dc6eecfa4f03adba1fc03a61d62953fce606e1f",
sha256 = "4797a7e594a5b1f4c1c8080701613f3ee451b01ec0861499ea7d9b60877a6b23",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.2/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.3/prysm-web-ui.tar.gz",
],
)
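
The sha256 above pins the new v1.0.3 prysm-web-ui tarball; Bazel refuses the archive if the downloaded bytes do not hash to this value. A minimal, standard-library-only sketch for recomputing the digest from a locally downloaded copy (the file name here is a placeholder, not part of the diff):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Path to a locally downloaded prysm-web-ui.tar.gz (placeholder name).
	f, err := os.Open("prysm-web-ui.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	// This hex digest should match the sha256 attribute in the WORKSPACE rule above.
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}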

View File

@@ -3,6 +3,7 @@ package apimiddleware
import (
"net/http"
"reflect"
"time"
"github.com/gorilla/mux"
)
@@ -14,6 +15,7 @@ import (
type ApiProxyMiddleware struct {
GatewayAddress string
EndpointCreator EndpointFactory
Timeout time.Duration
router *mux.Router
}
@@ -120,7 +122,7 @@ func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
WriteError(w, errJson, nil)
return
}
grpcResp, errJson := ProxyRequest(req)
grpcResp, errJson := m.ProxyRequest(req)
if errJson != nil {
WriteError(w, errJson, nil)
return

View File

@@ -5,10 +5,10 @@ import (
"encoding/json"
"io"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/api/grpc"
@@ -75,11 +75,14 @@ func (m *ApiProxyMiddleware) PrepareRequestForProxying(endpoint Endpoint, req *h
}
// ProxyRequest proxies the request to grpc-gateway.
func ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
func (m *ApiProxyMiddleware) ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
// We do not use http.DefaultClient because it does not have any timeout.
netClient := &http.Client{Timeout: time.Minute * 2}
netClient := &http.Client{Timeout: m.Timeout}
grpcResp, err := netClient.Do(req)
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
return nil, TimeoutError()
}
return nil, InternalServerErrorWithMessage(err, "could not proxy request")
}
if grpcResp == nil {
@@ -111,9 +114,14 @@ func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []
w.Header().Set(h, v)
}
}
// Set code to HTTP code because unmarshalled body contained gRPC code.
errJson.SetCode(resp.StatusCode)
WriteError(w, errJson, resp.Header)
// Handle gRPC timeout.
if resp.StatusCode == http.StatusGatewayTimeout {
WriteError(w, TimeoutError(), resp.Header)
} else {
// Set code to HTTP code because unmarshalled body contained gRPC code.
errJson.SetCode(resp.StatusCode)
WriteError(w, errJson, resp.Header)
}
}
return responseHasError, nil
}
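
The change above replaces the hard-coded two-minute client timeout with the middleware's configurable Timeout and detects client-side timeouts with a net.Error type assertion. A minimal sketch of that pattern, using illustrative names rather than Prysm's API:

package example

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// doWithTimeout sketches the pattern applied above: an http.Client with an
// explicit timeout, and a net.Error type assertion to distinguish timeouts
// from other transport failures.
func doWithTimeout(req *http.Request, d time.Duration) (*http.Response, error) {
	client := &http.Client{Timeout: d}
	resp, err := client.Do(req)
	if err != nil {
		if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
			return nil, fmt.Errorf("request timed out after %s", d)
		}
		return nil, fmt.Errorf("could not proxy request: %w", err)
	}
	return resp, nil
}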

View File

@@ -41,6 +41,13 @@ func InternalServerError(err error) *DefaultErrorJson {
}
}
func TimeoutError() *DefaultErrorJson {
return &DefaultErrorJson{
Message: "Request timeout",
Code: http.StatusRequestTimeout,
}
}
// StatusCode returns the error's underlying error code.
func (e *DefaultErrorJson) StatusCode() int {
return e.Code
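
The new TimeoutError is the single client-facing shape for both timeout paths: a client-side timeout caught via net.Error and an upstream gateway timeout (the http.StatusGatewayTimeout branch in the previous file) are both reported with http.StatusRequestTimeout. The two status codes involved, for reference (not part of the diff):

package example

import "net/http"

// Status codes involved in the timeout handling above.
const (
	upstreamTimeout = http.StatusGatewayTimeout // 504, as handled in HandleGrpcResponseError
	reportedTimeout = http.StatusRequestTimeout // 408, as written by TimeoutError
)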

View File

@@ -52,6 +52,7 @@ type config struct {
muxHandler MuxHandler
pbHandlers []*PbMux
router *mux.Router
timeout time.Duration
}
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
@@ -248,6 +249,7 @@ func (g *Gateway) registerApiMiddleware() {
g.proxy = &apimiddleware.ApiProxyMiddleware{
GatewayAddress: g.cfg.gatewayAddr,
EndpointCreator: g.cfg.apiMiddlewareEndpointFactory,
Timeout: g.cfg.timeout,
}
log.Info("Starting API middleware")
g.proxy.Run(g.cfg.router)

View File

@@ -1,7 +1,10 @@
package gateway
import (
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
)
@@ -79,3 +82,12 @@ func WithApiMiddleware(endpointFactory apimiddleware.EndpointFactory) Option {
return nil
}
}
// WithTimeout allows changing the timeout value for API calls.
func WithTimeout(seconds uint64) Option {
return func(g *Gateway) error {
g.cfg.timeout = time.Second * time.Duration(seconds)
gwruntime.DefaultContextTimeout = time.Second * time.Duration(seconds)
return nil
}
}
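
WithTimeout converts the flag value (whole seconds) into a time.Duration for both the middleware proxy and grpc-gateway's DefaultContextTimeout. Below is a self-contained miniature of the functional-options pattern it follows; the cfg struct and constructor are illustrative, and only WithTimeout's seconds-to-Duration conversion mirrors the diff:

package example

import "time"

type cfg struct {
	timeout time.Duration
}

type Option func(*cfg) error

// WithTimeout mirrors the conversion in the diff above.
func WithTimeout(seconds uint64) Option {
	return func(c *cfg) error {
		c.timeout = time.Second * time.Duration(seconds)
		return nil
	}
}

func newConfig(opts ...Option) (*cfg, error) {
	c := &cfg{timeout: 2 * time.Minute} // the previously hard-coded default
	for _, o := range opts {
		if err := o(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}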

View File

@@ -56,6 +56,7 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
@@ -65,6 +66,7 @@ go_library(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
@@ -73,6 +75,8 @@ go_library(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_emicklei_dot//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -104,6 +108,7 @@ go_test(
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_engine_test.go",
"mock_test.go",
"optimistic_sync_test.go",
"pow_block_test.go",

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
)
@@ -36,7 +37,7 @@ type TimeFetcher interface {
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
type GenesisFetcher interface {
GenesisValidatorRoot() [32]byte
GenesisValidatorsRoot() [32]byte
}
// HeadFetcher defines a common interface for methods in blockchain service which
@@ -48,7 +49,7 @@ type HeadFetcher interface {
HeadState(ctx context.Context) (state.BeaconState, error)
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
HeadGenesisValidatorRoot() [32]byte
HeadGenesisValidatorsRoot() [32]byte
HeadETH1Data() *ethpb.Eth1Data
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
@@ -214,8 +215,8 @@ func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, er
return helpers.Seed(s.headState(ctx), epoch, params.BeaconConfig().DomainBeaconAttester)
}
// HeadGenesisValidatorRoot returns genesis validator root of the head state.
func (s *Service) HeadGenesisValidatorRoot() [32]byte {
// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -223,7 +224,7 @@ func (s *Service) HeadGenesisValidatorRoot() [32]byte {
return [32]byte{}
}
return s.headGenesisValidatorRoot()
return s.headGenesisValidatorsRoot()
}
// HeadETH1Data returns the eth1data of the current head state.
@@ -247,16 +248,16 @@ func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}
// GenesisValidatorRoot returns the genesis validator
// GenesisValidatorsRoot returns the genesis validator
// root of the chain.
func (s *Service) GenesisValidatorRoot() [32]byte {
func (s *Service) GenesisValidatorsRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [32]byte{}
}
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
}
// CurrentFork retrieves the latest fork information of the beacon chain.
@@ -334,6 +335,10 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return false, nil
}
return s.cfg.ForkChoiceStore.Optimistic(ctx, s.head.root, s.head.slot)
}
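
The new guard short-circuits IsOptimistic to false until the chain reaches the Bellatrix fork epoch. The comparison is plain integer arithmetic on the current slot; a tiny sketch of it (SlotsPerEpoch is 32 on mainnet), matching the tests further down:

package example

// slots.ToEpoch in the guard above reduces to integer division by
// SlotsPerEpoch (32 on mainnet), so the head slot of 101 used in
// TestService_IsOptimistic falls in epoch 3. That test overrides
// BellatrixForkEpoch to 0 to reach the fork-choice path, while
// TestService_IsOptimisticBeforeBellatrix leaves the config untouched
// (current slot 0, epoch 0) and expects false.
const slotsPerEpoch = 32

func toEpoch(slot uint64) uint64 {
	return slot / slotsPerEpoch
}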

View File

@@ -185,15 +185,15 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
}
}
func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
// Should not panic if head state is nil.
c := &Service{}
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
s, err := v1.InitializeFromProto(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
require.NoError(t, err)
c.head = &head{state: s}
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
}
func TestHeadETH1Data_Nil(t *testing.T) {
@@ -265,17 +265,17 @@ func TestService_HeadSeed(t *testing.T) {
require.DeepEqual(t, seed, root)
}
func TestService_HeadGenesisValidatorRoot(t *testing.T) {
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
c := &Service{}
c.head = &head{}
root := c.HeadGenesisValidatorRoot()
root := c.HeadGenesisValidatorsRoot()
require.Equal(t, [32]byte{}, root)
c.head = &head{state: s}
root = c.HeadGenesisValidatorRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorRoot())
root = c.HeadGenesisValidatorsRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
}
func TestService_ProtoArrayStore(t *testing.T) {
@@ -357,6 +357,11 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
}
func TestService_IsOptimistic(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = 0
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
@@ -367,6 +372,14 @@ func TestService_IsOptimistic(t *testing.T) {
require.Equal(t, true, opt)
}
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
ctx := context.Background()
c := &Service{genesisTime: time.Now()}
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, opt)
}
func TestService_IsOptimisticForRoot(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}

View File

@@ -265,10 +265,10 @@ func (s *Service) headState(ctx context.Context) state.BeaconState {
return s.head.state.Copy()
}
// This returns the genesis validator root of the head state.
// This returns the genesis validators root of the head state.
// This is a lock free version.
func (s *Service) headGenesisValidatorRoot() [32]byte {
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
func (s *Service) headGenesisValidatorsRoot() [32]byte {
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
}
// This returns the validator referenced by the provided index in

View File

@@ -130,7 +130,7 @@ func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, doma
if err != nil {
return nil, err
}
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorsRoot())
}
// returns the head state that is advanced up to `slot`. It utilizes the cache `syncCommitteeHeadState` by retrieving using `slot` as key.

View File

@@ -122,7 +122,7 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
require.NoError(t, err)
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
@@ -136,7 +136,7 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
require.NoError(t, err)
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
@@ -150,7 +150,7 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
require.NoError(t, err)
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)

View File

@@ -0,0 +1,49 @@
package blockchain
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
type mockEngineService struct {
newPayloadError error
forkchoiceError error
blks map[[32]byte]*enginev1.ExecutionBlock
}
func (m *mockEngineService) NewPayload(context.Context, *enginev1.ExecutionPayload) ([]byte, error) {
return nil, m.newPayloadError
}
func (m *mockEngineService) ForkchoiceUpdated(context.Context, *enginev1.ForkchoiceState, *enginev1.PayloadAttributes) (*enginev1.PayloadIDBytes, []byte, error) {
return nil, nil, m.forkchoiceError
}
func (*mockEngineService) GetPayloadV1(
_ context.Context, _ enginev1.PayloadIDBytes,
) *enginev1.ExecutionPayload {
return nil
}
func (*mockEngineService) GetPayload(context.Context, [8]byte) (*enginev1.ExecutionPayload, error) {
return nil, nil
}
func (*mockEngineService) ExchangeTransitionConfiguration(context.Context, *enginev1.TransitionConfiguration) error {
return nil
}
func (*mockEngineService) LatestExecutionBlock(context.Context) (*enginev1.ExecutionBlock, error) {
return nil, nil
}
func (m *mockEngineService) ExecutionBlockByHash(_ context.Context, hash common.Hash) (*enginev1.ExecutionBlock, error) {
blk, ok := m.blks[common.BytesToHash(hash.Bytes())]
if !ok {
return nil, errors.New("block not found")
}
return blk, nil
}
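
mock_engine_test.go gives the blockchain tests a stand-in for the execution engine client: canned errors for NewPayload/ForkchoiceUpdated and an in-memory block map for ExecutionBlockByHash, injected via service.cfg.ExecutionEngineCaller in the tests below. A generic, self-contained sketch of the same pattern (names here are illustrative, not Prysm's):

package example

import "context"

// The service depends on a narrow interface, the test implements it with a
// mock returning canned errors, and the mock is injected into the service
// config under test.
type payloadNotifier interface {
	NewPayload(ctx context.Context) ([]byte, error)
}

type mockNotifier struct {
	err error
}

func (m *mockNotifier) NewPayload(context.Context) ([]byte, error) {
	return nil, m.err
}

// Compile-time guarantee that the mock keeps satisfying the interface.
var _ payloadNotifier = (*mockNotifier)(nil)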

View File

@@ -2,15 +2,140 @@ package blockchain
import (
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/sirupsen/logrus"
)
// notifyForkchoiceUpdate signals the execution engine of a fork choice update. The execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
return nil, errors.New("nil head block")
}
// Must not call fork choice updated until the transition conditions are met on the Pow network.
if isPreBellatrix(headBlk.Version()) {
return nil, nil
}
isExecutionBlk, err := blocks.ExecutionBlock(headBlk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not determine if block is execution block")
}
if !isExecutionBlk {
return nil, nil
}
headPayload, err := headBlk.Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block")
}
var finalizedHash []byte
if isPreBellatrix(finalizedBlock.Block().Version()) {
finalizedHash = params.BeaconConfig().ZeroHash[:]
} else {
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block execution payload")
}
finalizedHash = payload.BlockHash
}
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: headPayload.BlockHash,
FinalizedBlockHash: finalizedHash,
}
// payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
default:
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
}
}
return payloadID, nil
}
// notifyNewPayload signals the execution engine of a new payload
func (s *Service) notifyNewPayload(ctx context.Context, preState, postState state.BeaconState, blk block.SignedBeaconBlock) error {
if preState == nil || postState == nil {
return errors.New("pre and post states must not be nil")
}
// Execution payload is only supported in Bellatrix and beyond.
if isPreBellatrix(postState.Version()) {
return nil
}
if err := helpers.BeaconBlockIsNil(blk); err != nil {
return err
}
body := blk.Block().Body()
enabled, err := blocks.ExecutionEnabled(postState, blk.Block().Body())
if err != nil {
return errors.Wrap(err, "could not determine if execution is enabled")
}
if !enabled {
return nil
}
payload, err := body.ExecutionPayload()
if err != nil {
return errors.Wrap(err, "could not get execution payload")
}
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"slot": postState.Slot(),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return nil
default:
return errors.Wrap(err, "could not validate execution payload from execution engine")
}
}
// During the transition event, the transition block should be verified for sanity.
if isPreBellatrix(preState.Version()) {
return nil
}
atTransition, err := blocks.MergeTransitionBlock(preState, body)
if err != nil {
return errors.Wrap(err, "could not check if merge block is terminal")
}
if !atTransition {
return nil
}
return s.validateMergeBlock(ctx, blk)
}
// isPreBellatrix returns true if input version is before bellatrix fork.
func isPreBellatrix(v int) bool {
return v == version.Phase0 || v == version.Altair
}
// optimisticCandidateBlock returns true if this block can be optimistically synced.
//
// Spec pseudocode definition:
@@ -40,11 +165,11 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.Beacon
func (s *Service) loadSyncedTips(root [32]byte, slot types.Slot) error {
// Initialize synced tips
tips, err := s.cfg.BeaconDB.ValidatedTips(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get synced tips")
}
if len(tips) == 0 {
if err != nil || len(tips) == 0 {
tips[root] = slot
if err != nil {
log.WithError(err).Warn("Could not read synced tips from DB, using finalized checkpoint as synced tip")
}
}
if err := s.cfg.ForkChoiceStore.SetSyncedTips(tips); err != nil {
return errors.Wrap(err, "could not set synced tips")

View File

@@ -7,10 +7,13 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
@@ -19,6 +22,305 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
func Test_NotifyForkchoiceUpdate(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockAltair())
require.NoError(t, err)
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
tests := []struct {
name string
blk block.BeaconBlock
finalizedRoot [32]byte
newForkchoiceErr error
errString string
}{
{
name: "nil block",
errString: "nil head block",
},
{
name: "phase0 block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
require.NoError(t, err)
return b
}(),
},
{
name: "altair block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}})
require.NoError(t, err)
return b
}(),
},
{
name: "not execution block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
},
},
})
require.NoError(t, err)
return b
}(),
},
{
name: "happy case: finalized root is altair block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
finalizedRoot: altairBlkRoot,
},
{
name: "happy case: finalized root is bellatrix block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
finalizedRoot: bellatrixBlkRoot,
},
{
name: "forkchoice updated with optimistic block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrAcceptedSyncingPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
},
{
name: "forkchoice updated with invalid block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrInvalidPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
errString: "could not notify forkchoice update from execution engine: payload status is INVALID",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
engine := &mockEngineService{forkchoiceError: tt.newForkchoiceErr}
service.cfg.ExecutionEngineCaller = engine
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, tt.finalizedRoot)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
})
}
}
func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
},
}
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
tests := []struct {
name string
preState state.BeaconState
postState state.BeaconState
blk block.SignedBeaconBlock
newPayloadErr error
errString string
}{
{
name: "phase 0 post state",
postState: phase0State,
preState: phase0State,
},
{
name: "altair post state",
postState: altairState,
preState: altairState,
},
{
name: "nil states",
errString: "pre and post states must not be nil",
},
{
name: "nil beacon block",
postState: bellatrixState,
preState: bellatrixState,
errString: "signed beacon block can't be nil",
},
{
name: "new payload with optimistic block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: engine.ErrAcceptedSyncingPayloadStatus,
},
{
name: "new payload with invalid block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: engine.ErrInvalidPayloadStatus,
errString: "could not validate execution payload from execution engine: payload status is INVALID",
},
{
name: "altair pre state",
postState: bellatrixState,
preState: altairState,
blk: bellatrixBlk,
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
},
{
name: "not at merge transition",
postState: bellatrixState,
preState: bellatrixState,
blk: func() block.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
},
{
name: "happy case",
postState: bellatrixState,
preState: bellatrixState,
blk: func() block.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
},
}
for _, tt := range tests {
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = engine
err := service.notifyNewPayload(ctx, tt.preState, tt.postState, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
}
}
func Test_IsOptimisticCandidateBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
@@ -115,7 +417,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
blk.Block.Body.ExecutionPayload.Random = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)

View File

@@ -1,20 +1,132 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
// validates terminal pow block by comparing own total difficulty with parent's total difficulty.
// validateMergeBlock validates terminal block hash in the event of manual overrides before checking for total difficulty.
//
// def validate_merge_block(block: BeaconBlock) -> None:
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
//
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
// # Check if `pow_block` is available
// assert pow_block is not None
// pow_parent = get_pow_block(pow_block.parent_hash)
// # Check if `pow_parent` is available
// assert pow_parent is not None
// # Check if `pow_block` is a valid terminal PoW block
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
func (s *Service) validateMergeBlock(ctx context.Context, b block.SignedBeaconBlock) error {
if err := helpers.BeaconBlockIsNil(b); err != nil {
return err
}
payload, err := b.Block().Body().ExecutionPayload()
if err != nil {
return err
}
if payload == nil {
return errors.New("nil execution payload")
}
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
return errors.Wrap(err, "could not validate terminal block hash")
}
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash)
if err != nil {
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
}
_, mergeBlockParentTD, err := s.getBlkParentHashAndTD(ctx, mergeBlockParentHash)
if err != nil {
return errors.Wrap(err, "could not get merge parent block total difficulty")
}
valid, err := validateTerminalBlockDifficulties(mergeBlockTD, mergeBlockParentTD)
if err != nil {
return err
}
if !valid {
return fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
params.BeaconConfig().TerminalTotalDifficulty, mergeBlockTD, mergeBlockParentTD)
}
log.WithFields(logrus.Fields{
"slot": b.Block().Slot(),
"mergeBlockHash": common.BytesToHash(payload.ParentHash).String(),
"mergeBlockParentHash": common.BytesToHash(mergeBlockParentHash).String(),
"terminalTotalDifficulty": params.BeaconConfig().TerminalTotalDifficulty,
"mergeBlockTotalDifficulty": mergeBlockTD,
"mergeBlockParentTotalDifficulty": mergeBlockParentTD,
}).Info("Validated terminal block")
return nil
}
// getBlkParentHashAndTD retrieves the parent hash and total difficulty of the given block.
func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]byte, *uint256.Int, error) {
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash))
if err != nil {
return nil, nil, errors.Wrap(err, "could not get pow block")
}
if blk == nil {
return nil, nil, errors.New("pow block is nil")
}
blkTDBig, err := hexutil.DecodeBig(blk.TotalDifficulty)
if err != nil {
return nil, nil, errors.Wrap(err, "could not decode merge block total difficulty")
}
blkTDUint256, overflows := uint256.FromBig(blkTDBig)
if overflows {
return nil, nil, errors.New("total difficulty overflows")
}
return blk.ParentHash, blkTDUint256, nil
}
// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// spec code:
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
func validateTerminalBlockHash(blkSlot types.Slot, payload *enginev1.ExecutionPayload) error {
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
return nil
}
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
return errors.New("terminal block hash activation epoch not reached")
}
if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
return errors.New("parent hash does not match terminal block hash")
}
return nil
}
// validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty.
//
// def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool:
// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
// return is_total_difficulty_reached and is_parent_total_difficulty_valid
func validTerminalPowBlock(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
func validateTerminalBlockDifficulties(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
b, ok := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
if !ok {
return false, errors.New("failed to parse terminal total difficulty")
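
Concretely, with the values used in Test_validateMergeBlock below (config TTD of 2, merge block total difficulty 0x2, parent total difficulty 0x1): 2 >= 2 and 1 < 2, so the block is a valid terminal PoW block; lowering the config TTD to 1 fails the parent check (1 < 1 is false) and produces the "invalid TTD" error. A one-function sketch of the predicate, ignoring the big.Int/uint256 parsing handled above:

package example

// isValidTerminalPowBlock restates the spec predicate implemented by
// validateTerminalBlockDifficulties with plain integers: the block must have
// reached the terminal total difficulty while its parent has not.
func isValidTerminalPowBlock(blockTD, parentTD, ttd uint64) bool {
	return blockTD >= ttd && parentTD < ttd
}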

View File

@@ -1,12 +1,21 @@
package blockchain
import (
"context"
"fmt"
"math/big"
"testing"
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -66,10 +75,10 @@ func Test_validTerminalPowBlock(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = fmt.Sprint(tt.ttd)
params.OverrideBeaconConfig(cfg)
got, err := validTerminalPowBlock(tt.currentDifficulty, tt.parentDifficulty)
got, err := validateTerminalBlockDifficulties(tt.currentDifficulty, tt.parentDifficulty)
require.NoError(t, err)
if got != tt.want {
t.Errorf("validTerminalPowBlock() = %v, want %v", got, tt.want)
t.Errorf("validateTerminalBlockDifficulties() = %v, want %v", got, tt.want)
}
})
}
@@ -87,7 +96,115 @@ func Test_validTerminalPowBlockSpecConfig(t *testing.T) {
parent, of := uint256.FromBig(i)
require.Equal(t, of, false)
got, err := validTerminalPowBlock(current, parent)
got, err := validateTerminalBlockDifficulties(current, parent)
require.NoError(t, err)
require.Equal(t, true, got)
}
func Test_validateMergeBlock(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
engine.blks[[32]byte{'a'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.validateMergeBlock(ctx, b))
cfg.TerminalTotalDifficulty = "1"
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", service.validateMergeBlock(ctx, b))
}
func Test_getBlkParentHashAndTD(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
h := [32]byte{'a'}
p := [32]byte{'b'}
td := "0x1"
engine.blks[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: td,
}
parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
require.NoError(t, err)
require.Equal(t, p, bytesutil.ToBytes32(parentHash))
require.Equal(t, td, totalDifficulty.String())
_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
require.ErrorContains(t, "could not get pow block: block not found", err)
engine.blks[h] = nil
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "pow block is nil", err)
engine.blks[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: "1",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)
engine.blks[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "could not decode merge block total difficulty: hex number > 256 bits", err)
}
func Test_validateTerminalBlockHash(t *testing.T) {
require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
cfg := params.BeaconConfig()
cfg.TerminalBlockHash = [32]byte{0x01}
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
cfg.TerminalBlockHashActivationEpoch = 0
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{ParentHash: cfg.TerminalBlockHash.Bytes()}))
}

View File
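The error strings asserted in Test_getBlkParentHashAndTD ("hex string without 0x prefix", "hex number > 256 bits") match the errors produced by go-ethereum's hexutil package. A hedged sketch of that decoding path, assuming hexutil.DecodeBig plus holiman/uint256 (the helper name below is illustrative, not the PR's internal function):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/holiman/uint256"
	"github.com/pkg/errors"
)

// decodeTotalDifficulty parses an engine-API total difficulty string such as "0x2".
func decodeTotalDifficulty(td string) (*uint256.Int, error) {
	// DecodeBig rejects strings without a 0x prefix and values wider than 256 bits.
	b, err := hexutil.DecodeBig(td)
	if err != nil {
		return nil, errors.Wrap(err, "could not decode merge block total difficulty")
	}
	u, overflow := uint256.FromBig(b)
	if overflow {
		// Defensive only: DecodeBig already enforces the 256-bit bound.
		return nil, errors.New("total difficulty overflows 256 bits")
	}
	return u, nil
}

func main() {
	u, err := decodeTotalDifficulty("0x2")
	fmt.Println(u.Hex(), err) // 0x2 <nil>
	_, err = decodeTotalDifficulty("2")
	fmt.Println(err) // could not decode merge block total difficulty: hex string without 0x prefix
}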

@@ -113,7 +113,7 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
}
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
err := fmt.Errorf("block %#x is not a descendent of the current finalized block slot %d, %#x != %#x",
err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)

View File

@@ -100,7 +100,7 @@ func TestStore_OnBlock(t *testing.T) {
return b
}(),
s: st.Copy(),
wantErrString: "is not a descendent of the current finalized block",
wantErrString: "is not a descendant of the current finalized block",
},
{
name: "same slot as finalized block",
@@ -789,7 +789,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
finalizedRoot: r1,
parentRoot: r,
},
wantedErr: "is not a descendent of the current finalized block slot",
wantedErr: "is not a descendant of the current finalized block slot",
},
{
name: "is descendant",

View File

@@ -27,6 +27,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
@@ -83,6 +84,7 @@ type config struct {
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller enginev1.Caller
}
// NewService instantiates a new block service instance that will
@@ -217,7 +219,7 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
GenesisValidatorsRoot: saved.GenesisValidatorRoot(),
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
},
})
@@ -379,7 +381,7 @@ func (s *Service) onPowchainStart(ctx context.Context, genesisTime time.Time) {
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
GenesisValidatorsRoot: initializedState.GenesisValidatorRoot(),
GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(),
},
})
}

View File

@@ -347,7 +347,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
@@ -391,7 +391,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
@@ -447,7 +447,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, finalizedRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))

View File

@@ -32,34 +32,35 @@ var ErrNilState = errors.New("nil state")
// ChainService defines the mock interface for testing
type ChainService struct {
State state.BeaconState
Root []byte
Block block.SignedBeaconBlock
Optimistic bool
ValidAttestation bool
ValidatorsRoot [32]byte
PublicKey [fieldparams.BLSPubkeyLength]byte
FinalizedCheckPoint *ethpb.Checkpoint
CurrentJustifiedCheckPoint *ethpb.Checkpoint
PreviousJustifiedCheckPoint *ethpb.Checkpoint
BlocksReceived []block.SignedBeaconBlock
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
Balance *precompute.Balance
Genesis time.Time
ValidatorsRoot [32]byte
ForkChoiceStore *protoarray.Store
CanonicalRoots map[[32]byte]bool
Fork *ethpb.Fork
ETH1Data *ethpb.Eth1Data
InitSyncBlockRoots map[[32]byte]bool
DB db.Database
State state.BeaconState
Block block.SignedBeaconBlock
VerifyBlkDescendantErr error
stateNotifier statefeed.Notifier
BlocksReceived []block.SignedBeaconBlock
SyncCommitteeIndices []types.CommitteeIndex
blockNotifier blockfeed.Notifier
opNotifier opfeed.Notifier
ValidAttestation bool
ForkChoiceStore *protoarray.Store
VerifyBlkDescendantErr error
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
SyncCommitteeIndices []types.CommitteeIndex
Root []byte
SyncCommitteeDomain []byte
SyncSelectionProofDomain []byte
SyncContributionProofDomain []byte
PublicKey [fieldparams.BLSPubkeyLength]byte
SyncCommitteePubkeys [][]byte
InitSyncBlockRoots map[[32]byte]bool
Genesis time.Time
}
// StateNotifier mocks the same method in the chain service.
@@ -329,8 +330,8 @@ func (s *ChainService) GenesisTime() time.Time {
return s.Genesis
}
// GenesisValidatorRoot mocks the same method in the chain service.
func (s *ChainService) GenesisValidatorRoot() [32]byte {
// GenesisValidatorsRoot mocks the same method in the chain service.
func (s *ChainService) GenesisValidatorsRoot() [32]byte {
return s.ValidatorsRoot
}
@@ -370,8 +371,8 @@ func (s *ChainService) HasInitSyncBlock(rt [32]byte) bool {
return s.InitSyncBlockRoots[rt]
}
// HeadGenesisValidatorRoot mocks HeadGenesisValidatorRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorRoot() [32]byte {
// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}
}
@@ -442,7 +443,7 @@ func (s *ChainService) HeadSyncContributionProofDomain(_ context.Context, _ type
// IsOptimistic mocks the same method in the chain service.
func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
return false, nil
return s.Optimistic, nil
}
// IsOptimisticForRoot mocks the same method in the chain service.

View File

@@ -102,7 +102,7 @@ func FilterSyncCommitteeVotes(s state.BeaconStateAltair, sync *ethpb.SyncAggrega
// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.
func VerifySyncCommitteeSig(s state.BeaconStateAltair, syncKeys []bls.PublicKey, syncSig []byte) error {
ps := slots.PrevSlot(s.Slot())
d, err := signing.Domain(s.Fork(), slots.ToEpoch(ps), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
d, err := signing.Domain(s.Fork(), slots.ToEpoch(ps), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
if err != nil {
return err
}

View File

@@ -68,7 +68,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorRoot(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,

View File

@@ -77,7 +77,7 @@ func TestUpgradeToAltair(t *testing.T) {
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), aState.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorRoot(), aState.GenesisValidatorRoot())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), aState.GenesisValidatorsRoot())
require.Equal(t, preForkState.Slot(), aState.Slot())
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), aState.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), aState.BlockRoots())

View File

@@ -208,7 +208,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
beaconState.Fork(),
indexedAtt.Data.Target.Epoch,
params.BeaconConfig().DomainBeaconAttester,
beaconState.GenesisValidatorRoot(),
beaconState.GenesisValidatorsRoot(),
)
if err != nil {
return err

View File

@@ -417,7 +417,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
Slot: 1,
},
})
prevDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch-1, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
prevDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch-1, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, prevDomain)
require.NoError(t, err)
@@ -437,7 +437,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
CommitteeIndex: 1,
},
})
currDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
currDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err = signing.ComputeSigningRoot(att2.Data, currDomain)
require.NoError(t, err)
@@ -476,7 +476,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
Slot: 1,
},
})
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)
@@ -540,7 +540,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
Slot: 1,
},
})
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)

View File

@@ -108,7 +108,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
@@ -177,7 +177,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
@@ -246,7 +246,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")

View File

@@ -44,7 +44,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
AttestingIndices: setA,
Signature: make([]byte, 96),
}
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err, "Could not get signing root of beacon block header")

View File

@@ -57,7 +57,7 @@ func ProcessVoluntaryExits(
if err != nil {
return nil, err
}
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorRoot()); err != nil {
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorsRoot()); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex)

View File

@@ -124,8 +124,8 @@ func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) e
return err
}
if !bytes.Equal(payload.Random, random) {
return errors.New("incorrect random")
if !bytes.Equal(payload.PrevRandao, random) {
return errors.New("incorrect prev randao")
}
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
if err != nil {
@@ -201,7 +201,7 @@ func PayloadToHeader(payload *enginev1.ExecutionPayload) (*ethpb.ExecutionPayloa
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
ReceiptRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
Random: bytesutil.SafeCopyBytes(payload.Random),
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
BlockNumber: payload.BlockNumber,
GasLimit: payload.GasLimit,
GasUsed: payload.GasUsed,
@@ -229,7 +229,7 @@ func isEmptyPayload(p *enginev1.ExecutionPayload) bool {
if !bytes.Equal(p.LogsBloom, make([]byte, fieldparams.LogsBloomLength)) {
return false
}
if !bytes.Equal(p.Random, make([]byte, fieldparams.RootLength)) {
if !bytes.Equal(p.PrevRandao, make([]byte, fieldparams.RootLength)) {
return false
}
if !bytes.Equal(p.BaseFeePerGas, make([]byte, fieldparams.RootLength)) {
@@ -275,7 +275,7 @@ func isEmptyHeader(h *ethpb.ExecutionPayloadHeader) bool {
if !bytes.Equal(h.LogsBloom, make([]byte, fieldparams.LogsBloomLength)) {
return false
}
if !bytes.Equal(h.Random, make([]byte, fieldparams.RootLength)) {
if !bytes.Equal(h.PrevRandao, make([]byte, fieldparams.RootLength)) {
return false
}
if !bytes.Equal(h.BaseFeePerGas, make([]byte, fieldparams.RootLength)) {

View File

@@ -78,7 +78,7 @@ func Test_MergeComplete(t *testing.T) {
name: "has random",
payload: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.Random = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
h.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
@@ -246,7 +246,7 @@ func Test_MergeBlock(t *testing.T) {
name: "empty header, payload has random",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Random = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
@@ -520,21 +520,21 @@ func Test_ValidatePayload(t *testing.T) {
name: "validate passes",
payload: func() *enginev1.ExecutionPayload {
h := emptyPayload()
h.Random = random
h.PrevRandao = random
h.Timestamp = uint64(ts.Unix())
return h
}(), err: nil,
},
{
name: "incorrect random",
name: "incorrect prev randao",
payload: emptyPayload(),
err: errors.New("incorrect random"),
err: errors.New("incorrect prev randao"),
},
{
name: "incorrect timestamp",
payload: func() *enginev1.ExecutionPayload {
h := emptyPayload()
h.Random = random
h.PrevRandao = random
h.Timestamp = 1
return h
}(),
@@ -568,21 +568,21 @@ func Test_ProcessPayload(t *testing.T) {
name: "process passes",
payload: func() *enginev1.ExecutionPayload {
h := emptyPayload()
h.Random = random
h.PrevRandao = random
h.Timestamp = uint64(ts.Unix())
return h
}(), err: nil,
},
{
name: "incorrect random",
name: "incorrect prev randao",
payload: emptyPayload(),
err: errors.New("incorrect random"),
err: errors.New("incorrect prev randao"),
},
{
name: "incorrect timestamp",
payload: func() *enginev1.ExecutionPayload {
h := emptyPayload()
h.Random = random
h.PrevRandao = random
h.Timestamp = 1
return h
}(),
@@ -621,7 +621,7 @@ func Test_PayloadToHeader(t *testing.T) {
p.StateRoot = b
p.ReceiptsRoot = b
p.LogsBloom = b
p.Random = b
p.PrevRandao = b
p.ExtraData = b
p.BaseFeePerGas = b
p.BlockHash = b
@@ -635,7 +635,7 @@ func Test_PayloadToHeader(t *testing.T) {
require.DeepSSZEqual(t, h.StateRoot, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.ReceiptRoot, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.LogsBloom, make([]byte, fieldparams.LogsBloomLength))
require.DeepSSZEqual(t, h.Random, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.PrevRandao, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.ExtraData, make([]byte, 0))
require.DeepSSZEqual(t, h.BaseFeePerGas, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.BlockHash, make([]byte, fieldparams.RootLength))
@@ -663,7 +663,7 @@ func emptyPayloadHeader() *ethpb.ExecutionPayloadHeader {
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
Random: make([]byte, fieldparams.RootLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
@@ -678,7 +678,7 @@ func emptyPayload() *enginev1.ExecutionPayload {
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
Random: make([]byte, fieldparams.RootLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),

View File

@@ -378,7 +378,7 @@ func TestVerifyProposerSlashing(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
sk := sks[tt.args.slashing.Header_1.Header.ProposerIndex]
d, err := signing.Domain(tt.args.beaconState.Fork(), slots.ToEpoch(tt.args.slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer, tt.args.beaconState.GenesisValidatorRoot())
d, err := signing.Domain(tt.args.beaconState.Fork(), slots.ToEpoch(tt.args.slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer, tt.args.beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
if tt.args.slashing.Header_1.Signature == nil {
sr, err := signing.ComputeSigningRoot(tt.args.slashing.Header_1.Header, d)

View File

@@ -26,7 +26,7 @@ func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) {
epoch := types.Epoch(0)
buf := make([]byte, 32)
binary.LittleEndian.PutUint64(buf, uint64(epoch))
domain, err := signing.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := (&ethpb.SigningData{ObjectRoot: buf, Domain: domain}).HashTreeRoot()
require.NoError(t, err)

View File

@@ -68,7 +68,7 @@ func VerifyBlockSignature(beaconState state.ReadOnlyBeaconState,
sig []byte,
rootFunc func() ([32]byte, error)) error {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
@@ -83,7 +83,7 @@ func VerifyBlockSignature(beaconState state.ReadOnlyBeaconState,
// VerifyBlockHeaderSignature verifies the proposer signature of a beacon block header.
func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
@@ -104,7 +104,7 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
@@ -122,7 +122,7 @@ func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
sig []byte,
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
@@ -164,7 +164,7 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
buf := make([]byte, 32)
binary.LittleEndian.PutUint64(buf, uint64(currentEpoch))
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorsRoot())
if err != nil {
return nil, nil, nil, err
}
@@ -231,7 +231,7 @@ func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBe
}
fork := beaconState.Fork()
gvr := beaconState.GenesisValidatorRoot()
gvr := beaconState.GenesisValidatorsRoot()
dt := params.BeaconConfig().DomainBeaconAttester
// Split attestations by fork. Note: the signature domain will differ based on the fork.

View File

@@ -41,7 +41,7 @@ func TestVerifyBlockHeaderSignature(t *testing.T) {
beaconState.Fork(),
0,
params.BeaconConfig().DomainBeaconProposer,
beaconState.GenesisValidatorRoot(),
beaconState.GenesisValidatorsRoot(),
)
require.NoError(t, err)
htr, err := blockHeader.Header.HashTreeRoot()
@@ -77,7 +77,7 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
CurrentVersion: params.BeaconConfig().AltairForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
}
domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorRoot())
domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorsRoot())
assert.NoError(t, err)
rt, err := signing.ComputeSigningRoot(altairBlk.Block, domain)
assert.NoError(t, err)

View File

@@ -38,7 +38,7 @@ func UpgradeToBellatrix(ctx context.Context, state state.BeaconState) (state.Bea
s := &ethpb.BeaconStateBellatrix{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorRoot(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
@@ -71,7 +71,7 @@ func UpgradeToBellatrix(ctx context.Context, state state.BeaconState) (state.Bea
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
Random: make([]byte, 32),
PrevRandao: make([]byte, 32),
BlockNumber: 0,
GasLimit: 0,
GasUsed: 0,

View File

@@ -19,7 +19,7 @@ func TestUpgradeToBellatrix(t *testing.T) {
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorRoot(), mSt.GenesisValidatorRoot())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
require.Equal(t, preForkState.Slot(), mSt.Slot())
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
@@ -67,7 +67,7 @@ func TestUpgradeToBellatrix(t *testing.T) {
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
Random: make([]byte, 32),
PrevRandao: make([]byte, 32),
BlockNumber: 0,
GasLimit: 0,
GasUsed: 0,

View File

@@ -11,18 +11,22 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
var ErrNilSignedBeaconBlock = errors.New("signed beacon block can't be nil")
var ErrNilBeaconBlock = errors.New("beacon block can't be nil")
var ErrNilBeaconBlockBody = errors.New("beacon block body can't be nil")
// BeaconBlockIsNil checks if any composite field of input signed beacon block is nil.
// Access to these nil fields will result in a run time panic,
// so it is recommended to run these checks as the first line of defense.
func BeaconBlockIsNil(b block.SignedBeaconBlock) error {
if b == nil || b.IsNil() {
return errors.New("signed beacon block can't be nil")
return ErrNilSignedBeaconBlock
}
if b.Block().IsNil() {
return errors.New("beacon block can't be nil")
return ErrNilBeaconBlock
}
if b.Block().Body().IsNil() {
return errors.New("beacon block body can't be nil")
return ErrNilBeaconBlockBody
}
return nil
}

View File

@@ -23,7 +23,7 @@ var ErrSigFailedToVerify = errors.New("signature did not verify")
// ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key.
func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch types.Epoch, obj fssz.HashRoot, domain [4]byte, key bls.SecretKey) ([]byte, error) {
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorRoot())
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
@@ -69,7 +69,7 @@ func ComputeDomainVerifySigningRoot(st state.ReadOnlyBeaconState, index types.Va
if err != nil {
return err
}
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorRoot())
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
if err != nil {
return err
}
@@ -219,7 +219,7 @@ func computeForkDataRoot(version, root []byte) ([32]byte, error) {
return r, nil
}
// ComputeForkDigest returns the fork for the current version and genesis validator root
// ComputeForkDigest returns the fork for the current version and genesis validators root
//
// Spec pseudocode definition:
// def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest:

View File

@@ -244,7 +244,7 @@ func createFullBellatrixBlockWithOperations(t *testing.T) (state.BeaconState,
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
Random: make([]byte, fieldparams.RootLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),

View File

@@ -137,16 +137,6 @@ func CalculateStateRoot(
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not process block")
}
if signed.Version() == version.Altair || signed.Version() == version.Bellatrix {
sa, err := signed.Block().Body().SyncAggregate()
if err != nil {
return [32]byte{}, err
}
state, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return [32]byte{}, err
}
}
return state.HashTreeRoot(ctx)
}
@@ -183,16 +173,6 @@ func ProcessBlockNoVerifyAnySig(
if err != nil {
return nil, nil, err
}
if signed.Version() == version.Altair || signed.Version() == version.Bellatrix {
sa, err := signed.Block().Body().SyncAggregate()
if err != nil {
return nil, nil, err
}
state, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return nil, nil, err
}
}
bSet, err := b.BlockSignatureBatch(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
if err != nil {
@@ -340,6 +320,19 @@ func ProcessBlockForStateRoot(
return nil, errors.Wrap(err, "could not process block operation")
}
if signed.Block().Version() == version.Phase0 {
return state, nil
}
sa, err := signed.Block().Body().SyncAggregate()
if err != nil {
return nil, errors.Wrap(err, "could not get sync aggregate from block")
}
state, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return nil, errors.Wrap(err, "process_sync_aggregate failed")
}
return state, nil
}

View File

@@ -253,7 +253,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
hashTreeRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)

View File

@@ -7,3 +7,9 @@ import "github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
// i/o error. This variable copies the value in the kv package to the same scope as the Database interfaces,
// so that it is available to code paths that do not interact directly with the kv package.
var ErrNotFound = kv.ErrNotFound
// ErrNotFoundState wraps ErrNotFound for an error specific to a state not being found in the database.
var ErrNotFoundState = kv.ErrNotFoundState
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot

View File

@@ -33,6 +33,7 @@ type ReadOnlyDatabase interface {
ValidatedTips(ctx context.Context) (map[[32]byte]types.Slot, error)
// State related methods.
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
GenesisState(ctx context.Context) (state.BeaconState, error)
HasState(ctx context.Context, blockRoot [32]byte) bool
StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)

View File

@@ -81,7 +81,6 @@ go_test(
"checkpoint_test.go",
"deposit_contract_test.go",
"encoding_test.go",
"error_test.go",
"finalized_block_roots_test.go",
"genesis_test.go",
"init_test.go",

View File

@@ -1,6 +1,6 @@
package kv
import "errors"
import "github.com/pkg/errors"
// errDeleteFinalized is raised when we attempt to delete a finalized block/state
var errDeleteFinalized = errors.New("cannot delete finalized block or state")
@@ -8,42 +8,7 @@ var errDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.
var ErrNotFound = errors.New("not found in db")
var ErrNotFoundState = errors.Wrap(ErrNotFound, "state not found")
// ErrNotFoundOriginBlockRoot is an error specifically for the origin block root getter
var ErrNotFoundOriginBlockRoot = WrapDBError(ErrNotFound, "OriginBlockRoot")
// WrapDBError wraps an error in a DBError. See commentary on DBError for more context.
func WrapDBError(e error, outer string) error {
return DBError{
Wraps: e,
Outer: errors.New(outer),
}
}
// DBError implements the Error method so that it can be asserted as an error.
// The Unwrap method supports error wrapping, enabling it to be used with errors.Is/As.
// The primary use case is to make it simple for database methods to return errors
// that wrap ErrNotFound, allowing calling code to check for "not found" errors
// like: `error.Is(err, ErrNotFound)`. This is intended to improve error handling
// in db lookup methods that need to differentiate between a missing value and some
// other database error. for more background see:
// https://go.dev/blog/go1.13-errors
type DBError struct {
Wraps error
Outer error
}
// Error satisfies the error interface, so that DBErrors can be used anywhere that
// expects an `error`.
func (e DBError) Error() string {
es := e.Outer.Error()
if e.Wraps != nil {
es += ": " + e.Wraps.Error()
}
return es
}
// Unwrap is used by the errors package Is and As methods.
func (e DBError) Unwrap() error {
return e.Wraps
}
var ErrNotFoundOriginBlockRoot = errors.Wrap(ErrNotFound, "OriginBlockRoot")

View File

@@ -1,24 +0,0 @@
package kv
import (
"errors"
"testing"
)
func TestWrappedSentinelError(t *testing.T) {
e := ErrNotFoundOriginBlockRoot
if !errors.Is(e, ErrNotFoundOriginBlockRoot) {
t.Error("expected that a copy of ErrNotFoundOriginBlockRoot would have an is-a relationship")
}
outer := errors.New("wrapped error")
e2 := DBError{Wraps: ErrNotFoundOriginBlockRoot, Outer: outer}
if !errors.Is(e2, ErrNotFoundOriginBlockRoot) {
t.Error("expected that errors.Is would know DBError wraps ErrNotFoundOriginBlockRoot")
}
// test that the innermost not found error is detected
if !errors.Is(e2, ErrNotFound) {
t.Error("expected that errors.Is would know ErrNotFoundOriginBlockRoot wraps ErrNotFound")
}
}

View File
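The deleted DBError type and its test re-implemented behavior the errors packages already provide: pkg/errors (v0.9+) attaches Unwrap to wrapped errors, so the standard library's errors.Is still matches the inner sentinel. A small sketch of that behavior using local stand-in sentinels (not the kv package's variables):

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = stderrors.New("not found in db")
var errNotFoundState = errors.Wrap(errNotFound, "state not found")

func main() {
	// errors.Wrap implements Unwrap, so errors.Is can walk the whole chain.
	wrapped := errors.Wrap(errNotFoundState, "no state with blockroot=0x01")
	fmt.Println(stderrors.Is(wrapped, errNotFoundState)) // true
	fmt.Println(stderrors.Is(wrapped, errNotFound))      // true
}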

@@ -3,6 +3,7 @@ package kv
import (
"bytes"
"context"
"fmt"
"github.com/golang/snappy"
"github.com/pkg/errors"
@@ -46,6 +47,19 @@ func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconStat
return s.unmarshalState(ctx, enc, valEntries)
}
// StateOrError is just like State(), except it only returns a non-error response
// if the requested state is found in the database.
func (s *Store) StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
st, err := s.State(ctx, blockRoot)
if err != nil {
return nil, err
}
if st == nil || st.IsNil() {
return nil, errors.Wrap(ErrNotFoundState, fmt.Sprintf("no state with blockroot=%#x", blockRoot))
}
return st, nil
}
// GenesisState returns the genesis state in beacon chain.
func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisState")

View File

@@ -21,6 +21,12 @@ import (
bolt "go.etcd.io/bbolt"
)
func TestStateNil(t *testing.T) {
db := setupDB(t)
_, err := db.StateOrError(context.Background(), [32]byte{})
require.ErrorIs(t, err, ErrNotFoundState)
}
func TestState_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)

View File

@@ -120,11 +120,6 @@ func (_ *Service) AllDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
return []*ethpb.Deposit{}
}
// ChainStartDeposits mocks out the powchain functionality for interop.
func (s *Service) ChainStartDeposits() []*ethpb.Deposit {
return s.chainStartDeposits
}
// ChainStartEth1Data mocks out the powchain functionality for interop.
func (_ *Service) ChainStartEth1Data() *ethpb.Eth1Data {
return &ethpb.Eth1Data{}

View File

@@ -8,7 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
)
// ForkChoicer represents the full fork choice interface composed of all of the sub-interfaces.
// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
type ForkChoicer interface {
HeadRetriever // to compute head.
BlockProcessor // to track new block for fork choice.

View File

@@ -1,7 +1,7 @@
/*
Package protoarray implements proto array fork choice as outlined:
https://github.com/protolambda/lmd-ghost#array-based-stateful-dag-proto_array
This was motivated by the the original implementation by Sigma Prime here:
This was motivated by the original implementation by Sigma Prime here:
https://github.com/sigp/lighthouse/pull/804
*/
package protoarray

View File

@@ -24,7 +24,7 @@ func computeDeltas(
oldBalance := uint64(0)
newBalance := uint64(0)
// Skip if validator has never voted for current root and next root (ie. if the
// Skip if validator has never voted for current root and next root (i.e. if the
// votes are zero hash aka genesis block), there's nothing to compute.
if vote.currentRoot == params.BeaconConfig().ZeroHash && vote.nextRoot == params.BeaconConfig().ZeroHash {
continue

View File

@@ -47,6 +47,7 @@ func (f *ForkChoice) Optimistic(ctx context.Context, root [32]byte, slot types.S
f.syncedTips.RLock()
_, ok := f.syncedTips.validatedTips[root]
if ok {
f.syncedTips.RUnlock()
return false, nil
}
f.syncedTips.RUnlock()

View File
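The one-line fix above releases the read lock before the early return; without it, a later Lock on the same RWMutex (which the new regression check in the test below acquires) would block forever. A minimal sketch of the unlock-on-every-path pattern, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

type tips struct {
	sync.RWMutex
	validated map[string]bool
}

// hasTip releases the read lock on every return path so that a subsequent
// Lock() on the same mutex cannot deadlock.
func (t *tips) hasTip(root string) bool {
	t.RLock()
	if t.validated[root] {
		t.RUnlock()
		return true
	}
	t.RUnlock()
	return false
}

func main() {
	t := &tips{validated: map[string]bool{"a": true}}
	fmt.Println(t.hasTip("a"), t.hasTip("b")) // true false
	// With all read locks released, taking the write lock succeeds.
	t.Lock()
	t.validated["b"] = true
	t.Unlock()
}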

@@ -200,6 +200,10 @@ func TestOptimistic(t *testing.T) {
op, err = f.Optimistic(ctx, nodeK.root, nodeK.slot)
require.NoError(t, err)
require.Equal(t, op, true)
// Request a write lock on syncedTips to guard against regression #10289
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
}
// This tests the algorithm to update syncedTips
@@ -235,7 +239,7 @@ func TestUpdateSyncTipsWithValidRoots(t *testing.T) {
tests := []struct {
root [32]byte // the root of the new VALID block
tips map[[32]byte]types.Slot // the old synced tips
newtips map[[32]byte]types.Slot // the updated synced tips
newTips map[[32]byte]types.Slot // the updated synced tips
wantedErr error
}{
{
@@ -327,7 +331,7 @@ func TestUpdateSyncTipsWithValidRoots(t *testing.T) {
} else {
require.NoError(t, err)
f.syncedTips.RLock()
require.DeepEqual(t, f.syncedTips.validatedTips, tc.newtips)
require.DeepEqual(t, f.syncedTips.validatedTips, tc.newTips)
f.syncedTips.RUnlock()
}
}
@@ -345,7 +349,7 @@ func TestUpdateSyncTipsWithValidRoots(t *testing.T) {
// J(1) -- K(1) -- L(0)
//
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice. The numbers in parenthesis are
// single block that is outside of Fork choice. The numbers in parentheses are
// the weights of the nodes before removal
//
func TestUpdateSyncTipsWithInvalidRoot(t *testing.T) {
@@ -535,15 +539,16 @@ func TestFindSyncedTip(t *testing.T) {
}
for _, tc := range tests {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node := f.store.nodes[f.store.nodesIndices[tc.root]]
syncedTips := &optimisticStore{
validatedTips: tc.tips,
}
syncedTips.RLock()
defer syncedTips.RUnlock()
idx, err := f.store.findSyncedTip(ctx, node, syncedTips)
require.NoError(t, err)
require.Equal(t, tc.wanted, f.store.nodes[idx].root)
f.store.nodesLock.RUnlock()
syncedTips.RUnlock()
}
}

View File

@@ -143,7 +143,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// Ancestors have the added weights of their children. Genesis is a special exception at 0 weight,
require.Equal(t, f.store.nodes[0].weight, uint64(0))
// Otherwise assuming a block, A, that is not-genesis:
// Otherwise, assuming a block, A, that is not-genesis:
//
// A -> B -> C
//
@@ -160,7 +160,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// (A: 54) -> (B: 44) -> (C: 24)
// \_->(D: 10)
//
// So B has its own weight, 10, and the sum of of both C and D thats why we see weight 54 in the
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
require.Equal(t, f.store.nodes[1].weight, uint64(54))
require.Equal(t, f.store.nodes[2].weight, uint64(44))

View File

@@ -295,7 +295,7 @@ func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, err
justifiedNode := s.nodes[justifiedIndex]
bestDescendantIndex := justifiedNode.bestDescendant
// If the justified node doesn't have a best descendent,
// If the justified node doesn't have a best descendant,
// the best node is itself.
if bestDescendantIndex == NonExistentNode {
bestDescendantIndex = justifiedIndex
@@ -332,28 +332,38 @@ func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
defer span.End()
// Set the input node to canonical.
s.canonicalNodes[root] = true
// Get the input's parent node index.
i := s.nodesIndices[root]
n := s.nodes[i]
p := n.parent
for p != NonExistentNode {
var newCanonicalRoots [][32]byte
var n *Node
for i != NonExistentNode {
if ctx.Err() != nil {
return ctx.Err()
}
// Get the parent node, if the node is already in canonical mapping,
// we can be sure the rest of the ancestors are canonical. Exit early.
n = s.nodes[p]
n = s.nodes[i]
if s.canonicalNodes[n.root] {
break
}
// Set parent node to canonical. Repeat until parent node index is undefined.
s.canonicalNodes[n.root] = true
p = n.parent
newCanonicalRoots = append(newCanonicalRoots, n.root)
i = n.parent
}
// i is either NonExistentNode or has the index of the last canonical
// node before the last head update.
if i == NonExistentNode {
s.canonicalNodes = make(map[[fieldparams.RootLength]byte]bool)
} else {
for j := i + 1; j < uint64(len(s.nodes)); j++ {
delete(s.canonicalNodes, s.nodes[j].root)
}
}
for _, canonicalRoot := range newCanonicalRoots {
s.canonicalNodes[canonicalRoot] = true
}
return nil
@@ -378,7 +388,7 @@ func (s *Store) insert(ctx context.Context,
index := uint64(len(s.nodes))
parentIndex, ok := s.nodesIndices[parent]
// Mark genesis block's parent as non existent.
// Mark genesis block's parent as non-existent.
if !ok {
parentIndex = NonExistentNode
}
@@ -398,7 +408,7 @@ func (s *Store) insert(ctx context.Context,
s.nodesIndices[root] = index
s.nodes = append(s.nodes, n)
// Update parent with the best child and descendent only if it's available.
// Update parent with the best child and descendant only if it's available.
if n.parent != NonExistentNode {
if err := s.updateBestChildAndDescendant(parentIndex, index); err != nil {
return err
@@ -414,8 +424,8 @@ func (s *Store) insert(ctx context.Context,
// applyWeightChanges iterates backwards through the nodes in store. It checks each node's parent
// and its best child. For each node, it updates the weight with input delta and
// back propagate the nodes delta to its parents delta. After scoring changes,
// the best child is then updated along with best descendant.
// back propagate the nodes' delta to its parents' delta. After scoring changes,
// the best child is then updated along with the best descendant.
func (s *Store) applyWeightChanges(
ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch, newBalances []uint64, delta []int,
) error {
@@ -478,13 +488,13 @@ func (s *Store) applyWeightChanges(
n.weight += uint64(nodeDelta)
}
// Update parent's best child and descendent if the node has a known parent.
// Update parent's best child and descendant if the node has a known parent.
if n.parent != NonExistentNode {
// Protection against node parent index out of bound. This should not happen.
if int(n.parent) >= len(delta) {
return errInvalidParentDelta
}
// Back propagate the nodes delta to its parent.
// Back propagate the nodes' delta to its parent.
delta[n.parent] += nodeDelta
}
}
@@ -510,14 +520,14 @@ func (s *Store) applyWeightChanges(
return nil
}
// updateBestChildAndDescendant updates parent node's best child and descendent.
// updateBestChildAndDescendant updates parent node's best child and descendant.
// It looks at input parent node and input child node and potentially modifies parent's best
// child and best descendent indices.
// child and best descendant indices.
// There are four outcomes:
// 1.) The child is already the best child but it's now invalid due to a FFG change and should be removed.
// 1.) The child is already the best child, but it's now invalid due to a FFG change and should be removed.
// 2.) The child is already the best child and the parent is updated with the new best descendant.
// 3.) The child is not the best child but becomes the best child.
// 4.) The child is not the best child and does not become best child.
// 4.) The child is not the best child and does not become the best child.
func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) error {
// Protection against parent index out of bound, this should not happen.
@@ -552,12 +562,12 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
if parent.bestChild != NonExistentNode {
if parent.bestChild == childIndex && !childLeadsToViableHead {
// If the child is already the best child of the parent but it's not viable for head,
// If the child is already the best child of the parent, but it's not viable for head,
// we should remove it. (Outcome 1)
newParentChild = changeToNone
} else if parent.bestChild == childIndex {
// If the child is already the best child of the parent, set it again to ensure best
// descendent of the parent is updated. (Outcome 2)
// If the child is already the best child of the parent, set it again to ensure the best
// descendant of the parent is updated. (Outcome 2)
newParentChild = changeToChild
} else {
// Protection against parent's best child going out of bound.
@@ -572,7 +582,7 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
}
if childLeadsToViableHead && !bestChildLeadsToViableHead {
// The child leads to a viable head, but the current parent's best child doesnt.
// The child leads to a viable head, but the current parent's best child doesn't.
newParentChild = changeToChild
} else if !childLeadsToViableHead && bestChildLeadsToViableHead {
// The child doesn't lead to a viable head, the current parent's best child does.
@@ -670,7 +680,7 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
}
s.nodesIndices[finalizedRoot] = uint64(0)
// Recompute best child and descendant for each canonical nodes.
// Recompute the best child and descendant for each canonical nodes.
for _, node := range canonicalNodes {
if node.bestChild != NonExistentNode {
node.bestChild = canonicalNodesMap[node.bestChild]
@@ -686,27 +696,27 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
return nil
}
// leadsToViableHead returns true if the node or the best descendent of the node is viable for head.
// leadsToViableHead returns true if the node or the best descendant of the node is viable for head.
// Any node with a different finalized or justified epoch than the ones in the fork choice store
// should not be viable for head.
func (s *Store) leadsToViableHead(node *Node) (bool, error) {
var bestDescendentViable bool
bestDescendentIndex := node.bestDescendant
var bestDescendantViable bool
bestDescendantIndex := node.bestDescendant
// If the best descendant is not part of the leaves.
if bestDescendentIndex != NonExistentNode {
// Protection against out of bound, best descendent index can not be
if bestDescendantIndex != NonExistentNode {
// Protection against out of bound: the best descendant index cannot
// exceed the length of the nodes list.
if bestDescendentIndex >= uint64(len(s.nodes)) {
if bestDescendantIndex >= uint64(len(s.nodes)) {
return false, errInvalidBestDescendantIndex
}
bestDescendentNode := s.nodes[bestDescendentIndex]
bestDescendentViable = s.viableForHead(bestDescendentNode)
bestDescendantNode := s.nodes[bestDescendantIndex]
bestDescendantViable = s.viableForHead(bestDescendantNode)
}
// The node is viable as long as the best descendent is viable.
return bestDescendentViable || s.viableForHead(node), nil
// The node is viable as long as the best descendant is viable.
return bestDescendantViable || s.viableForHead(node), nil
}
// viableForHead returns true if the node is viable to head.

View File
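The reworked updateCanonicalNodes above now both marks the new head's ancestry as canonical and evicts roots that were only canonical under the previous head. A simplified, self-contained sketch of that walk (the node type, indices map, and root strings here are stand-ins for the store's actual fields):

package main

import "fmt"

const nonExistent = ^uint64(0)

type node struct {
	root   string
	parent uint64
}

// updateCanonical walks from the new head toward the root, remembers the newly
// canonical roots, drops stale entries past the fork point, then marks the new
// roots canonical, mirroring the logic shown in the diff above.
func updateCanonical(nodes []node, indices map[string]uint64, canonical map[string]bool, head string) {
	var fresh []string
	i := indices[head]
	for i != nonExistent {
		n := nodes[i]
		if canonical[n.root] {
			break // the rest of the ancestry is already canonical
		}
		fresh = append(fresh, n.root)
		i = n.parent
	}
	if i == nonExistent {
		// Walked past the root: the previous canonical set is fully replaced.
		for k := range canonical {
			delete(canonical, k)
		}
	} else {
		// Everything inserted after the fork point is no longer canonical.
		for j := i + 1; j < uint64(len(nodes)); j++ {
			delete(canonical, nodes[j].root)
		}
	}
	for _, r := range fresh {
		canonical[r] = true
	}
}

func main() {
	nodes := []node{{"a", nonExistent}, {"b", 0}, {"c", 1}, {"d", 1}, {"e", 3}}
	indices := map[string]uint64{"a": 0, "b": 1, "c": 2, "d": 3, "e": 4}
	canonical := map[string]bool{}
	updateCanonical(nodes, indices, canonical, "c")
	updateCanonical(nodes, indices, canonical, "e")
	fmt.Println(canonical) // map[a:true b:true d:true e:true]; c is no longer canonical
}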

@@ -117,12 +117,11 @@ func TestStore_Head_UnknownJustifiedIndex(t *testing.T) {
func TestStore_Head_Itself(t *testing.T) {
r := [32]byte{'A'}
indices := make(map[[32]byte]uint64)
indices[r] = 0
indices := map[[32]byte]uint64{r: 0}
// Since the justified node does not have a best descendant, the best node
// is itself.
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: NonExistentNode}}, canonicalNodes: make(map[[32]byte]bool)}
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, parent: NonExistentNode, bestDescendant: NonExistentNode}}, canonicalNodes: make(map[[32]byte]bool)}
h, err := s.head(context.Background(), r)
require.NoError(t, err)
assert.Equal(t, r, h)
@@ -131,12 +130,11 @@ func TestStore_Head_Itself(t *testing.T) {
func TestStore_Head_BestDescendant(t *testing.T) {
r := [32]byte{'A'}
best := [32]byte{'B'}
indices := make(map[[32]byte]uint64)
indices[r] = 0
indices := map[[32]byte]uint64{r: 0, best: 1}
// Since the justified node's best descendent is at index 1 and it's root is `best`,
// Since the justified node's best descendant is at index 1, and its root is `best`,
// the head should be `best`.
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1}, {root: best}}, canonicalNodes: make(map[[32]byte]bool)}
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1, parent: NonExistentNode}, {root: best, parent: 0}}, canonicalNodes: make(map[[32]byte]bool)}
h, err := s.head(context.Background(), r)
require.NoError(t, err)
assert.Equal(t, best, h)
@@ -146,9 +144,9 @@ func TestStore_Head_ContextCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
r := [32]byte{'A'}
best := [32]byte{'B'}
indices := make(map[[32]byte]uint64)
indices[r] = 0
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1}, {root: best}}, canonicalNodes: make(map[[32]byte]bool)}
indices := map[[32]byte]uint64{r: 0, best: 1}
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, parent: NonExistentNode, bestDescendant: 1}, {root: best, parent: 0}}, canonicalNodes: make(map[[32]byte]bool)}
cancel()
_, err := s.head(ctx, r)
require.ErrorContains(t, "context canceled", err)
@@ -265,7 +263,7 @@ func TestStore_UpdateBestChildAndDescendant_UpdateDescendant(t *testing.T) {
func TestStore_UpdateBestChildAndDescendant_ChangeChildByViability(t *testing.T) {
// Make parent's best child not equal to child index, child leads to viable index and
// parents best child doesnt lead to viable index.
// parent's best child doesn't lead to viable index.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
@@ -720,7 +718,7 @@ func TestStore_UpdateCanonicalNodes_WholeList(t *testing.T) {
f.store.nodesIndices[[32]byte{'c'}] = 2
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
require.Equal(t, len(f.store.nodes), len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical([32]byte{'c'}))
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
require.Equal(t, true, f.IsCanonical([32]byte{'b'}))
require.Equal(t, true, f.IsCanonical([32]byte{'c'}))
require.DeepEqual(t, f.Node([32]byte{'c'}), f.store.nodes[2])
@@ -760,3 +758,34 @@ func TestStore_UpdateCanonicalNodes_ContextCancelled(t *testing.T) {
cancel()
require.ErrorContains(t, "context canceled", f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
}
func TestStore_UpdateCanonicalNodes_RemoveOldCanonical(t *testing.T) {
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.canonicalNodes = map[[32]byte]bool{}
f.store.nodesIndices = map[[32]byte]uint64{
[32]byte{'a'}: 0,
[32]byte{'b'}: 1,
[32]byte{'c'}: 2,
[32]byte{'d'}: 3,
[32]byte{'e'}: 4,
}
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
{slot: 4, root: [32]byte{'d'}, parent: 1},
{slot: 5, root: [32]byte{'e'}, parent: 3},
}
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
require.Equal(t, 3, len(f.store.canonicalNodes))
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'e'}))
require.Equal(t, 4, len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
require.Equal(t, true, f.IsCanonical([32]byte{'b'}))
require.Equal(t, true, f.IsCanonical([32]byte{'d'}))
require.Equal(t, true, f.IsCanonical([32]byte{'e'}))
_, ok := f.store.canonicalNodes[[32]byte{'c'}]
require.Equal(t, false, ok)
}

View File

@@ -834,6 +834,7 @@ func (b *BeaconNode) registerGRPCGateway() error {
selfCert := b.cliCtx.String(flags.CertFlag.Name)
maxCallSize := b.cliCtx.Uint64(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
httpModules := b.cliCtx.String(flags.HTTPModules.Name)
timeout := b.cliCtx.Int(cmd.ApiTimeoutFlag.Name)
if enableDebugRPCEndpoints {
maxCallSize = uint64(math.Max(float64(maxCallSize), debugGrpcMaxMsgSize))
}
@@ -855,6 +856,7 @@ func (b *BeaconNode) registerGRPCGateway() error {
apigateway.WithRemoteCert(selfCert),
apigateway.WithMaxCallRecvMsgSize(maxCallSize),
apigateway.WithAllowedOrigins(allowedOrigins),
apigateway.WithTimeout(uint64(timeout)),
}
if flags.EnableHTTPEthAPI(httpModules) {
opts = append(opts, apigateway.WithApiMiddleware(&apimiddleware.BeaconEndpointFactory{}))

View File
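The gateway wiring above uses the functional-options pattern (apigateway.WithTimeout and friends). A small self-contained sketch of that pattern under illustrative names, not the apigateway package's actual types:

package main

import (
	"fmt"
	"time"
)

type gatewayConfig struct {
	timeout        time.Duration
	allowedOrigins []string
}

// Option mutates the config; options are applied in the order they are passed.
type Option func(*gatewayConfig)

func WithTimeout(seconds uint64) Option {
	return func(c *gatewayConfig) { c.timeout = time.Duration(seconds) * time.Second }
}

func WithAllowedOrigins(origins []string) Option {
	return func(c *gatewayConfig) { c.allowedOrigins = origins }
}

func New(opts ...Option) *gatewayConfig {
	c := &gatewayConfig{timeout: 10 * time.Second} // default when no option overrides it
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	g := New(WithTimeout(30), WithAllowedOrigins([]string{"*"}))
	fmt.Println(g.timeout, g.allowedOrigins) // 30s [*]
}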

@@ -98,7 +98,7 @@ func (s *Service) PublishToTopic(ctx context.Context, topic string, data []byte,
// SubscribeToTopic joins (if necessary) and subscribes to PubSub topic.
func (s *Service) SubscribeToTopic(topic string, opts ...pubsub.SubOpt) (*pubsub.Subscription, error) {
s.awaitStateInitialized() // Genesis time and genesis validator root are required to subscribe.
s.awaitStateInitialized() // Genesis time and genesis validators root are required to subscribe.
topicHandle, err := s.JoinTopic(topic)
if err != nil {

View File

@@ -490,7 +490,7 @@ func (s *Service) connectToBootnodes() error {
return nil
}
// Returns true if the service is aware of the genesis time and genesis validator root. This is
// Returns true if the service is aware of the genesis time and genesis validators root. This is
// required for discovery and pubsub validation.
func (s *Service) isInitialized() bool {
return !s.genesisTime.IsZero() && len(s.genesisValidatorsRoot) == 32

View File

@@ -306,7 +306,7 @@ func TestService_JoinLeaveTopic(t *testing.T) {
// digest associated with that genesis event.
func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.Feed) [4]byte {
gt := prysmTime.Now()
gvr := bytesutil.PadTo([]byte("genesis validator root"), 32)
gvr := bytesutil.PadTo([]byte("genesis validators root"), 32)
for n := 0; n == 0; {
if ctx.Err() != nil {
t.Fatal(ctx.Err())

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"block_cache.go",
"block_reader.go",
"check_transition_config.go",
"deposit.go",
"log.go",
"log_processing.go",
@@ -30,8 +31,10 @@ go_library(
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native/v1:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",
@@ -42,6 +45,7 @@ go_library(
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
@@ -52,6 +56,7 @@ go_library(
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -67,6 +72,7 @@ go_test(
srcs = [
"block_cache_test.go",
"block_reader_test.go",
"check_transition_config_test.go",
"deposit_test.go",
"init_test.go",
"log_processing_test.go",
@@ -85,6 +91,8 @@ go_test(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1/mocks:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state/stategen:go_default_library",

View File

@@ -0,0 +1,82 @@
package powchain
import (
"context"
"errors"
"math/big"
"time"
"github.com/holiman/uint256"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/config/params"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
var (
checkTransitionPollingInterval = time.Second * 10
configMismatchLog = "Configuration mismatch between your execution client and Prysm. " +
"Please check your execution client and restart it with the proper configuration. If this is not done, " +
"your node will not be able to complete the proof-of-stake transition"
)
// Checks the transition configuration between Prysm and the connected execution node to ensure
// there are no differences in terminal total difficulty and terminal block hash.
// If there are any discrepancies, we log errors to ensure users can resolve
// the problem and be ready for the merge transition.
func (s *Service) checkTransitionConfiguration(ctx context.Context) {
if s.engineAPIClient == nil {
return
}
i := new(big.Int)
i.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
ttd := new(uint256.Int)
ttd.SetFromBig(i)
cfg := &pb.TransitionConfiguration{
TerminalTotalDifficulty: ttd.Hex(),
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
TerminalBlockNumber: big.NewInt(0).Bytes(), // A value of 0 is recommended in the request.
}
err := s.engineAPIClient.ExchangeTransitionConfiguration(ctx, cfg)
if err != nil {
if errors.Is(err, v1.ErrConfigMismatch) {
log.WithError(err).Fatal(configMismatchLog)
}
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}
// We poll the execution client to see if the transition configuration has changed.
// This serves as a heartbeat to ensure the execution client and Prysm are ready for the
// Bellatrix hard-fork transition.
ticker := time.NewTicker(checkTransitionPollingInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
err = s.engineAPIClient.ExchangeTransitionConfiguration(ctx, cfg)
s.handleExchangeConfigurationError(err)
}
}
}
// We check if there is a configuration mismatch error between the execution client
// and the Prysm beacon node. If so, we need to log errors in the node as it cannot successfully
// complete the merge transition for the Bellatrix hard fork.
func (s *Service) handleExchangeConfigurationError(err error) {
if err == nil {
// If there was no error when checking the exchange configuration, we clear
// the run error of the service if we had previously set it to ErrConfigMismatch.
if errors.Is(s.runError, v1.ErrConfigMismatch) {
s.runError = nil
}
return
}
// If the error is a configuration mismatch, we set a runtime error in the service.
if errors.Is(err, v1.ErrConfigMismatch) {
s.runError = err
log.WithError(err).Error(configMismatchLog)
return
}
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}
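
As an aside, the conversion above turns the decimal terminal total difficulty string from the beacon config into the hex quantity expected by engine_exchangeTransitionConfigurationV1. A minimal sketch of that same conversion, assuming the math/big and holiman/uint256 imports already present in this file and using a made-up difficulty value:
i := new(big.Int)
i.SetString("1000000", 10) // illustrative decimal TTD, not a real network value
ttd := new(uint256.Int)
ttd.SetFromBig(i)
hexTTD := ttd.Hex() // "0xf4240", the form placed in TransitionConfiguration.TerminalTotalDifficulty
_ = hexTTD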

View File

@@ -0,0 +1,59 @@
package powchain
import (
"context"
"errors"
"testing"
"time"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1/mocks"
"github.com/prysmaticlabs/prysm/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func Test_checkTransitionConfiguration(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
m := &mocks.EngineClient{}
m.Err = errors.New("something went wrong")
srv := &Service{}
srv.engineAPIClient = m
checkTransitionPollingInterval = time.Millisecond
ctx, cancel := context.WithCancel(ctx)
go srv.checkTransitionConfiguration(ctx)
<-time.After(100 * time.Millisecond)
cancel()
require.LogsContain(t, hook, "Could not check configuration values")
}
func TestService_handleExchangeConfigurationError(t *testing.T) {
hook := logTest.NewGlobal()
t.Run("clears existing service error", func(t *testing.T) {
srv := &Service{isRunning: true}
srv.runError = v1.ErrConfigMismatch
srv.handleExchangeConfigurationError(nil)
require.Equal(t, true, srv.Status() == nil)
})
t.Run("does not clear existing service error if wrong kind", func(t *testing.T) {
srv := &Service{isRunning: true}
err := errors.New("something else went wrong")
srv.runError = err
srv.handleExchangeConfigurationError(nil)
require.ErrorIs(t, err, srv.Status())
})
t.Run("sets service error on config mismatch", func(t *testing.T) {
srv := &Service{isRunning: true}
srv.handleExchangeConfigurationError(v1.ErrConfigMismatch)
require.Equal(t, v1.ErrConfigMismatch, srv.Status())
require.LogsContain(t, hook, configMismatchLog)
})
t.Run("does not set service error if unrelated problem", func(t *testing.T) {
srv := &Service{isRunning: true}
srv.handleExchangeConfigurationError(errors.New("foo"))
require.Equal(t, true, srv.Status() == nil)
require.LogsContain(t, hook, "Could not check configuration values")
})
}

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"auth.go",
"client.go",
"errors.go",
"options.go",
@@ -10,23 +11,35 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["client_test.go"],
srcs = [
"auth_test.go",
"client_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/powchain/engine-api-client/v1/mocks:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -0,0 +1,44 @@
package v1
import (
"net/http"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/pkg/errors"
)
// This creates a custom HTTP transport which we can attach to our HTTP client
// in order to inject JWT auth strings into our HTTP request headers. Authentication
// is required when interacting with an Ethereum engine API server via HTTP, and JWT
// is the chosen scheme.
// For more details on the requirements of authentication when using the engine API, see
// the specification here: https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md
//
// To use this transport, initialize a new &http.Client{} from the standard library
// and set the Transport field to &jwtTransport{} with values
// http.DefaultTransport and a JWT secret.
type jwtTransport struct {
underlyingTransport http.RoundTripper
jwtSecret []byte
}
// RoundTrip ensures our transport implements the http.RoundTripper interface from the
// standard library. When used as the transport for an HTTP client, the code below
// runs every time our client makes an HTTP request. It is used to inject
// a JWT bearer token into the Authorization request header of every outgoing request
// our HTTP client makes.
func (t *jwtTransport) RoundTrip(req *http.Request) (*http.Response, error) {
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
// Required claim for engine API auth. "iat" stands for issued at
// and it must be a unix timestamp that is +/- 5 seconds from the current
// timestamp at the moment the server verifies this value.
"iat": time.Now().Unix(),
})
tokenString, err := token.SignedString(t.jwtSecret)
if err != nil {
return nil, errors.Wrap(err, "could not produce signed JWT token")
}
req.Header.Set("Authorization", "Bearer "+tokenString)
return t.underlyingTransport.RoundTrip(req)
}
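
To make the usage note above concrete, here is a minimal sketch of wiring the transport into a client by hand, assuming it lives in the same v1 package so jwtTransport and DefaultTimeout are in scope; the secret is a placeholder, and the WithJWTSecret option added further down performs the same wiring:
secret := make([]byte, 32) // placeholder secret for illustration; use a real 32-byte JWT secret
authedClient := &http.Client{
	Timeout: DefaultTimeout,
	Transport: &jwtTransport{
		underlyingTransport: http.DefaultTransport,
		jwtSecret:           secret,
	},
}
_ = authedClient // hand this to rpc.DialHTTPWithClient when dialing the engine endpoint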

View File

@@ -0,0 +1,53 @@
package v1
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestJWTAuthTransport(t *testing.T) {
secret := bytesutil.PadTo([]byte("foo"), 32)
authTransport := &jwtTransport{
underlyingTransport: http.DefaultTransport,
jwtSecret: secret,
}
client := &http.Client{
Timeout: DefaultTimeout,
Transport: authTransport,
}
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
reqToken := r.Header.Get("Authorization")
splitToken := strings.Split(reqToken, "Bearer")
// The format should be `Bearer ${token}`.
require.Equal(t, 2, len(splitToken))
reqToken = strings.TrimSpace(splitToken[1])
token, err := jwt.Parse(reqToken, func(token *jwt.Token) (interface{}, error) {
// We should be doing HMAC signing.
_, ok := token.Method.(*jwt.SigningMethodHMAC)
require.Equal(t, true, ok)
return secret, nil
})
require.NoError(t, err)
require.Equal(t, true, token.Valid)
claims, ok := token.Claims.(jwt.MapClaims)
require.Equal(t, true, ok)
item, ok := claims["iat"]
require.Equal(t, true, ok)
iat, ok := item.(float64)
require.Equal(t, true, ok)
issuedAt := time.Unix(int64(iat), 0)
// The claims should have an "iat" (issued at) field that is at most 5 seconds old.
since := time.Since(issuedAt)
require.Equal(t, true, since <= time.Second*5)
}))
defer srv.Close()
_, err := client.Get(srv.URL)
require.NoError(t, err)
}

View File

@@ -4,13 +4,18 @@
package v1
import (
"bytes"
"context"
"fmt"
"math/big"
"net/url"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rpc"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
@@ -21,10 +26,12 @@ const (
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// GetPayloadMethod v1 request string for JSON-RPC.
GetPayloadMethod = "engine_getPayloadV1"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
// ExecutionBlockByHashMethod request string for JSON-RPC.
ExecutionBlockByHashMethod = "eth_blockByHash"
// LatestExecutionBlockMethod request string for JSON-RPC.
LatestExecutionBlockMethod = "eth_blockByNumber"
ExecutionBlockByHashMethod = "eth_getBlockByHash"
// ExecutionBlockByNumberMethod request string for JSON-RPC.
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
// DefaultTimeout for HTTP.
DefaultTimeout = time.Second * 5
)
@@ -32,18 +39,21 @@ const (
// ForkchoiceUpdatedResponse is the response kind received by the
// engine_forkchoiceUpdatedV1 endpoint.
type ForkchoiceUpdatedResponse struct {
Status *pb.PayloadStatus `json:"status"`
Status *pb.PayloadStatus `json:"payloadStatus"`
PayloadId *pb.PayloadIDBytes `json:"payloadId"`
}
// EngineCaller defines a client that can interact with an Ethereum
// Caller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload *pb.ExecutionPayload) (*pb.PayloadStatus, error)
type Caller interface {
NewPayload(ctx context.Context, payload *pb.ExecutionPayload) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
) (*ForkchoiceUpdatedResponse, error)
) (*pb.PayloadIDBytes, []byte, error)
GetPayload(ctx context.Context, payloadId [8]byte) (*pb.ExecutionPayload, error)
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
}
@@ -65,6 +75,11 @@ func New(ctx context.Context, endpoint string, opts ...Option) (*Client, error)
c := &Client{
cfg: defaultConfig(),
}
for _, opt := range opts {
if err := opt(c); err != nil {
return nil, err
}
}
switch u.Scheme {
case "http", "https":
c.rpc, err = rpc.DialHTTPWithClient(endpoint, c.cfg.httpClient)
@@ -76,28 +91,59 @@ func New(ctx context.Context, endpoint string, opts ...Option) (*Client, error)
if err != nil {
return nil, err
}
for _, opt := range opts {
if err := opt(c); err != nil {
return nil, err
}
}
return c, nil
}
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
func (c *Client) NewPayload(ctx context.Context, payload *pb.ExecutionPayload) (*pb.PayloadStatus, error) {
func (c *Client) NewPayload(ctx context.Context, payload *pb.ExecutionPayload) ([]byte, error) {
result := &pb.PayloadStatus{}
err := c.rpc.CallContext(ctx, result, NewPayloadMethod, payload)
return result, handleRPCError(err)
if err != nil {
return nil, handleRPCError(err)
}
switch result.Status {
case pb.PayloadStatus_INVALID_BLOCK_HASH:
return nil, fmt.Errorf("could not validate block hash: %v", result.ValidationError)
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, fmt.Errorf("could not satisfy terminal block condition: %v", result.ValidationError)
case pb.PayloadStatus_ACCEPTED, pb.PayloadStatus_SYNCING:
return nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:
return result.LatestValidHash, ErrInvalidPayloadStatus
case pb.PayloadStatus_VALID:
return result.LatestValidHash, nil
default:
return nil, ErrUnknownPayloadStatus
}
}
// ForkchoiceUpdated calls the engine_forkchoiceUpdatedV1 method via JSON-RPC.
func (c *Client) ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
) (*ForkchoiceUpdatedResponse, error) {
) (*pb.PayloadIDBytes, []byte, error) {
result := &ForkchoiceUpdatedResponse{}
err := c.rpc.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, attrs)
return result, handleRPCError(err)
if err != nil {
return nil, nil, handleRPCError(err)
}
if result.Status == nil {
return nil, nil, ErrNilResponse
}
resp := result.Status
switch resp.Status {
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, nil, fmt.Errorf("could not satisfy terminal block condition: %v", resp.ValidationError)
case pb.PayloadStatus_SYNCING:
return nil, nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:
return nil, resp.LatestValidHash, ErrInvalidPayloadStatus
case pb.PayloadStatus_VALID:
return result.PayloadId, resp.LatestValidHash, nil
default:
return nil, nil, ErrUnknownPayloadStatus
}
}
// GetPayload calls the engine_getPayloadV1 method via JSON-RPC.
@@ -107,6 +153,44 @@ func (c *Client) GetPayload(ctx context.Context, payloadId [8]byte) (*pb.Executi
return result, handleRPCError(err)
}
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
func (c *Client) ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error {
// We set the terminal block number to 0 as the parameter is not set on the consensus layer.
zeroBigNum := big.NewInt(0)
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
result := &pb.TransitionConfiguration{}
if err := c.rpc.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
return handleRPCError(err)
}
// We surface an error to the user if local configuration settings mismatch
// according to the response from the execution node.
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
if !bytes.Equal(cfgTerminalHash, result.TerminalBlockHash) {
return errors.Wrapf(
ErrConfigMismatch,
"got %#x from execution node, wanted %#x",
result.TerminalBlockHash,
cfgTerminalHash,
)
}
ttdCfg := params.BeaconConfig().TerminalTotalDifficulty
ttdResult, err := hexutil.DecodeBig(result.TerminalTotalDifficulty)
if err != nil {
return errors.Wrap(err, "could not decode received terminal total difficulty")
}
if ttdResult.String() != ttdCfg {
return errors.Wrapf(
ErrConfigMismatch,
"got %s from execution node, wanted %s",
ttdResult.String(),
ttdCfg,
)
}
return nil
}
// LatestExecutionBlock fetches the latest execution engine block by calling
// eth_getBlockByNumber via JSON-RPC.
func (c *Client) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
@@ -114,7 +198,7 @@ func (c *Client) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock,
err := c.rpc.CallContext(
ctx,
result,
LatestExecutionBlockMethod,
ExecutionBlockByNumberMethod,
"latest",
false, /* no full transaction objects */
)
@@ -125,7 +209,7 @@ func (c *Client) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock,
// eth_getBlockByHash via JSON-RPC.
func (c *Client) ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error) {
result := &pb.ExecutionBlock{}
err := c.rpc.CallContext(ctx, result, ExecutionBlockByHashMethod, hash)
err := c.rpc.CallContext(ctx, result, ExecutionBlockByHashMethod, hash, false /* no full transaction objects */)
return result, handleRPCError(err)
}

View File

@@ -13,13 +13,21 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1/mocks"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/prysmaticlabs/prysm/testing/require"
"google.golang.org/protobuf/proto"
)
var _ = EngineCaller(&Client{})
var (
_ = Caller(&Client{})
_ = Caller(&mocks.EngineClient{})
)
func TestClient_IPC(t *testing.T) {
server := newTestIPCServer(t)
@@ -42,21 +50,27 @@ func TestClient_IPC(t *testing.T) {
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
resp, err := client.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, &pb.PayloadAttributes{})
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, &pb.PayloadAttributes{})
require.NoError(t, err)
require.DeepEqual(t, want.Status, resp.Status)
require.DeepEqual(t, want.PayloadId, resp.PayloadId)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
})
t.Run(NewPayloadMethod, func(t *testing.T) {
want, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
req, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
resp, err := client.NewPayload(ctx, req)
latestValidHash, err := client.NewPayload(ctx, req)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
t.Run(LatestExecutionBlockMethod, func(t *testing.T) {
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
err := client.ExchangeTransitionConfiguration(ctx, want)
require.NoError(t, err)
})
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
resp, err := client.LatestExecutionBlock(ctx)
@@ -119,7 +133,7 @@ func TestClient_HTTP(t *testing.T) {
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
@@ -127,100 +141,176 @@ func TestClient_HTTP(t *testing.T) {
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
Random: []byte("random"),
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
forkChoiceStateReq, err := json.Marshal(forkChoiceState)
require.NoError(t, err)
payloadAttrsReq, err := json.Marshal(payloadAttributes)
require.NoError(t, err)
// We expect the JSON-RPC request string to contain the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(forkChoiceStateReq),
))
require.Equal(t, true, strings.Contains(
jsonRequestString, string(payloadAttrsReq),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.NoError(t, err)
require.DeepEqual(t, want.Status, resp.Status)
require.DeepEqual(t, want.PayloadId, resp.PayloadId)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
})
t.Run(NewPayloadMethod, func(t *testing.T) {
t.Run(ForkchoiceUpdatedMethod+" SYNCING status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedSyncingResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorIs(t, err, ErrAcceptedSyncingPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(ForkchoiceUpdatedMethod+" INVALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedInvalidResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorIs(t, err, ErrInvalidPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
})
t.Run(ForkchoiceUpdatedMethod+" UNKNOWN status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedAcceptedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(ForkchoiceUpdatedMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedInvalidTerminalBlockResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(NewPayloadMethod+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
reqArg, err := json.Marshal(execPayload)
require.NoError(t, err)
// We expect the JSON-RPC request string to contain the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
client := newPayloadSetup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(LatestExecutionBlockMethod, func(t *testing.T) {
t.Run(NewPayloadMethod+" SYNCING status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["SyncingStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadSetup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID_BLOCK_HASH status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["InvalidBlockHashStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadSetup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.ErrorContains(t, "could not validate block hash", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["InvalidTerminalBlockStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadSetup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["InvalidStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadSetup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethod+" UNKNOWN status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["UnknownStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadSetup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -250,6 +340,44 @@ func TestClient_HTTP(t *testing.T) {
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
encodedReq, err := json.Marshal(want)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
// We expect the JSON-RPC request string to contain the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(encodedReq),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
// We call the RPC method via HTTP and expect a proper result.
err = client.ExchangeTransitionConfiguration(ctx, want)
require.NoError(t, err)
})
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
arg := common.BytesToHash([]byte("foo"))
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
@@ -290,6 +418,78 @@ func TestClient_HTTP(t *testing.T) {
})
}
func TestExchangeTransitionConfiguration(t *testing.T) {
fix := fixtures()
ctx := context.Background()
t.Run("wrong terminal block hash", func(t *testing.T) {
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
// Change the terminal block hash.
h := common.BytesToHash([]byte("foo"))
resp.TerminalBlockHash = h[:]
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
})
t.Run("wrong terminal total difficulty", func(t *testing.T) {
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
// Change the terminal total difficulty.
resp.TerminalTotalDifficulty = "0x1"
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
})
}
type customError struct {
code int
}
@@ -403,39 +603,48 @@ func fixtures() map[string]interface{} {
StateRoot: foo[:],
ReceiptsRoot: foo[:],
LogsBloom: baz,
Random: foo[:],
PrevRandao: foo[:],
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: foo[:],
BaseFeePerGas: baseFeePerGas.Bytes(),
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
}
number := bytesutil.PadTo([]byte("100"), fieldparams.RootLength)
hash := bytesutil.PadTo([]byte("hash"), fieldparams.RootLength)
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
stateRoot := bytesutil.PadTo([]byte("stateRoot"), fieldparams.RootLength)
transactionsRoot := bytesutil.PadTo([]byte("transactionsRoot"), fieldparams.RootLength)
receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength)
logsBloom := bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength)
executionBlock := &pb.ExecutionBlock{
Number: []byte("100"),
Hash: []byte("hash"),
ParentHash: []byte("parentHash"),
Sha3Uncles: []byte("sha3Uncles"),
Miner: []byte("miner"),
StateRoot: []byte("sha3Uncles"),
TransactionsRoot: []byte("transactionsRoot"),
ReceiptsRoot: []byte("receiptsRoot"),
LogsBloom: []byte("logsBloom"),
Difficulty: []byte("1"),
TotalDifficulty: []byte("2"),
Number: number,
Hash: hash,
ParentHash: parent,
Sha3Uncles: sha3Uncles,
Miner: miner,
StateRoot: stateRoot,
TransactionsRoot: transactionsRoot,
ReceiptsRoot: receiptsRoot,
LogsBloom: logsBloom,
Difficulty: bytesutil.PadTo([]byte("1"), fieldparams.RootLength),
TotalDifficulty: "2",
GasLimit: 3,
GasUsed: 4,
Timestamp: 5,
Size: []byte("6"),
ExtraData: []byte("extraData"),
BaseFeePerGas: []byte("baseFeePerGas"),
Size: bytesutil.PadTo([]byte("6"), fieldparams.RootLength),
ExtraData: bytesutil.PadTo([]byte("extraData"), fieldparams.RootLength),
BaseFeePerGas: bytesutil.PadTo([]byte("baseFeePerGas"), fieldparams.RootLength),
Transactions: [][]byte{foo[:]},
Uncles: [][]byte{foo[:]},
}
status := &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
Status: pb.PayloadStatus_VALID,
LatestValidHash: foo[:],
ValidationError: "",
}
@@ -444,11 +653,86 @@ func fixtures() map[string]interface{} {
Status: status,
PayloadId: &id,
}
forkChoiceSyncingResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_SYNCING,
LatestValidHash: nil,
},
PayloadId: &id,
}
forkChoiceInvalidTerminalBlockResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
},
PayloadId: &id,
}
forkChoiceAcceptedResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
LatestValidHash: nil,
},
PayloadId: &id,
}
forkChoiceInvalidResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID,
LatestValidHash: []byte("latestValidHash"),
},
PayloadId: &id,
}
b, _ := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
ttd, _ := uint256.FromBig(b)
transitionCfg := &pb.TransitionConfiguration{
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
TerminalTotalDifficulty: ttd.Hex(),
TerminalBlockNumber: big.NewInt(0).Bytes(),
}
validStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_VALID,
LatestValidHash: foo[:],
ValidationError: "",
}
inValidBlockHashStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_BLOCK_HASH,
LatestValidHash: nil,
}
inValidTerminalBlockStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
}
acceptedStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
LatestValidHash: nil,
}
syncingStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_SYNCING,
LatestValidHash: nil,
}
invalidStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID,
LatestValidHash: foo[:],
}
unknownStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_UNKNOWN,
LatestValidHash: foo[:],
}
return map[string]interface{}{
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"PayloadStatus": status,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"InvalidTerminalBlockStatus": inValidTerminalBlockStatus,
"AcceptedStatus": acceptedStatus,
"SyncingStatus": syncingStatus,
"InvalidStatus": invalidStatus,
"UnknownStatus": unknownStatus,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedInvalidTerminalBlockResponse": forkChoiceInvalidTerminalBlockResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
}
}
@@ -456,8 +740,8 @@ type testEngineService struct{}
func (*testEngineService) NoArgsRets() {}
func (*testEngineService) BlockByHash(
_ context.Context, _ common.Hash,
func (*testEngineService) GetBlockByHash(
_ context.Context, _ common.Hash, _ bool,
) *pb.ExecutionBlock {
fix := fixtures()
item, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
@@ -467,7 +751,7 @@ func (*testEngineService) BlockByHash(
return item
}
func (*testEngineService) BlockByNumber(
func (*testEngineService) GetBlockByNumber(
_ context.Context, _ string, _ bool,
) *pb.ExecutionBlock {
fix := fixtures()
@@ -489,6 +773,17 @@ func (*testEngineService) GetPayloadV1(
return item
}
func (*testEngineService) ExchangeTransitionConfigurationV1(
_ context.Context, _ *pb.TransitionConfiguration,
) *pb.TransitionConfiguration {
fix := fixtures()
item, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
if !ok {
panic("not found")
}
return item
}
func (*testEngineService) ForkchoiceUpdatedV1(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) *ForkchoiceUpdatedResponse {
@@ -497,6 +792,7 @@ func (*testEngineService) ForkchoiceUpdatedV1(
if !ok {
panic("not found")
}
item.Status.Status = pb.PayloadStatus_VALID
return item
}
@@ -504,9 +800,83 @@ func (*testEngineService) NewPayloadV1(
_ context.Context, _ *pb.ExecutionPayload,
) *pb.PayloadStatus {
fix := fixtures()
item, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
item, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
if !ok {
panic("not found")
}
return item
}
func forkchoiceUpdateSetup(t *testing.T, fcs *pb.ForkchoiceState, att *pb.PayloadAttributes, res *ForkchoiceUpdatedResponse) *Client {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
forkChoiceStateReq, err := json.Marshal(fcs)
require.NoError(t, err)
payloadAttrsReq, err := json.Marshal(att)
require.NoError(t, err)
// We expect the JSON-RPC request string to contain the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(forkChoiceStateReq),
))
require.Equal(t, true, strings.Contains(
jsonRequestString, string(payloadAttrsReq),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": res,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
client := &Client{}
client.rpc = rpcClient
return client
}
func newPayloadSetup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayload) *Client {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
reqArg, err := json.Marshal(payload)
require.NoError(t, err)
// We expect the JSON-RPC request string to contain the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": status,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
client := &Client{}
client.rpc = rpcClient
return client
}

View File

@@ -17,6 +17,23 @@ var (
ErrServer = errors.New("client error while processing request")
// ErrUnknownPayload corresponds to JSON-RPC code -32001.
ErrUnknownPayload = errors.New("payload does not exist or is not available")
// ErrUnknownPayloadStatus when the payload status is unknown.
ErrUnknownPayloadStatus = errors.New("unknown payload status")
// ErrUnsupportedScheme for unsupported URL schemes.
ErrUnsupportedScheme = errors.New("unsupported url scheme, only http(s) and ipc are supported")
// ErrConfigMismatch when the execution node's terminal total difficulty or
// terminal block hash received via the API mismatches Prysm's configuration value.
ErrConfigMismatch = errors.New("execution client configuration mismatch")
// ErrMismatchTerminalBlockHash when the terminal block hash value received via
// the API mismatches Prysm's configuration value.
ErrMismatchTerminalBlockHash = errors.New("terminal block hash mismatch")
// ErrMismatchTerminalTotalDiff when the terminal total difficulty value received via
// the API mismatches Prysm's configuration value.
ErrMismatchTerminalTotalDiff = errors.New("terminal total difficulty mismatch")
// ErrAcceptedSyncingPayloadStatus when the status of the payload is syncing or accepted.
ErrAcceptedSyncingPayloadStatus = errors.New("payload status is SYNCING or ACCEPTED")
// ErrInvalidPayloadStatus when the status of the payload is invalid.
ErrInvalidPayloadStatus = errors.New("payload status is INVALID")
// ErrNilResponse when the response is nil.
ErrNilResponse = errors.New("nil response")
)
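
For orientation, a hedged sketch of how a caller might branch on these sentinel errors after the refactored NewPayload call; the client, ctx, and payload names are illustrative assumptions rather than the actual Prysm call sites:
latestValidHash, err := client.NewPayload(ctx, payload)
switch {
case err == nil:
	// VALID: latestValidHash identifies the newly validated payload.
case errors.Is(err, ErrAcceptedSyncingPayloadStatus):
	// ACCEPTED or SYNCING: the execution node cannot fully validate the payload yet.
case errors.Is(err, ErrInvalidPayloadStatus):
	// INVALID: latestValidHash points at the last valid ancestor reported by the node.
default:
	// Unknown status, terminal block / block hash validation failures, or transport errors.
}
_ = latestValidHash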

View File

@@ -0,0 +1,12 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["client.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1/mocks",
visibility = ["//visibility:public"],
deps = [
"//proto/engine/v1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
],
)

View File

@@ -0,0 +1,50 @@
package mocks
import (
"context"
"github.com/ethereum/go-ethereum/common"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
// EngineClient --
type EngineClient struct {
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
Err error
ExecutionBlock *pb.ExecutionBlock
}
// NewPayload --
func (e *EngineClient) NewPayload(_ context.Context, _ *pb.ExecutionPayload) ([]byte, error) {
return e.NewPayloadResp, nil
}
// ForkchoiceUpdated --
func (e *EngineClient) ForkchoiceUpdated(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error) {
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, nil
}
// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte) (*pb.ExecutionPayload, error) {
return e.ExecutionPayload, nil
}
// ExchangeTransitionConfiguration --
func (e *EngineClient) ExchangeTransitionConfiguration(_ context.Context, _ *pb.TransitionConfiguration) error {
return e.Err
}
// LatestExecutionBlock --
func (e *EngineClient) LatestExecutionBlock(_ context.Context) (*pb.ExecutionBlock, error) {
return e.ExecutionBlock, nil
}
// ExecutionBlockByHash --
func (e *EngineClient) ExecutionBlockByHash(_ context.Context, _ common.Hash) (*pb.ExecutionBlock, error) {
return e.ExecutionBlock, nil
}

View File

@@ -19,11 +19,21 @@ func defaultConfig() *config {
}
}
// WithHTTPClient allows setting a custom HTTP client
// for the API connection.
func WithHTTPClient(httpClient *http.Client) Option {
// WithJWTSecret allows setting a JWT secret for authenticating
// the client via HTTP connections.
func WithJWTSecret(secret []byte) Option {
return func(c *Client) error {
c.cfg.httpClient = httpClient
if len(secret) == 0 {
return nil
}
authTransport := &jwtTransport{
underlyingTransport: http.DefaultTransport,
jwtSecret: secret,
}
c.cfg.httpClient = &http.Client{
Timeout: DefaultTimeout,
Transport: authTransport,
}
return nil
}
}

View File

@@ -428,27 +428,27 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
}
func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int) ([32]byte, uint64, error) {
hash, err := s.BlockHashByHeight(ctx, blkNum)
bHash, err := s.BlockHashByHeight(ctx, blkNum)
if err != nil {
return [32]byte{}, 0, errors.Wrap(err, "could not get eth1 block hash")
}
if hash == [32]byte{} {
if bHash == [32]byte{} {
return [32]byte{}, 0, errors.Wrap(err, "got empty block hash")
}
timeStamp, err := s.BlockTimeByHeight(ctx, blkNum)
if err != nil {
return [32]byte{}, 0, errors.Wrap(err, "could not get block timestamp")
}
return hash, timeStamp, nil
return bHash, timeStamp, nil
}
// checkBlockNumberForChainStart checks the given block number for if chainstart has occurred.
func (s *Service) checkBlockNumberForChainStart(ctx context.Context, blkNum *big.Int) error {
hash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
bHash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
if err != nil {
return err
}
s.checkForChainstart(ctx, hash, blkNum, timeStamp)
s.checkForChainstart(ctx, bHash, blkNum, timeStamp)
return nil
}

View File

@@ -333,7 +333,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
err = web3Service.ProcessETH1Block(context.Background(), big.NewInt(int64(logs[len(logs)-1].BlockNumber)))
require.NoError(t, err)
cachedDeposits := web3Service.ChainStartDeposits()
cachedDeposits := web3Service.chainStartData.ChainstartDeposits
require.Equal(t, depositsReqForChainStart, len(cachedDeposits))
// Receive the chain started event.
@@ -425,7 +425,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
err = web3Service.processPastLogs(context.Background())
require.NoError(t, err)
cachedDeposits := web3Service.ChainStartDeposits()
cachedDeposits := web3Service.chainStartData.ChainstartDeposits
requiredDepsForChainstart := depositsReqForChainStart + depositOffset
require.Equal(t, requiredDepsForChainstart, len(cachedDeposits), "Did not cache the chain start deposits correctly")
@@ -529,7 +529,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
err = web3Service.processPastLogs(context.Background())
require.NoError(t, err)
cachedDeposits := web3Service.ChainStartDeposits()
cachedDeposits := web3Service.chainStartData.ChainstartDeposits
require.Equal(t, totalNumOfDeposits, len(cachedDeposits), "Did not cache the chain start deposits correctly")
// Receive the chain started event.

View File

@@ -40,6 +40,14 @@ func WithExecutionEndpoint(endpoint string) Option {
}
}
// WithExecutionClientJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithExecutionClientJWTSecret(jwtSecret []byte) Option {
return func(s *Service) error {
s.cfg.executionEndpointJWTSecret = jwtSecret
return nil
}
}
// WithDepositContractAddress for the deposit contract.
func WithDepositContractAddress(addr common.Address) Option {
return func(s *Service) error {

View File

@@ -30,8 +30,10 @@ import (
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
nativev1 "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
@@ -79,7 +81,6 @@ var (
// ChainStartFetcher retrieves information pertaining to the chain start event
// of the beacon chain for usage across various services.
type ChainStartFetcher interface {
ChainStartDeposits() []*ethpb.Deposit
ChainStartEth1Data() *ethpb.Eth1Data
PreGenesisState() state.BeaconState
ClearPreGenesisData()
@@ -126,17 +127,18 @@ type RPCClient interface {
// config defines a config struct for dependencies into the service.
type config struct {
depositContractAddr common.Address
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
stateNotifier statefeed.Notifier
stateGen *stategen.State
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
httpEndpoints []network.Endpoint
executionEndpoint string
currHttpEndpoint network.Endpoint
finalizedStateAtStartup state.BeaconState
depositContractAddr common.Address
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
stateNotifier statefeed.Notifier
stateGen *stategen.State
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
httpEndpoints []network.Endpoint
executionEndpoint string
executionEndpointJWTSecret []byte
currHttpEndpoint network.Endpoint
finalizedStateAtStartup state.BeaconState
}
// Service fetches important information about the canonical
@@ -155,7 +157,7 @@ type Service struct {
headTicker *time.Ticker
httpLogger bind.ContractFilterer
eth1DataFetcher RPCDataFetcher
engineAPIClient *engine.Client
engineAPIClient engine.Caller
rpcClient RPCClient
headerCache *headerCache // cache to store block hash/block height.
latestEth1Data *ethpb.LatestETH1Data
@@ -215,6 +217,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
return nil, errors.Wrap(err, "unable to initialize engine API client")
}
// Check transition configuration for the engine API client in the background.
go s.checkTransitionConfiguration(ctx)
if err := s.ensureValidPowchainData(ctx); err != nil {
return nil, errors.Wrap(err, "unable to validate powchain data")
}
@@ -269,16 +274,14 @@ func (s *Service) Stop() error {
return nil
}
// ChainStartDeposits returns a slice of validator deposit data processed
// by the deposit contract and cached in the powchain service.
func (s *Service) ChainStartDeposits() []*ethpb.Deposit {
return s.chainStartData.ChainstartDeposits
}
// ClearPreGenesisData clears out the stored chainstart deposits and beacon state.
func (s *Service) ClearPreGenesisData() {
s.chainStartData.ChainstartDeposits = []*ethpb.Deposit{}
s.preGenesisState = &v1.BeaconState{}
if features.Get().EnableNativeState {
s.preGenesisState = &nativev1.BeaconState{}
} else {
s.preGenesisState = &v1.BeaconState{}
}
}
// ChainStartEth1Data returns the eth1 data at chainstart.
@@ -299,15 +302,12 @@ func (s *Service) Status() error {
return nil
}
// get error from run function
if s.runError != nil {
return s.runError
}
return nil
return s.runError
}
// EngineAPIClient returns the associated engine API client to interact
// with an execution node via JSON-RPC.
func (s *Service) EngineAPIClient() *engine.Client {
func (s *Service) EngineAPIClient() engine.Caller {
return s.engineAPIClient
}
@@ -376,45 +376,6 @@ func (s *Service) ETH1ConnectionErrors() []error {
return errs
}
// DepositRoot returns the Merkle root of the latest deposit trie
// from the ETH1.0 deposit contract.
func (s *Service) DepositRoot() [32]byte {
return s.depositTrie.HashTreeRoot()
}
// DepositTrie returns the sparse Merkle trie used for storing
// deposits from the ETH1.0 deposit contract.
func (s *Service) DepositTrie() *trie.SparseMerkleTrie {
return s.depositTrie
}
// LatestBlockHeight in the ETH1.0 chain.
func (s *Service) LatestBlockHeight() *big.Int {
return big.NewInt(int64(s.latestEth1Data.BlockHeight))
}
// LatestBlockHash in the ETH1.0 chain.
func (s *Service) LatestBlockHash() common.Hash {
return bytesutil.ToBytes32(s.latestEth1Data.BlockHash)
}
// AreAllDepositsProcessed determines if all the logs from the deposit contract
// are processed.
func (s *Service) AreAllDepositsProcessed() (bool, error) {
s.processingLock.RLock()
defer s.processingLock.RUnlock()
countByte, err := s.depositContractCaller.GetDepositCount(&bind.CallOpts{})
if err != nil {
return false, errors.Wrap(err, "could not get deposit count")
}
count := bytesutil.FromBytes8(countByte)
deposits := s.cfg.depositCache.AllDeposits(s.ctx, nil)
if count != uint64(len(deposits)) {
return false, nil
}
return true, nil
}
// refers to the latest eth1 block which follows the condition: eth1_timestamp +
// SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time
func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
@@ -1098,7 +1059,10 @@ func (s *Service) initializeEngineAPIClient(ctx context.Context) error {
if s.cfg.executionEndpoint == "" {
return nil
}
client, err := engine.New(ctx, s.cfg.executionEndpoint)
opts := []engine.Option{
engine.WithJWTSecret(s.cfg.executionEndpointJWTSecret),
}
client, err := engine.New(ctx, s.cfg.executionEndpoint, opts...)
if err != nil {
return err
}

View File

@@ -16,7 +16,6 @@ go_library(
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/container/trie"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -25,11 +24,6 @@ func (_ *FaultyMockPOWChain) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
return 0, big.NewInt(0)
}
// LatestBlockHeight --
func (_ *FaultyMockPOWChain) LatestBlockHeight() *big.Int {
return big.NewInt(0)
}
// BlockExists --
func (f *FaultyMockPOWChain) BlockExists(_ context.Context, _ common.Hash) (bool, *big.Int, error) {
if f.HashesByHeight == nil {
@@ -54,21 +48,6 @@ func (_ *FaultyMockPOWChain) BlockByTimestamp(_ context.Context, _ uint64) (*typ
return &types.HeaderInfo{Number: big.NewInt(0)}, nil
}
// DepositRoot --
func (_ *FaultyMockPOWChain) DepositRoot() [32]byte {
return [32]byte{}
}
// DepositTrie --
func (_ *FaultyMockPOWChain) DepositTrie() *trie.SparseMerkleTrie {
return &trie.SparseMerkleTrie{}
}
// ChainStartDeposits --
func (_ *FaultyMockPOWChain) ChainStartDeposits() []*ethpb.Deposit {
return []*ethpb.Deposit{}
}
// ChainStartEth1Data --
func (_ *FaultyMockPOWChain) ChainStartEth1Data() *ethpb.Eth1Data {
return &ethpb.Eth1Data{}

View File

@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -58,11 +57,6 @@ func (m *POWChain) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
return uint64(GenesisTime), blk
}
// DepositTrie --
func (_ *POWChain) DepositTrie() *trie.SparseMerkleTrie {
return &trie.SparseMerkleTrie{}
}
// BlockExists --
func (m *POWChain) BlockExists(_ context.Context, hash common.Hash) (bool, *big.Int, error) {
// Reverse the map of heights by hash.
@@ -107,17 +101,6 @@ func (m *POWChain) BlockByTimestamp(_ context.Context, time uint64) (*types.Head
return &types.HeaderInfo{Number: chosenNumber, Time: chosenTime}, nil
}
// DepositRoot --
func (_ *POWChain) DepositRoot() [32]byte {
root := []byte("depositroot")
return bytesutil.ToBytes32(root)
}
// ChainStartDeposits --
func (_ *POWChain) ChainStartDeposits() []*ethpb.Deposit {
return []*ethpb.Deposit{}
}
// ChainStartEth1Data --
func (m *POWChain) ChainStartEth1Data() *ethpb.Eth1Data {
return m.Eth1Data

View File

@@ -74,7 +74,7 @@ func handleGetSSZ(
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponse, errJson := apimiddleware.ProxyRequest(req)
grpcResponse, errJson := m.ProxyRequest(req)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true

View File

@@ -52,7 +52,6 @@ func TestGetSpec(t *testing.T) {
config.BellatrixForkEpoch = 101
config.ShardingForkVersion = []byte("ShardingForkVersion")
config.ShardingForkEpoch = 102
config.MinAnchorPowBlockDifficulty = 1000
config.BLSWithdrawalPrefixByte = byte('b')
config.GenesisDelay = 24
config.SecondsPerSlot = 25
@@ -131,7 +130,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 100, len(resp.Data))
assert.Equal(t, 98, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":

View File

@@ -275,7 +275,7 @@ func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpbv1.SignedVo
return nil, status.Errorf(codes.Internal, "Could not get exiting validator: %v", err)
}
alphaExit := migration.V1ExitToV1Alpha1(req)
err = blocks.VerifyExitAndSignature(validator, headState.Slot(), headState.Fork(), alphaExit, headState.GenesisValidatorRoot())
err = blocks.VerifyExitAndSignature(validator, headState.Slot(), headState.Fork(), alphaExit, headState.GenesisValidatorsRoot())
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Invalid voluntary exit: %v", err)
}

View File

@@ -34,7 +34,7 @@ func (bs *Server) GetGenesis(ctx context.Context, _ *emptypb.Empty) (*ethpb.Gene
if genesisTime.IsZero() {
return nil, status.Errorf(codes.NotFound, "Chain genesis info is not yet known")
}
validatorRoot := bs.ChainInfoFetcher.GenesisValidatorRoot()
validatorRoot := bs.ChainInfoFetcher.GenesisValidatorsRoot()
if bytes.Equal(validatorRoot[:], params.BeaconConfig().ZeroHash[:]) {
return nil, status.Errorf(codes.NotFound, "Chain genesis info is not yet known")
}

View File

@@ -56,7 +56,7 @@ func TestGetGenesis(t *testing.T) {
assert.ErrorContains(t, "Chain genesis info is not yet known", err)
})
t.Run("No genesis validator root", func(t *testing.T) {
t.Run("No genesis validators root", func(t *testing.T) {
chainService := &chainMock.ChainService{
Genesis: genesis,
ValidatorsRoot: [32]byte{},

View File

@@ -212,6 +212,10 @@ func (m *futureSyncMockFetcher) StateRoot(context.Context, []byte) ([]byte, erro
return m.BeaconStateRoot, nil
}
func (m *futureSyncMockFetcher) StateBySlot(context.Context, types.Slot) (state.BeaconState, error) {
return m.BeaconState, nil
}
func TestListSyncCommitteesFuture(t *testing.T) {
ctx := context.Background()
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)

View File

@@ -91,6 +91,7 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",

View File

@@ -2,6 +2,7 @@ package beacon
import (
"context"
"fmt"
"strconv"
types "github.com/prysmaticlabs/eth2-types"
@@ -58,9 +59,11 @@ func (bs *Server) ListValidatorAssignments(
if err != nil {
return nil, err
}
requestedState, err := bs.StateGen.StateBySlot(ctx, startSlot)
requestedState, err := bs.ReplayerBuilder.ForSlot(startSlot).ReplayBlocks(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve archived state for epoch %d: %v", requestedEpoch, err)
msg := fmt.Sprintf("could not replay all blocks from the closest stored state (at slot %d) "+
"to the requested epoch (%d) - %v", startSlot, requestedEpoch, err)
return nil, status.Error(codes.Internal, msg)
}
// Filter out assignments by public keys.
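
Several handlers in this change replace StateGen.StateBySlot with ReplayerBuilder.ForSlot(slot).ReplayBlocks(ctx). An interface sketch consistent with the calls shown in this diff, noting that the actual stategen definitions may differ in detail:

package stategensketch

import (
	"context"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// Replayer replays canonical blocks on top of the nearest stored state and
// returns the resulting state at the configured target.
type Replayer interface {
	ReplayBlocks(ctx context.Context) (state.BeaconState, error)
}

// ReplayerBuilder binds a Replayer to a target slot before replay begins.
type ReplayerBuilder interface {
	ForSlot(target types.Slot) Replayer
}
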

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
mockstategen "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen/mock"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -24,13 +25,13 @@ import (
)
func TestServer_ListAssignments_CannotRequestFutureEpoch(t *testing.T) {
db := dbTest.SetupDB(t)
ctx := context.Background()
bs := &Server{
BeaconDB: db,
GenesisTimeFetcher: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, db)
wanted := errNoEpochInfoError
_, err := bs.ListValidatorAssignments(
@@ -45,7 +46,6 @@ func TestServer_ListAssignments_CannotRequestFutureEpoch(t *testing.T) {
}
func TestServer_ListAssignments_NoResults(t *testing.T) {
db := dbTest.SetupDB(t)
ctx := context.Background()
st, err := util.NewBeaconState()
@@ -62,6 +62,7 @@ func TestServer_ListAssignments_NoResults(t *testing.T) {
BeaconDB: db,
GenesisTimeFetcher: &mock.ChainService{},
StateGen: stategen.New(db),
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(st)),
}
wanted := &ethpb.ValidatorAssignments{
Assignments: make([]*ethpb.ValidatorAssignments_CommitteeAssignment, 0),
@@ -101,8 +102,8 @@ func TestServer_ListAssignments_Pagination_InputOutOfRange(t *testing.T) {
})
}
blk := util.NewBeaconBlock().Block
blockRoot, err := blk.HashTreeRoot()
blk := util.NewBeaconBlock()
blockRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
s, err := util.NewBeaconState()
@@ -123,6 +124,7 @@ func TestServer_ListAssignments_Pagination_InputOutOfRange(t *testing.T) {
},
GenesisTimeFetcher: &mock.ChainService{},
StateGen: stategen.New(db),
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(s)),
}
wanted := fmt.Sprintf("page start %d >= list %d", 500, count)
@@ -176,8 +178,8 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing.
}
}
blk := util.NewBeaconBlock().Block
blockRoot, err := blk.HashTreeRoot()
b := util.NewBeaconBlock()
blockRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
s, err := util.NewBeaconState()
@@ -198,6 +200,7 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing.
},
GenesisTimeFetcher: &mock.ChainService{},
StateGen: stategen.New(db),
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(s)),
}
res, err := bs.ListValidatorAssignments(context.Background(), &ethpb.ListValidatorAssignmentsRequest{
@@ -246,8 +249,8 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T)
validators = append(validators, val)
}
blk := util.NewBeaconBlock().Block
blockRoot, err := blk.HashTreeRoot()
b := util.NewBeaconBlock()
blockRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -264,6 +267,7 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T)
},
GenesisTimeFetcher: &mock.ChainService{},
StateGen: stategen.New(db),
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(s)),
}
pubKey1 := make([]byte, params.BeaconConfig().BLSPubkeyLength)
@@ -315,13 +319,16 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin
validators = append(validators, val)
}
blk := util.NewBeaconBlock().Block
blockRoot, err := blk.HashTreeRoot()
b := util.NewBeaconBlock()
blockRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
s, err := util.NewBeaconState()
require.NoError(t, err)
w, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.SetValidators(validators))
require.NoError(t, db.SaveState(ctx, s, blockRoot))
require.NoError(t, db.SaveBlock(ctx, w))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, blockRoot))
bs := &Server{
@@ -335,6 +342,8 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin
StateGen: stategen.New(db),
}
addDefaultReplayerBuilder(bs, db)
req := &ethpb.ListValidatorAssignmentsRequest{Indices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}, PageSize: 2, PageToken: "1"}
res, err := bs.ListValidatorAssignments(context.Background(), req)
require.NoError(t, err)

View File

@@ -910,7 +910,7 @@ func TestServer_StreamIndexedAttestations_OK(t *testing.T) {
},
},
}
domain, err := signing.Domain(headState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, headState.GenesisValidatorRoot())
domain, err := signing.Domain(headState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, headState.GenesisValidatorsRoot())
require.NoError(t, err)
encoded, err := signing.ComputeSigningRoot(attExample.Data, domain)
require.NoError(t, err)

View File

@@ -2,6 +2,7 @@ package beacon
import (
"context"
"fmt"
"strconv"
"github.com/pkg/errors"
@@ -478,10 +479,12 @@ func (bs *Server) GetWeakSubjectivityCheckpoint(ctx context.Context, _ *emptypb.
return nil, status.Error(codes.Internal, "Could not get weak subjectivity slot")
}
wsState, err := bs.StateGen.StateBySlot(ctx, wsSlot)
wsState, err := bs.ReplayerBuilder.ForSlot(wsSlot).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get weak subjectivity state")
msg := fmt.Sprintf("error replaying blocks for state at slot %d: %v", wsSlot, err)
return nil, status.Error(codes.Internal, msg)
}
stateRoot, err := wsState.HashTreeRoot(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get weak subjectivity state root")

View File

@@ -12,9 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/cmd"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
@@ -728,77 +726,6 @@ func TestServer_StreamBlocksVerified_OnHeadUpdated(t *testing.T) {
<-exitRoutine
}
func TestServer_GetWeakSubjectivityCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
db := dbTest.SetupDB(t)
ctx := context.Background()
// Beacon state.
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(10))
// Active validator set is used for computing the weak subjectivity period.
numVals := 256 // Works with params.BeaconConfig().MinGenesisActiveValidatorCount as well, but takes longer.
validators := make([]*ethpb.Validator, numVals)
balances := make([]uint64, len(validators))
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 28 * 1e9,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
balances[i] = validators[i].EffectiveBalance
}
require.NoError(t, beaconState.SetValidators(validators))
require.NoError(t, beaconState.SetBalances(balances))
// Genesis block.
genesisBlock := util.NewBeaconBlock()
genesisBlockRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
require.NoError(t, db.SaveState(ctx, beaconState, genesisBlockRoot))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
// Finalized checkpoint.
finalizedEpoch := types.Epoch(1020)
require.NoError(t, beaconState.SetSlot(types.Slot(finalizedEpoch.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))))
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{
Epoch: finalizedEpoch - 1,
Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength),
}))
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: finalizedEpoch,
Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength),
}))
chainService := &chainMock.ChainService{State: beaconState}
server := &Server{
Ctx: ctx,
BlockNotifier: chainService.BlockNotifier(),
HeadFetcher: chainService,
BeaconDB: db,
StateGen: stategen.New(db),
}
wsEpoch, err := helpers.ComputeWeakSubjectivityPeriod(context.Background(), beaconState)
require.NoError(t, err)
c, err := server.GetWeakSubjectivityCheckpoint(ctx, &emptypb.Empty{})
require.NoError(t, err)
e := finalizedEpoch - (finalizedEpoch % wsEpoch)
require.Equal(t, e, c.Epoch)
wsState, err := server.StateGen.StateBySlot(ctx, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(e)))
require.NoError(t, err)
sRoot, err := wsState.HashTreeRoot(ctx)
require.NoError(t, err)
require.DeepEqual(t, sRoot[:], c.StateRoot)
}
func TestServer_ListBeaconBlocks_NoResults(t *testing.T) {
db := dbTest.SetupDB(t)
ctx := context.Background()

View File

@@ -74,9 +74,9 @@ func (bs *Server) retrieveCommitteesForEpoch(
if err != nil {
return nil, nil, err
}
requestedState, err := bs.StateGen.StateBySlot(ctx, startSlot)
requestedState, err := bs.ReplayerBuilder.ForSlot(startSlot).ReplayBlocks(ctx)
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
return nil, nil, status.Errorf(codes.Internal, "error replaying blocks for state at slot %d: %v", startSlot, err)
}
seed, err := helpers.Seed(requestedState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {

View File

@@ -3,6 +3,7 @@ package beacon
import (
"context"
"encoding/binary"
"math"
"testing"
"time"
@@ -11,6 +12,7 @@ import (
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
mockstategen "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen/mock"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -48,6 +50,8 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) {
require.NoError(t, db.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, db.SaveState(ctx, headState, gRoot))
bs.ReplayerBuilder = mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(headState))
activeIndices, err := helpers.ActiveValidatorIndices(ctx, headState, 0)
require.NoError(t, err)
attesterSeed, err := helpers.Seed(headState, 0, params.BeaconConfig().DomainBeaconAttester)
@@ -69,6 +73,13 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) {
}
}
func addDefaultReplayerBuilder(s *Server, h stategen.HistoryAccessor) {
cc := &mockstategen.MockCanonicalChecker{Is: true, Err: nil}
cs := &mockstategen.MockCurrentSlotter{Slot: math.MaxUint64 - 1}
s.ReplayerBuilder = stategen.NewCanonicalBuilder(h, cc, cs)
}
// TODO: test failure
func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
@@ -87,12 +98,13 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
require.NoError(t, headState.SetRandaoMixes(mixes))
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
b := util.NewBeaconBlock()
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
gRoot, err := b.Block.HashTreeRoot()
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, wrapper.SetBlockSlot(b, headState.Slot()))
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, b))
gRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, headState, gRoot))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, gRoot))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
m := &mock.ChainService{
@@ -104,6 +116,7 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
GenesisTimeFetcher: m,
StateGen: stategen.New(db),
}
addDefaultReplayerBuilder(bs, db)
activeIndices, err := helpers.ActiveValidatorIndices(ctx, headState, 1)
require.NoError(t, err)

View File

@@ -45,4 +45,5 @@ type Server struct {
CollectedAttestationsBuffer chan []*ethpb.Attestation
StateGen stategen.StateManager
SyncChecker sync.Checker
ReplayerBuilder stategen.ReplayerBuilder
}

View File

@@ -2,6 +2,7 @@ package beacon
import (
"context"
"fmt"
"sort"
"strconv"
@@ -64,9 +65,9 @@ func (bs *Server) ListValidatorBalances(
if err != nil {
return nil, err
}
requestedState, err := bs.StateGen.StateBySlot(ctx, startSlot)
requestedState, err := bs.ReplayerBuilder.ForSlot(startSlot).ReplayBlocks(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", startSlot, err))
}
vals := requestedState.Validators()
@@ -219,7 +220,10 @@ func (bs *Server) ListValidators(
if err != nil {
return nil, err
}
reqState, err = bs.StateGen.StateBySlot(ctx, s)
reqState, err = bs.ReplayerBuilder.ForSlot(s).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", s, err))
}
} else {
reqState, err = bs.HeadFetcher.HeadState(ctx)
}
@@ -411,9 +415,9 @@ func (bs *Server) GetValidatorActiveSetChanges(
if err != nil {
return nil, err
}
requestedState, err := bs.StateGen.StateBySlot(ctx, s)
requestedState, err := bs.ReplayerBuilder.ForSlot(s).ReplayBlocks(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", s, err))
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, requestedState, coreTime.CurrentEpoch(requestedState))
@@ -502,22 +506,11 @@ func (bs *Server) GetValidatorParticipation(
// Use the last slot of the requested epoch to obtain current and previous epoch attestations.
// This ensures that we don't miss previous attestations for the requested input epochs.
startSlot += params.BeaconConfig().SlotsPerEpoch - 1
// The start slot should be a canonical slot.
canonical, err := bs.isSlotCanonical(ctx, startSlot)
// ReplayerBuilder ensures that a canonical chain is followed to the slot.
beaconState, err := bs.ReplayerBuilder.ForSlot(startSlot).ReplayBlocks(ctx)
if err != nil {
return nil, err
}
// Keep looking back until there's a canonical slot.
for i := int(startSlot - 1); !canonical && i >= 0; i-- {
canonical, err = bs.isSlotCanonical(ctx, types.Slot(i))
if err != nil {
return nil, err
}
startSlot = types.Slot(i)
}
beaconState, err := bs.StateGen.StateBySlot(ctx, startSlot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", startSlot, err))
}
var v []*precompute.Validator
var b *precompute.Balance
@@ -834,9 +827,9 @@ func (bs *Server) GetIndividualVotes(
if err != nil {
return nil, err
}
requestedState, err := bs.StateGen.StateBySlot(ctx, s)
st, err := bs.ReplayerBuilder.ForSlot(s).ReplayBlocks(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve archived state for epoch %d: %v", req.Epoch, err)
return nil, status.Errorf(codes.Internal, "failed to replay blocks for state at epoch %d: %v", req.Epoch, err)
}
// Track filtered validators to prevent duplication in the response.
filtered := map[types.ValidatorIndex]bool{}
@@ -844,7 +837,7 @@ func (bs *Server) GetIndividualVotes(
votes := make([]*ethpb.IndividualVotesRespond_IndividualVote, 0, len(req.Indices)+len(req.PublicKeys))
// Filter out assignments by public keys.
for _, pubKey := range req.PublicKeys {
index, ok := requestedState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
votes = append(votes, &ethpb.IndividualVotesRespond_IndividualVote{PublicKey: pubKey, ValidatorIndex: types.ValidatorIndex(^uint64(0))})
continue
@@ -864,27 +857,27 @@ func (bs *Server) GetIndividualVotes(
var v []*precompute.Validator
var bal *precompute.Balance
switch requestedState.Version() {
switch st.Version() {
case version.Phase0:
v, bal, err = precompute.New(ctx, requestedState)
v, bal, err = precompute.New(ctx, st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up pre compute instance: %v", err)
}
v, _, err = precompute.ProcessAttestations(ctx, requestedState, v, bal)
v, _, err = precompute.ProcessAttestations(ctx, st, v, bal)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
}
case version.Altair:
v, bal, err = altair.InitializePrecomputeValidators(ctx, requestedState)
v, bal, err = altair.InitializePrecomputeValidators(ctx, st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up altair pre compute instance: %v", err)
}
v, _, err = altair.ProcessEpochParticipation(ctx, requestedState, bal, v)
v, _, err = altair.ProcessEpochParticipation(ctx, st, bal, v)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
}
default:
return nil, status.Errorf(codes.Internal, "Invalid state type retrieved with a version of %d", requestedState.Version())
return nil, status.Errorf(codes.Internal, "Invalid state type retrieved with a version of %d", st.Version())
}
for _, index := range filteredIndices {
@@ -892,7 +885,7 @@ func (bs *Server) GetIndividualVotes(
votes = append(votes, &ethpb.IndividualVotesRespond_IndividualVote{ValidatorIndex: index})
continue
}
val, err := requestedState.ValidatorAtIndexReadOnly(index)
val, err := st.ValidatorAtIndexReadOnly(index)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve validator: %v", err)
@@ -923,37 +916,6 @@ func (bs *Server) GetIndividualVotes(
}, nil
}
// isSlotCanonical returns true if the input slot has a canonical block in the chain,
// if the input slot has a skip block, false is returned,
// if the input slot has more than one block, an error is returned.
func (bs *Server) isSlotCanonical(ctx context.Context, slot types.Slot) (bool, error) {
if slot == 0 {
return true, nil
}
hasBlockRoots, roots, err := bs.BeaconDB.BlockRootsBySlot(ctx, slot)
if err != nil {
return false, err
}
if !hasBlockRoots {
return false, nil
}
// Loop through all roots in slot, and
// check which one is canonical.
for _, rt := range roots {
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, rt)
if err != nil {
return false, err
}
if canonical {
return true, nil
}
}
return false, nil
}
// Determines whether a validator has already exited.
func validatorHasExited(validator *ethpb.Validator, currentEpoch types.Epoch) bool {
farFutureEpoch := params.BeaconConfig().FarFutureEpoch

View File

@@ -12,9 +12,11 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -115,6 +117,8 @@ func TestServer_ListValidatorBalances_NoResults(t *testing.T) {
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, gRoot))
bs.ReplayerBuilder = mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(headState))
wanted := &ethpb.ValidatorBalances{
Balances: make([]*ethpb.ValidatorBalances_Balance, 0),
TotalSize: int32(0),
@@ -172,6 +176,7 @@ func TestServer_ListValidatorBalances_DefaultResponse_NoArchive(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: st,
},
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(st)),
}
res, err := bs.ListValidatorBalances(
ctx,
@@ -200,6 +205,7 @@ func TestServer_ListValidatorBalances_PaginationOutOfRange(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: headState,
},
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(headState)),
}
wanted := fmt.Sprintf("page start %d >= list %d", 200, len(headState.Balances()))
@@ -248,6 +254,7 @@ func TestServer_ListValidatorBalances_Pagination_Default(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: headState,
},
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(headState)),
}
tests := []struct {
@@ -331,6 +338,7 @@ func TestServer_ListValidatorBalances_Pagination_CustomPageSizes(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: headState,
},
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(headState)),
}
tests := []struct {
@@ -398,6 +406,7 @@ func TestServer_ListValidatorBalances_OutOfRange(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: headState,
},
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(headState)),
}
req := &ethpb.ListValidatorBalancesRequest{Indices: []types.ValidatorIndex{types.ValidatorIndex(1)}, QueryFilter: &ethpb.ListValidatorBalancesRequest_Epoch{Epoch: 0}}
@@ -660,6 +669,7 @@ func TestServer_ListValidatorBalances_UnknownValidatorInResponse(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: headState,
},
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(headState)),
}
nonExistentPubKey := [32]byte{8}
@@ -1009,47 +1019,41 @@ func TestServer_ListValidators_DefaultPageSize(t *testing.T) {
}
func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
params.OverrideBeaconConfig(params.MainnetConfig())
transition.SkipSlotCache.Disable()
ctx := context.Background()
slot := types.Slot(0)
epochs := 10
numVals := uint64(10)
numEpochs := types.Epoch(30)
validators := make([]*ethpb.Validator, numEpochs)
for i := types.Epoch(0); i < numEpochs; i++ {
validators[i] = &ethpb.Validator{
ActivationEpoch: i,
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
}
}
want := make([]*ethpb.Validators_ValidatorContainer, len(validators))
for i := 0; i < len(validators); i++ {
want[i] = &ethpb.Validators_ValidatorContainer{
Index: types.ValidatorIndex(i),
Validator: validators[i],
}
}
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(20*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, st.SetValidators(validators))
b := util.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
gRoot, err := b.Block.HashTreeRoot()
b.Block.Slot = slot
sb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, st, gRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, numVals)
require.NoError(t, st.SetSlot(slot))
require.Equal(t, int(numVals), len(st.Validators()))
beaconDB := dbTest.SetupDB(t)
require.NoError(t, beaconDB.SaveBlock(ctx, sb))
require.NoError(t, beaconDB.SaveState(ctx, st, r))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, r))
secondsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 30
Genesis: time.Now().Add(time.Duration(-1*int64(30*secondsPerEpoch)) * time.Second),
Genesis: time.Now().Add(time.Duration(-1*int64(uint64(epochs)*secondsPerEpoch)) * time.Second),
},
StateGen: stategen.New(beaconDB),
}
addDefaultReplayerBuilder(bs, beaconDB)
req := &ethpb.ListValidatorsRequest{
QueryFilter: &ethpb.ListValidatorsRequest_Genesis{
@@ -1058,16 +1062,26 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
}
res, err := bs.ListValidators(context.Background(), req)
require.NoError(t, err)
assert.Equal(t, 30, len(res.ValidatorList))
assert.Equal(t, epochs, len(res.ValidatorList))
vals := st.Validators()
want := make([]*ethpb.Validators_ValidatorContainer, 0)
for i, v := range vals {
want = append(want, &ethpb.Validators_ValidatorContainer{
Index: types.ValidatorIndex(i),
Validator: v,
})
}
req = &ethpb.ListValidatorsRequest{
QueryFilter: &ethpb.ListValidatorsRequest_Epoch{
Epoch: 20,
Epoch: 10,
},
}
res, err = bs.ListValidators(context.Background(), req)
require.NoError(t, err)
assert.DeepSSZEqual(t, want, res.ValidatorList, "Incorrect number of validators")
require.Equal(t, len(want), len(res.ValidatorList), "incorrect number of validators")
assert.DeepSSZEqual(t, want, res.ValidatorList, "mismatch in validator values")
}
func TestServer_ListValidators_ProcessHeadStateSlots(t *testing.T) {
@@ -1270,8 +1284,8 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
},
GenesisTimeFetcher: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetValidatorActiveSetChanges(ctx, &ethpb.GetValidatorActiveSetChangesRequest{
QueryFilter: &ethpb.GetValidatorActiveSetChangesRequest_Genesis{Genesis: true},
})
@@ -1489,43 +1503,6 @@ func TestServer_GetValidatorParticipation_CannotRequestFutureEpoch(t *testing.T)
assert.ErrorContains(t, wanted, err)
}
func TestServer_GetValidatorParticipation_UnknownState(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(0))
epoch := types.Epoch(50)
slots := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch))
mockStateGen := &mockstategen.MockStateManager{
StatesBySlot: map[types.Slot]state.BeaconState{
0: (*v1.BeaconState)(nil),
},
}
bs := &Server{
BeaconDB: beaconDB,
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*int64(slots)) * time.Second),
},
StateGen: mockStateGen,
}
wanted := "Could not set up pre compute instance: failed to initialize precompute: nil inner state"
_, err = bs.GetValidatorParticipation(
ctx,
&ethpb.GetValidatorParticipationRequest{
QueryFilter: &ethpb.GetValidatorParticipationRequest_Epoch{
Epoch: 1,
},
},
)
assert.ErrorContains(t, wanted, err)
}
func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
helpers.ClearCache()
beaconDB := dbTest.SetupDB(t)
@@ -1552,7 +1529,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
}}
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(2*params.BeaconConfig().SlotsPerEpoch-1))
require.NoError(t, headState.SetSlot(16))
require.NoError(t, headState.SetValidators(validators))
require.NoError(t, headState.SetBalances(balances))
require.NoError(t, headState.AppendCurrentEpochAttestations(atts[0]))
@@ -1567,6 +1544,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, bRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, params.BeaconConfig().ZeroHash))
m := &mock.ChainService{State: headState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
@@ -1584,6 +1562,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
},
FinalizationFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetValidatorParticipation(ctx, &ethpb.GetValidatorParticipationRequest{QueryFilter: &ethpb.GetValidatorParticipationRequest_Epoch{Epoch: 1}})
require.NoError(t, err)
@@ -1632,7 +1611,7 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
}}
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(2*params.BeaconConfig().SlotsPerEpoch-1))
require.NoError(t, headState.SetSlot(0))
require.NoError(t, headState.SetValidators(validators))
require.NoError(t, headState.SetBalances(balances))
require.NoError(t, headState.AppendCurrentEpochAttestations(atts[0]))
@@ -1641,11 +1620,10 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
b := util.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
bRoot, err := b.Block.HashTreeRoot()
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, bRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, params.BeaconConfig().ZeroHash))
m := &mock.ChainService{State: headState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
@@ -1663,6 +1641,7 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
},
FinalizationFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetValidatorParticipation(ctx, &ethpb.GetValidatorParticipationRequest{QueryFilter: &ethpb.GetValidatorParticipationRequest_Epoch{Epoch: 1}})
require.NoError(t, err)
@@ -1688,32 +1667,36 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpochAltair(t *testing.T
beaconDB := dbTest.SetupDB(t)
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
transition.SkipSlotCache.Disable()
ctx := context.Background()
validatorCount := uint64(32)
genState, _ := util.DeterministicGenesisStateAltair(t, validatorCount)
c, err := altair.NextSyncCommittee(ctx, genState)
require.NoError(t, err)
require.NoError(t, genState.SetCurrentSyncCommittee(c))
bits := make([]byte, validatorCount)
for i := range bits {
bits[i] = 0xff
}
headState, _ := util.DeterministicGenesisStateAltair(t, validatorCount)
require.NoError(t, headState.SetSlot(2*params.BeaconConfig().SlotsPerEpoch-1))
require.NoError(t, headState.SetCurrentParticipationBits(bits))
require.NoError(t, headState.SetPreviousParticipationBits(bits))
require.NoError(t, genState.SetCurrentParticipationBits(bits))
require.NoError(t, genState.SetPreviousParticipationBits(bits))
b := util.NewBeaconBlockAltair()
b.Block.Slot = 16
ab, err := wrapper.WrappedAltairSignedBeaconBlock(b)
gsr, err := genState.HashTreeRoot(ctx)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, ab))
bRoot, err := b.Block.HashTreeRoot()
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockAltair())
require.NoError(t, wrapper.SetBlockStateRoot(gb, gsr))
require.NoError(t, err)
gRoot, err := gb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, bRoot))
m := &mock.ChainService{State: headState}
require.NoError(t, beaconDB.SaveState(ctx, genState, gRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, gb))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
m := &mock.ChainService{State: genState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
BeaconDB: beaconDB,
@@ -1722,15 +1705,11 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpochAltair(t *testing.T
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
CanonicalFetcher: &mock.ChainService{
CanonicalRoots: map[[32]byte]bool{
bRoot: true,
},
},
FinalizationFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetValidatorParticipation(ctx, &ethpb.GetValidatorParticipationRequest{QueryFilter: &ethpb.GetValidatorParticipationRequest_Epoch{Epoch: 1}})
res, err := bs.GetValidatorParticipation(ctx, &ethpb.GetValidatorParticipationRequest{QueryFilter: &ethpb.GetValidatorParticipationRequest_Epoch{Epoch: 0}})
require.NoError(t, err)
wanted := &ethpb.ValidatorParticipation{
@@ -1747,6 +1726,24 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpochAltair(t *testing.T
}
assert.DeepEqual(t, true, res.Finalized, "Incorrect validator participation respond")
assert.DeepEqual(t, wanted, res.Participation, "Incorrect validator participation respond")
res, err = bs.GetValidatorParticipation(ctx, &ethpb.GetValidatorParticipationRequest{QueryFilter: &ethpb.GetValidatorParticipationRequest_Epoch{Epoch: 1}})
require.NoError(t, err)
wanted = &ethpb.ValidatorParticipation{
GlobalParticipationRate: 1,
VotedEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
EligibleEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
CurrentEpochActiveGwei: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
CurrentEpochAttestingGwei: params.BeaconConfig().EffectiveBalanceIncrement, // Empty because after one epoch, current participation rotates to previous
CurrentEpochTargetAttestingGwei: params.BeaconConfig().EffectiveBalanceIncrement,
PreviousEpochActiveGwei: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
PreviousEpochAttestingGwei: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
PreviousEpochTargetAttestingGwei: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
PreviousEpochHeadAttestingGwei: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
}
assert.DeepEqual(t, true, res.Finalized, "Incorrect validator participation respond")
assert.DeepEqual(t, wanted, res.Participation, "Incorrect validator participation respond")
}
func TestGetValidatorPerformance_Syncing(t *testing.T) {
@@ -2070,6 +2067,7 @@ func BenchmarkListValidatorBalances(b *testing.B) {
State: headState,
},
}
addDefaultReplayerBuilder(bs, beaconDB)
req := &ethpb.ListValidatorBalancesRequest{PageSize: 100}
b.StartTimer()
@@ -2114,15 +2112,16 @@ func TestServer_GetIndividualVotes_ValidatorsDontExist(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
var slot types.Slot = 0
validators := uint64(64)
stateWithValidators, _ := util.DeterministicGenesisState(t, validators)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetValidators(stateWithValidators.Validators()))
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, beaconState.SetSlot(slot))
b := util.NewBeaconBlock()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch
b.Block.Slot = slot
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
gRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -2134,6 +2133,7 @@ func TestServer_GetIndividualVotes_ValidatorsDontExist(t *testing.T) {
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
// Test non exist public key.
res, err := bs.GetIndividualVotes(ctx, &ethpb.IndividualVotesRequest{
@@ -2230,6 +2230,7 @@ func TestServer_GetIndividualVotes_Working(t *testing.T) {
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetIndividualVotes(ctx, &ethpb.IndividualVotesRequest{
Indices: []types.ValidatorIndex{0, 1},
@@ -2268,9 +2269,10 @@ func TestServer_GetIndividualVotes_WorkingAltair(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
var slot types.Slot = 0
validators := uint64(32)
beaconState, _ := util.DeterministicGenesisStateAltair(t, validators)
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, beaconState.SetSlot(slot))
pb, err := beaconState.CurrentEpochParticipation()
require.NoError(t, err)
@@ -2281,7 +2283,7 @@ func TestServer_GetIndividualVotes_WorkingAltair(t *testing.T) {
require.NoError(t, beaconState.SetPreviousParticipationBits(pb))
b := util.NewBeaconBlock()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch
b.Block.Slot = slot
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
gRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -2293,6 +2295,7 @@ func TestServer_GetIndividualVotes_WorkingAltair(t *testing.T) {
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetIndividualVotes(ctx, &ethpb.IndividualVotesRequest{
Indices: []types.ValidatorIndex{0, 1},
@@ -2378,6 +2381,7 @@ func TestServer_GetIndividualVotes_AltairEndOfEpoch(t *testing.T) {
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetIndividualVotes(ctx, &ethpb.IndividualVotesRequest{
Indices: []types.ValidatorIndex{0, 1},
@@ -2499,88 +2503,3 @@ func Test_validatorStatus(t *testing.T) {
})
}
}
func TestServer_isSlotCanonical(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
var roots [][32]byte
cRoots := map[[32]byte]bool{}
for i := 1; i < 100; i++ {
b := util.NewBeaconBlock()
b.Block.Slot = types.Slot(i)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
if i%2 == 0 {
cRoots[br] = true
}
roots = append(roots, br)
}
bs := &Server{
BeaconDB: beaconDB,
CanonicalFetcher: &mock.ChainService{
CanonicalRoots: cRoots,
},
}
for i := range roots {
slot := types.Slot(i + 1)
c, err := bs.isSlotCanonical(ctx, slot)
require.NoError(t, err)
if slot%2 == 0 {
require.Equal(t, true, c)
} else {
require.Equal(t, false, c)
}
}
}
func TestServer_isSlotCanonical_MultipleBlocks(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
var roots [][32]byte
cRoots := map[[32]byte]bool{}
for i := 1; i < 100; i++ {
b := util.NewBeaconBlock()
b.Block.Slot = types.Slot(i)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
if i%2 == 0 {
cRoots[br] = true
// Save a block in the same slot
b = util.NewBeaconBlock()
b.Block.Slot = types.Slot(i)
b.Block.ProposerIndex = 100
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
}
roots = append(roots, br)
}
bs := &Server{
BeaconDB: beaconDB,
CanonicalFetcher: &mock.ChainService{
CanonicalRoots: cRoots,
},
}
for i := range roots {
slot := types.Slot(i + 1)
c, err := bs.isSlotCanonical(ctx, slot)
require.NoError(t, err)
if slot%2 == 0 {
require.Equal(t, true, c)
} else {
require.Equal(t, false, c)
}
}
}
func TestServer_isSlotCanonicalForSlot0(t *testing.T) {
ctx := context.Background()
bs := &Server{}
c, err := bs.isSlotCanonical(ctx, 0)
require.NoError(t, err)
require.Equal(t, true, c)
}

View File

@@ -50,7 +50,9 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

Some files were not shown because too many files have changed in this diff.