Compare commits

...

65 Commits

Author SHA1 Message Date
prylabs-bulldozer[bot]
2a3783936c Merge refs/heads/develop into fixSkipSlotCache 2022-06-16 13:14:42 +00:00
Nishant Das
4de92bafc4 Improve Field Trie Recomputation (#10884)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-06-16 13:14:29 +00:00
prylabs-bulldozer[bot]
35cafbff46 Merge refs/heads/develop into fixSkipSlotCache 2022-06-16 04:29:46 +00:00
terencechain
69438583e5 Pad Uint256's SSZBytes to length 32 (#10889)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-16 04:29:32 +00:00
prylabs-bulldozer[bot]
2f134d0f9d Merge refs/heads/develop into fixSkipSlotCache 2022-06-16 01:11:22 +00:00
Raul Jordan
e81f3fed01 Remove Extraneous BoltDB Logs (#10888) 2022-06-16 01:11:07 +00:00
prylabs-bulldozer[bot]
09c99a937a Merge refs/heads/develop into fixSkipSlotCache 2022-06-15 21:51:56 +00:00
Raul Jordan
1b2a5fb4a5 Update CODEOWNERS (#10887) 2022-06-15 21:51:44 +00:00
prylabs-bulldozer[bot]
cf40cc9486 Merge refs/heads/develop into fixSkipSlotCache 2022-06-15 18:35:12 +00:00
Jie Hou
6c878b1665 Refactor: Continue reducing cognitive complexity (#10862)
* Refactor beacon-chain/db/kv/state.go

* Refactor api/gateway/apimiddleware/process_field.go

* Refactor beacon-chain/sync/initial-sync/blocks_queue.go

* Refactor validator/db/kv/migration_optimal_attester_protection.go

* goimports

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-06-15 18:34:59 +00:00
prylabs-bulldozer[bot]
0d246b1d3e Merge refs/heads/develop into fixSkipSlotCache 2022-06-15 17:26:17 +00:00
james-prysm
838963c9f7 validator registration request bug: reusing public keys (#10883)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-15 17:26:05 +00:00
prylabs-bulldozer[bot]
e7c4ea4070 Merge refs/heads/develop into fixSkipSlotCache 2022-06-15 16:30:10 +00:00
kasey
7b38f8b8fc submit lists of validator registrations (#10882)
* submit lists of validator registrations

* RegisterValidator to take a list

* Gazelle

* Fix go imports

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-15 16:29:56 +00:00
prylabs-bulldozer[bot]
6b334d3d9f Merge refs/heads/develop into fixSkipSlotCache 2022-06-15 15:06:00 +00:00
Nishant Das
23e8e695cc Fix Sepolia Testnet Initialization (#10886) 2022-06-15 15:05:44 +00:00
prylabs-bulldozer[bot]
89997106bf Merge refs/heads/develop into fixSkipSlotCache 2022-06-15 10:04:04 +00:00
Sammy Rosso
ce9eaae22e Add payload data logging (#10845)
* Add logging of block payload data

Added a new func logBlockPayloadData that includes logging of the
block number and the gas utilized.
Related to #10795.

* Replace Info with Debug + renamed func

Renamed the function to be clearer and replaced Info logging with Debug.
Related to #10795.

* Compute correct value for gas utilized

Related to #10795.

* Round result of gas utilized to 2 decimal places

* Add new error message

* Check if block is an Execution block

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* Fix missing imports

* Undo changes

* Update beacon-chain/blockchain/receive_block.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Added error logging to log statements

Changed the error handling in the log statements: instead of returning the
error, we now log it.

Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-15 10:03:48 +00:00
prylabs-bulldozer[bot]
68264f90c9 Merge refs/heads/develop into fixSkipSlotCache 2022-06-15 09:05:35 +00:00
Nishant Das
7010e8dec8 Graduate Prune Canonical Attestations Feature (#10623)
* graduate canonical prune feat

* fix test

* fix tests

Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-15 09:05:19 +00:00
prylabs-bulldozer[bot]
4ffab1001d Merge refs/heads/develop into fixSkipSlotCache 2022-06-15 08:02:50 +00:00
Nishant Das
9e4ba75e71 Batch Scenario Runs Into Single Test (#10878)
* batch scenarios

* fix

* fix

* Update testing/endtoend/endtoend_test.go
2022-06-15 08:02:31 +00:00
prylabs-bulldozer[bot]
79b1cc8eba Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 23:23:39 +00:00
kasey
044a4ad5a3 Ignore genesis state url and checkpoint sync flags after first run of prysm (#10881)
* ignore remote genesis url flag if present in db

* ignore checkpoint sync flags if initialized

* lint

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-14 23:23:25 +00:00
prylabs-bulldozer[bot]
5a2269d957 Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 21:52:14 +00:00
Radosław Kapka
690084cab6 Enable native state for Sepolia (#10880)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-14 21:51:58 +00:00
prylabs-bulldozer[bot]
b6c6053d39 Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 18:43:52 +00:00
james-prysm
88db7117d2 Adding additional checksum check for fee recipient. (#10879)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-14 18:43:37 +00:00
prylabs-bulldozer[bot]
114802f3a4 Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 17:47:26 +00:00
mick
1faa292615 Add is_optimistic to SyncDetails, hydrate via ValidateSync (#10692)
* cache test

* oh

* syntax fix

* error fix

* tinker

* tinker

* newlines?

* no-whitespace?

* feedback

* fix

* comment

* comments

* need to figure out how to lint locally...

* feedback

* fixes

* progress

* progress

* dedupe

* s

* working

* remove empty lines

* update test

* return errors properly

* make helpers publicly visible

* fix tests

Co-authored-by: rkapka <rkapka@wp.pl>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2022-06-14 17:47:09 +00:00
prylabs-bulldozer[bot]
879eb2a0b2 Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 14:50:21 +00:00
terencechain
434018a4b9 Add Sepolia config (#10868) 2022-06-14 14:50:05 +00:00
prylabs-bulldozer[bot]
ba264587e7 Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 13:12:43 +00:00
Nishant Das
54624569bf Fix Fuzzing Failures in Our CI (#10875)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-14 13:12:28 +00:00
prylabs-bulldozer[bot]
9366350283 Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 11:47:44 +00:00
Håvard Anda Estensen
b55ddb5a34 Use go:build lines and remove obsolete +build lines (#10704)
* Use go:build lines and remove obsolete +build lines

* Run gazelle

* Update crypto/bls/blst/stub.go

* Update crypto/bls/blst/stub.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-06-14 11:47:27 +00:00
prylabs-bulldozer[bot]
c75793642e Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 09:25:58 +00:00
terencechain
a38de90435 Move computeCheckpoints to private (#10874)
* Move computeCheckpoints to private

* Feedback

* Godoc

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-14 09:25:41 +00:00
prylabs-bulldozer[bot]
9b389dc234 Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 08:26:01 +00:00
Michael Blau
d454d30f19 Merge ascii art banner (#10773)
* Add Merge ASCII art banner

* Add merge ASCII art banner

* gofmt

* Go fmt

* Fix go fmt again

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-06-14 08:25:42 +00:00
prylabs-bulldozer[bot]
7ac43eb097 Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 06:19:50 +00:00
Jie Hou
b04dd9fe5c Enable gocognit linter (#10867)
* Enable gocognit linter

Currently the gocognit complexity threshold is set to 95 to make
sure no existing files will fail the linter. In the future we will
reduce this threshold to a much lower one.

The recommended threshold is usually 30. Our code base has a maximum
of 97 right now... but it's better late than never to pay attention
to our code complexity.

* Test to see github complains

* Resume to 97

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-06-14 14:19:34 +08:00
prylabs-bulldozer[bot]
ed95b6d886 Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 00:55:16 +00:00
kasey
8140a1a7e0 update info message about ws checkpoint (#10871)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-14 00:55:00 +00:00
prylabs-bulldozer[bot]
6678f02cdb Merge refs/heads/develop into fixSkipSlotCache 2022-06-14 00:19:21 +00:00
terencechain
cab9917317 Fix message typo for ErrorIs (#10873)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-06-14 00:19:04 +00:00
prylabs-bulldozer[bot]
06e99d6a04 Merge refs/heads/develop into fixSkipSlotCache 2022-06-13 22:29:42 +00:00
terencechain
4c4fb9f2c0 Fix gosec scan: G112 (CWE-400) Potential Slowloris Attack (#10872) 2022-06-13 22:29:26 +00:00
prylabs-bulldozer[bot]
dba6e6c411 Merge refs/heads/develop into fixSkipSlotCache 2022-06-13 15:18:02 +00:00
Mike Neuder
80f4f22401 Refactor validator accounts exit to remove cli context dependency (#10841)
* Refactor validator accounts exit to remove cli context dependency

* bazel run //:gazelle -- fix

* fixing deepsource findings

* fixing broken test

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2022-06-13 15:17:46 +00:00
prylabs-bulldozer[bot]
4bb8ad6972 Merge refs/heads/develop into fixSkipSlotCache 2022-06-11 17:37:51 +00:00
terencechain
dd296cbd8a Disallow lower justified epoch to override higher epoch (#10865) 2022-06-11 17:37:37 +00:00
prylabs-bulldozer[bot]
8f8e94850b Merge refs/heads/develop into fixSkipSlotCache 2022-06-11 15:54:47 +00:00
terencechain
f9e3b0a3c2 Active balance: return EFFECTIVE_BALANCE_INCREMENT as min (#10866) 2022-06-11 08:54:33 -07:00
prylabs-bulldozer[bot]
6d457b35e4 Merge refs/heads/develop into fixSkipSlotCache 2022-06-10 05:02:59 +00:00
terencechain
a58809597e Sync: don't process pending blocks w/o genesis time (#10750)
* Sync: don't process pending blocks w/o genesis time

* Update pending_blocks_queue.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-06-10 05:02:47 +00:00
Raul Jordan
9e339bed05 Merge branch 'develop' into fixSkipSlotCache 2022-06-10 04:05:06 +00:00
Nishant Das
7f443e8387 Add Optimistic Sync Scenario Testing (#10836)
* add latest changes

* fix it

* add multiclient support

* fix tests

* Apply suggestions from code review

* fix test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-09 23:24:53 +00:00
Potuz
18fc17c903 Forkchoice checkpoints (#10823)
* double_tree_changes

* protoarray changes

* beacon-chain changes

* spec tests and debug rpc fixes

* more conflicts

* more conflicts

* Terence's review

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-09 22:28:30 +00:00
Sammy Rosso
d7b01b9d81 Fix default mainnet log when using chain config (#10855)
* Fix default mainnet log when using chain config

Add a log to specify the use of a chain-config-file rather than
defaulting to that of mainnet.
Related to #10821.

* Add more specific log message

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-09 21:31:59 +00:00
terencechain
7ebd9035dd Add ttd metric (#10851)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-09 20:35:38 +00:00
Radosław Kapka
578fea73d7 API's IsOptimistic - update header.StateRoot only when block is not missing (#10852)
* bug fix

* tests

* comment

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-09 19:35:35 +00:00
terencechain
7fcadbe3ef Add optimistic status to chainhead (#10842)
* Add optimistic status to chainhead

* Fix tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-09 17:37:52 +00:00
Radosław Kapka
fce9e6883d Native Blocks Ep. 1 - New types and functions (#10837)
* types and functions

* partially done tests

* refactor

* remaining Proto() tests

* remaining proto.go tests

* simplify UnmarshalSSZ and move BeaconBlockIsNil

* getters_test

* remove errAssertionFailed

* review feedback

* remove cloning protobuf

* fmt

* change IsNil

* fix tests
2022-06-09 13:13:02 +00:00
nisdas
9cba1012f2 add important skip slot check 2022-03-29 22:39:05 +08:00
243 changed files with 6438 additions and 2589 deletions

10
.github/CODEOWNERS vendored
View File

@@ -9,8 +9,8 @@ deps.bzl @prysmaticlabs/core-team
# Radek and Nishant are responsible for changes that can affect the native state feature.
# See https://www.notion.so/prysmaticlabs/Native-Beacon-State-Redesign-6cc9744b4ec1439bb34fa829b36a35c1
/beacon-chain/state/fieldtrie/ @rkapka @nisdas
/beacon-chain/state/v1/ @rkapka @nisdas
/beacon-chain/state/v2/ @rkapka @nisdas
/beacon-chain/state/v3/ @rkapka @nisdas
/beacon-chain/state/state-native/ @rkapka @nisdas
/beacon-chain/state/fieldtrie/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v1/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v2/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v3/ @rkapka @nisdas @rauljordan
/beacon-chain/state/state-native/ @rkapka @nisdas @rauljordan

View File

@@ -64,7 +64,6 @@ jobs:
- name: Golangci-lint
uses: golangci/golangci-lint-action@v2
with:
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.18
version: v1.45.2
skip-go-installation: true
@@ -88,11 +87,14 @@ jobs:
- name: Build
# Use blst tag to allow go and bazel builds for blst.
run: go build -v ./...
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
# fuzz leverage go tag based stubs at compile time.
# Building and testing with these tags should be checked and enforced at pre-submit.
- name: Test for fuzzing
run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
# Tests run via Bazel for now...
# - name: Test
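The workflow comment above notes that fuzzing leverages go-tag-based stubs at compile time, which is why the fuzz test step builds with -tags=fuzz,develop. A minimal sketch of that pattern follows; the package, file names, and the signatureValid function are hypothetical and not taken from the Prysm tree:

// verify_fuzz.go — compiled only with -tags=fuzz
//go:build fuzz

package verify

// signatureValid is stubbed out so fuzz targets can reach deeper code paths
// without paying for real signature verification.
func signatureValid(sig, msg, pub []byte) bool { return true }

// verify.go — default build
//go:build !fuzz

package verify

// signatureValid would perform the real verification in normal builds;
// the body is elided in this sketch.
func signatureValid(sig, msg, pub []byte) bool {
	_, _, _ = sig, msg, pub
	return false // placeholder: real builds verify the signature here
}

Because the tags are mutually exclusive, a plain go build and go test -tags=fuzz,develop each see exactly one definition of the symbol.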

View File

@@ -1,69 +1,26 @@
linters-settings:
govet:
check-shadowing: true
settings:
printf:
funcs:
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
golint:
min-confidence: 0
gocyclo:
min-complexity: 10
maligned:
suggest-new: true
dupl:
threshold: 100
goconst:
min-len: 2
min-occurrences: 2
depguard:
list-type: blacklist
packages:
# logging is allowed only by logutils.Log, logrus
# is allowed to use only in logutils package
- github.com/sirupsen/logrus
misspell:
locale: US
lll:
line-length: 140
goimports:
local-prefixes: github.com/golangci/golangci-lint
gocritic:
enabled-tags:
- performance
- style
- experimental
disabled-checks:
- wrapperFunc
run:
skip-files:
- validator/web/site_data.go
- .*_test.go
skip-dirs:
- proto
- tools/analyzers
timeout: 10m
go: '1.18'
linters:
disable-all: true
enable:
- deadcode
- goconst
- goimports
- golint
- gosec
- misspell
- structcheck
- typecheck
- unparam
- varcheck
- gofmt
- unused
disable-all: true
- errcheck
- gosimple
- gocognit
run:
skip-dirs:
- proto/
- ^contracts/
deadline: 10m
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 97
# golangci.com configuration
# https://github.com/golangci/golangci/wiki/Configuration
service:
golangci-lint-version: 1.15.0 # use the fixed version to not introduce new linters unexpectedly
prepare:
- echo "here I can run custom commands, but no preparation needed for this repo"
output:
print-issued-lines: true
sort-results: true

View File

@@ -28,6 +28,7 @@ const (
)
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
@@ -199,9 +200,15 @@ func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
// fields with 0x prefixes) and posts to the builder validator registration endpoint.
func (c *Client) RegisterValidator(ctx context.Context, svr *ethpb.SignedValidatorRegistrationV1) error {
v := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
body, err := json.Marshal(v)
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
if len(svr) == 0 {
return errors.Wrap(errMalformedRequest, "empty validator registration list")
}
vs := make([]*SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {
vs[i] = &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr[i]}
}
body, err := json.Marshal(vs)
if err != nil {
return errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
}
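With the slice-based signature shown above, callers submit the whole batch in one request instead of posting once per public key. A minimal caller sketch, assuming a builder Client c has already been constructed and the registrations are already signed; the registerAll helper name and the builder/ethpb import aliases are assumptions for illustration:

// registerAll submits every signed registration in one builder API call,
// matching the new slice-based RegisterValidator signature shown above.
func registerAll(ctx context.Context, c *builder.Client, regs []*ethpb.SignedValidatorRegistrationV1) error {
	if len(regs) == 0 {
		return nil // the client rejects an empty list, so skip the call entirely
	}
	// One POST to /eth/v1/builder/validators carrying the whole list.
	return c.RegisterValidator(ctx, regs)
}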

View File

@@ -73,7 +73,7 @@ func TestClient_Status(t *testing.T) {
func TestClient_RegisterValidator(t *testing.T) {
ctx := context.Background()
expectedBody := `{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}`
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}]`
expectedPath := "/eth/v1/builder/validators"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -104,7 +104,7 @@ func TestClient_RegisterValidator(t *testing.T) {
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
},
}
require.NoError(t, c.RegisterValidator(ctx, reg))
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
}
func TestClient_GetHeader(t *testing.T) {

View File

@@ -63,7 +63,7 @@ func sszBytesToUint256(b []byte) Uint256 {
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
func (s Uint256) SSZBytes() []byte {
return bytesutil.ReverseByteOrder(s.Int.Bytes())
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
}
var errUnmarshalUint256Failed = errors.New("unable to UnmarshalText into a Uint256 value")

View File

@@ -694,9 +694,10 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
}
func TestRoundTripUint256(t *testing.T) {
vs := "452312848583266388373324160190187140051835877600158453279131187530910662656"
vs := "4523128485832663883733241601901871400518358776001584532791311875309106626"
u := stringToUint256(vs)
sb := u.SSZBytes()
require.Equal(t, 32, len(sb))
uu := sszBytesToUint256(sb)
require.Equal(t, true, bytes.Equal(u.SSZBytes(), uu.SSZBytes()))
require.Equal(t, vs, uu.String())
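The padding added above matters because big.Int.Bytes() strips leading zero bytes, so small values would otherwise serialize to fewer than 32 bytes and fail the new length check in the test. A standalone sketch of the same reverse-then-pad behavior using only the standard library; the sszUint256 helper name is made up for illustration:

package main

import (
	"fmt"
	"math/big"
)

// sszUint256 mirrors the fixed SSZBytes: little-endian byte order, right-padded
// with zeros to a fixed 32-byte width.
func sszUint256(v *big.Int) []byte {
	be := v.Bytes() // big-endian, leading zeros stripped
	le := make([]byte, 32)
	for i, b := range be {
		le[len(be)-1-i] = b // reverse into little-endian order
	}
	return le // indexes beyond len(be) stay zero, i.e. padded to 32
}

func main() {
	small := big.NewInt(23)
	fmt.Println(len(sszUint256(small))) // 32, even for tiny values
}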

View File

@@ -31,26 +31,26 @@ func processField(s interface{}, processors []fieldProcessor) error {
sliceElem := t.Field(i).Type.Elem()
kind := sliceElem.Kind()
// Recursively process slices to struct pointers.
if kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct {
switch {
case kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct:
for j := 0; j < v.Field(i).Len(); j++ {
if err := processField(v.Field(i).Index(j).Interface(), processors); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}
// Process each string in string slices.
if kind == reflect.String {
case kind == reflect.String:
for _, proc := range processors {
_, hasTag := t.Field(i).Tag.Lookup(proc.tag)
if hasTag {
for j := 0; j < v.Field(i).Len(); j++ {
if err := proc.f(v.Field(i).Index(j)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
if !hasTag {
continue
}
for j := 0; j < v.Field(i).Len(); j++ {
if err := proc.f(v.Field(i).Index(j)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}
}
// Recursively process struct pointers.
case reflect.Ptr:
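The hunk above is representative of the cognitive-complexity refactor in #10862: nested if blocks become a switch with an early continue guard, which lowers the nesting penalty without changing behavior. A generic, self-contained sketch of the same transformation; the field type and its contents are made up for illustration:

package main

import "fmt"

type field struct {
	kind string
	tag  bool
	vals []string
}

// process shows the flattened shape: one switch on the field kind, with an
// early continue guard replacing a nested "if hasTag { for ... }" block.
func process(fields []field) {
	for _, f := range fields {
		switch f.kind {
		case "slice-of-struct":
			for _, v := range f.vals {
				fmt.Println("recurse into", v)
			}
		case "string":
			if !f.tag {
				continue // guard clause: skip untagged fields without extra nesting
			}
			for _, v := range f.vals {
				fmt.Println("process", v)
			}
		}
	}
}

func main() {
	process([]field{{kind: "string", tag: true, vals: []string{"0xabc"}}})
}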

View File

@@ -121,8 +121,9 @@ func (g *Gateway) Start() {
}
g.server = &http.Server{
Addr: g.cfg.gatewayAddr,
Handler: corsMux,
Addr: g.cfg.gatewayAddr,
Handler: corsMux,
ReadHeaderTimeout: time.Second,
}
go func() {
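Setting ReadHeaderTimeout, as above, is the standard mitigation that gosec G112 asks for: without it a client can hold a connection open indefinitely by trickling request headers one byte at a time (Slowloris). A minimal standalone sketch using only the standard library:

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:              ":8080",
		Handler:           http.NewServeMux(),
		ReadHeaderTimeout: time.Second, // drop clients that trickle headers (Slowloris)
	}
	log.Fatal(srv.ListenAndServe())
}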

View File

@@ -10,6 +10,7 @@ go_library(
"head_sync_committee_info.go",
"init_sync_process_block.go",
"log.go",
"merge_ascii_art.go",
"metrics.go",
"new_slot.go",
"options.go",

View File

@@ -80,9 +80,9 @@ func TestHeadRoot_Nil(t *testing.T) {
}
func TestService_ForkChoiceStore(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
p := c.ForkChoiceStore()
require.Equal(t, 0, int(p.FinalizedEpoch()))
require.Equal(t, types.Epoch(0), p.FinalizedCheckpoint().Epoch)
}
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
@@ -327,7 +327,7 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
}
func TestService_ChainHeads_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}}
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -357,7 +357,7 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -452,7 +452,7 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -472,7 +472,7 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -495,7 +495,7 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -510,7 +510,7 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -526,7 +526,7 @@ func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -591,7 +591,7 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -655,7 +655,7 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10

View File

@@ -1,5 +1,4 @@
//go:build !develop
// +build !develop
package blockchain

View File

@@ -11,6 +11,7 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
bstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -43,7 +44,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -184,7 +185,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
fc := &ethpb.Checkpoint{Epoch: 1, Root: tt.finalizedRoot[:]}
fc := &ethpb.Checkpoint{Epoch: 0, Root: tt.finalizedRoot[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
arg := &notifyForkchoiceUpdateArg{
@@ -289,7 +290,7 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))
// Insert blocks into forkchoice
fcs := doublylinkedtree.New(0, 0)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -326,7 +327,9 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
headRoot, err := fcs.Head(ctx, bra, []uint64{50, 100, 200})
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: bra}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(jc))
headRoot, err := fcs.Head(ctx, []uint64{50, 100, 200})
require.NoError(t, err)
require.Equal(t, brg, headRoot)
@@ -347,7 +350,7 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.ErrorIs(t, ErrInvalidPayload, err)
// Ensure Head is D
headRoot, err = fcs.Head(ctx, bra, service.justifiedBalances.balances)
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
require.NoError(t, err)
require.Equal(t, brd, headRoot)
@@ -364,7 +367,7 @@ func Test_NotifyNewPayload(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -582,7 +585,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -625,7 +628,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -827,7 +830,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
stateGen := stategen.New(beaconDB)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stateGen),
@@ -927,7 +930,7 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -983,7 +986,7 @@ func TestService_getPayloadHash(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
@@ -95,16 +96,23 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
return [32]byte{}, err
}
if features.Get().EnableForkChoiceDoublyLinkedTree {
s.cfg.ForkChoiceStore = doublylinkedtree.New(j.Epoch, f.Epoch)
s.cfg.ForkChoiceStore = doublylinkedtree.New()
} else {
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch)
s.cfg.ForkChoiceStore = protoarray.New()
}
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, st, f, j); err != nil {
return [32]byte{}, err
}
}
return s.cfg.ForkChoiceStore.Head(ctx, headStartRoot, balances)
jc := &forkchoicetypes.Checkpoint{Epoch: j.Epoch, Root: headStartRoot}
fc := &forkchoicetypes.Checkpoint{Epoch: f.Epoch, Root: s.ensureRootNotZeros(bytesutil.ToBytes32(f.Root))}
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc); err != nil {
return [32]byte{}, err
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fc); err != nil {
return [32]byte{}, err
}
return s.cfg.ForkChoiceStore.Head(ctx, balances)
}
// This saves head info to the local service cache, it also saves the
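The reworked updateHead above follows the forkchoice API changes from #10823: stores are constructed without epoch arguments, the justified and finalized checkpoints are pushed in explicitly, and Head no longer takes a justified root. A condensed sketch of that call order, assuming the ForkChoicer interface name and the import aliases from the diff, with blocks assumed to have been inserted via InsertNode beforehand:

// recomputeHead sketches the post-#10823 ordering: update both checkpoints on
// the store, then ask for the head using only the justified balances.
func recomputeHead(ctx context.Context, store forkchoice.ForkChoicer, j, f *forkchoicetypes.Checkpoint, balances []uint64) ([32]byte, error) {
	if err := store.UpdateJustifiedCheckpoint(j); err != nil {
		return [32]byte{}, err
	}
	if err := store.UpdateFinalizedCheckpoint(f); err != nil {
		return [32]byte{}, err
	}
	// Head reads the checkpoints set above instead of receiving a root argument.
	return store.Head(ctx, balances)
}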

View File

@@ -174,10 +174,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
headRoot, err := service.updateHead(context.Background(), []uint64{})
_, err = service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
}
func Test_notifyNewHeadEvent(t *testing.T) {
@@ -613,7 +611,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -630,7 +628,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcp := &ethpb.Checkpoint{
Root: bellatrixBlkRoot[:],
Epoch: 1,
Epoch: 0,
}
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})

View File

@@ -5,6 +5,8 @@ import (
"fmt"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -80,3 +82,30 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
}).Info("Synced new block")
return nil
}
// logs payload related data every slot.
func logPayload(block interfaces.BeaconBlock) error {
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")
}
if !isExecutionBlk {
return nil
}
payload, err := block.Body().ExecutionPayload()
if err != nil {
return err
}
if payload.GasLimit == 0 {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed) / float64(payload.GasLimit)
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash)),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}).Debug("Synced new payload")
return nil
}
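As a quick check on the gasUtilized computation above: it is simply GasUsed divided by GasLimit, reported as a fraction, so a block that used 15,000,000 gas of a 30,000,000 limit logs 0.50. A tiny standalone example of the same arithmetic and formatting:

package main

import "fmt"

func main() {
	var gasUsed, gasLimit uint64 = 15_000_000, 30_000_000
	// Same computation and %.2f formatting as logPayload uses for "gasUtilized".
	gasUtilized := float64(gasUsed) / float64(gasLimit)
	fmt.Printf("gasUtilized=%.2f\n", gasUtilized) // prints gasUtilized=0.50
}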

View File

@@ -0,0 +1,81 @@
package blockchain
var mergeAsciiArt = `
+?$$$$$$?*; ;*?$$$$?*; +!$$$$$$?!;
!##$???$@##$+ !&#@$??$&#@* +@#&$????$##+
!##; +@#&; !##* ;$##* @#$ ;&#+
!##; !##+ ;##$ @#@ $#&* ++;
!##; ;@#&; *##* ?##; ;$##&$!+;
!##?!!!?$##$+ !##+ !##+ ;!$@##&$!;
!##@@@@$$!+ *##* ?##; ;*?@#&!
!##; ;##$ @#@ ;?$; ?##+
!##; ?##! ;?##+ ;##+ ;$#&;
!##; !&#&$??$&#@* ;&#&$$??$$&#@+
+??; ;*?$$$$?+ ;+!?$$$$$!+
;;;;
;+!??$$$?!*+; ;*?$@&&&&&@@$!*;
*?@############&$?+ ;!@###############&$!;
;!@####&@$????$$@#####@! ;?&####$?*++;++*!?@####&?;
*@###&$*; ;*$&###@* *&###@!; ;!@###&!
!###&!; ;?&###? *####! *&###?
!###@+ ;$###$ +###&+ ;$###?
;###&; $###? ;;+*!??$$$$$$$$??!$###* ;@###*
!###! &###?$@&#####################@$?!+; +###$
$###+ ;*?&#################################&$?*; &##&;
$###+ ;!$&########################################&$!; &###;
*###? ;!$################################################$*; +###@
;@###+ +$&####################################################&?; $###!
+###&+ *$##########################################################$+ ;$###$;
*&###?; +$##############################################################?*@###$;
+$###&?+ ;$#####################################################################?;
*@####@&#####################################################################*
+$&##################@?!*++*!$&###################&$?*++*!?$###############&*
$###############&?+ ;!@###############@!; ;!@##############!
;$##############&!; *&###########&!; !&#############!
$##############@+ +@* ;$#########$; +@* ;$#############!
?##############$; *###* $#######$; +&##! ?#############+
+##############$ !#####! $#####@; *#####? ?############@;
@#############@; !#######! ;&####+ *#######? $############?
+##############+ ?#########? $###$ !#########$ +############&;
$#############$ ;$###########? !###? !###########$; $############!
@#############* !#############! ?###$ *#############? +############@
;&############&; +?@#######&$+; ;&####; ;+?@#######&?+; @############;
;#############@ +$&#&$*; ;$#####@; +$&#&$*; $############+
;#############$ *+ ;+; ;*; *&#######&! ;*; ;+; +*; $############*
&############@ ;$@!; +$@! ;?###########$; *@$* ;*$@+ $############*
$############&; ;$#&$*+!@##* +@#############@+ +&#@?++$&#@; @############+
*#############* $######&+ !#################? +@######$; +#############;
@############$ ?####@+ ;$###################$; ;@####$ $############@
*#############* !##@; +@#####################&* ;$##? +#############!
$#############+ *$; ?#########################?; $! +&############&;
;&#############! +$###########################@+ *&#############?
+##############$*; ;?###############################$+ *$##############@;
*###############&$?!!$###################################$?!?$&################+
*###################################&@$$$$@&#################################!
+&##############################&?+; ;+?&#############################?
;$############################@; ;@###########################!
?###########################* *##########################*
+@#############&$!+$#######? ?########$+!$&###########@+
!&###########&; $#######$+ +$########? +&##########?;
;?###########&* *@#######@$!; ;$@########@* *##########$+
;?&##########?; ;*$&####&$* ;!$&####&$* ;$#########@*
;!@#########@!; ;++*+; ;*; ;+*++; ;!&########$*
*$&########&$*; ;*$&#&$*; +*$&#######&?+
;*$&#########&@$$@&#########&@$$@&########&$*;
;+?$&##############################&$?+;
;+!?$@&###################&$$!+;
;++*!??$$$$$$$?!!*+;
;;; ;;+*++; ;;;++;;;++;;; ;+++;;;++++; ;;; ;; ;;; ;;;++;;;+++;; ;;;+++++++; ;+++++;
;@#&+ +$&&@$@&#? @#@@@&#&@@@#$ ;$@@@@#&@@@@! !#@ !#$ +&#&; !#&@@@#&@@@&&; !#&@@@@@@@* $#&@@@&&$+
$#?#@; ?#&!; *#$ &&;;;$#!;;*#$ ;;;*#@;;;; $#? +#&; ;@#?#$ !#!;;*#@;;;@#; ?#$;;;;;;; @#* ;!&#?
*#$ ?#$ *#&; ;+; ++ $#! ;+; *#$ ;&#+ $#* ?#? $#! ;+; +#@ ++ ?#$ $#* ;&#*
;&&; @#* $#$ $#! *#$ !#@; !#$ +#@ ;&#; +#@ ?#@??????! $#* $#$
$#!;;;*#&; $#? $#! *#$ $#? ;&&; ;@#*;;;!#$ +#@ ?#@??????! $#* $#$
!##@@@@@&#$ !#@; $#! *#$ ;&#+ $#* ?#&@@@@@##! +#@ ?#$ $#* @#*
;&&+;;;;;;@#* ;$#$+ @$ $#! *#$ *#@!#? *#@;;;;;;+##+ +#@ ?#$ $#* +$#$
$#* +#&; ;?&#@$$$$#$ +$$&#@$$; ;$$$$@##$$$$* $##@; ;&#+ !#@; $$@##$$* ?#&$$$$$$$! @#@$$$@&@!
;** +*; +*!?!*+; ;******* ;***********+ ;**; ;*+ **; *******+ ;*********+ +*!!!!*;
`

View File

@@ -13,7 +13,7 @@ import (
func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),

View File

@@ -5,7 +5,9 @@ import (
"context"
"github.com/pkg/errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/time/slots"
)
@@ -68,7 +70,8 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(bj); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: bj.Epoch, Root: bytesutil.ToBytes32(bj.Root)}); err != nil {
return err
}
}

View File

@@ -20,7 +20,7 @@ import (
func TestService_newSlot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),

View File

@@ -78,6 +78,8 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
"mergeBlockParentTotalDifficulty": mergeBlockParentTD,
}).Info("Validated terminal block")
log.Info(mergeAsciiArt)
return nil
}

View File

@@ -109,7 +109,7 @@ func Test_validateMergeBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -152,7 +152,7 @@ func Test_validateMergeBlock(t *testing.T) {
func Test_getBlkParentHashAndTD(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),

View File

@@ -9,6 +9,7 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
@@ -28,7 +29,7 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New()),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
@@ -140,7 +141,7 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(doublylinkedtree.New(0, 0)),
WithForkChoiceStore(doublylinkedtree.New()),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
@@ -250,7 +251,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -278,7 +279,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -487,7 +488,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -522,7 +523,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -610,7 +611,9 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
_, err = service.cfg.ForkChoiceStore.Head(ctx, r32, []uint64{})
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r32}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc))
_, err = service.cfg.ForkChoiceStore.Head(ctx, []uint64{})
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)

View File

@@ -222,10 +222,12 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
// Update Forkchoice checkpoints
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(psj); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: psj.Epoch, Root: bytesutil.ToBytes32(psj.Root)}); err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(psf); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: psf.Epoch, Root: bytesutil.ToBytes32(psf.Root)}); err != nil {
return err
}
}
@@ -441,8 +443,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
JustifiedEpoch: jCheckpoints[i].Epoch,
FinalizedEpoch: fCheckpoints[i].Epoch}
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[len(blks)-i-1] = args
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
@@ -461,7 +463,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
// Prune forkchoice store only if the new finalized checkpoint is higher
// than the finalized checkpoint in forkchoice store.
if fCheckpoints[len(blks)-1].Epoch > s.cfg.ForkChoiceStore.FinalizedEpoch() {
if fCheckpoints[len(blks)-1].Epoch > s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch {
if err := s.cfg.ForkChoiceStore.Prune(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(fCheckpoints[len(blks)-1].Root))); err != nil {
return errors.Wrap(err, "could not prune fork choice nodes")
}
@@ -543,7 +545,8 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fCheckpoint); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: fCheckpoint.Epoch, Root: bytesutil.ToBytes32(fCheckpoint.Root)}); err != nil {
return err
}
}
@@ -656,10 +659,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
// meaning the block `b` is part of the canonical chain.
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
if !features.Get().CorrectlyPruneCanonicalAtts {
return nil
}
canonical, err := s.IsCanonical(ctx, r)
if err != nil {
return err

View File

@@ -218,7 +218,8 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
// Update forkchoice's justified checkpoint
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(cpt); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: cpt.Epoch, Root: bytesutil.ToBytes32(cpt.Root)}); err != nil {
return err
}
}
@@ -244,7 +245,8 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)
return s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(cp)
return s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: cp.Epoch, Root: bytesutil.ToBytes32(cp.Root)})
}
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
@@ -360,7 +362,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
JustifiedEpoch: jCheckpoint.Epoch, FinalizedEpoch: fCheckpoint.Epoch})
JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
// As long as parent node is not in fork choice store, and parent node is in DB.
root := bytesutil.ToBytes32(blk.ParentRoot())
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
@@ -373,8 +375,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
}
root = bytesutil.ToBytes32(b.Block().ParentRoot())
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
JustifiedEpoch: jCheckpoint.Epoch,
FinalizedEpoch: fCheckpoint.Epoch}
JustifiedCheckpoint: jCheckpoint,
FinalizedCheckpoint: fCheckpoint}
pendingNodes = append(pendingNodes, args)
}
if len(pendingNodes) == 1 {

View File

@@ -21,11 +21,11 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
@@ -47,7 +47,7 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -150,7 +150,7 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -253,7 +253,7 @@ func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
fcs := doublylinkedtree.New()
opts := []Option{
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
@@ -268,7 +268,7 @@ func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
SecondsIntoSlot: 0,
}
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, args))
_, err = service.cfg.ForkChoiceStore.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{})
_, err = service.cfg.ForkChoiceStore.Head(ctx, []uint64{})
require.ErrorContains(t, "could not apply proposer boost score: invalid proposer boost root", err)
}
@@ -293,7 +293,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New()
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
@@ -340,7 +340,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
require.NoError(t, err)
jroot := bytesutil.ToBytes32(jcp.Root)
require.Equal(t, blkRoots[63], jroot)
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedEpoch())
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
}
func TestStore_OnBlockBatch_PruneOK(t *testing.T) {
@@ -362,7 +362,7 @@ func TestStore_OnBlockBatch_PruneOK(t *testing.T) {
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New()
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
@@ -427,7 +427,7 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
@@ -474,7 +474,7 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
jroot := bytesutil.ToBytes32(jcp.Root)
require.Equal(t, blkRoots[63], jroot)
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedEpoch())
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
}
func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
@@ -496,7 +496,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
bState := st.Copy()
@@ -577,7 +577,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New()
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
@@ -610,7 +610,7 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
@@ -657,7 +657,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New()
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
@@ -694,7 +694,7 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
@@ -728,7 +728,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New()
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
@@ -759,7 +759,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -805,7 +805,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -851,7 +851,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -898,7 +898,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -947,7 +947,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -996,7 +996,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1063,7 +1063,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1131,7 +1131,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling_DoublyLinkedTree(t *testin
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1272,7 +1272,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -1363,7 +1363,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -1425,7 +1425,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -1570,7 +1570,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
func TestOnBlock_CanFinalize(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
@@ -1579,6 +1579,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -1626,7 +1627,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
func TestOnBlock_NilBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
@@ -1645,7 +1646,7 @@ func TestOnBlock_NilBlock(t *testing.T) {
func TestOnBlock_InvalidSignature(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
@@ -1686,7 +1687,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
@@ -1695,6 +1696,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -1817,11 +1819,6 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}
func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyPruneCanonicalAtts: true,
})
defer resetCfg()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
@@ -1843,11 +1840,6 @@ func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
}
func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyPruneCanonicalAtts: true,
})
defer resetCfg()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
@@ -1924,12 +1916,13 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -2052,7 +2045,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -2103,7 +2096,7 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
@@ -2112,6 +2105,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -2175,7 +2169,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New()
}
}
@@ -2183,7 +2177,7 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),

View File

@@ -71,11 +71,15 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
// Log block sync status.
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
return err
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data
if err := logPayload(blockCopy.Block()); err != nil {
log.WithError(err).Error("Unable to log debug block payload data")
}
// Log state transition data.
if err := logStateTransitionData(blockCopy.Block()); err != nil {
return err
log.WithError(err).Error("Unable to log state transition data")
}
return nil

View File

@@ -127,7 +127,7 @@ func TestService_ReceiveBlock(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New()),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
@@ -168,7 +168,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New()),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
@@ -248,7 +248,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}

View File

@@ -23,6 +23,7 @@ import (
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
@@ -208,11 +209,20 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
var forkChoicer f.ForkChoicer
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
if features.Get().EnableForkChoiceDoublyLinkedTree {
forkChoicer = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
forkChoicer = doublylinkedtree.New()
} else {
forkChoicer = protoarray.New(justified.Epoch, finalized.Epoch)
forkChoicer = protoarray.New()
}
s.cfg.ForkChoiceStore = forkChoicer
if err := forkChoicer.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
if err := forkChoicer.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint state")

View File

@@ -130,7 +130,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New()),
WithAttestationService(attService),
WithStateGen(stateGen),
}
@@ -505,7 +505,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
@@ -526,7 +526,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
@@ -599,7 +599,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
@@ -622,7 +622,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})

View File

@@ -62,6 +62,7 @@ type ChainService struct {
Genesis time.Time
ForkChoiceStore forkchoice.ForkChoicer
ReceiveBlockMockErr error
OptimisticCheckRootReceived [32]byte
}
// ForkChoicer mocks the same method in the chain service
@@ -447,7 +448,8 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
}
// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool, error) {
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
s.OptimisticCheckRootReceived = root
return s.Optimistic, nil
}
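A hedged sketch of how a test could rely on the newly recorded root (the `mock` package alias, `ctx`, and `t` are assumed; only the `Optimistic` field, the method signature, and `OptimisticCheckRootReceived` come from the mock above):

// Assumed test context; only the mock's fields and method shape are taken from the diff above.
chainSvc := &mock.ChainService{Optimistic: true}
optimistic, err := chainSvc.IsOptimisticForRoot(ctx, [32]byte{'r'})
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.Equal(t, [32]byte{'r'}, chainSvc.OptimisticCheckRootReceived) // the queried root is now captured for assertions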

View File

@@ -30,8 +30,8 @@ type WeakSubjectivityVerifier struct {
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
log.Info("No checkpoint for syncing provided, node will begin syncing from genesis. Checkpoint Sync is an optional feature that allows your node to sync from a more recent checkpoint, " +
"which enhances the security of your local beacon node and the broader network. See https://docs.prylabs.network/docs/next/prysm-usage/checkpoint-sync/ to learn how to configure Checkpoint Sync.")
log.Info("--weak-subjectivity-checkpoint not provided. Prysm recommends providing a weak subjectivity checkpoint" +
"for nodes synced from genesis, or manual verification of block and state roots for checkpoint sync nodes.")
return &WeakSubjectivityVerifier{
enabled: false,
}, nil

View File

@@ -24,6 +24,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/network"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -101,7 +102,7 @@ func (s *Service) Status(ctx context.Context) error {
// RegisterValidator registers a validator with the builder relay network.
// It also saves the registration object to the DB.
func (s *Service) RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error {
func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error {
ctx, span := trace.StartSpan(ctx, "builder.RegisterValidator")
defer span.End()
start := time.Now()
@@ -109,13 +110,25 @@ func (s *Service) RegisterValidator(ctx context.Context, reg *ethpb.SignedValida
registerValidatorLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
idx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(reg.Message.Pubkey))
if !exists {
return nil // If the pubkey is not found, it is not a validator. Do nothing.
idxs := make([]types.ValidatorIndex, 0)
msgs := make([]*ethpb.ValidatorRegistrationV1, 0)
valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
for i := 0; i < len(reg); i++ {
r := reg[i]
nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
if !exists {
// We want to allow validators to set up keys that haven't been added to the beacon state validator list yet,
// so we tolerate keys that do not appear valid by skipping past them.
log.Warnf("Skipping validator registration for pubkey=%#x - not in current validator set.", r.Message.Pubkey)
continue
}
idxs = append(idxs, nx)
msgs = append(msgs, r.Message)
valid = append(valid, r)
}
if err := s.c.RegisterValidator(ctx, reg); err != nil {
return errors.Wrap(err, "could not register validator")
if err := s.c.RegisterValidator(ctx, valid); err != nil {
return errors.Wrap(err, "could not register validator(s)")
}
return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{idx}, []*ethpb.ValidatorRegistrationV1{reg.Message})
return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
}
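A hedged caller-side sketch of the batched API (the `builderSvc` variable and the individual signed registrations are hypothetical; only the slice-taking signature comes from this change). Pubkeys not found in the head validator set are skipped with a warning instead of failing the whole batch:

// Hypothetical caller; RegisterValidator now accepts the whole batch at once.
regs := []*ethpb.SignedValidatorRegistrationV1{signedRegA, signedRegB}
if err := builderSvc.RegisterValidator(ctx, regs); err != nil {
	log.WithError(err).Error("Could not register validator registrations with the builder")
}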

View File

@@ -1,5 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,5 +1,4 @@
//go:build fuzz
// +build fuzz
package cache

View File

@@ -1,5 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,5 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,5 +1,4 @@
//go:build fuzz
// +build fuzz
// This file is used in fuzzer builds to bypass global committee caches.
package cache

View File

@@ -1,5 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,5 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,5 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,5 +1,4 @@
//go:build fuzz
// +build fuzz
// This file is used in fuzzer builds to bypass proposer indices caches.
package cache

View File

@@ -1,5 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,5 +1,4 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,5 +1,4 @@
//go:build fuzz
// +build fuzz
package cache

View File

@@ -39,6 +39,7 @@ go_test(
"attestation_test.go",
"justification_finalization_test.go",
"new_test.go",
"precompute_test.go",
"reward_penalty_test.go",
"slashing_test.go",
],

View File

@@ -149,14 +149,14 @@ func computeCheckpoints(state state.BeaconState, newBits bitfield.Bitvector4) (*
finalizedCheckpoint := state.FinalizedCheckpoint()
// If 2/3 or more of the total balance attested in the current epoch.
if newBits.BitAt(0) {
if newBits.BitAt(0) && currentEpoch >= justifiedCheckpoint.Epoch {
blockRoot, err := helpers.BlockRoot(state, currentEpoch)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get block root for current epoch %d", currentEpoch)
}
justifiedCheckpoint.Epoch = currentEpoch
justifiedCheckpoint.Root = blockRoot
} else if newBits.BitAt(1) {
} else if newBits.BitAt(1) && prevEpoch >= justifiedCheckpoint.Epoch {
// If 2/3 or more of total balance attested in the previous epoch.
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
if err != nil {

View File

@@ -250,3 +250,18 @@ func TestUnrealizedCheckpoints(t *testing.T) {
})
}
}
func Test_ComputeCheckpoints_CantUpdateToLower(t *testing.T) {
st, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Slot: params.BeaconConfig().SlotsPerEpoch * 2,
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{
Epoch: 2,
},
})
require.NoError(t, err)
jb := make(bitfield.Bitvector4, 1)
jb.SetBitAt(1, true)
cp, _, err := precompute.ComputeCheckpoints(st, jb)
require.NoError(t, err)
require.Equal(t, types.Epoch(2), cp.Epoch)
}
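The test above exercises the new guard directly. As a hedged, self-contained illustration of its effect (plain Go with assumed epoch values, not the production types):

package main

import "fmt"

func main() {
	justifiedEpoch := uint64(2) // checkpoint already justified at epoch 2
	prevEpoch := uint64(1)      // justification bit set for the previous epoch
	prevBitSet := true

	// Mirrors the added "prevEpoch >= justifiedCheckpoint.Epoch" condition:
	// the checkpoint only moves forward, never back to a lower epoch.
	if prevBitSet && prevEpoch >= justifiedEpoch {
		justifiedEpoch = prevEpoch
	}
	fmt.Println(justifiedEpoch) // prints 2
}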

View File

@@ -0,0 +1,3 @@
package precompute
var ComputeCheckpoints = computeCheckpoints

View File

@@ -76,6 +76,8 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
return 0, err
}
// The spec clamps the total to a minimum of `EffectiveBalanceIncrement` to avoid divisions by zero.
total = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, total)
if err := balanceCache.AddTotalEffectiveBalance(s, total); err != nil {
return 0, err
}
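A hedged numeric illustration of the clamp above: the mainnet `EffectiveBalanceIncrement` is 1,000,000,000 Gwei (assumed here only for the example), so even a state whose active effective balances sum to less than that returns the increment, and later divisions by the total can never divide by zero.

package main

import "fmt"

func main() {
	const effectiveBalanceIncrement = uint64(1_000_000_000) // assumed mainnet value, in Gwei
	total := uint64(1)                                      // tiny sum of active effective balances
	if total < effectiveBalanceIncrement {
		total = effectiveBalanceIncrement // same effect as mathutil.Max in the change above
	}
	fmt.Println(total) // 1000000000, never zero
}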

View File

@@ -74,6 +74,27 @@ func TestTotalActiveBalance(t *testing.T) {
}
}
func TestTotalActiveBal_ReturnMin(t *testing.T) {
tests := []struct {
vCount int
}{
{1},
{10},
{10000},
}
for _, test := range tests {
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
validators = append(validators, &ethpb.Validator{EffectiveBalance: 1, ExitEpoch: 1})
}
state, err := v1.InitializeFromProto(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, bal)
}
}
func TestTotalActiveBalance_WithCache(t *testing.T) {
tests := []struct {
vCount int

View File

@@ -211,7 +211,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
return nil, err
}
if cachedState != nil && !cachedState.IsNil() && cachedState.Slot() < slot {
if cachedState != nil && !cachedState.IsNil() && cachedState.Slot() <= slot {
highestSlot = cachedState.Slot()
state = cachedState
}
@@ -220,7 +220,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
if err != nil {
return nil, err
}
if cachedState != nil && !cachedState.IsNil() && cachedState.Slot() < slot {
if cachedState != nil && !cachedState.IsNil() && cachedState.Slot() <= slot {
highestSlot = cachedState.Slot()
state = cachedState
}
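A hedged sketch of why the `<` to `<=` change matters for the skip-slot cache: previously a cached state sitting exactly at the requested slot was rejected and the slots were replayed from an older state; now it is reused directly. The helper below is illustrative only.

package main

import "fmt"

// useCached stands in for the guard in ProcessSlots: a cached state is usable
// whenever it is at or before the requested slot.
func useCached(cachedSlot, targetSlot uint64) bool {
	return cachedSlot <= targetSlot // was: cachedSlot < targetSlot
}

func main() {
	fmt.Println(useCached(64, 64)) // true now: the exact-slot cache hit is no longer discarded
	fmt.Println(useCached(65, 64)) // false: states past the target are still never used
}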

View File

@@ -105,7 +105,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
}
}
datafile := KVStoreDatafilePath(dirPath)
start := time.Now()
log.Infof("Opening Bolt DB at %s", datafile)
boltDB, err := bolt.Open(
datafile,
@@ -116,40 +115,29 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
},
)
if err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to open Bolt DB")
if errors.Is(err, bolt.ErrTimeout) {
return nil, errors.New("cannot obtain database lock, database may be in use by another process")
}
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Opened Bolt DB")
boltDB.AllocSize = boltAllocSize
start = time.Now()
log.Infof("Creating block cache...")
blockCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: 1000, // number of keys to track frequency of (1000).
MaxCost: BlockCacheSize, // maximum cost of cache (1000 Blocks).
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to create block cache")
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Created block cache")
start = time.Now()
log.Infof("Creating validator cache...")
validatorCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: NumOfValidatorEntries, // number of entries in cache (2 Million).
MaxCost: ValidatorEntryMaxCost, // maximum size of the cache (64Mb)
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to to create validator cache")
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Created validator cache")
kv := &Store{
db: boltDB,
@@ -159,8 +147,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
stateSummaryCache: newStateSummaryCache(),
ctx: ctx,
}
start = time.Now()
log.Infof("Updating DB and creating buckets...")
if err := kv.db.Update(func(tx *bolt.Tx) error {
return createBuckets(
tx,
@@ -195,13 +181,9 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
registrationBucket,
)
}); err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to update db and create buckets")
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Updated db and created buckets")
err = prometheus.Register(createBoltCollector(kv.db))
return kv, err
}

View File

@@ -162,12 +162,27 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
if states == nil {
return errors.New("nil state")
}
validatorKeys, validatorsEntries, err := getValidators(states)
if err != nil {
return err
}
if err := s.db.Update(func(tx *bolt.Tx) error {
return s.saveStatesEfficientInternal(ctx, tx, blockRoots, states, validatorKeys, validatorsEntries)
}); err != nil {
return err
}
return nil
}
func getValidators(states []state.ReadOnlyBeaconState) ([][]byte, map[string]*ethpb.Validator, error) {
validatorsEntries := make(map[string]*ethpb.Validator) // It's a map to make sure that you store only new validator entries.
validatorKeys := make([][]byte, len(states)) // For every state, this stores a compressed list of validator keys.
for i, st := range states {
pb, ok := st.InnerStateUnsafe().(withValidators)
if !ok {
return errors.New("could not cast state to interface with GetValidators()")
return nil, nil, errors.New("could not cast state to interface with GetValidators()")
}
validators := pb.GetValidators()
@@ -177,7 +192,7 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
// create the unique hash for that validator entry.
hash, hashErr := val.HashTreeRoot()
if hashErr != nil {
return hashErr
return nil, nil, hashErr
}
hashes = append(hashes, hash[:]...)
@@ -187,117 +202,113 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
}
validatorKeys[i] = snappy.Encode(nil, hashes)
}
return validatorKeys, validatorsEntries, nil
}
if err := s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateBucket)
valIdxBkt := tx.Bucket(blockRootValidatorHashesBucket)
for i, rt := range blockRoots {
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
return errors.Wrap(err, "could not update DB indices")
}
// There is a gap when the states that are passed are used outside this
// thread. But while storing the state object, we should not store the
// validator entries. To bring the gap closer, we empty the validators
// just before Put() and repopulate that state with original validators.
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
switch rawType := states[i].InnerStateUnsafe().(type) {
case *ethpb.BeaconState:
var pbState *ethpb.BeaconState
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStatePhase0(rawType)
} else {
pbState, err = v1.ProtobufBeaconState(rawType)
}
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
encodedState, err := encode(ctx, pbState)
if err != nil {
return err
}
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateAltair:
var pbState *ethpb.BeaconStateAltair
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStateAltair(rawType)
} else {
pbState, err = v2.ProtobufBeaconState(rawType)
}
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
if err != nil {
return err
}
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateBellatrix:
var pbState *ethpb.BeaconStateBellatrix
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStateBellatrix(rawType)
} else {
pbState, err = v3.ProtobufBeaconState(rawType)
}
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
if err != nil {
return err
}
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
default:
return errors.New("invalid state type")
}
func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, blockRoots [][32]byte, states []state.ReadOnlyBeaconState, validatorKeys [][]byte, validatorsEntries map[string]*ethpb.Validator) error {
bucket := tx.Bucket(stateBucket)
valIdxBkt := tx.Bucket(blockRootValidatorHashesBucket)
for i, rt := range blockRoots {
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
return errors.Wrap(err, "could not update DB indices")
}
// store the validator entries separately to save space.
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
}); err != nil {
return err
// There is a gap when the states that are passed are used outside this
// thread. But while storing the state object, we should not store the
// validator entries. To bring the gap closer, we empty the validators
// just before Put() and repopulate that state with original validators.
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
switch rawType := states[i].InnerStateUnsafe().(type) {
case *ethpb.BeaconState:
var pbState *ethpb.BeaconState
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStatePhase0(rawType)
} else {
pbState, err = v1.ProtobufBeaconState(rawType)
}
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
encodedState, err := encode(ctx, pbState)
if err != nil {
return err
}
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateAltair:
var pbState *ethpb.BeaconStateAltair
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStateAltair(rawType)
} else {
pbState, err = v2.ProtobufBeaconState(rawType)
}
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
if err != nil {
return err
}
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateBellatrix:
var pbState *ethpb.BeaconStateBellatrix
var err error
if features.Get().EnableNativeState {
pbState, err = state_native.ProtobufBeaconStateBellatrix(rawType)
} else {
pbState, err = v3.ProtobufBeaconState(rawType)
}
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
if err != nil {
return err
}
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
default:
return errors.New("invalid state type")
}
}
return nil
// store the validator entries separately to save space.
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
}
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {

View File

@@ -4,6 +4,7 @@ import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -16,7 +17,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
ctx := context.Background()
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err := f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
@@ -46,7 +47,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2
// |
// 3 <- head
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head for with justified epoch at 0")
@@ -58,8 +59,8 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- head
// |
// 3
f.store.justifiedEpoch = 1
r, err = f.Head(context.Background(), indexToHash(2), balances)
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(2), Epoch: 1}
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
@@ -71,8 +72,8 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- start
// |
// 3 <- head
f.store.justifiedEpoch = 2
r, err = f.Head(context.Background(), indexToHash(3), balances)
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(3), Epoch: 2}
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head with justified epoch at 2")
}
@@ -82,7 +83,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err := f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
@@ -143,7 +144,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// 9 10 <-- head
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
@@ -173,7 +174,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// head -> 9 10
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head with justified epoch at 0")
@@ -203,19 +204,21 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// 9 10 <-- head
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
f.store.justifiedEpoch = 1
r, err = f.Head(context.Background(), indexToHash(1), balances)
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)}
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(7), r, "Incorrect head with justified epoch at 0")
}
func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
ctx := context.Background()
f := New(justifiedEpoch, finalizedEpoch)
f := New()
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: finalizedEpoch, Root: params.BeaconConfig().ZeroHash}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, params.BeaconConfig().ZeroHash, justifiedEpoch, finalizedEpoch)
if err != nil {
return nil

View File

@@ -13,22 +13,22 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// New initializes a new fork choice store.
func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
func New() *ForkChoice {
s := &Store{
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
}
b := make([]uint64, 0)
@@ -47,7 +47,6 @@ func (f *ForkChoice) NodeCount() int {
// It firsts computes validator's balance changes then recalculates block tree from leaves to root.
func (f *ForkChoice) Head(
ctx context.Context,
justifiedRoot [32]byte,
justifiedStateBalances []uint64,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Head")
@@ -73,10 +72,12 @@ func (f *ForkChoice) Head(
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
}
if err := f.store.treeRootNode.updateBestDescendant(ctx, f.store.justifiedEpoch, f.store.finalizedEpoch); err != nil {
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx, justifiedRoot)
return f.store.head(ctx)
}
// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
@@ -326,20 +327,24 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams
return node.setNodeAndParentValidated(ctx)
}
// JustifiedEpoch of fork choice store.
func (f *ForkChoice) JustifiedEpoch() types.Epoch {
return f.store.justifiedEpoch
// JustifiedCheckpoint of fork choice store.
func (f *ForkChoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.justifiedCheckpoint
}
// FinalizedEpoch of fork choice store.
func (f *ForkChoice) FinalizedEpoch() types.Epoch {
return f.store.finalizedEpoch
// FinalizedCheckpoint of fork choice store.
func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.finalizedCheckpoint
}
func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
func (f *ForkChoice) ForkChoiceNodes() []*ethpb.ForkChoiceNode {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
ret := make([]*pbrpc.ForkChoiceNode, len(f.store.nodeByRoot))
ret := make([]*ethpb.ForkChoiceNode, len(f.store.nodeByRoot))
return f.store.treeRootNode.rpcNodes(ret)
}
@@ -384,25 +389,25 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.Validator
}
}
// UpdateJustifiedCheckpoint sets the justified epoch to the given one
func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *pbrpc.Checkpoint) error {
// UpdateJustifiedCheckpoint sets the justified checkpoint to the given one
func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) error {
if jc == nil {
return errInvalidNilCheckpoint
}
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
f.store.justifiedEpoch = jc.Epoch
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.justifiedCheckpoint = jc
return nil
}
// UpdateFinalizedCheckpoint sets the finalized epoch to the given one
func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *pbrpc.Checkpoint) error {
// UpdateFinalizedCheckpoint sets the finalized checkpoint to the given one
func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) error {
if fc == nil {
return errInvalidNilCheckpoint
}
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
f.store.finalizedEpoch = fc.Epoch
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.finalizedCheckpoint = fc
return nil
}
@@ -473,7 +478,7 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
}
if err := f.store.insert(ctx,
b.Slot(), r, parentRoot, payloadHash,
chain[i].JustifiedEpoch, chain[i].FinalizedEpoch); err != nil {
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
return err
}
}
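Putting this file's changes together, a hedged usage sketch of the new constructor and checkpoint setters (the roots, epochs, `ctx`, and `balances` values are illustrative; the call shapes come from this diff): the store now starts with empty checkpoints and callers seed them explicitly before asking for the head.

// Illustrative only; assumes the doublylinkedtree and forkchoicetypes packages shown above.
fc := doublylinkedtree.New()
if err := fc.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: justifiedRoot}); err != nil {
	return err
}
if err := fc.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 0, Root: finalizedRoot}); err != nil {
	return err
}
head, err := fc.Head(ctx, balances) // Head no longer takes the justified root as an argument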

View File

@@ -211,7 +211,9 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
f.store.nodesLock.Unlock()
h, err := f.store.head(ctx, [32]byte{'1'})
r1 := [32]byte{'1'}
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: r1}
h, err := f.store.head(ctx)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'3'}, h)
require.DeepEqual(t, h, f.store.headNode.root)
@@ -296,7 +298,7 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
head, err := f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{})
head, err := f.Head(ctx, []uint64{})
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, head)
@@ -307,21 +309,21 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{})
head, err = f.Head(ctx, []uint64{})
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
// Insert two attestations for block b, one for c it becomes head
f.ProcessAttestation(ctx, []uint64{1, 2}, [32]byte{'b'}, 1)
f.ProcessAttestation(ctx, []uint64{3}, [32]byte{'c'}, 1)
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{100, 200, 200, 300})
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, head)
// Process b's slashing, c is now head
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{100, 200, 200, 300})
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
@@ -330,7 +332,7 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
// Process b's slashing again, should be a noop
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{100, 200, 200, 300})
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
@@ -352,12 +354,14 @@ func TestStore_UpdateCheckpoints(t *testing.T) {
f := setup(1, 1)
jr := [32]byte{'j'}
fr := [32]byte{'f'}
jc := &ethpb.Checkpoint{Root: jr[:], Epoch: 3}
fc := &ethpb.Checkpoint{Root: fr[:], Epoch: 2}
jc := &forkchoicetypes.Checkpoint{Root: jr, Epoch: 3}
fc := &forkchoicetypes.Checkpoint{Root: fr, Epoch: 2}
require.NoError(t, f.UpdateJustifiedCheckpoint(jc))
require.NoError(t, f.UpdateFinalizedCheckpoint(fc))
require.Equal(t, f.store.justifiedEpoch, jc.Epoch)
require.Equal(t, f.store.finalizedEpoch, fc.Epoch)
require.Equal(t, f.store.justifiedCheckpoint.Epoch, jc.Epoch)
require.Equal(t, f.store.justifiedCheckpoint.Root, jc.Root)
require.Equal(t, f.store.finalizedCheckpoint.Epoch, fc.Epoch)
require.Equal(t, f.store.finalizedCheckpoint.Root, fc.Root)
}
func TestStore_CommonAncestor(t *testing.T) {
@@ -562,8 +566,10 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(), JustifiedEpoch: 1,
FinalizedEpoch: 1})
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
for i := uint64(2); i < 11; i++ {
blk := util.NewBeaconBlock()
blk.Block.Slot = types.Slot(i)
@@ -571,8 +577,10 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
blk.Block.ParentRoot = copiedRoot[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(), JustifiedEpoch: 1,
FinalizedEpoch: 1})
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
root, err = blk.Block.HashTreeRoot()
require.NoError(t, err)
}

View File

@@ -15,7 +15,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
ctx := context.Background()
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err := f.Head(context.Background(), balances)
require.NoError(t, err)
if r != params.BeaconConfig().ZeroHash {
t.Errorf("Incorrect head with genesis")
@@ -28,7 +28,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -39,7 +39,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -52,7 +52,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -65,7 +65,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -80,7 +80,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -92,7 +92,8 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 4 3
// |
// 5 <- starting from 5 with justified epoch 0 should error
_, err = f.Head(context.Background(), indexToHash(5), balances)
f.store.justifiedCheckpoint.Root = indexToHash(5)
_, err = f.Head(context.Background(), balances)
wanted := "head at slot 0 with weight 0 is not eligible, finalizedEpoch 1 != 1, justifiedEpoch 2 != 1"
require.ErrorContains(t, wanted, err)
@@ -104,8 +105,8 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 4 3
// |
// 5 <- head
f.store.justifiedEpoch = 2
r, err = f.Head(context.Background(), indexToHash(5), balances)
f.store.justifiedCheckpoint.Epoch = 2
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")
@@ -122,7 +123,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")
}

View File

@@ -35,7 +35,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f := setup(jEpoch, fEpoch)
// The head should always start at the finalized block.
headRoot, err := f.Head(ctx, zeroHash, balances)
headRoot, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, headRoot, "Incorrect head with genesis")
@@ -58,7 +58,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 1")
@@ -82,7 +82,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 2")
@@ -108,7 +108,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
@@ -144,7 +144,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
@@ -186,7 +186,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
@@ -195,7 +195,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f := setup(jEpoch, fEpoch)
// The head should always start at the finalized block.
r, err := f.Head(ctx, zeroHash, balances)
r, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
@@ -220,7 +220,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
@@ -239,7 +239,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure the head is C, the honest block.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
@@ -260,7 +260,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f.ProcessAttestation(ctx, votes, honestBlock, fEpoch)
// Ensure the head is STILL C, the honest block, as the honest block had proposer boost.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
})
@@ -268,7 +268,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f := setup(jEpoch, fEpoch)
// The head should always start at the finalized block.
r, err := f.Head(ctx, zeroHash, balances)
r, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
@@ -295,7 +295,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure C is the head.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
@@ -314,7 +314,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure C is still the head after the malicious proposer reveals their block.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
@@ -333,7 +333,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, fEpoch)
// Expect the head to have switched to B.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, maliciouslyWithheldBlock, r, "Expected B to become the head")
})
@@ -355,7 +355,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
a := zeroHash
// The head should always start at the finalized block.
r, err := f.Head(ctx, zeroHash, balances)
r, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
@@ -374,7 +374,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure C is the head.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")
@@ -402,7 +402,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure C is still the head.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")
@@ -426,7 +426,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// D cannot win without a boost.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, c, r, "Expected C to remain the head")
@@ -442,7 +442,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.BoostProposerRoot(ctx, args))
// Ensure D becomes the head thanks to boosting.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, d, r, "Expected D to become the head")
})
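A minimal sketch of the calling convention these tests now exercise: Head no longer takes a justified root, so the justified checkpoint is recorded on the store first (here through the exported setter) and Head is asked only for the balances. setup, ctx, zeroHash, jEpoch, fEpoch and balances are the fixtures from this file; seeding the checkpoint explicitly is purely illustrative, since setup already does it.

// Sketch, not part of the diff: the justified root now lives in the store.
f := setup(jEpoch, fEpoch)
require.NoError(t, f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: jEpoch, Root: zeroHash}))
headRoot, err := f.Head(ctx, balances) // previously f.Head(ctx, zeroHash, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, headRoot, "head starts at the finalized block")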

View File

@@ -60,12 +60,12 @@ func (s *Store) PruneThreshold() uint64 {
// head starts from justified root and then follows the best descendant links
// to find the best block for head. This function assumes a lock on s.nodesLock
func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, error) {
func (s *Store) head(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
defer span.End()
// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[justifiedRoot]
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
if !ok || justifiedNode == nil {
return [32]byte{}, errUnknownJustifiedRoot
}
@@ -77,9 +77,9 @@ func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, err
bestDescendant = justifiedNode
}
if !bestDescendant.viableForHead(s.justifiedEpoch, s.finalizedEpoch) {
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, s.finalizedEpoch, bestDescendant.justifiedEpoch, s.justifiedEpoch)
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestDescendant.justifiedEpoch, s.justifiedCheckpoint.Epoch)
}
// Update metrics.
@@ -134,7 +134,8 @@ func (s *Store) insert(ctx context.Context,
}
} else {
parent.children = append(parent.children, n)
if err := s.treeRootNode.updateBestDescendant(ctx, s.justifiedEpoch, s.finalizedEpoch); err != nil {
if err := s.treeRootNode.updateBestDescendant(ctx,
s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch); err != nil {
return err
}
}
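For orientation, a compressed reconstruction of the routine changed above, as a sketch living inside the doublylinkedtree package. The parts of head() not shown in these hunks, such as the bestDescendant field on Node, are assumptions inferred from the fallback logic visible here.

// Rough sketch of the new lookup path; field names outside the hunks are assumed.
func headSketch(s *Store) ([32]byte, error) {
	// The justified root is read from the store's checkpoint instead of an argument.
	justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
	if !ok || justifiedNode == nil {
		return [32]byte{}, errUnknownJustifiedRoot
	}
	bestDescendant := justifiedNode.bestDescendant // assumed field, per the fallback above
	if bestDescendant == nil {
		bestDescendant = justifiedNode
	}
	// Viability is now checked against the store's checkpoint epochs.
	if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
		return [32]byte{}, fmt.Errorf("head at slot %d is not eligible", bestDescendant.slot)
	}
	return bestDescendant.root, nil
}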

View File

@@ -4,6 +4,7 @@ import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -22,13 +23,13 @@ func TestStore_PruneThreshold(t *testing.T) {
func TestStore_JustifiedEpoch(t *testing.T) {
j := types.Epoch(100)
f := setup(j, j)
require.Equal(t, j, f.JustifiedEpoch())
require.Equal(t, j, f.JustifiedCheckpoint().Epoch)
}
func TestStore_FinalizedEpoch(t *testing.T) {
j := types.Epoch(50)
f := setup(j, j)
require.Equal(t, j, f.FinalizedEpoch())
require.Equal(t, j, f.FinalizedCheckpoint().Epoch)
}
func TestStore_NodeCount(t *testing.T) {
@@ -78,7 +79,8 @@ func TestForkChoice_HasNode(t *testing.T) {
func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
f := setup(0, 0)
_, err := f.store.head(context.Background(), [32]byte{'a'})
f.store.justifiedCheckpoint.Root = [32]byte{'a'}
_, err := f.store.head(context.Background())
assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err)
}
@@ -90,7 +92,8 @@ func TestStore_Head_Itself(t *testing.T) {
// Since the justified node does not have a best descendant so the best node
// is itself.
h, err := f.store.head(context.Background(), indexToHash(1))
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: indexToHash(1)}
h, err := f.store.head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(1), h)
}
@@ -110,7 +113,8 @@ func TestStore_Head_BestDescendant(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
h, err := f.store.head(context.Background(), indexToHash(1))
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: indexToHash(1)}
h, err := f.store.head(context.Background())
require.NoError(t, err)
require.Equal(t, h, indexToHash(4))
}
@@ -133,7 +137,9 @@ func TestStore_Insert(t *testing.T) {
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload}
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc}
payloadHash := [32]byte{'a'}
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1))
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")

View File

@@ -3,6 +3,7 @@ package doublylinkedtree
import (
"sync"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)
@@ -17,8 +18,8 @@ type ForkChoice struct {
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
justifiedEpoch types.Epoch // latest justified epoch in store.
finalizedEpoch types.Epoch // latest finalized epoch in store.
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized checkpoint in store.
pruneThreshold uint64 // do not prune tree unless threshold is reached.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
@@ -30,6 +31,7 @@ type Store struct {
slashedIndices map[types.ValidatorIndex]bool // the list of equivocating validator indices
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
checkpointsLock sync.RWMutex
}
// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.
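The forkchoicetypes.Checkpoint used for both new fields is defined elsewhere (beacon-chain/forkchoice/types) and is not part of this diff; from its call sites here it is assumed to look roughly like the following, with a fixed-size root rather than ethpb.Checkpoint's byte slice.

// Assumed shape, inferred from usage such as &forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}.
package forkchoicetypes // hypothetical package name matching the import alias used in these files

import types "github.com/prysmaticlabs/prysm/consensus-types/primitives"

type Checkpoint struct {
	Epoch types.Epoch // justified or finalized epoch
	Root  [32]byte    // checkpoint block root, fixed size
}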

View File

@@ -44,11 +44,11 @@ func (f *ForkChoice) UpdateUnrealizedCheckpoints() {
for _, node := range f.store.nodeByRoot {
node.justifiedEpoch = node.unrealizedJustifiedEpoch
node.finalizedEpoch = node.unrealizedFinalizedEpoch
if node.justifiedEpoch > f.store.justifiedEpoch {
f.store.justifiedEpoch = node.justifiedEpoch
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
f.store.justifiedCheckpoint.Epoch = node.justifiedEpoch
}
if node.finalizedEpoch > f.store.finalizedEpoch {
f.store.finalizedEpoch = node.finalizedEpoch
if node.finalizedEpoch > f.store.finalizedCheckpoint.Epoch {
f.store.finalizedCheckpoint.Epoch = node.finalizedEpoch
}
}
}
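A usage note, drawn from the tests in the next file rather than stated here: nodes accumulate unrealized justified/finalized epochs as blocks arrive, and only a call to UpdateUnrealizedCheckpoints promotes them into the node fields and the store's checkpoints, typically at an epoch transition. Roughly:

// Sketch based on TestStore_ForkNextEpoch below.
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch) // not promoted yet

f.UpdateUnrealizedCheckpoints() // copy unrealized epochs into nodes and store checkpoints
require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)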

View File

@@ -4,9 +4,9 @@ import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -80,17 +80,17 @@ func TestStore_LongFork(t *testing.T) {
// Add an attestation to c, it is head
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'c'}, 1)
headRoot, err := f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err := f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
// D is head even though its weight is lower.
hr := [32]byte{'d'}
state, blkRoot, err = prepareForkchoiceState(ctx, 103, hr, [32]byte{'b'}, [32]byte{'D'}, 2, 1)
ha := [32]byte{'a'}
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'b'}, [32]byte{'D'}, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.UpdateJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 2, Root: hr[:]}))
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
require.NoError(t, f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}))
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
@@ -98,7 +98,7 @@ func TestStore_LongFork(t *testing.T) {
// Update unrealized justification, c becomes head
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
}
@@ -157,30 +157,31 @@ func TestStore_NoDeadLock(t *testing.T) {
// Epoch 3
// Current Head is H
headRoot, err := f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err := f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, types.Epoch(0), f.JustifiedEpoch())
require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch)
// Insert Block I, it becomes Head
hr := [32]byte{'i'}
state, blkRoot, err = prepareForkchoiceState(ctx, 108, hr, [32]byte{'f'}, [32]byte{'I'}, 1, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.UpdateJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: hr[:]}))
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
ha := [32]byte{'a'}
require.NoError(t, f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: ha}))
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'i'}, headRoot)
require.Equal(t, types.Epoch(1), f.JustifiedEpoch())
require.Equal(t, types.Epoch(0), f.FinalizedEpoch())
require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)
require.Equal(t, types.Epoch(0), f.FinalizedCheckpoint().Epoch)
// Realized Justified checkpoints, H becomes head
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, types.Epoch(2), f.JustifiedEpoch())
require.Equal(t, types.Epoch(1), f.FinalizedEpoch())
require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, types.Epoch(1), f.FinalizedCheckpoint().Epoch)
}
// Epoch 1 | Epoch 2
@@ -225,10 +226,10 @@ func TestStore_ForkNextEpoch(t *testing.T) {
// Insert an attestation to H, H is head
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'h'}, 1)
headRoot, err := f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err := f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, types.Epoch(0), f.JustifiedEpoch())
require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch)
// D arrives late, D is head
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
@@ -236,10 +237,10 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, types.Epoch(1), f.JustifiedEpoch())
require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
}
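A concrete before/after for the checkpoint-type migration these tests exercise, with hr a [32]byte root as in the old test code:

// Before: protobuf checkpoint, root passed as a byte slice.
//   require.NoError(t, f.UpdateJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: hr[:]}))
// After: fork choice's own checkpoint type, root as a fixed-size array.
require.NoError(t, f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: hr}))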

View File

@@ -4,6 +4,7 @@ import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -15,7 +16,7 @@ func TestVotes_CanFindHead(t *testing.T) {
ctx := context.Background()
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err := f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
@@ -27,7 +28,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -39,7 +40,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -48,7 +49,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 2 1 <- +vote, new head
f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(1), 2)
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(1), r, "Incorrect head for with justified epoch at 1")
@@ -57,7 +58,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// vote, new head -> 2 1
f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(2), 2)
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -71,7 +72,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -82,7 +83,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 3 <- new vote
f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(3), 3)
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -93,7 +94,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 3 <- head
f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(1), 3)
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head for with justified epoch at 1")
@@ -109,7 +110,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -127,7 +128,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -188,7 +189,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
@@ -209,9 +210,9 @@ func TestVotes_CanFindHead(t *testing.T) {
// 8
// |
// 9 <- head
f.store.justifiedEpoch = 2
f.store.finalizedEpoch = 2
r, err = f.Head(context.Background(), indexToHash(5), balances)
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(5), Epoch: 2}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(5), Epoch: 2}
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
@@ -237,7 +238,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
@@ -246,28 +247,28 @@ func TestVotes_CanFindHead(t *testing.T) {
// The new validators voted for 10.
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(10), 5)
// The new head should be 10.
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
// Set the balances of the last 2 validators to 0.
balances = []uint64{1, 1, 1, 0, 0}
// The head should be back to 9.
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
// Set the balances back to normal.
balances = []uint64{1, 1, 1, 1, 1}
// The head should be back to 10.
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
// Remove the last 2 validators.
balances = []uint64{1, 1, 1}
// The head should be back to 9.
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
@@ -276,7 +277,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 11, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
@@ -300,7 +301,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
@@ -318,7 +319,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 2")
}

View File

@@ -23,7 +23,7 @@ type ForkChoicer interface {
// HeadRetriever retrieves head root and optimistic info of the current chain.
type HeadRetriever interface {
Head(context.Context, [32]byte, []uint64) ([32]byte, error)
Head(context.Context, []uint64) ([32]byte, error)
Tips() ([][32]byte, []types.Slot)
IsOptimistic(root [32]byte) (bool, error)
}
@@ -59,8 +59,8 @@ type Getter interface {
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error)
CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
IsCanonical(root [32]byte) bool
FinalizedEpoch() types.Epoch
JustifiedEpoch() types.Epoch
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
ForkChoiceNodes() []*ethpb.ForkChoiceNode
NodeCount() int
}
@@ -69,6 +69,6 @@ type Getter interface {
type Setter interface {
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
UpdateJustifiedCheckpoint(*ethpb.Checkpoint) error
UpdateFinalizedCheckpoint(*ethpb.Checkpoint) error
UpdateJustifiedCheckpoint(*forkchoicetypes.Checkpoint) error
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
}
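A hypothetical consumer of the updated interfaces (not from the diff, and assuming ForkChoicer embeds the HeadRetriever and Setter interfaces shown here), gathering the new shapes in one place: Head takes only balances, and checkpoints flow through forkchoicetypes.Checkpoint rather than ethpb.Checkpoint.

// Sketch of a caller holding a ForkChoicer.
func updateAndGetHead(ctx context.Context, f ForkChoicer,
	jc, fc *forkchoicetypes.Checkpoint, balances []uint64) ([32]byte, error) {
	if err := f.UpdateJustifiedCheckpoint(jc); err != nil {
		return [32]byte{}, err
	}
	if err := f.UpdateFinalizedCheckpoint(fc); err != nil {
		return [32]byte{}, err
	}
	// The justified root is read from the store's checkpoint, not passed in.
	return f.Head(ctx, balances)
}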

View File

@@ -4,6 +4,7 @@ import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
"github.com/prysmaticlabs/prysm/config/params"
@@ -61,7 +62,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
ctx := context.Background()
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err := f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
@@ -91,7 +92,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2
// |
// 3 <- head
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head for with justified epoch at 0")
@@ -103,8 +104,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- head
// |
// 3
f.store.justifiedEpoch = 1
r, err = f.Head(context.Background(), indexToHash(2), balances)
jc := &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(2)}
f.store.justifiedCheckpoint = jc
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
@@ -116,8 +118,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- start
// |
// 3 <- head
f.store.justifiedEpoch = 2
r, err = f.Head(context.Background(), indexToHash(3), balances)
jc = &forkchoicetypes.Checkpoint{Epoch: 2, Root: indexToHash(3)}
f.store.justifiedCheckpoint = jc
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head with justified epoch at 2")
}
@@ -127,7 +130,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err := f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
@@ -188,7 +191,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// 9 10 <-- head
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
@@ -218,7 +221,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// head -> 9 10
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head with justified epoch at 0")
@@ -248,19 +251,22 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// 9 10 <-- head
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
f.store.justifiedEpoch = 1
r, err = f.Head(context.Background(), indexToHash(1), balances)
jc := &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)}
f.store.justifiedCheckpoint = jc
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(7), r, "Incorrect head with justified epoch at 0")
}
func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
f := New(justifiedEpoch, finalizedEpoch)
f := New()
f.store.nodesIndices[params.BeaconConfig().ZeroHash] = 0
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: finalizedEpoch, Root: params.BeaconConfig().ZeroHash}
f.store.nodes = append(f.store.nodes, &Node{
slot: 0,
root: params.BeaconConfig().ZeroHash,
@@ -271,6 +277,5 @@ func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
bestDescendant: NonExistentNode,
weight: 0,
})
return f
}
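Outside of tests, the same checkpoint seeding can be done through the exported API instead of reaching into the unexported store; an assumed but natural equivalent of the setup helper above (inserting the genesis node itself still needs InsertNode or direct store access):

// Assumed equivalent of setup()'s checkpoint seeding using only exported methods.
f := New()
zero := params.BeaconConfig().ZeroHash
require.NoError(t, f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: zero}))
require.NoError(t, f.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalizedEpoch, Root: zero}))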

View File

@@ -15,7 +15,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
ctx := context.Background()
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err := f.Head(context.Background(), balances)
require.NoError(t, err)
if r != params.BeaconConfig().ZeroHash {
t.Errorf("Incorrect head with genesis")
@@ -28,7 +28,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -39,7 +39,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -52,7 +52,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -65,7 +65,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -80,7 +80,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -92,7 +92,8 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 4 3
// |
// 5 <- starting from 5 with justified epoch 0 should error
_, err = f.Head(context.Background(), indexToHash(5), balances)
f.store.justifiedCheckpoint.Root = indexToHash(5)
_, err = f.Head(context.Background(), balances)
wanted := "head at slot 0 with weight 0 is not eligible, finalizedEpoch 1 != 1, justifiedEpoch 2 != 1"
require.ErrorContains(t, wanted, err)
@@ -104,8 +105,8 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 4 3
// |
// 5 <- head
f.store.justifiedEpoch = 2
r, err = f.Head(context.Background(), indexToHash(5), balances)
f.store.justifiedCheckpoint.Epoch = 2
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")
@@ -122,7 +123,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")
}

View File

@@ -35,7 +35,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f := setup(jEpoch, fEpoch)
// The head should always start at the finalized block.
headRoot, err := f.Head(ctx, zeroHash, balances)
headRoot, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, headRoot, "Incorrect head with genesis")
@@ -57,7 +57,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 1")
@@ -81,7 +81,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 2")
@@ -107,7 +107,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
@@ -142,7 +142,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
@@ -180,7 +180,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
})
@@ -188,7 +188,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f := setup(jEpoch, fEpoch)
// The head should always start at the finalized block.
r, err := f.Head(ctx, zeroHash, balances)
r, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
@@ -213,7 +213,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
@@ -232,7 +232,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure the head is C, the honest block.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
@@ -253,7 +253,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f.ProcessAttestation(ctx, votes, honestBlock, fEpoch)
// Ensure the head is STILL C, the honest block, as the honest block had proposer boost.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
})
@@ -261,7 +261,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f := setup(jEpoch, fEpoch)
// The head should always start at the finalized block.
r, err := f.Head(ctx, zeroHash, balances)
r, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
@@ -288,7 +288,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure C is the head.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
@@ -307,7 +307,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure C is still the head after the malicious proposer reveals their block.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
@@ -326,7 +326,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, fEpoch)
// Expect the head to have switched to B.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, maliciouslyWithheldBlock, r, "Expected B to become the head")
})
@@ -348,7 +348,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
a := zeroHash
// The head should always start at the finalized block.
r, err := f.Head(ctx, zeroHash, balances)
r, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
@@ -367,7 +367,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure C is the head.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")
@@ -395,7 +395,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Ensure C is still the head.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")
@@ -419,7 +419,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// D cannot win without a boost.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, c, r, "Expected C to remain the head")
@@ -435,7 +435,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.BoostProposerRoot(ctx, args))
// Ensure D becomes the head thanks to boosting.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, d, r, "Expected D to become the head")
})

View File

@@ -29,17 +29,17 @@ const defaultPruneThreshold = 256
var lastHeadRoot [32]byte
// New initializes a new fork choice store.
func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
func New() *ForkChoice {
s := &Store{
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
payloadIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
payloadIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
}
b := make([]uint64, 0)
@@ -49,11 +49,7 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
// Head returns the head root from fork choice store.
// It firsts computes validator's balance changes then recalculates block tree from leaves to root.
func (f *ForkChoice) Head(
ctx context.Context,
justifiedRoot [32]byte,
justifiedStateBalances []uint64,
) ([32]byte, error) {
func (f *ForkChoice) Head(ctx context.Context, justifiedStateBalances []uint64) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.Head")
defer span.End()
f.votesLock.Lock()
@@ -76,7 +72,7 @@ func (f *ForkChoice) Head(
}
f.balances = newBalances
return f.store.head(ctx, justifiedRoot)
return f.store.head(ctx)
}
// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
@@ -273,14 +269,18 @@ func (s *Store) PruneThreshold() uint64 {
return s.pruneThreshold
}
// JustifiedEpoch of fork choice store.
func (f *ForkChoice) JustifiedEpoch() types.Epoch {
return f.store.justifiedEpoch
// JustifiedCheckpoint of fork choice store.
func (f *ForkChoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.justifiedCheckpoint
}
// FinalizedEpoch of fork choice store.
func (f *ForkChoice) FinalizedEpoch() types.Epoch {
return f.store.finalizedEpoch
// FinalizedCheckpoint of fork choice store.
func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.finalizedCheckpoint
}
// proposerBoost of fork choice store.
@@ -291,20 +291,23 @@ func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
}
// head starts from justified root and then follows the best descendant links
// to find the best block for head.
func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, error) {
// to find the best block for head. It assumes the caller has a lock on nodes.
func (s *Store) head(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.head")
defer span.End()
// Justified index has to be valid in node indices map, and can not be out of bound.
justifiedIndex, ok := s.nodesIndices[justifiedRoot]
if s.justifiedCheckpoint == nil {
return [32]byte{}, errInvalidNilCheckpoint
}
justifiedIndex, ok := s.nodesIndices[s.justifiedCheckpoint.Root]
if !ok {
return [32]byte{}, errUnknownJustifiedRoot
}
if justifiedIndex >= uint64(len(s.nodes)) {
return [32]byte{}, errInvalidJustifiedIndex
}
justifiedNode := s.nodes[justifiedIndex]
bestDescendantIndex := justifiedNode.bestDescendant
// If the justified node doesn't have a best descendant,
@@ -315,12 +318,11 @@ func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, err
if bestDescendantIndex >= uint64(len(s.nodes)) {
return [32]byte{}, errInvalidBestDescendantIndex
}
bestNode := s.nodes[bestDescendantIndex]
if !s.viableForHead(bestNode) {
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedEpoch, bestNode.justifiedEpoch, s.justifiedEpoch)
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestNode.justifiedEpoch, s.justifiedCheckpoint.Epoch)
}
// Update metrics.
@@ -743,10 +745,12 @@ func (s *Store) leadsToViableHead(node *Node) (bool, error) {
// Any node with diff finalized or justified epoch than the ones in fork choice store
// should not be viable to head.
func (s *Store) viableForHead(node *Node) bool {
s.checkpointsLock.RLock()
defer s.checkpointsLock.RUnlock()
// `node` is viable if its justified epoch and finalized epoch are the same as the one in `Store`.
// It's also viable if we are in genesis epoch.
justified := s.justifiedEpoch == node.justifiedEpoch || s.justifiedEpoch == 0
finalized := s.finalizedEpoch == node.finalizedEpoch || s.finalizedEpoch == 0
justified := s.justifiedCheckpoint.Epoch == node.justifiedEpoch || s.justifiedCheckpoint.Epoch == 0
finalized := s.finalizedCheckpoint.Epoch == node.finalizedEpoch || s.finalizedCheckpoint.Epoch == 0
return justified && finalized
}
@@ -857,25 +861,25 @@ func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.Validat
}
}
// UpdateJustifiedCheckpoint sets the justified epoch to the given one
func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *pbrpc.Checkpoint) error {
// UpdateJustifiedCheckpoint sets the justified checkpoint to the given one
func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) error {
if jc == nil {
return errInvalidNilCheckpoint
}
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
f.store.justifiedEpoch = jc.Epoch
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.justifiedCheckpoint = jc
return nil
}
// UpdateFinalizedCheckpoint sets the finalized epoch to the given one
func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *pbrpc.Checkpoint) error {
// UpdateFinalizedCheckpoint sets the finalized checkpoint to the given one
func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) error {
if fc == nil {
return errInvalidNilCheckpoint
}
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
f.store.finalizedEpoch = fc.Epoch
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.finalizedCheckpoint = fc
return nil
}
@@ -895,7 +899,7 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
}
if err := f.store.insert(ctx,
b.Slot(), r, parentRoot, payloadHash,
chain[i].JustifiedEpoch, chain[i].FinalizedEpoch); err != nil {
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
return err
}
}
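Putting the protoarray changes together, a rough end-to-end sketch of the reworked API; justifiedRoot, finalizedRoot and balances are placeholders, not values from the diff.

f := New() // justified/finalized epochs are no longer constructor arguments
require.NoError(t, f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 3, Root: justifiedRoot}))
require.NoError(t, f.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: finalizedRoot}))
// Both setters now take checkpointsLock rather than nodesLock, so they no longer
// contend with tree mutations. Head reads the justified root from the store.
head, err := f.Head(ctx, balances)
require.NoError(t, err)
_ = head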

View File

@@ -28,13 +28,13 @@ func TestStore_PruneThreshold(t *testing.T) {
func TestStore_JustifiedEpoch(t *testing.T) {
j := types.Epoch(100)
f := setup(j, j)
require.Equal(t, j, f.JustifiedEpoch())
require.Equal(t, j, f.JustifiedCheckpoint().Epoch)
}
func TestStore_FinalizedEpoch(t *testing.T) {
j := types.Epoch(50)
f := setup(j, j)
require.Equal(t, j, f.FinalizedEpoch())
require.Equal(t, j, f.FinalizedCheckpoint().Epoch)
}
func TestForkChoice_HasNode(t *testing.T) {
@@ -51,8 +51,9 @@ func TestForkChoice_HasNode(t *testing.T) {
func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: [32]byte{'a'}}
_, err := s.head(context.Background(), [32]byte{})
_, err := s.head(context.Background())
assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err)
}
@@ -61,8 +62,9 @@ func TestStore_Head_UnknownJustifiedIndex(t *testing.T) {
indices := make(map[[32]byte]uint64)
indices[r] = 1
s := &Store{nodesIndices: indices}
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: r}
_, err := s.head(context.Background(), r)
_, err := s.head(context.Background())
assert.ErrorContains(t, errInvalidJustifiedIndex.Error(), err)
}
@@ -73,7 +75,9 @@ func TestStore_Head_Itself(t *testing.T) {
// Since the justified node does not have a best descendant so the best node
// is itself.
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, parent: NonExistentNode, bestDescendant: NonExistentNode}}, canonicalNodes: make(map[[32]byte]bool)}
h, err := s.head(context.Background(), r)
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: r}
s.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: r}
h, err := s.head(context.Background())
require.NoError(t, err)
assert.Equal(t, r, h)
}
@@ -86,7 +90,9 @@ func TestStore_Head_BestDescendant(t *testing.T) {
// Since the justified node's best descendant is at index 1, and its root is `best`,
// the head should be `best`.
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1, parent: NonExistentNode}, {root: best, parent: 0}}, canonicalNodes: make(map[[32]byte]bool)}
h, err := s.head(context.Background(), r)
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: r}
s.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: r}
h, err := s.head(context.Background())
require.NoError(t, err)
assert.Equal(t, best, h)
}
@@ -99,7 +105,9 @@ func TestStore_Head_ContextCancelled(t *testing.T) {
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, parent: NonExistentNode, bestDescendant: 1}, {root: best, parent: 0}}, canonicalNodes: make(map[[32]byte]bool)}
cancel()
_, err := s.head(ctx, r)
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: r}
s.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: r}
_, err := s.head(ctx)
require.ErrorContains(t, "context canceled", err)
}
@@ -123,6 +131,8 @@ func TestStore_Insert_KnownParent(t *testing.T) {
p := [32]byte{'B'}
s.nodesIndices[p] = 0
payloadHash := [32]byte{'c'}
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{}
s.finalizedCheckpoint = &forkchoicetypes.Checkpoint{}
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, p, payloadHash, 1, 1))
assert.Equal(t, 2, len(s.nodes), "Did not insert block")
assert.Equal(t, 2, len(s.nodesIndices), "Did not insert block")
@@ -150,6 +160,8 @@ func TestStore_ApplyScoreChanges_UpdateWeightsPositiveDelta(t *testing.T) {
// Each node gets one unique vote. The weight should look like 103 <- 102 <- 101 because
// they get propagated back.
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{}
s.finalizedCheckpoint = &forkchoicetypes.Checkpoint{}
require.NoError(t, s.applyWeightChanges(context.Background(), []uint64{}, []int{1, 1, 1}))
assert.Equal(t, uint64(103), s.nodes[0].weight)
assert.Equal(t, uint64(102), s.nodes[1].weight)
@@ -165,6 +177,8 @@ func TestStore_ApplyScoreChanges_UpdateWeightsNegativeDelta(t *testing.T) {
// Each node gets one unique vote which contributes to negative delta.
// The weight should look like 97 <- 98 <- 99 because they get propagated back.
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{}
s.finalizedCheckpoint = &forkchoicetypes.Checkpoint{}
require.NoError(t, s.applyWeightChanges(context.Background(), []uint64{}, []int{-1, -1, -1}))
assert.Equal(t, uint64(97), s.nodes[0].weight)
assert.Equal(t, uint64(98), s.nodes[1].weight)
@@ -179,6 +193,8 @@ func TestStore_ApplyScoreChanges_UpdateWeightsMixedDelta(t *testing.T) {
{parent: 1, root: [32]byte{'A'}, weight: 100}}}
// Each node gets one mixed vote. The weight should look like 100 <- 200 <- 250.
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{}
s.finalizedCheckpoint = &forkchoicetypes.Checkpoint{}
require.NoError(t, s.applyWeightChanges(context.Background(), []uint64{}, []int{-100, -50, 150}))
assert.Equal(t, uint64(100), s.nodes[0].weight)
assert.Equal(t, uint64(200), s.nodes[1].weight)
@@ -187,7 +203,9 @@ func TestStore_ApplyScoreChanges_UpdateWeightsMixedDelta(t *testing.T) {
func TestStore_UpdateBestChildAndDescendant_RemoveChild(t *testing.T) {
// Make parent's best child equal's to input child index and child is not viable.
s := &Store{nodes: []*Node{{bestChild: 1}, {}}, justifiedEpoch: 1, finalizedEpoch: 1}
jc := &forkchoicetypes.Checkpoint{Epoch: 1}
fc := &forkchoicetypes.Checkpoint{Epoch: 1}
s := &Store{nodes: []*Node{{bestChild: 1}, {}}, justifiedCheckpoint: jc, finalizedCheckpoint: fc}
require.NoError(t, s.updateBestChildAndDescendant(0, 1))
// Verify parent's best child and best descendant are `none`.
@@ -198,6 +216,8 @@ func TestStore_UpdateBestChildAndDescendant_RemoveChild(t *testing.T) {
func TestStore_UpdateBestChildAndDescendant_UpdateDescendant(t *testing.T) {
// Make parent's best child equal to child index and child is viable.
s := &Store{nodes: []*Node{{bestChild: 1}, {bestDescendant: NonExistentNode}}}
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{}
s.finalizedCheckpoint = &forkchoicetypes.Checkpoint{}
require.NoError(t, s.updateBestChildAndDescendant(0, 1))
// Verify parent's best child is the same and best descendant is not set to child index.
@@ -208,9 +228,11 @@ func TestStore_UpdateBestChildAndDescendant_UpdateDescendant(t *testing.T) {
func TestStore_UpdateBestChildAndDescendant_ChangeChildByViability(t *testing.T) {
// Make parent's best child not equal to child index, child leads to viable index and
// parent's best child doesn't lead to viable index.
jc := &forkchoicetypes.Checkpoint{Epoch: 1}
fc := &forkchoicetypes.Checkpoint{Epoch: 1}
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1}}}
@@ -224,9 +246,11 @@ func TestStore_UpdateBestChildAndDescendant_ChangeChildByViability(t *testing.T)
func TestStore_UpdateBestChildAndDescendant_ChangeChildByWeight(t *testing.T) {
// Make parent's best child not equal to child index, child leads to viable index and
// parents best child leads to viable index but child has more weight than parent's best child.
jc := &forkchoicetypes.Checkpoint{Epoch: 1}
fc := &forkchoicetypes.Checkpoint{Epoch: 1}
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1, weight: 1}}}
@@ -239,9 +263,11 @@ func TestStore_UpdateBestChildAndDescendant_ChangeChildByWeight(t *testing.T) {
func TestStore_UpdateBestChildAndDescendant_ChangeChildAtLeaf(t *testing.T) {
// Make parent's best child to none and input child leads to viable index.
jc := &forkchoicetypes.Checkpoint{Epoch: 1}
fc := &forkchoicetypes.Checkpoint{Epoch: 1}
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
nodes: []*Node{{bestChild: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1}}}
@@ -255,9 +281,11 @@ func TestStore_UpdateBestChildAndDescendant_ChangeChildAtLeaf(t *testing.T) {
func TestStore_UpdateBestChildAndDescendant_NoChangeByViability(t *testing.T) {
// Make parent's best child not equal to child index, child leads to not viable index and
// parents best child leads to viable index.
jc := &forkchoicetypes.Checkpoint{Epoch: 1}
fc := &forkchoicetypes.Checkpoint{Epoch: 1}
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode}}}
@@ -271,9 +299,11 @@ func TestStore_UpdateBestChildAndDescendant_NoChangeByViability(t *testing.T) {
func TestStore_UpdateBestChildAndDescendant_NoChangeByWeight(t *testing.T) {
// Make parent's best child not equal to child index, child leads to viable index and
// parents best child leads to viable index but parent's best child has more weight.
jc := &forkchoicetypes.Checkpoint{Epoch: 1}
fc := &forkchoicetypes.Checkpoint{Epoch: 1}
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1, weight: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1}}}
@@ -286,9 +316,11 @@ func TestStore_UpdateBestChildAndDescendant_NoChangeByWeight(t *testing.T) {
func TestStore_UpdateBestChildAndDescendant_NoChangeAtLeaf(t *testing.T) {
// Make parent's best child to none and input child does not lead to viable index.
jc := &forkchoicetypes.Checkpoint{Epoch: 1}
fc := &forkchoicetypes.Checkpoint{Epoch: 1}
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
nodes: []*Node{{bestChild: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode}}}
@@ -786,10 +818,12 @@ func TestStore_LeadsToViableHead(t *testing.T) {
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
jc := &forkchoicetypes.Checkpoint{Epoch: tc.justifiedEpoch}
fc := &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch}
s := &Store{
justifiedEpoch: tc.justifiedEpoch,
finalizedEpoch: tc.finalizedEpoch,
nodes: []*Node{tc.n},
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
nodes: []*Node{tc.n},
}
got, err := s.leadsToViableHead(tc.n)
require.NoError(t, err)
@@ -812,9 +846,11 @@ func TestStore_ViableForHead(t *testing.T) {
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
jc := &forkchoicetypes.Checkpoint{Epoch: tc.justifiedEpoch}
fc := &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch}
s := &Store{
justifiedEpoch: tc.justifiedEpoch,
finalizedEpoch: tc.finalizedEpoch,
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
}
assert.Equal(t, tc.want, s.viableForHead(tc.n))
}
@@ -984,7 +1020,7 @@ func TestStore_RemoveEquivocating(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
head, err := f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{})
head, err := f.Head(ctx, []uint64{})
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, head)
@@ -995,20 +1031,20 @@ func TestStore_RemoveEquivocating(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{})
head, err = f.Head(ctx, []uint64{})
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
// Insert two attestations for block b and one for block c; b becomes head
f.ProcessAttestation(ctx, []uint64{1, 2}, [32]byte{'b'}, 1)
f.ProcessAttestation(ctx, []uint64{3}, [32]byte{'c'}, 1)
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{100, 200, 200, 300})
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, head)
// Process b's slashing, c is now head
f.InsertSlashedIndex(ctx, 1)
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{100, 200, 200, 300})
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
require.Equal(t, uint64(200), f.store.nodes[2].weight)
@@ -1016,7 +1052,7 @@ func TestStore_RemoveEquivocating(t *testing.T) {
// Process the same slashing again, should be a noop
f.InsertSlashedIndex(ctx, 1)
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{100, 200, 200, 300})
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
require.Equal(t, uint64(200), f.store.nodes[2].weight)
@@ -1032,12 +1068,12 @@ func TestStore_UpdateCheckpoints(t *testing.T) {
f := setup(1, 1)
jr := [32]byte{'j'}
fr := [32]byte{'f'}
jc := &ethpb.Checkpoint{Root: jr[:], Epoch: 3}
fc := &ethpb.Checkpoint{Root: fr[:], Epoch: 2}
jc := &forkchoicetypes.Checkpoint{Root: jr, Epoch: 3}
fc := &forkchoicetypes.Checkpoint{Root: fr, Epoch: 2}
require.NoError(t, f.UpdateJustifiedCheckpoint(jc))
require.NoError(t, f.UpdateFinalizedCheckpoint(fc))
require.Equal(t, f.store.justifiedEpoch, jc.Epoch)
require.Equal(t, f.store.finalizedEpoch, fc.Epoch)
require.Equal(t, f.store.justifiedCheckpoint, jc)
require.Equal(t, f.store.finalizedCheckpoint, fc)
}
func TestStore_InsertOptimisticChain(t *testing.T) {
@@ -1051,8 +1087,10 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(), JustifiedEpoch: 1,
FinalizedEpoch: 1})
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
for i := uint64(2); i < 11; i++ {
blk := util.NewBeaconBlock()
blk.Block.Slot = types.Slot(i)
@@ -1060,8 +1098,10 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
blk.Block.ParentRoot = copiedRoot[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(), JustifiedEpoch: 1,
FinalizedEpoch: 1})
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
root, err = blk.Block.HashTreeRoot()
require.NoError(t, err)
}

View File

@@ -3,6 +3,7 @@ package protoarray
import (
"sync"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)
@@ -18,8 +19,8 @@ type ForkChoice struct {
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
pruneThreshold uint64 // do not prune tree unless threshold is reached.
justifiedEpoch types.Epoch // latest justified epoch in store.
finalizedEpoch types.Epoch // latest finalized epoch in store.
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized checkpoint in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
@@ -30,6 +31,7 @@ type Store struct {
slashedIndices map[types.ValidatorIndex]bool // The list of equivocating validators
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
checkpointsLock sync.RWMutex
}
// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.
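
The epoch fields above are replaced by full checkpoints guarded by the new checkpointsLock, so readers now get both the epoch and the root from one place. A minimal caller-side sketch, written as if inside package protoarray (the accessor names match the call sites updated later in this diff):

// Sketch only: callers that used to read the bare epoch fields now go through
// the checkpoint accessors, which also expose the checkpoint root.
func checkpointEpochs(f *ForkChoice) (justified, finalized types.Epoch) {
	return f.JustifiedCheckpoint().Epoch, f.FinalizedCheckpoint().Epoch
}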

View File

@@ -55,11 +55,11 @@ func (f *ForkChoice) UpdateUnrealizedCheckpoints() {
for _, node := range f.store.nodes {
node.justifiedEpoch = node.unrealizedJustifiedEpoch
node.finalizedEpoch = node.unrealizedFinalizedEpoch
if node.justifiedEpoch > f.store.justifiedEpoch {
f.store.justifiedEpoch = node.justifiedEpoch
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
f.store.justifiedCheckpoint.Epoch = node.justifiedEpoch
}
if node.finalizedEpoch > f.store.finalizedEpoch {
f.store.finalizedEpoch = node.finalizedEpoch
if node.finalizedEpoch > f.store.finalizedCheckpoint.Epoch {
f.store.finalizedCheckpoint.Epoch = node.finalizedEpoch
}
}
}
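
The rename does not change the promotion logic: each node's unrealized epochs become its realized ones, and the store's checkpoint epochs are raised to the highest value seen, while the checkpoint roots are left untouched here. A purely illustrative sketch with simplified stand-in types (not the package's real ones):

// promoteEpochs mirrors the loop above over a toy node type.
type toyNode struct{ justified, unrealizedJustified uint64 }

func promoteEpochs(nodes []*toyNode, storeJustified uint64) uint64 {
	for _, n := range nodes {
		n.justified = n.unrealizedJustified
		if n.justified > storeJustified {
			storeJustified = n.justified
		}
	}
	return storeJustified
}

// Example: promoteEpochs([]*toyNode{{0, 1}, {1, 2}}, 1) returns 2.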

View File

@@ -5,9 +5,9 @@ import (
"context"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -81,25 +81,25 @@ func TestStore_LongFork(t *testing.T) {
// Add an attestation to c, it is head
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'c'}, 1)
headRoot, err := f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err := f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
// D is head even though its weight is lower.
hr := [32]byte{'d'}
state, blkRoot, err = prepareForkchoiceState(ctx, 103, hr, [32]byte{'b'}, [32]byte{'D'}, 2, 1)
ha := [32]byte{'a'}
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'b'}, [32]byte{'D'}, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.UpdateJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 2, Root: hr[:]}))
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
require.NoError(t, f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}))
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, hr, headRoot)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, uint64(0), f.store.nodes[4].weight)
require.Equal(t, uint64(100), f.store.nodes[3].weight)
// Update unrealized justification, c becomes head
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
}
@@ -158,30 +158,31 @@ func TestStore_NoDeadLock(t *testing.T) {
// Epoch 3
// Current Head is H
headRoot, err := f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err := f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, types.Epoch(0), f.JustifiedEpoch())
require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch)
// Insert Block I, it becomes Head
hr := [32]byte{'i'}
state, blkRoot, err = prepareForkchoiceState(ctx, 108, hr, [32]byte{'f'}, [32]byte{'I'}, 1, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.UpdateJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: hr[:]}))
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
ha := [32]byte{'a'}
require.NoError(t, f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: ha}))
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, hr, headRoot)
require.Equal(t, types.Epoch(1), f.JustifiedEpoch())
require.Equal(t, types.Epoch(0), f.FinalizedEpoch())
require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)
require.Equal(t, types.Epoch(0), f.FinalizedCheckpoint().Epoch)
// Realized Justified checkpoints, H becomes head
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, types.Epoch(2), f.JustifiedEpoch())
require.Equal(t, types.Epoch(1), f.FinalizedEpoch())
require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, types.Epoch(1), f.FinalizedCheckpoint().Epoch)
}
// Epoch 1 | Epoch 2
@@ -226,10 +227,10 @@ func TestStore_ForkNextEpoch(t *testing.T) {
// Insert an attestation to H, H is head
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'h'}, 1)
headRoot, err := f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err := f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, types.Epoch(0), f.JustifiedEpoch())
require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch)
// D arrives late, D is head
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
@@ -237,10 +238,10 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, [32]byte{}, []uint64{100})
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, types.Epoch(1), f.JustifiedEpoch())
require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)
// nodes[8] = D since it's late!
require.Equal(t, uint64(0), f.store.nodes[8].weight)
require.Equal(t, uint64(100), f.store.nodes[7].weight)

View File

@@ -4,6 +4,7 @@ import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -15,7 +16,7 @@ func TestVotes_CanFindHead(t *testing.T) {
ctx := context.Background()
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err := f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
@@ -27,7 +28,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -39,7 +40,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -48,7 +49,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 2 1 <- +vote, new head
f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(1), 2)
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(1), r, "Incorrect head for with justified epoch at 1")
@@ -57,7 +58,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// vote, new head -> 2 1
f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(2), 2)
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -71,7 +72,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -82,7 +83,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 3 <- new vote
f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(3), 3)
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -93,7 +94,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 3 <- head
f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(1), 3)
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head for with justified epoch at 1")
@@ -109,7 +110,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -127,7 +128,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -188,7 +189,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
@@ -209,9 +210,11 @@ func TestVotes_CanFindHead(t *testing.T) {
// 8
// |
// 9 <- head
f.store.justifiedEpoch = 2
f.store.finalizedEpoch = 2
r, err = f.Head(context.Background(), indexToHash(5), balances)
jc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: indexToHash(5)}
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: indexToHash(5)}
f.store.justifiedCheckpoint = jc
f.store.finalizedCheckpoint = fc
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
@@ -237,7 +240,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
@@ -246,28 +249,28 @@ func TestVotes_CanFindHead(t *testing.T) {
// The new validators voted for 10.
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(10), 5)
// The new head should be 10.
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
// Set the balances of the last 2 validators to 0.
balances = []uint64{1, 1, 1, 0, 0}
// The head should be back to 9.
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
// Set the balances back to normal.
balances = []uint64{1, 1, 1, 1, 1}
// The head should be back to 10.
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
// Remove the last 2 validators.
balances = []uint64{1, 1, 1}
// The head should be back to 9.
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
@@ -276,7 +279,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 11, len(f.store.nodes), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
@@ -300,7 +303,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
@@ -318,7 +321,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), indexToHash(5), balances)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 2")
}
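
These test updates capture the Head signature change: the justified root is no longer passed on every call; Head reads it from the store's justified checkpoint, which tests now set explicitly. A hedged sketch of the new call pattern, written as if inside package protoarray with illustrative names:

// headAfterJustification shows the flow after the change: move the justified
// root into the store first, then call Head with only the balances.
func headAfterJustification(ctx context.Context, f *ForkChoice, root [32]byte, balances []uint64) ([32]byte, error) {
	if err := f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: root}); err != nil {
		return [32]byte{}, err
	}
	return f.Head(ctx, balances) // no justified-root argument anymore
}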

View File

@@ -6,7 +6,9 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)

View File

@@ -1,8 +1,10 @@
package types
import (
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// ProposerBoostRootArgs to call the BoostProposerRoot function.
@@ -13,9 +15,17 @@ type ProposerBoostRootArgs struct {
SecondsIntoSlot uint64
}
// Checkpoint is an array version of ethpb.Checkpoint. It is used internally in
// forkchoice, while the slice version is used in the interface to legacy code
// in other packages.
type Checkpoint struct {
Epoch types.Epoch
Root [fieldparams.RootLength]byte
}
// BlockAndCheckpoints to call the InsertOptimisticChain function
type BlockAndCheckpoints struct {
Block interfaces.BeaconBlock
JustifiedEpoch types.Epoch
FinalizedEpoch types.Epoch
Block interfaces.BeaconBlock
JustifiedCheckpoint *ethpb.Checkpoint
FinalizedCheckpoint *ethpb.Checkpoint
}
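
Because the internal checkpoint uses a fixed-size root while the protobuf checkpoint carries a byte slice, boundary code needs a small conversion in each direction. A hedged sketch, written as if inside this package (the helper name is made up for illustration):

// toInternalCheckpoint converts the slice-rooted ethpb.Checkpoint used at
// package boundaries into the array-rooted checkpoint used by forkchoice.
func toInternalCheckpoint(c *ethpb.Checkpoint) *Checkpoint {
	var root [fieldparams.RootLength]byte
	copy(root[:], c.Root)
	return &Checkpoint{Epoch: c.Epoch, Root: root}
}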

View File

@@ -348,9 +348,9 @@ func (b *BeaconNode) Close() {
func (b *BeaconNode) startForkChoice() {
if features.Get().EnableForkChoiceDoublyLinkedTree {
b.forkChoiceStore = doublylinkedtree.New(0, 0)
b.forkChoiceStore = doublylinkedtree.New()
} else {
b.forkChoiceStore = protoarray.New(0, 0)
b.forkChoiceStore = protoarray.New()
}
}

View File

@@ -1,5 +1,4 @@
//go:build go1.18
// +build go1.18
package p2p_test

View File

@@ -142,5 +142,7 @@ func (s *Service) logTtdStatus(ctx context.Context, ttd *uint256.Int) (bool, err
"latestDifficulty": latestTtd.String(),
"terminalDifficulty": ttd.ToBig().String(),
}).Info("terminal difficulty has not been reached yet")
totalTerminalDifficulty.Set(float64(latestTtd.Uint64()))
return false, nil
}

View File

@@ -6,6 +6,10 @@ import (
)
var (
totalTerminalDifficulty = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_terminal_difficulty",
Help: "The total terminal difficulty of the execution chain before merge",
})
newPayloadLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "new_payload_v1_latency_milliseconds",

View File

@@ -15,6 +15,7 @@ go_library(
"//api/gateway/apimiddleware:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/eth/v2:go_default_library",

View File

@@ -4,6 +4,7 @@ import (
"strings"
"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/helpers"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
)
@@ -198,7 +199,7 @@ type versionResponseJson struct {
// syncingResponseJson is used in /node/syncing API endpoint.
type syncingResponseJson struct {
Data *syncInfoJson `json:"data"`
Data *helpers.SyncDetailsJson `json:"data"`
}
// beaconStateResponseJson is used in /debug/beacon/states/{state_id} API endpoint.
@@ -775,12 +776,6 @@ type depositContractJson struct {
Address string `json:"address"`
}
type syncInfoJson struct {
HeadSlot string `json:"head_slot"`
SyncDistance string `json:"sync_distance"`
IsSyncing bool `json:"is_syncing"`
}
type attesterDutyJson struct {
Pubkey string `json:"pubkey" hex:"true"`
ValidatorIndex string `json:"validator_index"`
@@ -932,16 +927,10 @@ type singleIndexedVerificationFailureJson struct {
type nodeSyncDetailsErrorJson struct {
apimiddleware.DefaultErrorJson
SyncDetails syncDetails `json:"sync_details"`
SyncDetails helpers.SyncDetailsJson `json:"sync_details"`
}
type eventErrorJson struct {
StatusCode int `json:"status_code"`
Message string `json:"message"`
}
type syncDetails struct {
HeadSlot string `json:"head_slot"`
SyncDistance string `json:"sync_distance"`
IsSyncing bool `json:"is_syncing"`
}

View File

@@ -53,7 +53,7 @@ func (e *blockIdParseError) Error() string {
// GetWeakSubjectivity computes the starting epoch of the current weak subjectivity period, and then also
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*ethpbv1.WeakSubjectivityResponse, error) {
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
// This is already a grpc error, so we can't wrap it any further
return nil, err
}

View File

@@ -8,7 +8,7 @@ go_library(
"validator_status.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/helpers",
visibility = ["//beacon-chain/rpc/eth:__subpackages__"],
visibility = ["//visibility:public"],
deps = [
"//api/grpc:go_default_library",
"//beacon-chain/blockchain:go_default_library",

View File

@@ -34,15 +34,3 @@ type SingleIndexedVerificationFailure struct {
Index int `json:"index"`
Message string `json:"message"`
}
// SyncDetails contain details about sync status.
type SyncDetails struct {
HeadSlot string `json:"head_slot"`
SyncDistance string `json:"sync_distance"`
IsSyncing bool `json:"is_syncing"`
}
// SyncDetailsContainer is a wrapper for SyncDetails.
type SyncDetailsContainer struct {
SyncDetails *SyncDetails `json:"sync_details"`
}

View File

@@ -1,6 +1,7 @@
package helpers
import (
"bytes"
"context"
"strconv"
@@ -9,25 +10,45 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/config/params"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ValidateSync checks whether the node is currently syncing and returns an error if it is.
// It also appends syncing info to gRPC headers.
func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blockchain.HeadFetcher, timeFetcher blockchain.TimeFetcher) error {
func ValidateSync(
ctx context.Context,
syncChecker sync.Checker,
headFetcher blockchain.HeadFetcher,
timeFetcher blockchain.TimeFetcher,
optimisticModeFetcher blockchain.OptimisticModeFetcher,
) error {
if !syncChecker.Syncing() {
return nil
}
headSlot := headFetcher.HeadSlot()
syncDetailsContainer := &SyncDetailsContainer{
SyncDetails: &SyncDetails{
isOptimistic := false
headState, err := headFetcher.HeadState(ctx)
if err != nil {
return status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
isOptimistic, err = IsOptimistic(ctx, headState, optimisticModeFetcher)
if err != nil {
return status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
}
syncDetailsContainer := &syncDetailsContainer{
SyncDetails: &SyncDetailsJson{
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
IsSyncing: true,
IsOptimistic: isOptimistic,
},
}
err := grpc.AppendCustomErrorHeader(ctx, syncDetailsContainer)
err = grpc.AppendCustomErrorHeader(ctx, syncDetailsContainer)
if err != nil {
return status.Errorf(
codes.Internal,
@@ -39,20 +60,36 @@ func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blo
}
// IsOptimistic checks whether the latest block header of the passed in beacon state is the header of an optimistic block.
func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticSyncFetcher blockchain.OptimisticModeFetcher) (bool, error) {
root, err := st.HashTreeRoot(ctx)
if err != nil {
return false, errors.Wrap(err, "could not get state root")
}
func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticModeFetcher blockchain.OptimisticModeFetcher) (bool, error) {
header := st.LatestBlockHeader()
header.StateRoot = root[:]
// A zero state root means the block at the state's slot is present; the root only gets filled in during the next slot's processing.
if bytes.Equal(header.StateRoot, params.BeaconConfig().ZeroHash[:]) {
root, err := st.HashTreeRoot(ctx)
if err != nil {
return false, errors.Wrap(err, "could not get state root")
}
header.StateRoot = root[:]
}
headRoot, err := header.HashTreeRoot()
if err != nil {
return false, errors.Wrap(err, "could not get header root")
}
isOptimistic, err := optimisticSyncFetcher.IsOptimisticForRoot(ctx, headRoot)
isOptimistic, err := optimisticModeFetcher.IsOptimisticForRoot(ctx, headRoot)
if err != nil {
return false, errors.Wrap(err, "could not check if block is optimistic")
}
return isOptimistic, nil
}
// SyncDetailsJson contains information about node sync status.
type SyncDetailsJson struct {
HeadSlot string `json:"head_slot"`
SyncDistance string `json:"sync_distance"`
IsSyncing bool `json:"is_syncing"`
IsOptimistic bool `json:"is_optimistic"`
}
// syncDetailsContainer is a wrapper for SyncDetailsJson.
type syncDetailsContainer struct {
SyncDetails *SyncDetailsJson `json:"sync_details"`
}
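
With the optimistic flag added to SyncDetailsJson, the custom gRPC error header emitted while the node is syncing now carries is_optimistic alongside is_syncing. An example of the serialized payload (it matches the expectation in the updated ValidateSync test below):

// syncDetailsExample is the JSON body attached to the error header while syncing.
const syncDetailsExample = `{"sync_details":{"head_slot":"50","sync_distance":"50","is_syncing":true,"is_optimistic":false}}`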

View File

@@ -30,7 +30,7 @@ func TestValidateSync(t *testing.T) {
Slot: &headSlot,
State: st,
}
err = ValidateSync(ctx, syncChecker, chainService, chainService)
err = ValidateSync(ctx, syncChecker, chainService, chainService, chainService)
require.NotNil(t, err)
sts, ok := grpc.ServerTransportStreamFromContext(ctx).(*runtime.ServerTransportStream)
require.Equal(t, true, ok, "type assertion failed")
@@ -39,7 +39,7 @@ func TestValidateSync(t *testing.T) {
require.Equal(t, true, ok, "could not retrieve custom error metadata value")
assert.DeepEqual(
t,
[]string{"{\"sync_details\":{\"head_slot\":\"50\",\"sync_distance\":\"50\",\"is_syncing\":true}}"},
[]string{"{\"sync_details\":{\"head_slot\":\"50\",\"sync_distance\":\"50\",\"is_syncing\":true,\"is_optimistic\":false}}"},
v,
)
})
@@ -47,7 +47,15 @@ func TestValidateSync(t *testing.T) {
syncChecker := &syncmock.Sync{
IsSyncing: false,
}
err := ValidateSync(ctx, syncChecker, nil, nil)
headSlot := types.Slot(100)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(50))
chainService := &chainmock.ChainService{
Slot: &headSlot,
State: st,
}
err = ValidateSync(ctx, syncChecker, nil, nil, chainService)
require.NoError(t, err)
})
}
@@ -69,4 +77,29 @@ func TestIsOptimistic(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, false, o)
})
t.Run("zero state root", func(t *testing.T) {
zeroRootSt, err := util.NewBeaconState()
require.NoError(t, err)
h := zeroRootSt.LatestBlockHeader()
h.StateRoot = make([]byte, 32)
require.NoError(t, zeroRootSt.SetLatestBlockHeader(h))
mockOptSyncFetcher := &chainmock.ChainService{}
_, err = IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.DeepEqual(
t,
[32]byte{0xfc, 0x0, 0xe9, 0x6d, 0xb, 0x8b, 0x2, 0x2f, 0x61, 0xeb, 0x92, 0x10, 0xfd, 0x80, 0x84, 0x2b, 0x26, 0x61, 0xdc, 0x94, 0x5f, 0x7a, 0xf0, 0x0, 0xbc, 0x38, 0x6, 0x38, 0x71, 0x95, 0x43, 0x1},
mockOptSyncFetcher.OptimisticCheckRootReceived,
)
})
t.Run("non-zero state root", func(t *testing.T) {
mockOptSyncFetcher := &chainmock.ChainService{}
_, err = IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.DeepEqual(
t,
[32]byte{0xfc, 0x0, 0xe9, 0x6d, 0xb, 0x8b, 0x2, 0x2f, 0x61, 0xeb, 0x92, 0x10, 0xfd, 0x80, 0x84, 0x2b, 0x26, 0x61, 0xdc, 0x94, 0x5f, 0x7a, 0xf0, 0x0, 0xbc, 0x38, 0x6, 0x38, 0x71, 0x95, 0x43, 0x1},
mockOptSyncFetcher.OptimisticCheckRootReceived,
)
})
}

View File

@@ -15,6 +15,7 @@ go_library(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/sync:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/migration:go_default_library",

View File

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
rpchelpers "github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/helpers"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1"
"github.com/prysmaticlabs/prysm/proto/migration"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -266,11 +267,23 @@ func (ns *Server) GetSyncStatus(ctx context.Context, _ *emptypb.Empty) (*ethpb.S
defer span.End()
headSlot := ns.HeadFetcher.HeadSlot()
headState, err := ns.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
isOptimistic, err := rpchelpers.IsOptimistic(ctx, headState, ns.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
}
return &ethpb.SyncingResponse{
Data: &ethpb.SyncInfo{
HeadSlot: headSlot,
SyncDistance: ns.GenesisTimeFetcher.CurrentSlot() - headSlot,
IsSyncing: ns.SyncChecker.Syncing(),
IsOptimistic: isOptimistic,
},
}, nil
}

View File

@@ -167,20 +167,22 @@ func TestSyncStatus(t *testing.T) {
require.NoError(t, err)
err = state.SetSlot(100)
require.NoError(t, err)
chainService := &mock.ChainService{Slot: currentSlot, State: state}
chainService := &mock.ChainService{Slot: currentSlot, State: state, Optimistic: true}
syncChecker := &syncmock.Sync{}
syncChecker.IsSyncing = true
s := &Server{
HeadFetcher: chainService,
GenesisTimeFetcher: chainService,
SyncChecker: syncChecker,
HeadFetcher: chainService,
GenesisTimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: syncChecker,
}
resp, err := s.GetSyncStatus(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, types.Slot(100), resp.Data.HeadSlot)
assert.Equal(t, types.Slot(10), resp.Data.SyncDistance)
assert.Equal(t, true, resp.Data.IsSyncing)
assert.Equal(t, true, resp.Data.IsOptimistic)
}
func TestGetPeer(t *testing.T) {

View File

@@ -15,12 +15,13 @@ import (
// providing RPC endpoints for verifying a beacon node's sync status, genesis and
// version information.
type Server struct {
SyncChecker sync.Checker
Server *grpc.Server
BeaconDB db.ReadOnlyDatabase
PeersFetcher p2p.PeersProvider
PeerManager p2p.PeerManager
MetadataProvider p2p.MetadataProvider
GenesisTimeFetcher blockchain.TimeFetcher
HeadFetcher blockchain.HeadFetcher
SyncChecker sync.Checker
OptimisticModeFetcher blockchain.OptimisticModeFetcher
Server *grpc.Server
BeaconDB db.ReadOnlyDatabase
PeersFetcher p2p.PeersProvider
PeerManager p2p.PeerManager
MetadataProvider p2p.MetadataProvider
GenesisTimeFetcher blockchain.TimeFetcher
HeadFetcher blockchain.HeadFetcher
}

View File

@@ -42,7 +42,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
ctx, span := trace.StartSpan(ctx, "validator.GetAttesterDuties")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -126,7 +126,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
ctx, span := trace.StartSpan(ctx, "validator.GetProposerDuties")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -204,7 +204,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -274,7 +274,7 @@ func (vs *Server) ProduceBlock(ctx context.Context, req *ethpbv1.ProduceBlockReq
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlock")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -291,7 +291,7 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -355,7 +355,7 @@ func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlo
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -426,7 +426,7 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlock")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -493,7 +493,7 @@ func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.Produ
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlockSSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -685,7 +685,7 @@ func (vs *Server) SubmitBeaconCommitteeSubscription(ctx context.Context, req *et
ctx, span := trace.StartSpan(ctx, "validator.SubmitBeaconCommitteeSubscription")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -762,7 +762,7 @@ func (vs *Server) SubmitSyncCommitteeSubscription(ctx context.Context, req *ethp
ctx, span := trace.StartSpan(ctx, "validator.SubmitSyncCommitteeSubscription")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}

View File

@@ -249,13 +249,16 @@ func TestGetAttesterDuties(t *testing.T) {
}
func TestGetAttesterDuties_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.GetAttesterDuties(context.Background(), &ethpbv1.AttesterDutiesRequest{})
_, err = vs.GetAttesterDuties(context.Background(), &ethpbv1.AttesterDutiesRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
@@ -406,13 +409,16 @@ func TestGetProposerDuties(t *testing.T) {
}
func TestGetProposerDuties_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.GetProposerDuties(context.Background(), &ethpbv1.ProposerDutiesRequest{})
_, err = vs.GetProposerDuties(context.Background(), &ethpbv1.ProposerDutiesRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
@@ -631,14 +637,16 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
}
func TestGetSyncCommitteeDuties_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.GetSyncCommitteeDuties(context.Background(), &ethpbv2.SyncCommitteeDutiesRequest{})
_, err = vs.GetSyncCommitteeDuties(context.Background(), &ethpbv2.SyncCommitteeDutiesRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
@@ -751,13 +759,16 @@ func TestProduceBlock(t *testing.T) {
}
func TestProduceBlock_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.ProduceBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
@@ -1728,24 +1739,30 @@ func TestProduceBlockV2SSZ(t *testing.T) {
}
func TestProduceBlockV2_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.ProduceBlockV2(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlockV2(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
func TestProduceBlockV2SSZ_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.ProduceBlockV2SSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlockV2SSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
@@ -2717,24 +2734,30 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
}
func TestProduceBlindedBlock_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.ProduceBlindedBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlindedBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
func TestProduceBlindedBlockSSZ_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.ProduceBlindedBlockSSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlindedBlockSSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
@@ -3100,13 +3123,16 @@ func TestSubmitBeaconCommitteeSubscription(t *testing.T) {
}
func TestSubmitBeaconCommitteeSubscription_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.SubmitBeaconCommitteeSubscription(context.Background(), &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{})
_, err = vs.SubmitBeaconCommitteeSubscription(context.Background(), &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
@@ -3256,13 +3282,16 @@ func TestSubmitSyncCommitteeSubscription(t *testing.T) {
}
func TestSubmitSyncCommitteeSubscription_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.SubmitSyncCommitteeSubscription(context.Background(), &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{})
_, err = vs.SubmitSyncCommitteeSubscription(context.Background(), &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}

View File

@@ -393,6 +393,10 @@ func (bs *Server) chainHeadRetrieval(ctx context.Context) (*ethpb.ChainHead, err
if err != nil {
return nil, status.Error(codes.Internal, "Could not get head block")
}
optimisticStatus, err := bs.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get optimistic status")
}
if err := wrapper.BeaconBlockIsNil(headBlock); err != nil {
return nil, status.Errorf(codes.NotFound, "Head block of chain was nil: %v", err)
}
@@ -474,5 +478,6 @@ func (bs *Server) chainHeadRetrieval(ctx context.Context) (*ethpb.ChainHead, err
PreviousJustifiedSlot: pjSlot,
PreviousJustifiedEpoch: prevJustifiedCheckpoint.Epoch,
PreviousJustifiedBlockRoot: prevJustifiedCheckpoint.Root,
OptimisticStatus: optimisticStatus,
}, nil
}

View File

@@ -426,6 +426,7 @@ func TestServer_GetChainHead_NoGenesis(t *testing.T) {
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
}
_, err = bs.GetChainHead(context.Background(), nil)
require.ErrorContains(t, "Could not get genesis block", err)
@@ -461,6 +462,7 @@ func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) {
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
}
_, err = bs.GetChainHead(context.Background(), nil)
@@ -469,7 +471,8 @@ func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) {
func TestServer_GetChainHead_NoHeadBlock(t *testing.T) {
bs := &Server{
HeadFetcher: &chainMock.ChainService{Block: nil},
HeadFetcher: &chainMock.ChainService{Block: nil},
OptimisticModeFetcher: &chainMock.ChainService{},
}
_, err := bs.GetChainHead(context.Background(), nil)
assert.ErrorContains(t, "Head block of chain was nil", err)
@@ -531,8 +534,9 @@ func TestServer_GetChainHead(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bs := &Server{
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
OptimisticModeFetcher: &chainMock.ChainService{},
FinalizationFetcher: &chainMock.ChainService{
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
@@ -550,6 +554,7 @@ func TestServer_GetChainHead(t *testing.T) {
assert.DeepEqual(t, pjRoot[:], head.PreviousJustifiedBlockRoot, "Unexpected PreviousJustifiedBlockRoot")
assert.DeepEqual(t, jRoot[:], head.JustifiedBlockRoot, "Unexpected JustifiedBlockRoot")
assert.DeepEqual(t, fRoot[:], head.FinalizedBlockRoot, "Unexpected FinalizedBlockRoot")
assert.Equal(t, false, head.OptimisticStatus)
}
func TestServer_StreamChainHead_ContextCanceled(t *testing.T) {
@@ -645,6 +650,7 @@ func TestServer_StreamChainHead_OnHeadUpdated(t *testing.T) {
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)

View File

@@ -47,4 +47,5 @@ type Server struct {
SyncChecker sync.Checker
ReplayerBuilder stategen.ReplayerBuilder
HeadUpdater blockchain.HeadUpdater
OptimisticModeFetcher blockchain.OptimisticModeFetcher
}

View File

@@ -12,8 +12,8 @@ func (ds *Server) GetForkChoice(_ context.Context, _ *empty.Empty) (*pbrpc.ForkC
store := ds.ForkFetcher.ForkChoicer()
return &pbrpc.ForkChoiceResponse{
JustifiedEpoch: store.JustifiedEpoch(),
FinalizedEpoch: store.FinalizedEpoch(),
JustifiedEpoch: store.JustifiedCheckpoint().Epoch,
FinalizedEpoch: store.FinalizedCheckpoint().Epoch,
ForkchoiceNodes: store.ForkChoiceNodes(),
}, nil
}

View File

@@ -12,10 +12,10 @@ import (
)
func TestServer_GetForkChoice_ProtoArray(t *testing.T) {
store := protoarray.New(0, 0)
store := protoarray.New()
bs := &Server{ForkFetcher: &mock.ChainService{ForkChoiceStore: store}}
res, err := bs.GetForkChoice(context.Background(), &empty.Empty{})
require.NoError(t, err)
assert.Equal(t, store.JustifiedEpoch(), res.JustifiedEpoch, "Did not get wanted justified epoch")
assert.Equal(t, store.FinalizedEpoch(), res.FinalizedEpoch, "Did not get wanted finalized epoch")
assert.Equal(t, store.JustifiedCheckpoint().Epoch, res.JustifiedEpoch, "Did not get wanted justified epoch")
assert.Equal(t, store.FinalizedCheckpoint().Epoch, res.FinalizedEpoch, "Did not get wanted finalized epoch")
}

Some files were not shown because too many files have changed in this diff Show More