Compare commits

...

55 Commits

Author SHA1 Message Date
nisdas
1d6200a540 add change in 2023-05-16 16:32:02 +08:00
Potuz
ad749a40b6 Save to checkpoint cache if the nsc hits (#12398)
* Save to checkpoint cache if the nsc hits

Also move the head check before the checkpoint cache check

* add unit test
2023-05-13 09:54:33 -03:00
terencechain
9b13454457 Metrics: Invert too late and too early att received count (#12392)
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-12 19:45:43 +00:00
Potuz
b9917807d8 Ignore some untimely attestations (#12387)
* Ignore some untimely attestations

* correct child slot check

* consider tips as viable for checkpoints

* deal with canonical blocks

* forkchoice unit tests

* blockchain readonly beacon state

* Fix AttestationTargetState mock

* Fix ineffectual assignment lint

* Fix blockchain tests

* Fix build

* Add Nil check

* add comment on lock

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-12 16:55:33 +00:00
Raul Jordan
e5c9387cd9 Update Github Actions Go Version (#12391)
* update github actions

* use quotes or it is go 1.2 

lol

* Update gosec

* Update gosec

* Update go lint

* fix gosec violations

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-12 15:51:20 +00:00
Preston Van Loon
2c3b3b802a Revert "Add a new slot ticker and use it on attestation aggregation" (#12390)
This reverts commit f6764fe62b.
2023-05-12 14:49:37 +00:00
Simon
3ef3e1d13b fix-subnets-oom (#12388)
fix-subnets-oom, close iterator after using it
2023-05-12 07:52:17 -05:00
Nick Sullivan
5c00fcb84f Fix numerous spelling errors and typos in the log messages, comments, and documentation. (#12385)
* Minor typos and spelling fixes (comments, logs, & docs only, no code changes)

* Fix spelling in log message

* Additional spelling tweaks based on review from @prestonvanloon
2023-05-11 20:45:43 +00:00
Nishant Das
aef22bf54e Add Support For Builder in E2E (#12343)
* fix it up

* add gaz

* add changes in

* finally runs

* fix it

* add progress

* add capella support

* save progress

* remove debug logs

* cleanup

* remove log

* fix flag

* remove unused lock

* gaz

* change

* fix

* lint

* james review

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-05-11 11:10:29 -05:00
Potuz
f6764fe62b Add a new slot ticker and use it on attestation aggregation (#12377)
* Add slot ticker with intervals

* add flags for aggregation duration

* misspelling

* hide flags

* fix flags and default durations

* lint

* wait for initial sync

* deep source

* add log

* Preston's review

* fix error message

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-10 12:48:51 +00:00
Preston Van Loon
07db0dc448 CI: Add support for buildbuddy uploads (#12378)
* Add build metadata

* Add buildbuddy flags

* more metadata

* fix latest tag

* fix branch

* revert branch change

* touch a file to trigger build

* remove unknown command

* fix script

* Update latest_version_tag.sh
2023-05-10 12:30:40 +00:00
Preston Van Loon
4b4e213a24 stategen: Pre-populate bls pubkey cache as part of stategen's Resume function (#11482)
* Pre-populate bls pubkey cache as part of state gen's Resume function. This adds some helpers and a benchmark to blst

* Do it async

* fix missing import

* lint

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: rauljordan <raul@prysmaticlabs.com>
2023-05-10 10:44:15 +00:00
kasey
7d9f36985e Fix initialization race (#12374)
* block all the sync startup code on init signal

* don't need chainStarted if everything blocks

* set empty clock by default to work around panics

* remove unused clock, zero-value for init-sync

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-05-10 04:09:15 +00:00
james-prysm
98f8ca4e34 reverting version check on bid header (#12376)
* adding reverting change

* adding additional tests and checks

* removing comment

* updating based on review suggestions

* fixing deepsource issues

* attempting to resolve dependencies

* removing dependency

* accidentally flipped if statement with deepsource fix

* fixing unit test
2023-05-10 08:45:09 +08:00
james-prysm
535b38395e migrating code from PR#12343 (#12371) 2023-05-08 09:33:26 -05:00
terencechain
4bd7e8aefd WeiToGwei copy input big.int (#12370) 2023-05-07 15:15:08 +00:00
Raul Jordan
6f383f272a Do Not Panic on Broadcasting Nil Object (#12369) 2023-05-07 05:00:30 +00:00
terencechain
dbb8279fe6 Ran update-go-pbs.sh (#12359)
* Ran update-go-pbs.sh

* Fix import
2023-05-05 17:44:51 +00:00
Nishant Das
b6625554dd fix build (#12358) 2023-05-05 08:32:56 -05:00
Radosław Kapka
bd833e1c12 Use v1alpha1 server in block production (#12336)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-05-04 19:52:41 +02:00
kasey
918129cf36 Replace statefeed Initialize (#12285)
* refactor initialization to blocking startup method

* require genesisSetter in blockchain, fix tests

* work-around gazelle weirdness

* fix dep gazelle ignores

* only call SetGenesis once

* fix typo

* validator test setup and fix to return right error

* move waitForChainStart to Start

* wire up sync Service.genesisWaiter

* fix p2p genesisWaiter plumbing

* remove extra clock type, integrate into genesis

and rename

* use time.Now when no Nower is specified

* remove unused ClockSetter

* simplify rpc context checking

* fix typo

* use clock everywhere in sync; [32]byte val root

* don't use DeepEqual to compare [32]byte and []byte

* don't use clock in init sync, not wired up yet

* use clock waiter in blockchain as well

* use cancelable contexts in tests with goroutines

* missed a reference to WithClockSetter

* Update beacon-chain/startup/genesis.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/blockchain/service_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* more clear docs

* doc for NewClock

* move clock typedef to more logical file name

* adding documentation

* gaz

* fixes for capella

* reducing test raciness

* fix races in committee cache tests

* lint

* add tests on Duration slot math helper

* startup package test coverage

* fix bad merge

* set non-zero genesis time in tests that call Start

* happy deepsource, happy me-epsource

* replace Synced event with channel

* remove unused error

* remove accidental wip commit

* gaz!

* remove unused event constants

* remove sync statefeed subscription to fix deadlock

* remove state notifier

* fix build

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: nisdas <nishdas93@gmail.com>
2023-05-03 04:34:01 +00:00
terencechain
5b8084b829 Proposer builds block in parallel. (Consensus vs Execution) (#12297)
* Proposer builds block in parallel. Consensus vs Execution

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Use feature flag

---------

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2023-05-01 14:37:26 +00:00
james-prysm
83416f31a5 Unregister validator - fix behind feature flag (#12316)
* adding changes to blocks

* trying out expiration

* adding implementation, have WIP for tests

* adding unit tests for cache

* fixing bazel complaints

* fix linting

* adding safe check for uint type

* changing approach to safety check

* adding cache to bazel to test fixing build

* reverting bazel change and adding flag to usage

* implementing interface on mock to fix build error

* fixing unit tests

* fixing unit test

* fixing unit tests

* fixing linting

* fixing more unit tests

* fixing produce blinded block tests

* Update beacon-chain/cache/registration.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* resolving review comments

* fixing cache

* Update beacon-chain/cache/registration.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/cache/registration.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* fixing time logic

* adding context to trace

* fix bazel lint

* fixing context dependency

* fix linting

* Update cmd/beacon-chain/flags/base.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* addressing review comments

* fixing deepsource issues

* improving the default settings

* fixing bazel

* removing irrelevant unit test

* updating name

---------

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2023-04-28 21:27:47 +00:00
Sammy Rosso
d382abe198 InsertFinalizedDeposits: return an error (#12342)
* InsertFinalizedDeposits: return an error

* Remove logging

* Radek' Review

---------

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-04-28 20:59:09 +00:00
terencechain
8481a3e6ec Beacon-api: save atts to pool (#12345)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-04-27 22:47:03 +02:00
james-prysm
f9f40975c5 Validator registration filter non active keys before submission (#12322)
* adding filter for validator registration

* adding new filter logic based on validator status

* make sure to check status each time

* WIP unit testing

* fixing unit tests

* adding ux improvement

* addressing nishant's comments

* cleanup for already slice error

---------

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-04-27 13:10:18 -05:00
Potuz
b56f3e70df correctly log the head change when it actually changes (#12324)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-04-27 17:13:58 +00:00
terencechain
ce0616b88e Add verify slot tolerance test (#12344)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-04-27 15:53:28 +00:00
Preston Van Loon
7dffee1c66 Update to go1.20 (#12333)
* Update to go1.20

* Update gohashtree

* fix build tags for bytesutil and add nilness exclusion

* more broad nilness exclusion

* Reset nogo config

* Update golang.org/x/tools

* update rules_go to 0.39.0

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-04-27 15:41:57 +00:00
kasey
cec631d760 Fixing flaky validator tests (#12339)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-04-27 13:05:13 +00:00
kasey
ba9a744552 Keymanager mock cleanup (#12341)
* add callback for deterministic key fetch ordering

* provide a nicer way to control key ordering

* more concise test setup = less confusion

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-04-27 05:25:17 +00:00
kasey
f962d13407 Fix committee test race (#12338)
* fix races in committee cache tests

* lint

* gratuitous defer ClearCache if ClearCache

* log warning to avoid failed block processing

* gaz

* add Clear to cache stubs

* fix Clear mistakes

* last fake cache fix

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-04-27 00:50:04 +00:00
Radosław Kapka
34b6c8f333 Use mock v1alpha1 server in Beacon API's validator functions (#12319) 2023-04-26 18:59:29 +02:00
terencechain
73fea51398 Use next slot cache for payload attribute (#12286) 2023-04-25 16:13:24 -07:00
terencechain
fa7a2bd445 Move update next slot cache earlier (#12321)
* Move next slot cache earlier

* update next slot cache if it's head
2023-04-25 21:03:54 +00:00
Preston Van Loon
3894d60282 blockchain: add more spans to analyze block production timing (#12332)
* blockchain: add more spans to analyze block production timing

* There is a reason that we do not use the incoming context for forkchoice updates
2023-04-25 15:45:08 +00:00
Radosław Kapka
328e6fb074 Move some errors to separate consensus-types package (#12329)
* move `ErrNilObjectWrapped` to separate package

* build fix

* move not supported

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-04-25 15:07:24 +00:00
Radosław Kapka
9e3598334d Avoid double state copy in latestAncestor (#12326)
* Avoid double state copy in `latestAncestor`

* remove trailing newline

---------

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2023-04-25 14:12:56 +00:00
Radosław Kapka
ebccdfadb5 Remove unused argument from field index (#12327)
* Remove unused argument from field index

* bzl
2023-04-25 13:03:55 +00:00
Cyyber Ttechno
efdda168c5 Corrected err return for GenesisState while calling db.View (#12325) 2023-04-25 02:57:07 +00:00
terencechain
08d6eccfb3 Change native execution value to Gwei Uint64 (#12291)
* Default value to uint64

* Fix spectest

* Fix tests

* Fix tests

* Fix tests

* Fix test

* Fix test

* Fix build

* Potuz feedback

* Add test

* Fix test

* Fix test
2023-04-22 00:44:28 +00:00
Nishant Das
97a32e1b72 Fix Status Checks For Double Deposits (#12318)
* fix status

* fix test

---------

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2023-04-21 12:08:55 +00:00
Radosław Kapka
b7ef762701 Return errors from ProduceBlindedBlock (#12309)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-04-21 11:41:43 +02:00
dependabot[bot]
c9e333723e Bump k8s.io/client-go from 0.18.3 to 0.20.0 (#11972)
Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.18.3 to 0.20.0.
- [Release notes](https://github.com/kubernetes/client-go/releases)
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.18.3...v0.20.0)

---
updated-dependencies:
- dependency-name: k8s.io/client-go
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-04-20 17:41:04 +00:00
Potuz
300a827027 Use same signature for unrealized justified hash and justified hash (#12314)
* Use same signature for unrealized justified hash and justified hash

* fix build
2023-04-20 15:59:12 +00:00
Nishant Das
6374cc0118 Add Exceptions For Transaction Fuzzer (#12313)
* fix it

* comment
2023-04-20 14:42:51 +00:00
Preston Van Loon
9f886da1de db: Deduplicate saveCheckpoint functionality (#12304)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-04-20 12:13:21 +00:00
james-prysm
7e7a2a2959 improve the blinded block error (#12310) 2023-04-19 18:27:00 -05:00
james-prysm
b0bf402469 version check to blinded block (#12305) 2023-04-19 10:55:36 -05:00
james-prysm
a090751c8c removing phrasing on not being able to withdraw (#12306) 2023-04-19 11:46:27 +08:00
Justin Traglia
4c916403e9 Enable dupword linter & fix findings (#12271)
* Enable dupword linter & fix findings

* Correct an incorrect fix

* Add nolint comment

* Add another nolint comment

* Revert unintended change to bazel version

---------

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-04-18 22:01:27 +00:00
Justin Traglia
b6181f8d1a Enable nilerr linter & fix findings (#12270)
* Enable nilerr linter & fix findings

* Deal with other findings

* Fix another finding that I missed somehow

* Fix another issue

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update tests to expect error

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2023-04-18 20:53:16 +00:00
Preston Van Loon
00001c8628 builder: add more tracing data for builder code paths (#12302)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-04-18 19:20:42 +00:00
Justin Traglia
8428a79971 Enable whitespace linter & fix findings (#12273)
* Enable whitespace linter & fix findings

* Fix new finding

* fix new violation

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-04-18 18:58:27 +00:00
terencechain
03f63f294b Update spec test to v1.3 (#12300) 2023-04-18 16:10:55 +00:00
430 changed files with 8452 additions and 8523 deletions

View File

@@ -43,4 +43,12 @@ build --flaky_test_attempts=5
# Better caching
build:nostamp --nostamp
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh
# Build metadata
build --build_metadata=ROLE=CI
build --build_metadata=REPO_URL=https://github.com/prysmaticlabs/prysm.git
build --workspace_status_command=./hack/workspace_status_ci.sh
# Buildbuddy
build --bes_results_url=https://app.buildbuddy.io/invocation/
build --bes_backend=grpcs://remote.buildbuddy.io

View File

@@ -26,14 +26,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
- name: Set up Go 1.20
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: '1.20'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
@@ -43,16 +43,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
- name: Set up Go 1.20
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: '1.20'
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.50.1
version: v1.52.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
@@ -62,7 +62,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20'
id: go
- name: Check out code into the Go module directory

View File

@@ -17,6 +17,9 @@ linters:
- errcheck
- gosimple
- gocognit
- dupword
- nilerr
- whitespace
- misspell
linters-settings:

View File

@@ -86,10 +86,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "dd926a88a564a9246713a9c00b35315f54cbd46b31a26d5d8fb264c07045f05d",
sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
],
)
@@ -164,7 +164,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.19.8",
go_version = "1.20.3",
nogo = "@//:nogo",
)
@@ -205,7 +205,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.3.0-rc.5"
consensus_spec_version = "v1.3.0"
bls_test_version = "v0.1.1"
@@ -221,7 +221,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "266006512e71e62396e8f31be01639560c9d59a93c38220fd8f51fabefc8f5f3",
sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -237,7 +237,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2ebf483830165909cb7961562fd369dedf079997a4832cc215a543898a73aa46",
sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -253,7 +253,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "333718ba5c907e0a99580caa8d28dd710543b3b271e4251581006d0e101fbce9",
sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -268,7 +268,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "78b6925b5a4208e32385fa4387d2c27b381a8ddd18d66d5a7787e7846b86bfc8",
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -126,7 +126,7 @@ type WeakSubjectivityData struct {
}
// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
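For illustration, a minimal sketch of the inverse operation; parseCheckpointString is a hypothetical helper (not part of this diff), assuming the standard library imports encoding/hex, fmt, strconv, and strings:
// parseCheckpointString is a hypothetical inverse of CheckpointString:
// it splits "0x<root>:<epoch>" back into a 32-byte root and an epoch.
func parseCheckpointString(s string) ([32]byte, uint64, error) {
	var root [32]byte
	parts := strings.Split(s, ":")
	if len(parts) != 2 {
		return root, 0, fmt.Errorf("expected <root>:<epoch>, got %q", s)
	}
	b, err := hex.DecodeString(strings.TrimPrefix(parts[0], "0x"))
	if err != nil || len(b) != 32 {
		return root, 0, fmt.Errorf("invalid 32-byte hex root %q", parts[0])
	}
	copy(root[:], b)
	epoch, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return root, 0, fmt.Errorf("invalid epoch %q", parts[1])
	}
	return root, epoch, nil
}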

View File

@@ -11,10 +11,12 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",

View File

@@ -3,10 +3,13 @@ package builder
import (
"math/big"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -38,7 +41,7 @@ type signedBuilderBid struct {
func WrappedSignedBuilderBid(p *ethpb.SignedBuilderBid) (SignedBid, error) {
w := signedBuilderBid{p: p}
if w.IsNil() {
return nil, blocks.ErrNilObjectWrapped
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
@@ -71,7 +74,7 @@ type signedBuilderBidCapella struct {
func WrappedSignedBuilderBidCapella(p *ethpb.SignedBuilderBidCapella) (SignedBid, error) {
w := signedBuilderBidCapella{p: p}
if w.IsNil() {
return nil, blocks.ErrNilObjectWrapped
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
@@ -104,7 +107,7 @@ type builderBid struct {
func WrappedBuilderBid(p *ethpb.BuilderBid) (Bid, error) {
w := builderBid{p: p}
if w.IsNil() {
return nil, blocks.ErrNilObjectWrapped
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
@@ -152,16 +155,19 @@ type builderBidCapella struct {
func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
w := builderBidCapella{p: p}
if w.IsNil() {
return nil, blocks.ErrNilObjectWrapped
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
if b.p == nil {
return nil, errors.New("builder bid is nil")
}
// We have to convert big endian to little endian because the value is coming from the execution layer.
v := bytesutil.ReverseByteOrder(b.p.Value)
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, big.NewInt(0).SetBytes(v))
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
}
// Version --

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
@@ -246,7 +245,6 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
default:
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}
}
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
@@ -308,6 +306,9 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
@@ -338,11 +339,14 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
return blocks.WrappedExecutionPayloadCapella(p, 0)
default:
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}

View File

@@ -313,6 +313,26 @@ func TestSubmitBlindedBlock(t *testing.T) {
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)), // send a Capella payload
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, err = c.SubmitBlindedBlock(ctx, sbbb)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
require.NoError(t, err)

View File

@@ -346,6 +346,74 @@ func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
}, nil
}
// FromProto converts a proto execution payload type to our builder
// compatible payload type.
func FromProto(payload *v1.ExecutionPayload) (ExecutionPayload, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayload{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
return ExecutionPayload{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
}, nil
}
// FromProtoCapella converts a proto execution payload type for capella to our
// builder compatible payload type.
func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCapella, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayloadCapella{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
withdrawals := make([]Withdrawal, len(payload.Withdrawals))
for i, w := range payload.Withdrawals {
withdrawals[i] = Withdrawal{
Index: Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
Address: w.Address,
Amount: Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
}
}
return ExecutionPayloadCapella{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}
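A hedged usage sketch for the new converters; the payload literal is illustrative, v1 refers to the engine proto package, and the zeroed 32-byte BaseFeePerGas reflects the little-endian uint256 encoding the converter expects:
// Convert a proto payload into the builder's JSON-friendly type and
// encode it; FromProto fails if BaseFeePerGas cannot be decoded.
p := &v1.ExecutionPayload{
	BlockNumber:   1,
	GasLimit:      30_000_000,
	BaseFeePerGas: make([]byte, 32), // little-endian uint256, zero here
}
ep, err := builder.FromProto(p)
if err != nil {
	return err
}
out, err := json.Marshal(ep)
if err != nil {
	return err
}
fmt.Printf("%s\n", out)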
type ExecHeaderResponseCapella struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`

View File

@@ -14,7 +14,7 @@ type WorkerResults struct {
// Scatter scatters a computation across multiple goroutines.
// This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
// Results returned are collected and presented a a set of WorkerResults, which can be reassembled by the calling function.
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
// Any error that occurs in the workers will be passed back to the calling function.
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
if inputLen <= 0 {
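A hedged usage sketch, assuming WorkerResults carries each chunk's Offset and its Extent payload (field names as used elsewhere in prysm's async package); each worker squares its slice and the caller reassembles by offset:
input := []uint64{1, 2, 3, 4, 5, 6, 7, 8}
results, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
	out := make([]uint64, entries)
	for i := 0; i < entries; i++ {
		out[i] = input[offset+i] * input[offset+i] // square this chunk
	}
	return out, nil
})
if err != nil {
	return err
}
squared := make([]uint64, len(input))
for _, r := range results {
	copy(squared[r.Offset:], r.Extent.([]uint64)) // reassemble in order
}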

View File

@@ -58,11 +58,13 @@ go_library(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
@@ -118,6 +120,7 @@ go_test(
"receive_attestation_test.go",
"receive_block_test.go",
"service_test.go",
"setup_test.go",
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
@@ -167,6 +170,7 @@ go_test(
"mock_test.go",
"receive_block_test.go",
"service_norace_test.go",
"setup_test.go",
],
embed = [":go_default_library"],
gc_goopts = [

View File

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -85,6 +86,12 @@ type ForkFetcher interface {
TimeFetcher
}
// TemporalOracle is like ForkFetcher minus CurrentFork()
type TemporalOracle interface {
GenesisFetcher
TimeFetcher
}
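A hedged sketch of why the narrower interface is useful: a consumer that only needs the clock and genesis data can accept a TemporalOracle instead of the full ForkFetcher. GenesisValidatorsRoot, GenesisTime, and CurrentSlot are assumed to be the methods contributed by GenesisFetcher and TimeFetcher, and logChainClock itself is hypothetical:
// logChainClock depends on the minimal TemporalOracle rather than
// dragging in CurrentFork via ForkFetcher.
func logChainClock(c TemporalOracle) {
	root := c.GenesisValidatorsRoot()
	log.WithFields(logrus.Fields{
		"genesisTime":           c.GenesisTime(),
		"currentSlot":           c.CurrentSlot(),
		"genesisValidatorsRoot": fmt.Sprintf("%#x", root[:4]),
	}).Info("Chain clock info")
}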
// CanonicalFetcher retrieves the current chain's canonical information.
type CanonicalFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
@@ -96,7 +103,7 @@ type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
UnrealizedJustifiedPayloadBlockHash() ([32]byte, error)
UnrealizedJustifiedPayloadBlockHash() [32]byte
FinalizedBlockHash() [32]byte
InForkchoice([32]byte) bool
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
@@ -327,7 +334,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti
}
// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return false, nil
}
@@ -374,6 +381,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice.
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
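A hedged sketch of the intended call site (per the "Ignore some untimely attestations" commit above): an attestation whose target is not a viable checkpoint on any chain known to forkchoice can be dropped with ErrNotCheckpoint. The attestation handling around the call is illustrative:
tgt := &forkchoicetypes.Checkpoint{
	Epoch: att.Data.Target.Epoch,
	Root:  bytesutil.ToBytes32(att.Data.Target.Root),
}
ok, err := s.IsViableForCheckpoint(tgt)
if err != nil {
	return err
}
if !ok {
	return ErrNotCheckpoint // ignore the attestation; do not penalize the peer
}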
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {

View File

@@ -80,7 +80,7 @@ func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
}
// UnrealizedJustifiedPayloadBlockHash returns unrealized justified payload block hash from forkchoice.
func (s *Service) UnrealizedJustifiedPayloadBlockHash() ([32]byte, error) {
func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()

View File

@@ -10,7 +10,6 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -72,16 +71,8 @@ func TestHeadRoot_Nil(t *testing.T) {
}
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -97,16 +88,8 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
}
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
jroot := [32]byte{'j'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot}
@@ -120,16 +103,8 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
}
func TestFinalizedBlockHash(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
r := [32]byte{'f'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r}
@@ -153,8 +128,7 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
h, err := service.UnrealizedJustifiedPayloadBlockHash()
require.NoError(t, err)
h := service.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, [32]byte{'j'}, service.cfg.ForkChoiceStore.JustifiedCheckpoint().Root)
}
@@ -171,16 +145,9 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
}
func TestHeadRoot_CanRetrieve(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -190,16 +157,8 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
}
func TestHeadRoot_UseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
service.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()

View File

@@ -25,8 +25,11 @@ var (
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
)
// An invalid block is the block that fails state transition based on the core protocol rules.

View File

@@ -62,11 +62,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
return nil, nil
}
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash, err := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
if err != nil {
log.WithError(err).Error("Could not get unrealized justified payload block hash")
justifiedHash = finalizedHash
}
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
@@ -74,7 +70,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err != nil {
@@ -255,7 +251,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
// getPayloadAttribute returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
// Root is `[32]byte{}` since we are retrieving proposer ID of a given slot. During insertion at assignment the root was not known.
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
@@ -265,10 +261,13 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
// Get previous randao.
st = st.Copy()
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
if slot > st.Slot() {
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
}
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
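The guard above only advances the state when the target slot is ahead, and the lookup is keyed by the head root. A hedged sketch of that pattern; nextSlotCacheGet is hypothetical and transition.ProcessSlots is the slow-path fallback:
func advanceToSlotSketch(ctx context.Context, st state.BeaconState, headRoot []byte, slot primitives.Slot) (state.BeaconState, error) {
	if slot <= st.Slot() {
		return st, nil // already at or past the target slot
	}
	if cached := nextSlotCacheGet(headRoot); cached != nil && cached.Slot() == slot {
		return cached, nil // reuse the pre-computed next-slot state for this head
	}
	return transition.ProcessSlots(ctx, st, slot) // cache miss: full slot processing
}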

View File

@@ -9,14 +9,11 @@ import (
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
bstate "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -33,23 +30,16 @@ import (
)
func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -96,23 +86,15 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
}
func Test_NotifyForkchoiceUpdate(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -264,8 +246,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
@@ -297,12 +279,6 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)
// Insert blocks into forkchoice
service := setupBeaconChain(t, beaconDB)
fcs := doublylinkedtree.New()
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -358,8 +334,8 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
@@ -414,12 +390,6 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)
// Insert blocks into forkchoice
service := setupBeaconChain(t, beaconDB)
fcs := doublylinkedtree.New()
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -497,15 +467,9 @@ func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
@@ -536,8 +500,6 @@ func Test_NotifyNewPayload(t *testing.T) {
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
service.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
r, err := bellatrixBlk.Block().HashTreeRoot()
@@ -744,14 +706,10 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
@@ -764,8 +722,6 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
Header: gethtypes.Header{
@@ -788,19 +744,11 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
}
func Test_GetPayloadAttribute(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
@@ -809,7 +757,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
slot := primitives.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -819,31 +767,24 @@ func Test_GetPayloadAttribute(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}
func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
hook := logTest.NewGlobal()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, 0)
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -851,19 +792,11 @@ func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
}
func Test_GetPayloadAttributeV2(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateCapella(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
@@ -872,7 +805,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
slot := primitives.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -885,7 +818,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
@@ -897,18 +830,9 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fcs)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
var genesisStateRoot [32]byte
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
@@ -1013,16 +937,8 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
}
func TestService_removeInvalidBlockAndState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
// Deleting unknown block should not error.
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))
@@ -1066,18 +982,10 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
}
func TestService_getPayloadHash(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
_, err = service.getPayloadHash(ctx, []byte{})
_, err := service.getPayloadHash(ctx, []byte{})
require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)
b := util.NewBeaconBlock()

View File

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
func (s *Service) isNewProposer(slot primitives.Slot) bool {
@@ -49,21 +50,27 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) error {
// It returns true if the new head is updated.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) (bool, error) {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
isNewHead := s.isNewHead(newHeadRoot)
if !isNewHead {
return nil
return false, nil
}
isNewProposer := s.isNewProposer(proposingSlot)
if isNewProposer && !features.Get().DisableReorgLateBlocks {
if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return nil
return false, nil
}
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
return false, nil
}
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
@@ -72,7 +79,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
headBlock: headBlock.Block(),
})
if err != nil {
return errors.Wrap(err, "could not notify forkchoice update")
return false, errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
@@ -83,7 +90,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
if err := s.pruneAttsFromPool(headBlock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}
return nil
return true, nil
}
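For readers scanning the hunk: the boolean is false when the head did not change, when the proposer-boost override (shouldOverrideFCU) keeps the current head, or when the head state and block cannot be loaded; it is true only after notifyForkchoiceUpdate and saveHead both succeed. The UpdateHead changes later in this diff consume it to decide whether to log a head change.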
// shouldOverrideFCU checks whether the incoming block is still subject to being


@@ -8,8 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -76,7 +74,8 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1))
_, err = service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1)
require.NoError(t, err)
hookErr := "could not notify forkchoice update"
invalidStateErr := "could not get state summary: could not find block in DB"
require.LogsDoNotContain(t, hook, invalidStateErr)
@@ -84,7 +83,8 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1))
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1)
require.NoError(t, err)
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
@@ -108,7 +108,8 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()))
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot())
require.NoError(t, err)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
@@ -125,7 +126,8 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1))
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
@@ -135,28 +137,21 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1))
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1)
require.NoError(t, err)
require.Equal(t, service.headRoot(), headRoot)
}
func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -188,24 +183,17 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
service.head.block = sb
service.head.state = st
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1))
_, err = service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1)
require.NoError(t, err)
}
func TestShouldOverrideFCU(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
require.NoError(t, err)
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}


@@ -53,7 +53,7 @@ func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot prim
// HeadSyncCommitteeIndices returns the sync committee index position using the head state. Input `slot` is taken in consideration
// where validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the valiator will be considered using next period sync committee.
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the validator will be considered using next period sync committee.
//
// Spec definition:
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
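For example, with mainnet parameters (32 slots per epoch, EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256) a period spans 256 × 32 = 8192 slots: a query at slot 8192, the first slot of period 1, concerns the signature produced at slot 8191 for inclusion at slot 8192, so the next-period committee relative to slot 8191 is the one that applies.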


@@ -9,10 +9,8 @@ import (
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -581,18 +579,9 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
}
func TestUpdateHead_noSavedChanges(t *testing.T) {
ctx := context.Background()
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)


@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusBlocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -53,14 +53,13 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensusBlocks.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case err != nil:
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
}
log.Info("Finished applying state transition")
return nil


@@ -5,16 +5,19 @@ import (
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
)
func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
cs := startup.NewClockSynchronizer()
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithClockSynchronizer(cs),
}
}
@@ -22,5 +25,6 @@ func testServiceOptsWithDB(t *testing.T) []Option {
// in your code path. This is a lightweight way to satisfy the stategen/beacondb
// initialization requirements without the overhead of db init.
func testServiceOptsNoDB() []Option {
return []Option{}
cs := startup.NewClockSynchronizer()
return []Option{WithClockSynchronizer(cs)}
}


@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -163,3 +164,11 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return nil
}
}
func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
return func(s *Service) error {
s.clockSetter = gs
s.clockWaiter = gs
return nil
}
}
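Wiring the new option mirrors testServiceOptsWithDB earlier in this diff; a minimal sketch, assuming beaconDB and fcs are constructed as in those test helpers:

	cs := startup.NewClockSynchronizer()
	service, err := NewService(ctx,
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB, fcs)),
		WithForkChoiceStore(fcs),
		WithClockSynchronizer(cs), // sets both s.clockSetter and s.clockWaiter
	)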


@@ -1,17 +1,13 @@
package blockchain
import (
"context"
"fmt"
"math/big"
"testing"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -108,16 +104,8 @@ func Test_validateMergeBlock(t *testing.T) {
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -158,16 +146,8 @@ func Test_validateMergeBlock(t *testing.T) {
}
func Test_getBlkParentHashAndTD(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -239,14 +219,9 @@ func Test_validateTerminalBlockHash(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, ok)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
blk.SetSlot(1)


@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -18,7 +19,17 @@ import (
)
// getAttPreState retrieves the attestation pre-state either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
@@ -32,7 +43,36 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
if cachedState != nil && !cachedState.IsNil() {
return cachedState, nil
}
// Try the next slot cache for early epoch calls; this case should mostly have been covered already,
// but the check is cheap.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not compute epoch start")
}
cachedState = transition.NextSlotState(c.Root, slot)
if cachedState != nil && !cachedState.IsNil() {
if cachedState.Slot() != slot {
cachedState, err = transition.ProcessSlots(ctx, cachedState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}
}
if err := s.checkpointStateCache.AddCheckpointState(c, cachedState); err != nil {
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return cachedState, nil
}
// Do not process attestations for old, non-viable checkpoints.
ok, err := s.cfg.ForkChoiceStore.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: [32]byte(c.Root), Epoch: c.Epoch})
if err != nil {
return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
}
if !ok {
return nil, ErrNotCheckpoint
}
// Fallback to state regeneration.
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
@@ -55,7 +95,6 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return baseState, nil
}
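Condensed, the new lookup order in getAttPreState is as follows; a sketch reassembled from the hunks above, with the multilock, slot advancement, and error wrapping abbreviated to comments:

	func (s *Service) attPreStateOrderSketch(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
		// 1) Recent, canonical target: serve the head state read-only.
		if c.Epoch == slots.ToEpoch(s.HeadSlot()) && s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
			return s.HeadStateReadOnly(ctx)
		}
		// 2) Checkpoint-state cache.
		if st, err := s.checkpointStateCache.StateByCheckpoint(c); err == nil && st != nil && !st.IsNil() {
			return st, nil
		}
		// 3) Next-slot cache: transition.NextSlotState(c.Root, slot), advanced via
		//    ProcessSlots to the epoch start when needed, then stored in the cache.
		// 4) Viability gate: IsViableForCheckpoint, returning ErrNotCheckpoint otherwise.
		// 5) Fallback: full state regeneration.
		return s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
	}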
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.


@@ -6,9 +6,6 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -21,29 +18,29 @@ import (
)
func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err = blockTree1(t, beaconDB, []byte{'g'})
_, err := blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -54,7 +51,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 2
blkWithValidState.Block.Slot = 32
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)
blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
@@ -69,6 +66,10 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))
service.head = &head{
state: st,
}
tests := []struct {
name string
a *ethpb.Attestation
@@ -79,11 +80,6 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
@@ -128,17 +124,9 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t)
ctx := tr.ctx
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -158,15 +146,8 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
}
func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -187,6 +168,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s1, err := service.getAttPreState(ctx, cp1)
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
@@ -194,8 +178,17 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
s2, err := service.getAttPreState(ctx, cp2)
require.ErrorIs(t, ErrNotCheckpoint, err)
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s2, err = service.getAttPreState(ctx, cp2)
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
s1, err = service.getAttPreState(ctx, cp1)
@@ -214,26 +207,30 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s3, err := service.getAttPreState(ctx, cp3)
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
}
func TestStore_UpdateCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
epoch := primitives.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), fieldparams.RootLength)}
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -243,8 +240,16 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
epoch = 2
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), fieldparams.RootLength)}
blk = util.NewBeaconBlock()
blk.Block.Slot = 64
r2, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)
@@ -323,3 +328,22 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
}
func TestGetAttPreState_HeadState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
epoch := primitives.Epoch(1)
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
require.NoError(t, transition.UpdateNextSlotCache(ctx, checkpoint.Root, baseState))
_, err = service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
st, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().SlotsPerEpoch, st.Slot())
}


@@ -6,7 +6,6 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
@@ -142,6 +141,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
@@ -208,12 +208,24 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
} else {
// Updating next slot state cache can happen in the background. It shouldn't block the rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead derive a new one,
// with its own deadline, from the background context.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
// Verify conditions for FCU, notify FCU, and save the new head.
// This function also prunes attestations; other similar operations happen in prunePostBlockOperationPools.
if err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
if _, err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
return err
}
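The background update above is the standard detach-from-caller pattern; as a generic sketch (goWithDeadline is a hypothetical helper, not part of this diff):

	func goWithDeadline(d time.Duration, task func(context.Context) error) {
		go func() {
			// Derive a fresh deadline from Background(): the caller's context may
			// be canceled as soon as block processing returns.
			ctx, cancel := context.WithTimeout(context.Background(), d)
			defer cancel()
			if err := task(ctx); err != nil {
				log.WithError(err).Debug("background task failed")
			}
		}()
	}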
@@ -228,18 +240,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
},
})
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
// Save justified check point to db.
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
@@ -283,7 +283,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()
}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState); err != nil {
@@ -652,28 +651,21 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it calls forkchoice updated with the correct payload attributes and then caches the payload ID.
func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
func (s *Service) spawnLateBlockTasksLoop() {
go func() {
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(ctx)
s.lateBlockTasks(s.ctx)
case <-ctx.Done():
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
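Because this hunk interleaves removed and added lines, here is the new loop reassembled for readability; a best-effort reconstruction using only names that appear above:

	func (s *Service) spawnLateBlockTasksLoop() {
		go func() {
			// Block until genesis data is available instead of subscribing to the state feed.
			if _, err := s.clockWaiter.WaitForClock(s.ctx); err != nil {
				log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
				return
			}
			attThreshold := params.BeaconConfig().SecondsPerSlot / 3
			ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
			for {
				select {
				case <-ticker.C():
					s.lateBlockTasks(s.ctx)
				case <-s.ctx.Done():
					log.Debug("Context closed, exiting routine")
					return
				}
			}
		}()
	}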


@@ -64,7 +64,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
parentRoot := b.ParentRoot()
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// a subset of the block object.
// subset of the block object.
if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
return errors.New("could not reconstruct parent state")
}
@@ -230,7 +230,9 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
eth1DepositIndex -= 1
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex)); err != nil {
return err
}
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
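A quick worked note on the decrement above: the state's eth1 deposit index names the next deposit to be included, so a value of 10 means the last finalized deposit is 9; subtracting one lets both InsertFinalizedDeposits and PruneProofs operate on the last included index, and the insert's error is now propagated instead of silently dropped.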


@@ -12,9 +12,7 @@ import (
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
@@ -24,9 +22,7 @@ import (
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -46,18 +42,9 @@ import (
)
func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
@@ -152,17 +139,8 @@ func TestStore_OnBlock(t *testing.T) {
}
func TestStore_OnBlockBatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -185,7 +163,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
err := service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
@@ -196,17 +174,9 @@ func TestStore_OnBlockBatch(t *testing.T) {
}
func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t)
ctx := tr.ctx
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
@@ -227,22 +197,12 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
require.NoError(t, service.onBlockBatch(ctx, blks, blkRoots))
}
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -260,16 +220,8 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
}
func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -309,16 +261,8 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
}
func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -360,16 +304,8 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
}
func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -418,17 +354,8 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
}
func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -566,17 +493,8 @@ func TestAncestorByDB_CtxErr(t *testing.T) {
}
func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
beaconDB := tr.db
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -657,17 +575,8 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
}
func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -732,21 +641,8 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
}
func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -782,21 +678,8 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
}
func TestOnBlock_CanFinalize(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -830,39 +713,15 @@ func TestOnBlock_CanFinalize(t *testing.T) {
}
func TestOnBlock_NilBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
err = service.onBlock(ctx, nil, [32]byte{})
err := service.onBlock(tr.ctx, nil, [32]byte{})
require.Equal(t, true, IsInvalidBlock(err))
}
func TestOnBlock_InvalidSignature(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -885,21 +744,8 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -918,13 +764,8 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
}
func TestInsertFinalizedDeposits(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -952,13 +793,8 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -981,7 +817,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
// Insert 3 deposits beforehand.
depositCache.InsertFinalizedDeposits(ctx, 2)
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2))
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
@@ -1085,18 +921,8 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
cfg.TerminalBlockHash = params.BeaconConfig().ZeroHash
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
aHash := common.BytesToHash([]byte("a"))
bHash := common.BytesToHash([]byte("b"))
@@ -1223,17 +1049,8 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
}
func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
@@ -1274,21 +1091,8 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
}
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -1353,17 +1157,8 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
}
func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, _ := minimalTestService(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
blk := util.HydrateBeaconBlock(&ethpb.BeaconBlock{Slot: 1})
wb, err := consensusblocks.NewBeaconBlock(blk)
@@ -1386,22 +1181,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1546,22 +1328,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1707,22 +1476,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1915,27 +1671,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
newfc := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, newfc)
newfc.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(newStateGen),
WithForkChoiceStore(newfc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithAttestationService(attSrv),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
genesisState, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := genesisState.HashTreeRoot(ctx)
@@ -2084,18 +1822,8 @@ func TestNoViableHead_Reboot(t *testing.T) {
}
func TestOnBlock_HandleBlockAttestations(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -2147,26 +1875,16 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committe as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committe as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
}
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
logHook := logTest.NewGlobal()
fc := doublylinkedtree.New()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.lateBlockTasks(ctx)
service, tr := minimalTestService(t)
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}
@@ -2177,24 +1895,14 @@ func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) {
})
defer resetCfg()
fc := doublylinkedtree.New()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.lateBlockTasks(ctx)
service, tr := minimalTestService(t)
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}
// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(s *Service, slot int64, delay int64) {
func driftGenesisTime(s *Service, slot, delay int64) {
offset := slot*int64(params.BeaconConfig().SecondsPerSlot) - delay
s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0))
}


@@ -7,8 +7,6 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -28,7 +26,7 @@ const reorgLateBlockCountAttestations = 2 * time.Second
// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error)
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error)
}
// AttestationReceiver defines the methods the chain service uses to receive and process new attestations.
@@ -39,7 +37,7 @@ type AttestationReceiver interface {
}
// AttestationTargetState returns the pre-state of an attestation.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
ss, err := slots.EpochStart(target.Epoch)
if err != nil {
return nil, err
@@ -47,6 +45,9 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
// We acquire the lock here rather than in getAttPreState because that function is called from UpdateHead, which holds a write lock.
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.getAttPreState(ctx, target)
}
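A sketch of a consumer going through the narrowed interface (the call site is hypothetical; the interface and method are as defined above):

	var fetcher AttestationStateFetcher = s // *Service satisfies the interface
	st, err := fetcher.AttestationTargetState(ctx, att.Data.Target)
	if err != nil {
		return err
	}
	// st is a state.ReadOnlyBeaconState after this change: callers can read
	// committees and balances but can no longer mutate the returned state.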
@@ -67,20 +68,13 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
}
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
func (s *Service) spawnProcessAttestationsRoutine() {
go func() {
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
@@ -117,6 +111,9 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// This function acquires and releases the forkchoice store lock itself; callers must not hold it.
func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.UpdateHead")
defer span.End()
start := time.Now()
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
@@ -140,16 +137,17 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
s.headLock.RLock()
if s.headRoot() != newHeadRoot {
changed, err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot)
if err != nil {
log.WithError(err).Error("could not update forkchoice")
}
if changed {
s.headLock.RLock()
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.headLock.RUnlock()
if err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot); err != nil {
log.WithError(err).Error("could not update forkchoice")
s.headLock.RUnlock()
}
}
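
The lock comment in AttestationTargetState above encodes a general RWMutex discipline: the exported entry point takes the read lock itself, while the internal helper stays unlocked because it is also reached from UpdateHead, which already holds the write lock. A minimal, self-contained sketch of that discipline (illustrative names, not Prysm's API):

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu   sync.RWMutex
	head string
}

// getUnlocked assumes the caller already holds mu (read or write).
func (s *store) getUnlocked() string {
	return s.head
}

// Get is the public entry point and takes the read lock itself.
func (s *store) Get() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.getUnlocked()
}

// Update already holds the write lock, so it must call the unlocked helper
// directly; sync.RWMutex is not reentrant, and calling Get here would
// self-deadlock.
func (s *store) Update(head string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.getUnlocked() != head {
		s.head = head
	}
}

func main() {
	s := &store{head: "genesis"}
	s.Update("block-1")
	fmt.Println(s.Get())
}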


@@ -7,11 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -31,22 +27,18 @@ var (
func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
helpers.ClearCache()
beaconDB := testDB.SetupDB(t)
service, _ := minimalTestService(t)
chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()
service.genesisTime = time.Now()
e := primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := chainService.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
_, err := service.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -69,11 +61,8 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
}
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -96,13 +85,10 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
}
func TestProcessAttestations_Ok(t *testing.T) {
service, tr := minimalTestService(t)
hook := logTest.NewGlobal()
ctx := context.Background()
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()))
ctx := tr.ctx
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
@@ -126,21 +112,9 @@ func TestProcessAttestations_Ok(t *testing.T) {
}
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, fcs)
fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(newStateGen),
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fcs),
}
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -189,21 +163,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
}
func TestService_UpdateHead_NoAtts(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, fcs)
fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithStateGen(newStateGen),
WithForkChoiceStore(fcs),
}
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))


@@ -7,12 +7,7 @@ import (
"time"
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -125,22 +120,15 @@ func TestService_ReceiveBlock(t *testing.T) {
for _, tt := range tests {
wg.Add(1)
t.Run(tt.name, func(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s, tr := minimalTestService(t,
WithFinalizedStateAtStartUp(genesis),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
beaconDB := tr.db
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
WithFinalizedStateAtStartUp(genesis),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize it here.
_ = s.cfg.StateNotifier.StateFeed()
require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -162,25 +150,16 @@ func TestService_ReceiveBlock(t *testing.T) {
}
func TestService_ReceiveBlockUpdateHead(t *testing.T) {
ctx := context.Background()
s, tr := minimalTestService(t,
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
ctx, beaconDB := tr.ctx, tr.db
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize it here.
_ = s.cfg.StateNotifier.StateFeed()
require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -246,17 +225,8 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fc := doublylinkedtree.New()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
s, _ := minimalTestService(t, WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
err := s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
@@ -276,10 +246,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}
func TestService_HasBlock(t *testing.T) {
opts := testServiceOptsWithDB(t)
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s, _ := minimalTestService(t)
r := [32]byte{'a'}
if s.HasBlock(context.Background(), r) {
t.Error("Should not have block")
@@ -299,10 +266,8 @@ func TestService_HasBlock(t *testing.T) {
}
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s, _ := minimalTestService(t)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -312,9 +277,9 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s, _ := minimalTestService(t)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -326,9 +291,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s, _ := minimalTestService(t)
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -336,19 +299,8 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
}
func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
pool := blstoexec.NewPool()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&blockchainTesting.MockStateNotifier{}),
WithBLSToExecPool(pool),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
pool := tr.blsPool
t.Run("pre Capella block", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyBellatrix{}


@@ -27,6 +27,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -57,6 +58,8 @@ type Service struct {
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
}
// config options for the service.
@@ -83,6 +86,8 @@ type config struct {
ExecutionEngineCaller execution.EngineCaller
}
var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")
// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
@@ -100,6 +105,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
return nil, err
}
}
if srv.clockSetter == nil {
return nil, ErrMissingClockSetter
}
var err error
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
@@ -121,8 +129,8 @@ func (s *Service) Start() {
log.Fatal(err)
}
}
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed())
s.spawnProcessAttestationsRoutine()
s.spawnLateBlockTasksLoop()
}
// Stop the blockchain service's main event loop and associated goroutines.
@@ -236,13 +244,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
},
})
vr := bytesutil.ToBytes32(saved.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
return errors.Wrap(err, "failed to initialize blockchain service")
}
return nil
}
@@ -359,15 +364,10 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
// We send out a state initialized event to the rest of the services
// running in the beacon node.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(),
},
})
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
}
}
// initializes the state and genesis block of the beacon chain to persistent storage

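Both startup paths above now hand genesis data to the rest of the node through clockSetter.SetClock rather than broadcasting a statefeed.Initialized event. A rough sketch of how such a setter/waiter pair can work, assuming a one-shot channel underneath (the real startup package may differ in detail):

package main

import (
	"context"
	"fmt"
	"time"
)

// Clock carries the genesis data every service needs to start its slot ticker.
type Clock struct {
	genesisTime    time.Time
	validatorsRoot [32]byte
}

func (c *Clock) GenesisTime() time.Time          { return c.genesisTime }
func (c *Clock) GenesisValidatorsRoot() [32]byte { return c.validatorsRoot }

// ClockSynchronizer plays both roles: the blockchain service is the setter,
// every other service is a waiter.
type ClockSynchronizer struct {
	ready chan struct{}
	clock *Clock
}

func NewClockSynchronizer() *ClockSynchronizer {
	return &ClockSynchronizer{ready: make(chan struct{})}
}

// SetClock publishes the clock exactly once; a second call would panic on the
// double close, which suits a value that is only ever set at genesis.
func (cs *ClockSynchronizer) SetClock(c *Clock) error {
	cs.clock = c
	close(cs.ready)
	return nil
}

// WaitForClock blocks until SetClock runs or the context is cancelled.
func (cs *ClockSynchronizer) WaitForClock(ctx context.Context) (*Clock, error) {
	select {
	case <-cs.ready:
		return cs.clock, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	cs := NewClockSynchronizer()
	go func() {
		_ = cs.SetClock(&Clock{genesisTime: time.Unix(23, 0)})
	}()
	c, err := cs.WaitForClock(context.Background())
	fmt.Println(c.GenesisTime().Unix(), err)
}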

@@ -8,13 +8,9 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v4/async/event"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
@@ -25,7 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -40,45 +36,8 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)
type mockBeaconNode struct {
stateFeed *event.Feed
}
// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return mbn.stateFeed
}
type mockBroadcaster struct {
broadcastCalled bool
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
ctx := context.Background()
var web3Service *execution.Service
@@ -141,6 +100,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationService(attService),
WithStateGen(stateGen),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithClockSynchronizer(startup.NewClockSynchronizer()),
}
chainService, err := NewService(ctx, opts...)
@@ -157,12 +117,14 @@ func TestChainStartStop_Initialized(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -192,12 +154,14 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
@@ -264,12 +228,14 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -290,14 +256,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
}
func TestChainService_InitializeChainInfo(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -309,23 +270,18 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
WithStateNotifier(&mock.MockStateNotifier{}),
WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, c.StartFromSavedState(headState))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
@@ -345,14 +301,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
}
func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -364,27 +315,21 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB := tr.ctx, tr.db
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
ss := &ethpb.StateSummary{
Slot: finalizedSlot,
Root: headRoot[:],
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: headRoot[:], Epoch: slots.ToEpoch(finalizedSlot)}))
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
WithStateNotifier(&mock.MockStateNotifier{}),
WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.StartFromSavedState(headState))
s, err := c.HeadState(ctx)
@@ -460,17 +405,21 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
}
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
stateChannel := make(chan *feed.Event, 1)
stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
service.onExecutionChainStart(context.Background(), time.Now())
stateEvent := <-stateChannel
require.Equal(t, int(stateEvent.Type), statefeed.Initialized)
_, ok := stateEvent.Data.(*statefeed.InitializedData)
require.Equal(t, true, ok)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(context.Background(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
}
func BenchmarkHasBlockDB(b *testing.B) {
@@ -519,15 +468,10 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
EnableStartOptimistic: true,
})
defer resetFn()
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
@@ -538,21 +482,17 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
WithStateNotifier(&mock.MockStateNotifier{}),
WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
@@ -562,3 +502,19 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, op)
}
// MockClockSetter satisfies the ClockSetter interface for testing the conditions where blockchain.Service should
// call SetClock.
type MockClockSetter struct {
G *startup.Clock
Err error
}
var _ startup.ClockSetter = &MockClockSetter{}
// SetClock satisfies the ClockSetter interface.
// The value is written to an exported field 'G' so that it can be accessed in tests.
func (s *MockClockSetter) SetClock(g *startup.Clock) error {
s.G = g
return s.Err
}


@@ -0,0 +1,115 @@
package blockchain
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)
type mockBeaconNode struct {
stateFeed *event.Feed
}
// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return mbn.stateFeed
}
type mockBroadcaster struct {
broadcastCalled bool
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
type testServiceRequirements struct {
ctx context.Context
db db.Database
fcs forkchoice.ForkChoicer
sg *stategen.State
notif statefeed.Notifier
cs *startup.ClockSynchronizer
attPool attestations.Pool
attSrv *attestations.Service
blsPool *blstoexec.Pool
dc *depositcache.DepositCache
}
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
sg := stategen.New(beaconDB, fcs)
notif := &mockBeaconNode{}
fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
cs := startup.NewClockSynchronizer()
attPool := attestations.NewPool()
attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
require.NoError(t, err)
blsPool := blstoexec.NewPool()
dc, err := depositcache.New()
require.NoError(t, err)
req := &testServiceRequirements{
ctx: ctx,
db: beaconDB,
fcs: fcs,
sg: sg,
notif: notif,
cs: cs,
attPool: attPool,
attSrv: attSrv,
blsPool: blsPool,
dc: dc,
}
defOpts := []Option{WithDatabase(req.db),
WithStateNotifier(req.notif),
WithStateGen(req.sg),
WithForkChoiceStore(req.fcs),
WithClockSynchronizer(req.cs),
WithAttestationPool(req.attPool),
WithAttestationService(req.attSrv),
WithBLSToExecPool(req.blsPool),
WithDepositCache(dc),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)
s, err := NewService(req.ctx, opts...)
require.NoError(t, err)
return s, req
}
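
Because the defaults are prepended, a caller can override any single dependency without restating the rest. A hypothetical test sketch, assuming the blockchainTesting import alias used elsewhere in this package:

func TestMinimalServiceOverride(t *testing.T) {
	// Later options win, so this notifier replaces the default mockBeaconNode
	// while the default DB, forkchoice store, stategen and caches stay in place.
	s, tr := minimalTestService(t,
		WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
	)
	require.NotNil(t, s)
	require.NotNil(t, tr.fcs)
}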


@@ -320,7 +320,7 @@ func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestatio
}
// AttestationTargetState mocks AttestationTargetState method in chain service.
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
return s.State, nil
}
@@ -594,6 +594,6 @@ func (s *ChainService) FinalizedBlockHash() [32]byte {
}
// UnrealizedJustifiedPayloadBlockHash mocks the same method in the chain service
func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() ([32]byte, error) {
return [32]byte{}, nil
func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return [32]byte{}
}


@@ -12,11 +12,13 @@ go_library(
deps = [
"//api/client/builder:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",


@@ -3,6 +3,7 @@ package builder
import (
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/urfave/cli/v2"
@@ -50,3 +51,11 @@ func WithDatabase(beaconDB db.HeadAccessDatabase) Option {
return nil
}
}
// WithRegistrationCache uses a cache for the validator registrations instead of a persistent db.
func WithRegistrationCache() Option {
return func(s *Service) error {
s.registrationCache = cache.NewRegistrationCache()
return nil
}
}


@@ -8,10 +8,12 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -25,6 +27,7 @@ type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
Configured() bool
}
@@ -37,10 +40,11 @@ type config struct {
// Service defines a service that provides a client for interacting with the beacon chain and MEV relay network.
type Service struct {
cfg *config
c builder.BuilderClient
ctx context.Context
cancel context.CancelFunc
cfg *config
c builder.BuilderClient
ctx context.Context
cancel context.CancelFunc
registrationCache *cache.RegistrationCache
}
// NewService instantiates a new service.
@@ -106,10 +110,13 @@ func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHas
getHeaderLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
tracing.AnnotateError(span, ErrNoBuilder)
return nil, ErrNoBuilder
}
return s.c.GetHeader(ctx, slot, parentHash, pubKey)
h, err := s.c.GetHeader(ctx, slot, parentHash, pubKey)
tracing.AnnotateError(span, err)
return h, err
}
// Status retrieves the status of the builder relay network.
@@ -135,8 +142,12 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
return ErrNoBuilder
}
// idxs and msgs only feed the db fallback below; they should be removed if the db path is removed
idxs := make([]primitives.ValidatorIndex, 0)
msgs := make([]*ethpb.ValidatorRegistrationV1, 0)
indexToRegistration := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)
valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
for i := 0; i < len(reg); i++ {
r := reg[i]
@@ -150,12 +161,33 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
idxs = append(idxs, nx)
msgs = append(msgs, r.Message)
valid = append(valid, r)
indexToRegistration[nx] = r.Message
}
if err := s.c.RegisterValidator(ctx, valid); err != nil {
return errors.Wrap(err, "could not register validator(s)")
}
return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
if len(indexToRegistration) != len(msgs) {
return errors.New("ids and registrations must be the same length")
}
if s.registrationCache != nil {
s.registrationCache.UpdateIndexToRegisteredMap(ctx, indexToRegistration)
return nil
} else {
return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
}
}
// RegistrationByValidatorID returns the registration from the cache when one is configured, otherwise from the db.
func (s *Service) RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
if s.registrationCache != nil {
return s.registrationCache.RegistrationByIndex(id)
} else {
if s.cfg == nil || s.cfg.beaconDB == nil {
return nil, errors.New("nil beacon db")
}
return s.cfg.beaconDB.RegistrationByValidatorID(ctx, id)
}
}
// Configured returns true if the user has configured a builder client.

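A hedged usage sketch of the new option, assuming the NewService(ctx, opts...) constructor shape used elsewhere in Prysm. With the cache configured, registrations expire after the configured window instead of persisting in the db across restarts:

func lookupRegistration(ctx context.Context, idx primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
	// Cache-backed registrations: no db writes, entries expire on read.
	s, err := builder.NewService(ctx, builder.WithRegistrationCache())
	if err != nil {
		return nil, err
	}
	// Returns cache.ErrNotFoundRegistration (wrapped) when the validator never
	// registered or the cached registration has expired.
	return s.RegistrationByValidatorID(ctx, idx)
}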

@@ -8,6 +8,8 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api/client/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",


@@ -2,10 +2,11 @@ package testing
import (
"context"
"math/big"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -15,6 +16,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
// Config defines a config struct for the service's dependencies.
type Config struct {
BeaconDB db.HeadAccessDatabase
}
// MockBuilderService to mock builder.
type MockBuilderService struct {
HasConfigured bool
@@ -23,8 +29,10 @@ type MockBuilderService struct {
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
Cfg *Config
}
// Configured for mocking.
@@ -41,7 +49,7 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.
}
return w, s.ErrSubmitBlindedBlock
}
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
if err != nil {
return nil, errors.Wrap(err, "could not wrap capella payload")
}
@@ -49,8 +57,8 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.
}
// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(ctx context.Context, slot primitives.Slot, hr [32]byte, pb [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch || s.BidCapella != nil {
return builder.WrappedSignedBuilderBidCapella(s.BidCapella)
}
w, err := builder.WrappedSignedBuilderBid(s.Bid)
@@ -60,6 +68,17 @@ func (s *MockBuilderService) GetHeader(ctx context.Context, slot primitives.Slot
return w, s.ErrGetHeader
}
// RegistrationByValidatorID returns the registration from the cache when one is configured, otherwise from the db.
func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
if s.RegistrationCache != nil {
return s.RegistrationCache.RegistrationByIndex(id)
}
if s.Cfg.BeaconDB != nil {
return s.Cfg.BeaconDB.RegistrationByValidatorID(ctx, id)
}
return nil, cache.ErrNotFoundRegistration
}
// RegisterValidator for mocking.
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
return s.ErrRegisterValidator


@@ -17,6 +17,7 @@ go_library(
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
"proposer_indices_type.go",
"registration.go",
"skip_slot_cache.go",
"subnet_ids.go",
"sync_committee.go",
@@ -47,6 +48,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
@@ -64,6 +66,7 @@ go_test(
"committee_test.go",
"payload_id_test.go",
"proposer_indices_test.go",
"registration_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",
"sync_committee_head_state_test.go",
@@ -82,7 +85,9 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)


@@ -42,9 +42,16 @@ type BalanceCache struct {
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
return &BalanceCache{
cache: lruwrpr.New(maxBalanceCacheSize),
}
c := &BalanceCache{}
c.Clear()
return c
}
// Clear resets the BalanceCache to its initial state
func (c *BalanceCache) Clear() {
c.lock.Lock()
defer c.lock.Unlock()
c.cache = lruwrpr.New(maxBalanceCacheSize)
}
// AddTotalEffectiveBalance adds a new total effective balance entry for the current balance of state `st` into the cache.


@@ -3,16 +3,11 @@
package cache
import (
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
)
// FakeBalanceCache is a fake struct with 1 LRU cache for looking up balance by epoch.
type FakeBalanceCache struct {
cache *lru.Cache
lock sync.RWMutex
}
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
@@ -29,3 +24,8 @@ func (c *FakeBalanceCache) AddTotalEffectiveBalance(st state.ReadOnlyBeaconState
func (c *FakeBalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
return 0, nil
}
// Clear is a stub.
func (c *FakeBalanceCache) Clear() {
}


@@ -56,10 +56,17 @@ func committeeKeyFn(obj interface{}) (string, error) {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
return &CommitteeCache{
CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
inProgress: make(map[string]bool),
}
cc := &CommitteeCache{}
cc.Clear()
return cc
}
// Clear resets the CommitteeCache to its initial state
func (c *CommitteeCache) Clear() {
c.lock.Lock()
defer c.lock.Unlock()
c.CommitteeCache = lruwrpr.New(maxCommitteesCacheSize)
c.inProgress = make(map[string]bool)
}
// Committee fetches the shuffled indices by slot and committee index. Every list of indices

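The committee, balance, proposer-indices and sync-committee caches all receive the same refactor in this change set: the constructor builds an empty struct and delegates to Clear, so initialization and reset logic cannot drift apart. A generic sketch of the pattern (locking omitted; the real caches take their mutex inside Clear):

package main

import "fmt"

// Cache mirrors the shape of the caches in this change: one backing store
// plus bookkeeping, both replaced wholesale on Clear.
type Cache struct {
	entries    map[string][]int
	inProgress map[string]bool
}

// NewCache delegates to Clear so construction and reset share one code path.
func NewCache() *Cache {
	c := &Cache{}
	c.Clear()
	return c
}

// Clear swaps in fresh backing structures; old readers keep their snapshot.
func (c *Cache) Clear() {
	c.entries = make(map[string][]int)
	c.inProgress = make(map[string]bool)
}

func main() {
	c := NewCache()
	c.inProgress["seed"] = true
	c.Clear()
	fmt.Println(len(c.inProgress)) // 0
}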

@@ -69,3 +69,8 @@ func (c *FakeCommitteeCache) MarkInProgress(seed [32]byte) error {
func (c *FakeCommitteeCache) MarkNotInProgress(seed [32]byte) error {
return nil
}
// Clear is a stub.
func (c *FakeCommitteeCache) Clear() {
}


@@ -129,7 +129,7 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*eth
}
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) {
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) error {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
defer span.End()
dc.depositsLock.Lock()
@@ -141,7 +141,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
// Don't insert into finalized trie if there is no deposit to
// insert.
if len(dc.deposits) == 0 {
return
return nil
}
// In the event we have fewer deposits than we need to
// finalize, we finalize up to the highest index we do have.
@@ -151,7 +151,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
// If we finalize to some lower deposit index, we
// ignore it.
if int(eth1DepositIndex) < insertIndex {
return
return nil
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
@@ -162,12 +162,10 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
}
depHash, err := d.Deposit.Data.HashTreeRoot()
if err != nil {
log.WithError(err).Error("Could not hash deposit data. Finalized deposit cache not updated.")
return
return errors.Wrap(err, "could not hash deposit data")
}
if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
log.WithError(err).Error("Could not insert deposit hash")
return
return errors.Wrap(err, "could not insert deposit hash")
}
insertIndex++
}
@@ -176,6 +174,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
Deposits: depositTrie,
MerkleTrieIndex: eth1DepositIndex,
}
return nil
}
// AllDepositContainers returns all historical deposit containers.

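With InsertFinalizedDeposits now returning an error, trie failures surface to the caller instead of only being logged inside the cache. Every call site in the tests below gains a require.NoError wrapper, and a production caller would look roughly like this (illustrative, not from this diff):

func finalizeDeposits(ctx context.Context, dc *depositcache.DepositCache, eth1DepositIndex int64) error {
	if err := dc.InsertFinalizedDeposits(ctx, eth1DepositIndex); err != nil {
		return errors.Wrap(err, "could not insert finalized deposits")
	}
	return nil
}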

@@ -416,7 +416,7 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
Index: 3,
})
dc.InsertFinalizedDeposits(context.Background(), 2)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
@@ -474,9 +474,9 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
Index: 2,
}
dc.deposits = oldFinalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 1)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
dc.InsertFinalizedDeposits(context.Background(), 2)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)
@@ -503,7 +503,7 @@ func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)
dc.InsertFinalizedDeposits(context.Background(), 2)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
@@ -548,7 +548,7 @@ func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
}
dc.deposits = finalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 5)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5))
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
@@ -623,10 +623,10 @@ func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
}
dc.deposits = finalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 5)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5))
// Reinsert finalized deposits with a lower index.
dc.InsertFinalizedDeposits(context.Background(), 2)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
@@ -694,7 +694,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
},
Index: 3,
})
dc.InsertFinalizedDeposits(context.Background(), 1)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
assert.Equal(t, 2, len(deps))
@@ -751,7 +751,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
},
Index: 3,
})
dc.InsertFinalizedDeposits(context.Background(), 1)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
assert.Equal(t, 1, len(deps))
@@ -799,12 +799,12 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
assert.NoError(t, err)
// Perform this in a nonsensical ordering
dc.InsertFinalizedDeposits(context.Background(), 10)
dc.InsertFinalizedDeposits(context.Background(), 2)
dc.InsertFinalizedDeposits(context.Background(), 3)
dc.InsertFinalizedDeposits(context.Background(), 4)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 4))
// Mimick finalized deposit trie fetch.
// Mimic finalized deposit trie fetch.
fd := dc.FinalizedDeposits(context.Background())
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
insertIndex := fd.MerkleTrieIndex + 1
@@ -817,9 +817,9 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
}
insertIndex++
}
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 14)
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 14))
fd = dc.FinalizedDeposits(context.Background())
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(30))


@@ -1,6 +1,6 @@
package cache
import "errors"
import "github.com/pkg/errors"
var (
// ErrNilValueProvided for when we try to put a nil value in a cache.
@@ -12,4 +12,6 @@ var (
// ErrNonExistingSyncCommitteeKey when sync committee key (root) does not exist in cache.
ErrNonExistingSyncCommitteeKey = errors.New("does not exist sync committee key")
errNotSyncCommitteeIndexPosition = errors.New("not syncCommitteeIndexPosition struct")
// ErrNotFoundRegistration when validator registration does not exist in cache.
ErrNotFoundRegistration = errors.Wrap(ErrNotFound, "no validator registered")
)


@@ -46,9 +46,16 @@ func proposerIndicesKeyFn(obj interface{}) (string, error) {
// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
func NewProposerIndicesCache() *ProposerIndicesCache {
return &ProposerIndicesCache{
proposerIndicesCache: cache.NewFIFO(proposerIndicesKeyFn),
}
c := &ProposerIndicesCache{}
c.Clear()
return c
}
// Clear resets the ProposerIndicesCache to its initial state
func (c *ProposerIndicesCache) Clear() {
c.lock.Lock()
defer c.lock.Unlock()
c.proposerIndicesCache = cache.NewFIFO(proposerIndicesKeyFn)
}
// AddProposerIndices adds ProposerIndices object to the cache.


@@ -33,3 +33,7 @@ func (c *FakeProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error)
func (c *FakeProposerIndicesCache) Len() int {
return 0
}
// Clear is a stub.
func (c *FakeProposerIndicesCache) Clear() {
}

beacon-chain/cache/registration.go

@@ -0,0 +1,82 @@
package cache
import (
"context"
"sync"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// RegistrationCache stores the cached results of a validator registration request.
// beacon api /eth/v1/validator/register_validator
type RegistrationCache struct {
indexToRegistration map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1
lock sync.RWMutex
}
// NewRegistrationCache initializes the map and underlying cache.
func NewRegistrationCache() *RegistrationCache {
return &RegistrationCache{
indexToRegistration: make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1),
lock: sync.RWMutex{},
}
}
// RegistrationByIndex returns the registration for the given validator index and evicts the entry from the cache if it has expired.
func (regCache *RegistrationCache) RegistrationByIndex(id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
regCache.lock.RLock()
v, ok := regCache.indexToRegistration[id]
if !ok {
regCache.lock.RUnlock()
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
isExpired, err := RegistrationTimeStampExpired(v.Timestamp)
if err != nil {
regCache.lock.RUnlock()
return nil, errors.Wrapf(err, "failed to check registration expiration")
}
if isExpired {
regCache.lock.RUnlock()
regCache.lock.Lock()
defer regCache.lock.Unlock()
delete(regCache.indexToRegistration, id)
log.Warnf("registration for validator index %d expired at unix time %d", id, v.Timestamp)
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
regCache.lock.RUnlock()
return v, nil
}
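// RegistrationTimeStampExpired reports whether the unix-seconds timestamp ts is older than the configured RegistrationDuration.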
func RegistrationTimeStampExpired(ts uint64) (bool, error) {
// safely convert uint64 to int64
i, err := math.Int(ts)
if err != nil {
return false, err
}
expiryDuration := params.BeaconConfig().RegistrationDuration
// registered time + expiration duration < current time = expired
return time.Unix(int64(i), 0).Add(expiryDuration).Before(time.Now()), nil
}
// UpdateIndexToRegisteredMap adds or updates values in the cache based on the argument.
func (regCache *RegistrationCache) UpdateIndexToRegisteredMap(ctx context.Context, m map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1) {
_, span := trace.StartSpan(ctx, "RegistrationCache.UpdateIndexToRegisteredMap")
defer span.End()
regCache.lock.Lock()
defer regCache.lock.Unlock()
for key, value := range m {
regCache.indexToRegistration[key] = &ethpb.ValidatorRegistrationV1{
Pubkey: bytesutil.SafeCopyBytes(value.Pubkey),
FeeRecipient: bytesutil.SafeCopyBytes(value.FeeRecipient),
GasLimit: value.GasLimit,
Timestamp: value.Timestamp,
}
}
}

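The tests below pin the expiry window at three epochs. A small worked check of the boundary arithmetic, assuming mainnet-style values (12 seconds per slot, 32 slots per epoch, so a window of 3*32*12 = 1152 seconds):

package main

import (
	"fmt"
	"time"
)

func main() {
	const secondsPerSlot, slotsPerEpoch = 12, 32
	registrationDuration := 3 * slotsPerEpoch * secondsPerSlot * time.Second // 1152s

	now := time.Now()
	fresh := now.Add(-(registrationDuration - 5*time.Second)) // 3 epochs - 5s: still valid
	stale := now.Add(-(registrationDuration + 5*time.Second)) // past the window: expired

	// Mirrors RegistrationTimeStampExpired: registered time + duration < now.
	expired := func(ts time.Time) bool { return ts.Add(registrationDuration).Before(now) }
	fmt.Println(expired(fresh), expired(stale)) // false true
}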
beacon-chain/cache/registration_test.go

@@ -0,0 +1,82 @@
package cache
import (
"context"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestRegistrationCache(t *testing.T) {
hook := logTest.NewGlobal()
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex := primitives.ValidatorIndex(1)
cache := NewRegistrationCache()
m := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)
m[validatorIndex] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
reg, err := cache.RegistrationByIndex(validatorIndex)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))
t.Run("Registration expired", func(t *testing.T) {
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
_, err := cache.RegistrationByIndex(validatorIndex2)
require.ErrorContains(t, "no validator registered", err)
require.LogsContain(t, hook, "expired")
})
t.Run("Registration close to expiration still passes", func(t *testing.T) {
pubkey, err := hexutil.Decode("0x88247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs - 5 seconds
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
reg, err := cache.RegistrationByIndex(validatorIndex2)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))
})
}
func Test_RegistrationTimeStampExpired(t *testing.T) {
// expiration set at 3 epochs
t.Run("expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, true, isExpired)
})
t.Run("is not expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs - 5 seconds
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, false, isExpired)
})
}

View File

@@ -4,12 +4,14 @@ package cache
import (
"sync"
"sync/atomic"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/tools/cache"
)
@@ -31,8 +33,9 @@ var (
// SyncCommitteeCache utilizes a FIFO cache to sufficiently cache validator position within sync committee.
// It is thread safe with concurrent read write.
type SyncCommitteeCache struct {
cache *cache.FIFO
lock sync.RWMutex
cache *cache.FIFO
lock sync.RWMutex
cleared *atomic.Uint64
}
// Index position of all validators in sync committee where `currentSyncCommitteeRoot` is the
@@ -51,9 +54,17 @@ type positionInCommittee struct {
// NewSyncCommittee initializes and returns a new SyncCommitteeCache.
func NewSyncCommittee() *SyncCommitteeCache {
return &SyncCommitteeCache{
cache: cache.NewFIFO(keyFn),
}
c := &SyncCommitteeCache{cleared: &atomic.Uint64{}}
c.Clear()
return c
}
// Clear resets the SyncCommitteeCache to its initial state
func (s *SyncCommitteeCache) Clear() {
s.lock.Lock()
defer s.lock.Unlock()
s.cleared.Add(1)
s.cache = cache.NewFIFO(keyFn)
}
// CurrentPeriodIndexPosition returns current period index position of a validator index with respect with
@@ -123,6 +134,10 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
// current epoch and next epoch. This should be called when `current_sync_committee` and `next_sync_committee`
// change and that happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`.
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconState) error {
// since we call UpdatePositionsInCommittee asynchronously, keep track of the cache value
// seen at the beginning of the routine and compare at the end before updating. If the underlying value has been
// cycled (new address), don't update it.
clearCount := s.cleared.Load()
csc, err := st.CurrentSyncCommittee()
if err != nil {
return err
@@ -162,6 +177,10 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
s.lock.Lock()
defer s.lock.Unlock()
if clearCount != s.cleared.Load() {
log.Warn("cache rotated during async committee update operation - abandoning cache update")
return nil
}
if err := s.cache.Add(&syncCommitteeIndexPosition{
currentSyncCommitteeRoot: syncCommitteeBoundaryRoot,
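The cleared counter implements a generation check: snapshot the generation before the slow, lock-free work, then re-check under the lock before committing. A minimal self-contained sketch of the same pattern (hypothetical types, not Prysm code):

package main

import (
    "sync"
    "sync/atomic"
)

type genCache struct {
    mu      sync.Mutex
    gen     atomic.Uint64
    entries map[string]int
}

func (c *genCache) Clear() {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.gen.Add(1) // bump the generation so in-flight updates abandon
    c.entries = make(map[string]int)
}

func (c *genCache) asyncUpdate(key string, compute func() int) {
    gen := c.gen.Load() // snapshot the generation before the expensive work
    v := compute()      // runs without holding the lock
    c.mu.Lock()
    defer c.mu.Unlock()
    if gen != c.gen.Load() {
        return // cache rotated mid-flight; drop the stale result
    }
    c.entries[key] = v
}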

View File

@@ -30,3 +30,8 @@ func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx p
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconState) error {
return nil
}
// Clear -- fake.
func (s *FakeSyncCommitteeCache) Clear() {
return
}

View File

@@ -208,7 +208,7 @@ func ProcessEpochParticipation(
}
// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validator.
// This is an optimized version by passing in precomputed validator attesting records and and total epoch balances.
// This is an optimized version by passing in precomputed validator attesting records and total epoch balances.
func ProcessRewardsAndPenaltiesPrecompute(
beaconState state.BeaconState,
bal *precompute.Balance,

View File

@@ -12,6 +12,7 @@ import (
// ProcessSyncCommitteeUpdates processes sync client committee updates for the beacon state.
//
// nolint:dupword
// Spec code:
// def process_sync_committee_updates(state: BeaconState) -> None:
//
@@ -45,6 +46,7 @@ func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconSt
// ProcessParticipationFlagUpdates processes participation flag updates by rotating current to previous.
//
// nolint:dupword
// Spec code:
// def process_participation_flag_updates(state: BeaconState) -> None:
//

View File

@@ -28,6 +28,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -60,7 +61,7 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
}
payload, err := body.Execution()
switch {
case errors.Is(err, blocks.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
return false, nil
case err != nil:
return false, err

View File

@@ -1,7 +1,6 @@
package blocks_test
import (
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
@@ -610,7 +609,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
payload.PrevRandao = random
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, 0)
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, wrapped)
require.NoError(t, err)
@@ -874,7 +873,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
}, big.NewInt(0))
}, 0)
}
func emptyPayload() *enginev1.ExecutionPayload {

View File

@@ -1,7 +1,6 @@
package blocks_test
import (
"math/big"
"math/rand"
"testing"
@@ -644,7 +643,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
require.NoError(t, err)
wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, big.NewInt(0))
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, 0)
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {
@@ -1062,7 +1061,7 @@ func TestProcessWithdrawals(t *testing.T) {
}
st, err := prepareValidators(spb, test.Args)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, 0)
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {

View File

@@ -393,6 +393,7 @@ func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, er
// ProcessParticipationRecordUpdates rotates current/previous epoch attestations during epoch processing.
//
// nolint:dupword
// Spec pseudocode definition:
//
// def process_participation_record_updates(state: BeaconState) -> None:

View File

@@ -14,7 +14,7 @@ type attesterRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator)
type proposerRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator) ([]uint64, error)
// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validator.
// This is an optimized version by passing in precomputed validator attesting records and and total epoch balances.
// This is an optimized version by passing in precomputed validator attesting records and total epoch balances.
func ProcessRewardsAndPenaltiesPrecompute(
state state.BeaconState,
pBal *Balance,
@@ -104,7 +104,6 @@ func attestationDelta(pBal *Balance, sqrtActiveCurrentEpoch uint64, v *Validator
} else {
rewardNumerator := br * (pBal.PrevEpochAttested / effectiveBalanceIncrement)
r += rewardNumerator / currentEpochBalance
}
} else {
p += br

View File

@@ -15,10 +15,10 @@ const (
BlockProcessed = iota + 1
// ChainStarted is sent when enough validators are active to start proposing blocks.
ChainStarted
// Initialized is sent when the internal beacon node's state is ready to be accessed.
Initialized
// Synced is sent when the beacon node has completed syncing and is ready to participate in the network.
Synced
// deprecated: Initialized is sent when the internal beacon node's state is ready to be accessed.
_
// deprecated: Synced is sent when the beacon node has completed syncing and is ready to participate in the network.
_
// Reorg is an event sent when the new head is not a descendant of the previous head.
Reorg
// FinalizedCheckpoint event.
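Replacing the deprecated names with the blank identifier, rather than deleting them, keeps every later iota value stable, so existing consumers that compare against Reorg or FinalizedCheckpoint keep working. In miniature:

const (
    BlockProcessed = iota + 1 // 1
    ChainStarted              // 2
    _                         // 3 (was Initialized)
    _                         // 4 (was Synced)
    Reorg                     // 5, unchanged by the deprecation
)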

View File

@@ -48,6 +48,7 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"main_test.go",
"randao_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",
@@ -56,6 +57,7 @@ go_test(
"weak_subjectivity_test.go",
],
embed = [":go_default_library"],
race = "on",
shard_count = 2,
deps = [
"//beacon-chain/cache:go_default_library",

View File

@@ -183,11 +183,11 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
currentSlot,
)
if attTime.Before(lowerBounds) {
attReceivedTooEarlyCount.Inc()
attReceivedTooLateCount.Inc()
return attError
}
if attTime.After(upperBounds) {
attReceivedTooLateCount.Inc()
attReceivedTooEarlyCount.Inc()
return attError
}
return nil
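The swap corrects inverted metric semantics: an attestation whose slot time falls before the lower bound is older than the tolerated window, so it arrived too late, while one past the upper bound belongs to a slot that has not started yet, so it arrived too early.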

View File

@@ -382,10 +382,10 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
// ClearCache clears the beacon committee cache and sync committee cache.
func ClearCache() {
committeeCache = cache.NewCommitteesCache()
proposerIndicesCache = cache.NewProposerIndicesCache()
syncCommitteeCache = cache.NewSyncCommittee()
balanceCache = cache.NewEffectiveBalanceCache()
committeeCache.Clear()
proposerIndicesCache.Clear()
syncCommitteeCache.Clear()
balanceCache.Clear()
}
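Clearing the existing instances in place, instead of swapping in freshly allocated caches, means goroutines that already hold a reference keep pointing at the live object; combined with the generation counter above, an in-flight UpdatePositionsInCommittee detects the rotation and abandons its write rather than repopulating an orphaned cache.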
// computeCommittee returns the requested shuffled committee out of the total committees using

View File

@@ -91,6 +91,7 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
ClearCache()
defer ClearCache()
epoch := primitives.Epoch(1)
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0, // Epoch 0.
@@ -101,6 +102,8 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
}
func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
var activationEpoch primitives.Epoch
@@ -118,7 +121,6 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
ClearCache()
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
for _, ss := range proposerIndexToSlots {
@@ -188,6 +190,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
},
}
defer ClearCache()
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
ClearCache()
@@ -255,6 +258,8 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
ClearCache()
defer ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -269,7 +274,6 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
ClearCache()
epoch := primitives.Epoch(1)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, epoch)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
@@ -376,6 +380,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
},
}
defer ClearCache()
for i, tt := range tests {
ClearCache()
require.NoError(t, state.SetSlot(tt.stateSlot))
@@ -390,6 +395,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)

View File

@@ -0,0 +1,13 @@
package helpers
import (
"os"
"testing"
)
// TestMain runs ClearCache once before this package's tests so stale cached state cannot leak across test runs
func TestMain(m *testing.M) {
ClearCache()
code := m.Run()
os.Exit(code)
}

View File

@@ -75,6 +75,8 @@ func TestTotalActiveBalance(t *testing.T) {
}
func TestTotalActiveBal_ReturnMin(t *testing.T) {
ClearCache()
defer ClearCache()
tests := []struct {
vCount int
}{
@@ -96,6 +98,8 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
}
func TestTotalActiveBalance_WithCache(t *testing.T) {
ClearCache()
defer ClearCache()
tests := []struct {
vCount int
wantCount int

View File

@@ -25,9 +25,7 @@ var (
// along with the sync committee root.
// 1. Checks if the public key exists in the sync committee cache
// 2. If 1 fails, checks if the public key exists in the input current sync committee object
func IsCurrentPeriodSyncCommittee(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) (bool, error) {
func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.ValidatorIndex) (bool, error) {
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return false, err
@@ -36,7 +34,7 @@ func IsCurrentPeriodSyncCommittee(
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return false, nil
return false, err
}
committee, err := st.CurrentSyncCommittee()
if err != nil {
@@ -73,7 +71,7 @@ func IsNextPeriodSyncCommittee(
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return false, nil
return false, err
}
committee, err := st.NextSyncCommittee()
if err != nil {
@@ -100,7 +98,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return nil, nil
return nil, err
}
committee, err := st.CurrentSyncCommittee()
if err != nil {
@@ -134,7 +132,7 @@ func NextPeriodSyncSubcommitteeIndices(
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return nil, nil
return nil, err
}
committee, err := st.NextSyncCommittee()
if err != nil {
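Previously these four helpers swallowed the ValidatorAtIndex error and returned a nil error, so an out-of-range index was indistinguishable from a validator that simply is not in the committee; propagating the error lets callers see failures such as "index ... out of range", as the updated tests below now assert.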

View File

@@ -17,6 +17,8 @@ import (
)
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -37,7 +39,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
@@ -47,6 +48,8 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
}
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -73,6 +76,8 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
}
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -94,11 +99,13 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
require.NoError(t, err)
require.ErrorContains(t, "index 12390192 out of range", err)
require.Equal(t, false, ok)
}
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -119,7 +126,6 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
@@ -176,11 +182,13 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
require.NoError(t, err)
require.ErrorContains(t, "index 120391029 out of range", err)
require.Equal(t, false, ok)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -201,7 +209,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
@@ -211,6 +218,8 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -230,7 +239,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
root, err := syncPeriodBoundaryRoot(state)
require.NoError(t, err)
@@ -252,6 +260,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -273,11 +282,13 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
require.NoError(t, err)
require.ErrorContains(t, "index 129301923 out of range", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -298,7 +309,6 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
@@ -335,6 +345,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -356,7 +367,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
require.NoError(t, err)
require.ErrorContains(t, "index 21093019 out of range", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
@@ -387,6 +398,8 @@ func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
}
func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -412,7 +425,6 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)

View File

@@ -285,6 +285,7 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
// ComputeProposerIndex returns the index sampled by effective balance, which is used to calculate proposer.
//
// nolint:dupword
// Spec pseudocode definition:
//
// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:

View File

@@ -179,6 +179,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
func TestBeaconProposerIndex_OK(t *testing.T) {
params.SetupTestConfigCleanup(t)
ClearCache()
defer ClearCache()
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
@@ -222,6 +223,7 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
},
}
defer ClearCache()
for _, tt := range tests {
ClearCache()
require.NoError(t, state.SetSlot(tt.slot))
@@ -234,6 +236,7 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
func TestBeaconProposerIndex_BadState(t *testing.T) {
params.SetupTestConfigCleanup(t)
ClearCache()
defer ClearCache()
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
@@ -345,6 +348,7 @@ func TestChurnLimit_OK(t *testing.T) {
{validatorCount: 1000000, wantedChurn: 15 /* validatorCount/churnLimitQuotient */},
{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
}
defer ClearCache()
for _, test := range tests {
ClearCache()
@@ -516,6 +520,7 @@ func TestActiveValidatorIndices(t *testing.T) {
want: []primitives.ValidatorIndex{0, 2, 3},
},
}
defer ClearCache()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)

View File

@@ -150,7 +150,7 @@ func TestSlashValidator_OK(t *testing.T) {
maxBalance := params.BeaconConfig().MaxEffectiveBalance
slashedBalance := state.Slashings()[state.Slot().Mod(uint64(params.BeaconConfig().EpochsPerSlashingsVector))]
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isnt the expected amount")
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isn't the expected amount")
whistleblowerReward := slashedBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
bal, err := state.BalanceAtIndex(proposer)

View File

@@ -3,6 +3,7 @@ package kv
import (
"context"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
@@ -542,7 +543,7 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 150, len(retrieved))
for _, b := range retrieved {
assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpect block slot %d", b.Block().Slot())
assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpected block slot %d", b.Block().Slot())
}
})
}
@@ -910,25 +911,25 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
ids := []primitives.ValidatorIndex{0, 0, 0}
regs := []*ethpb.ValidatorRegistrationV1{{}, {}, {}, {}}
require.ErrorContains(t, "ids and registrations must be the same length", db.SaveRegistrationsByValidatorIDs(ctx, ids, regs))
timestamp := time.Now().Unix()
ids = []primitives.ValidatorIndex{0, 1, 2}
regs = []*ethpb.ValidatorRegistrationV1{
{
FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
GasLimit: 1,
Timestamp: 2,
Timestamp: uint64(timestamp),
Pubkey: bytesutil.PadTo([]byte("b"), 48),
},
{
FeeRecipient: bytesutil.PadTo([]byte("c"), 20),
GasLimit: 3,
Timestamp: 4,
Timestamp: uint64(timestamp),
Pubkey: bytesutil.PadTo([]byte("d"), 48),
},
{
FeeRecipient: bytesutil.PadTo([]byte("e"), 20),
GasLimit: 5,
Timestamp: 6,
Timestamp: uint64(timestamp),
Pubkey: bytesutil.PadTo([]byte("f"), 48),
},
}
@@ -938,7 +939,7 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
require.DeepEqual(t, &ethpb.ValidatorRegistrationV1{
FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
GasLimit: 1,
Timestamp: 2,
Timestamp: uint64(timestamp),
Pubkey: bytesutil.PadTo([]byte("b"), 48),
}, f)
f, err = db.RegistrationByValidatorID(ctx, 1)
@@ -946,7 +947,7 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
require.DeepEqual(t, &ethpb.ValidatorRegistrationV1{
FeeRecipient: bytesutil.PadTo([]byte("c"), 20),
GasLimit: 3,
Timestamp: 4,
Timestamp: uint64(timestamp),
Pubkey: bytesutil.PadTo([]byte("d"), 48),
}, f)
f, err = db.RegistrationByValidatorID(ctx, 2)
@@ -954,7 +955,7 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
require.DeepEqual(t, &ethpb.ValidatorRegistrationV1{
FeeRecipient: bytesutil.PadTo([]byte("e"), 20),
GasLimit: 5,
Timestamp: 6,
Timestamp: uint64(timestamp),
Pubkey: bytesutil.PadTo([]byte("f"), 48),
}, f)
_, err = db.RegistrationByValidatorID(ctx, 3)

View File

@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
@@ -55,22 +56,7 @@ func (s *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.C
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveJustifiedCheckpoint")
defer span.End()
enc, err := encode(ctx, checkpoint)
if err != nil {
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
log.Warnf("Recovering state summary for justified root: %#x", bytesutil.Trunc(checkpoint.Root))
if err := recoverStateSummary(ctx, tx, checkpoint.Root); err != nil {
return errors.Wrapf(errMissingStateForCheckpoint, "could not save justified checkpoint, finalized root: %#x", bytesutil.Trunc(checkpoint.Root))
}
}
return bucket.Put(justifiedCheckpointKey, enc)
})
return s.saveCheckpoint(ctx, justifiedCheckpointKey, checkpoint)
}
// SaveFinalizedCheckpoint saves finalized checkpoint in beacon chain.
@@ -80,10 +66,11 @@ func (s *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
enc, err := encode(ctx, checkpoint)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
return s.db.Update(func(tx *bolt.Tx) error {
err = s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
@@ -98,6 +85,33 @@ func (s *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
return s.updateFinalizedBlockRoots(ctx, tx, checkpoint)
})
tracing.AnnotateError(span, err)
return err
}
func (s *Store) saveCheckpoint(ctx context.Context, key []byte, checkpoint *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.saveCheckpoint")
defer span.End()
enc, err := encode(ctx, checkpoint)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
err = s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
log.WithField("root", fmt.Sprintf("%#x", bytesutil.Trunc(checkpoint.Root))).Warn("Recovering state summary")
if err := recoverStateSummary(ctx, tx, checkpoint.Root); err != nil {
return errMissingStateForCheckpoint
}
}
return bucket.Put(key, enc)
})
tracing.AnnotateError(span, err)
return err
}
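SaveJustifiedCheckpoint above and SaveLastValidatedCheckpoint (further down) now delegate to this shared helper; SaveFinalizedCheckpoint keeps its own transaction because it must also call updateFinalizedBlockRoots within the same update.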
// Recovers and saves state summary for a given root if the root has a block in the DB.

View File

@@ -97,9 +97,8 @@ func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
return valErr
}
var crtErr error
st, err = s.unmarshalState(ctx, enc, valEntries)
return crtErr
return err
})
if err != nil {
return nil, err
@@ -874,5 +873,4 @@ func (s *Store) isStateValidatorMigrationOver() (bool, error) {
return returnFlag, err
}
return returnFlag, nil
}

View File

@@ -3,8 +3,6 @@ package kv
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
@@ -34,20 +32,5 @@ func (s *Store) SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *eth
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastValidatedCheckpoint")
defer span.End()
enc, err := encode(ctx, checkpoint)
if err != nil {
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
log.Warnf("Recovering state summary for last validated root: %#x", bytesutil.Trunc(checkpoint.Root))
if err := recoverStateSummary(ctx, tx, checkpoint.Root); err != nil {
return errors.Wrapf(errMissingStateForCheckpoint, "could not save finalized checkpoint, last validated root: %#x", bytesutil.Trunc(checkpoint.Root))
}
}
return bucket.Put(lastValidatedCheckpointKey, enc)
})
return s.saveCheckpoint(ctx, lastValidatedCheckpointKey, checkpoint)
}

View File

@@ -14,7 +14,6 @@ go_library(
"metrics.go",
"options.go",
"prometheus.go",
"provider.go",
"rpc_connection.go",
"service.go",
],
@@ -49,6 +48,7 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/clientstats:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
@@ -89,7 +89,6 @@ go_test(
"init_test.go",
"log_processing_test.go",
"prometheus_test.go",
"provider_test.go",
"service_test.go",
],
data = glob(["testdata/**"]),
@@ -121,7 +120,6 @@ go_test(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/clientstats:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",

View File

@@ -23,6 +23,7 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -238,7 +239,8 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
return nil, handleRPCError(err)
}
return blocks.WrappedExecutionPayloadCapella(result.Payload, big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value)))
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value))
return blocks.WrappedExecutionPayloadCapella(result.Payload, math.WeiToGwei(v))
}
result := &pb.ExecutionPayload{}
@@ -716,7 +718,7 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
}, 0) // We can't get the block value and don't care about the block value for this instance
}
// Handles errors received from the RPC server according to the specification.
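The engine API reports BlockValue in Wei; the beacon node now carries it as a Gwei uint64. A minimal sketch of the conversion this relies on (the real helper is math.WeiToGwei; its exact signature is assumed here):

import "math/big"

func weiToGwei(wei *big.Int) uint64 {
    gweiPerWei := big.NewInt(1_000_000_000)                // 1 Gwei = 1e9 Wei
    return new(big.Int).Div(wei, gweiPerWei).Uint64()      // integer division truncates sub-Gwei dust
}

This is also why the test fixture below grows from 0x11ff to 0x11fffffffff: 0x11ff Wei is far below one Gwei and would truncate to zero, while 0x11fffffffff Wei is about 1,236.95 Gwei and truncates to the 1236 the updated assertion expects.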

View File

@@ -127,7 +127,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
req, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, 0)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
require.NoError(t, err)
@@ -258,10 +258,9 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, want.ExecutionPayload.PrevRandao.Bytes(), pb.PrevRandao)
require.DeepEqual(t, want.ExecutionPayload.ParentHash.Bytes(), pb.ParentHash)
v, err := resp.Value()
v, err := resp.ValueInGwei()
require.NoError(t, err)
wantedValue := []byte{17, 255} // 0x11ff
require.DeepEqual(t, wantedValue, v.Bytes())
require.Equal(t, uint64(1236), v)
})
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
@@ -425,7 +424,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
require.NoError(t, err)
@@ -453,7 +452,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
@@ -481,7 +480,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
@@ -509,7 +508,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
@@ -1325,7 +1324,7 @@ func fixtures() map[string]interface{} {
GasUsed: &hexUint,
Timestamp: &hexUint,
},
BlockValue: "0x11ff",
BlockValue: "0x11fffffffff",
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)

View File

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
)
@@ -15,7 +16,7 @@ type Option func(s *Service) error
// WithHttpEndpoint parse http endpoint for the powchain service to use.
func WithHttpEndpoint(endpointString string) Option {
return func(s *Service) error {
s.cfg.currHttpEndpoint = HttpEndpoint(endpointString)
s.cfg.currHttpEndpoint = network.HttpEndpoint(endpointString)
return nil
}
}
@@ -27,7 +28,7 @@ func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
return nil
}
// Overwrite authorization type for all endpoints to be of a bearer type.
hEndpoint := HttpEndpoint(endpointString)
hEndpoint := network.HttpEndpoint(endpointString)
hEndpoint.Auth.Method = authorization.Bearer
hEndpoint.Auth.Value = string(secret)

View File

@@ -15,7 +15,7 @@ import (
func TestCleanup(t *testing.T) {
ctx := context.Background()
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Uxpected error caling NewPowchainCollector")
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
unregistered := pc.unregister()
assert.Equal(t, true, unregistered, "PowchainCollector.unregister did not return true (via prometheus.DefaultRegistry)")
// PowchainCollector is a prometheus.Collector, so we should be able to register it again
@@ -39,7 +39,7 @@ func TestCleanup(t *testing.T) {
func TestCancelation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Uxpected error caling NewPowchainCollector")
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
ticker := time.NewTicker(10 * time.Second)
cancel()
select {

View File

@@ -1,49 +0,0 @@
package execution
import (
"encoding/base64"
"strings"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
)
// HttpEndpoint extracts an httputils.Endpoint from the provider parameter.
func HttpEndpoint(eth1Provider string) network.Endpoint {
endpoint := network.Endpoint{
Url: "",
Auth: network.AuthorizationData{
Method: authorization.None,
Value: "",
}}
authValues := strings.Split(eth1Provider, ",")
endpoint.Url = strings.TrimSpace(authValues[0])
if len(authValues) > 2 {
log.Errorf(
"ETH1 endpoint string can contain one comma for specifying the authorization header to access the provider."+
" String contains too many commas: %d. Skipping authorization.", len(authValues)-1)
} else if len(authValues) == 2 {
switch network.Method(strings.TrimSpace(authValues[1])) {
case authorization.Basic:
basicAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
if len(basicAuthValues) != 2 {
log.Errorf("Basic Authentication has incorrect format. Skipping authorization.")
} else {
endpoint.Auth.Method = authorization.Basic
endpoint.Auth.Value = base64.StdEncoding.EncodeToString([]byte(basicAuthValues[1]))
}
case authorization.Bearer:
bearerAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
if len(bearerAuthValues) != 2 {
log.Errorf("Bearer Authentication has incorrect format. Skipping authorization.")
} else {
endpoint.Auth.Method = authorization.Bearer
endpoint.Auth.Value = bearerAuthValues[1]
}
case authorization.None:
log.Errorf("Authorization has incorrect format or authorization type is not supported.")
}
}
return endpoint
}

View File

@@ -1,74 +0,0 @@
package execution
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestHttpEndpoint(t *testing.T) {
hook := logTest.NewGlobal()
url := "http://test"
t.Run("URL", func(t *testing.T) {
endpoint := HttpEndpoint(url)
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("URL with separator", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("URL with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(" " + url + " ,")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("Basic auth", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Basic username:password")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
})
t.Run("Basic auth with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(url + ", Basic username:password ")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
})
t.Run("Basic auth with incorrect format", func(t *testing.T) {
hook.Reset()
endpoint := HttpEndpoint(url + ",Basic username:password foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
t.Run("Bearer auth", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Bearer token")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
assert.Equal(t, "token", endpoint.Auth.Value)
})
t.Run("Bearer auth with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(url + ", Bearer token ")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
assert.Equal(t, "token", endpoint.Auth.Value)
})
t.Run("Bearer auth with incorrect format", func(t *testing.T) {
hook.Reset()
endpoint := HttpEndpoint(url + ",Bearer token foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
t.Run("Too many separators", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Bearer token,foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
}

View File

@@ -3,7 +3,6 @@ package execution
import (
"context"
"fmt"
"net/url"
"strings"
"time"
@@ -107,26 +106,10 @@ func (s *Service) retryExecutionClientConnection(ctx context.Context, err error)
// Initializes an RPC connection with authentication headers.
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
// Need to handle ipc and http
var client *gethRPC.Client
u, err := url.Parse(endpoint.Url)
client, err := network.NewExecutionRPCClient(ctx, endpoint)
if err != nil {
return nil, err
}
switch u.Scheme {
case "http", "https":
client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()))
if err != nil {
return nil, err
}
case "", "ipc":
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {
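The URL-scheme dispatch removed here (http/https via gethRPC.DialOptions with the endpoint's HTTP client, ipc via gethRPC.DialIPC) presumably lives on inside network.NewExecutionRPCClient, alongside the HttpEndpoint parser that moved into the network package in the deletions above.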

View File

@@ -370,7 +370,9 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
actualIndex := int64(currIndex) - 1 // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex)
if err = s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex); err != nil {
return err
}
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.depositCache.PruneProofs(ctx, actualIndex); err != nil {
@@ -425,7 +427,6 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*types.Hea
Error: error(nil),
})
headers = append(headers, header)
}
ioErr := s.rpcClient.BatchCall(elems)
if ioErr != nil {

View File

@@ -38,7 +38,7 @@ type EngineClient struct {
TerminalBlockHash []byte
TerminalBlockHashExists bool
OverrideValidHash [32]byte
BlockValue *big.Int
BlockValue uint64
}
// NewPayload --

View File

@@ -246,7 +246,6 @@ func (r *RPCClient) BatchCall(b []rpc.BatchElem) error {
return err
}
*e.Result.(*types.HeaderInfo) = types.HeaderInfo{Number: h.Number, Time: h.Time, Hash: h.Hash()}
}
return nil
}

View File

@@ -228,6 +228,39 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
return n.root, nil
}
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
// known chain in forkchoice.
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
node, ok := f.store.nodeByRoot[cp.Root]
if !ok || node == nil {
return false, nil
}
epochStart, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false, err
}
if node.slot > epochStart {
return false, nil
}
if len(node.children) == 0 {
return true, nil
}
if node.slot == epochStart {
return true, nil
}
nodeEpoch := slots.ToEpoch(node.slot)
if nodeEpoch >= cp.Epoch {
return false, nil
}
for _, child := range node.children {
if child.slot > epochStart {
return true, nil
}
}
return false, nil
}
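In words: a root is viable as a checkpoint for epoch E when its block is not after E's start slot and it is the last block at or before that boundary on some chain. That holds when the node sits exactly on the boundary slot, when it is still a tip (no children yet), or when at least one child lands strictly after the boundary. A child exactly at the boundary would itself be the checkpoint block for E, which is why the child comparison is strict.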
// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes.
func (f *ForkChoice) updateBalances() error {
@@ -509,14 +542,14 @@ func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
}
// UnrealizedJustifiedPayloadBlockHash returns the hash of the payload at the unrealized justified checkpoint
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() ([32]byte, error) {
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
root := f.store.unrealizedJustifiedCheckpoint.Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}, ErrNilNode
return [32]byte{}
}
return node.payloadHash, nil
return node.payloadHash
}
// ForkChoiceDump returns a full dump of forkchoice.
@@ -560,7 +593,6 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, er
ForkChoiceNodes: nodes,
}
return resp, nil
}
// SetBalancesByRooter sets the balanceByRoot handler in forkchoice
@@ -595,3 +627,12 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
return nil
}
// Slot returns the slot of the given root if it's known to forkchoice
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
return n.slot, nil
}

View File

@@ -751,11 +751,113 @@ func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
f.store.unrealizedJustifiedCheckpoint.Root = [32]byte{'a'}
got, err := f.UnrealizedJustifiedPayloadBlockHash()
require.NoError(t, err)
got := f.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, [32]byte{'A'}, got)
f.store.unrealizedJustifiedCheckpoint.Root = [32]byte{'b'}
_, err = f.UnrealizedJustifiedPayloadBlockHash()
require.ErrorIs(t, err, ErrNilNode)
}
func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
require.NoError(t, err)
require.Equal(t, false, viable)
// No Children
require.NoError(t, f.InsertNode(ctx, st, root))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 0})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 2})
require.NoError(t, err)
require.Equal(t, true, viable)
st, broot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, broot))
// Epoch start
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
// No Children but impossible checkpoint
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
st, croot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, broot, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, croot))
// Children in same epoch
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
st, droot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, broot, [32]byte{'D'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, droot))
// Children in next epoch but boundary
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
// Boundary block
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 0})
require.NoError(t, err)
require.Equal(t, false, viable)
// Children in next epoch
st, eroot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, broot, [32]byte{'E'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, eroot))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
}
func TestForkChoiceSlot(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
_, err = f.Slot(root)
require.ErrorIs(t, ErrNilNode, err)
require.NoError(t, f.InsertNode(ctx, st, root))
slot, err := f.Slot(root)
require.NoError(t, err)
require.Equal(t, primitives.Slot(3), slot)
}

View File

@@ -53,11 +53,12 @@ type Getter interface {
CommonAncestor(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, primitives.Slot, error)
IsCanonical(root [32]byte) bool
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
FinalizedPayloadBlockHash() [32]byte
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
JustifiedPayloadBlockHash() [32]byte
UnrealizedJustifiedPayloadBlockHash() ([32]byte, error)
UnrealizedJustifiedPayloadBlockHash() [32]byte
NodeCount() int
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
@@ -66,6 +67,7 @@ type Getter interface {
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)
ShouldOverrideFCU() bool
Slot([32]byte) (primitives.Slot, error)
}
// Setter allows to set forkchoice information

View File

@@ -28,7 +28,6 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
}
if enableDebugRPCEndpoints {
v1AlphaRegistrations = append(v1AlphaRegistrations, ethpbalpha.RegisterDebugHandler)
}
v1AlphaMux := gwruntime.NewServeMux(
gwruntime.WithMarshalerOption(gwruntime.MIMEWildcard, &gwruntime.HTTPBodyMarshaler{
@@ -60,7 +59,6 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
}
if enableDebugRPCEndpoints {
ethRegistrations = append(ethRegistrations, ethpbservice.RegisterBeaconDebugHandler)
}
ethMux := gwruntime.NewServeMux(
gwruntime.WithMarshalerOption(gwruntime.MIMEWildcard, &gwruntime.HTTPBodyMarshaler{

View File

@@ -133,7 +133,6 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
"SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
}).Info("Attester slashing was included")
}
}
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -241,10 +240,44 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
func TestLogAggregatedPerformance(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
latestPerformance := map[primitives.ValidatorIndex]ValidatorLatestPerformance{
1: {
balance: 32000000000,
},
2: {
balance: 32000000000,
},
12: {
balance: 31900000000,
},
15: {
balance: 31900000000,
},
}
aggregatedPerformance := map[primitives.ValidatorIndex]ValidatorAggregatedPerformance{
1: {
startEpoch: 0,
startBalance: 31700000000,
totalAttestedCount: 12,
totalRequestedCount: 15,
totalDistance: 14,
totalCorrectHead: 8,
totalCorrectSource: 11,
totalCorrectTarget: 12,
totalProposedCount: 1,
totalSyncCommitteeContributions: 0,
totalSyncCommitteeAggregations: 0,
},
2: {},
12: {},
15: {},
}
s := &Service{
latestPerformance: latestPerformance,
aggregatedPerformance: aggregatedPerformance,
}
s.logAggregatedPerformance()
time.Sleep(3000 * time.Millisecond)
wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +

View File

@@ -19,13 +19,8 @@ import (
"github.com/sirupsen/logrus"
)
var (
// Error when event feed data is not statefeed.SyncedData.
errNotSyncedData = errors.New("event feed data is not of type *statefeed.SyncedData")
// Error when the context is closed while waiting for sync.
errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest Head")
)
// Error when the context is closed while waiting for sync.
var errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest Head")
// ValidatorLatestPerformance keeps track of the latest participation of the validator
type ValidatorLatestPerformance struct {
@@ -63,6 +58,7 @@ type ValidatorMonitorConfig struct {
AttestationNotifier operation.Notifier
HeadFetcher blockchain.HeadFetcher
StateGen stategen.StateManager
InitialSyncComplete chan struct{}
}
// Service is the main structure that tracks validators and reports logs and
@@ -131,7 +127,7 @@ func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription
return
}
if err := s.waitForSync(stateChannel, stateSub); err != nil {
if err := s.waitForSync(s.config.InitialSyncComplete); err != nil {
log.WithError(err)
return
}
@@ -197,24 +193,13 @@ func (s *Service) Stop() error {
}
// waitForSync waits until the beacon node is synced to the latest head.
func (s *Service) waitForSync(stateChannel chan *feed.Event, stateSub event.Subscription) error {
for {
select {
case e := <-stateChannel:
if e.Type == statefeed.Synced {
_, ok := e.Data.(*statefeed.SyncedData)
if !ok {
return errNotSyncedData
}
return nil
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return errContextClosedWhileWaiting
case err := <-stateSub.Err():
log.WithError(err).Error("Could not subscribe to state notifier")
return err
}
func (s *Service) waitForSync(syncChan chan struct{}) error {
select {
case <-syncChan:
return nil
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return errContextClosedWhileWaiting
}
}
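The refactor above replaces an event-feed subscription with a one-shot channel: initial sync closes InitialSyncComplete exactly once, and because a closed channel unblocks every receiver, any number of services can wait on the same signal without a Send/retry loop. A runnable sketch of the pattern, with hypothetical names:

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForSync blocks until the sync channel is closed or the context ends,
// mirroring the simplified select above.
func waitForSync(ctx context.Context, syncComplete <-chan struct{}) error {
	select {
	case <-syncComplete:
		return nil
	case <-ctx.Done():
		return fmt.Errorf("context closed while waiting for sync: %w", ctx.Err())
	}
}

func main() {
	syncComplete := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for initial sync finishing
		close(syncComplete)
	}()
	if err := waitForSync(context.Background(), syncComplete); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("synced")
}

This also simplifies the tests below: closing the channel is race-free and can only happen once, whereas the old event-feed test had to retry Send until a subscriber was attached.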

View File

@@ -93,6 +93,7 @@ func setupService(t *testing.T) *Service {
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
AttestationNotifier: chainService.OperationNotifier(),
InitialSyncComplete: make(chan struct{}),
},
ctx: context.Background(),
@@ -140,34 +141,9 @@ func TestNewService(t *testing.T) {
func TestStart(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
wg := &sync.WaitGroup{}
wg.Add(1)
s.Start()
go func() {
select {
case stateEvent := <-stateChannel:
if stateEvent.Type == statefeed.Synced {
_, ok := stateEvent.Data.(*statefeed.SyncedData)
require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData")
}
case <-s.ctx.Done():
}
wg.Done()
}()
for sent := 0; sent == 0; {
sent = s.config.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
})
}
close(s.config.InitialSyncComplete)
// wait for Logrus
time.Sleep(1000 * time.Millisecond)
@@ -267,26 +243,29 @@ func TestMonitorRoutine(t *testing.T) {
}
func TestWaitForSync(t *testing.T) {
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
wg := &sync.WaitGroup{}
wg.Add(1)
ctx, cancel := context.WithCancel(context.Background())
s := &Service{ctx: ctx}
syncChan := make(chan struct{})
go func() {
err := s.waitForSync(stateChannel, stateSub)
require.NoError(t, err)
wg.Done()
// Failsafe so the test can never deadlock; the happy path should always complete within 500ms.
// Otherwise, the NoError assertion below will fail.
time.Sleep(500 * time.Millisecond)
cancel()
}()
go func() {
close(syncChan)
}()
require.NoError(t, s.waitForSync(syncChan))
}
stateChannel <- &feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
}
func TestWaitForSyncCanceled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
s := &Service{ctx: ctx}
syncChan := make(chan struct{})
cancel()
require.ErrorIs(t, s.waitForSync(syncChan), errContextClosedWhileWaiting)
}
func TestRun(t *testing.T) {
@@ -295,21 +274,11 @@ func TestRun(t *testing.T) {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
s.run(stateChannel, stateSub)
wg.Done()
}()
close(s.config.InitialSyncComplete)
stateChannel <- &feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
}
// wait for Logrus
time.Sleep(1000 * time.Millisecond)
time.Sleep(100 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
}

Some files were not shown because too many files have changed in this diff.