Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: v1.0.0-alp...v1.0.0-bet
203 commits (SHA1):

4bc7cb6959 3584bcba8e 926d3b9b34 817c16a2f4 92b6e0b6af b3155a04f5
df762bbfee f79b168ab2 211d9bc0b9 386bfdd6eb ddc8dc36f8 99f15943a8
581bed2017 2d4bfbbe31 fb2dfec1f4 2e4dee5aeb 46c04b98d9 301499d134
4fc0a50569 c1c0b53c25 37bf6617c0 149d3b84fa 5092093389 3b34954e75
ec5e59e212 7d1a1643ee 5f80754013 d9e4084d6d ec8eab21ae 0cbd8bc03d
e57770bd0a 8c2fff3a75 4f5726b3af 424488bf8a 21c5ba8ed8 dbbbc7586f
fcbb168c76 f1bce1001d ec77196197 a468a12ef0 b0dff891fc 687251fedc
a04b7c2e4f e6d688f6d5 8a3b75e9e3 a73c539fab 92efe64b8a b1c047b9ee
ff50ea2e0d ebb3fa71f1 9ea69a070e c59edb3358 7e2112b4ba cdbbf66027
e5e51e66e1 6eb022ffa1 e776eb5409 6a2bb65fe2 ecc25d2b8c e07a12e6b7
840ffc84ac ca081e8639 e54ac48f9d 075f1458b4 be6481e178 ab76bdad15
e7723c4d1f a688b9e030 ff15621fe1 4a78071e41 113b2cd6cf 13af8a7a37
7131cd991c 17a08a75ea ddbece5988 ff658ba641 483f7f8177 b4c1c1db9b
70d923cf85 e4e8dd4838 553492e6e9 9554ad3221 d2f7240255 42b7a37281
9a6a70e804 9bd3cede23 544dac298a 78ca8c9265 a5ce6db38e 0b53a89d00
fdef581e02 7c5ee0a806 81b553a00a 816eb94adf ec0af98a9e a39db494eb
05678b6724 bec91d348e 1bc86d2658 9db6c0042b 3d70d757a1 e25cd08049
dccf0992e5 f6ed3f141a 88b2a4c905 ab40a112c5 329fbff814 f31d40cf34
49909ce351 53ab1dff6d b502876e98 f474c4b1c5 78fd3b8a87 7e44d1eec7
9a0d579607 07e7e030d9 a81c863ddb 7aaefd123e 7abc1feaf5 acf201428e
daf0b51361 1462b7e57e ec9c6f30bf 0b64a335c8 1caf2ca96f 5f9ea35b3f
7076a1ec4a a840fa563d 803d7c9bd2 76300cef09 e5ed2cd141 a005c77b3f
8f04c555fc 0a007384c8 022b6667e5 022b09f2e2 2f6f79724f 7de161e917
b9844024b4 5cd6f65a2c 88083d1000 9e712e4598 b742511193 aaabec5cb7
e9c23673c5 06d16a24b9 ac1a4a078c a019a0db4c db48e12270 9434d66ad0
3d0fc8bc64 7cc32c4dda 0214553415 b5a913d862 43765b5cb0 4c09e59b3b
551b03d6e6 c4e64afd07 d98a6dda8f 8aaa5b6ad0 f92244d497 e91165b0b4
e15a0b08aa a3a77ab5a8 650ec797da f629c72107 98a20766c9 1f707842d2
c944f29c7c 6b5265d2d4 8f64eb622e 4ddacd57c6 a66f434236 796c336a29
48fcb08ebc 1315a15d9d 25ebed9a70 703907bd99 d4e6ce6998 390a589afb
d34156bfe6 7c8492e83f 668163d740 7ad2929f0f 9ce64e2428 ae78546323
23bce8d0c5 29137f7b39 bf4a8dcee9 7b5f71229e 4d7797827e c0ed43d920
842c15856b 20ac925ee4 6e8ff10003 419fad07cd cf1c346beb 3e0b20529b
d9ae2073e2 7b3efcf62b c7d01fd73c 2916d183e8 c24fb792cb
.bazelrc (4)

@@ -5,6 +5,9 @@ test --test_verbose_timeout_warnings
test --build_tests_only
test --test_output=errors

# E2E run with debug gotag
test:e2e --define gotags=debug

# Clearly indicate that coverage is enabled to disable certain nogo checks.
coverage --define=coverage_enabled=1

@@ -42,6 +45,7 @@ build:kafka_enabled --define kafka_enabled=true
build:kafka_enabled --define gotags=kafka_enabled

build:blst_enabled --define blst_enabled=true
build:blst_enabled --define gotags=blst_enabled

# Release flags
build:release --workspace_status_command=./scripts/workspace_status.sh

@@ -34,19 +34,5 @@ build --flaky_test_attempts=5
# Disabled race detection due to unstable test results under constrained environment build kite
# build --features=race

# Enable kafka for CI tests only.
test --config=kafka_enabled

# Disable blst for CI tests.
test --define blst_enabled=true

build --bes_backend=grpcs://builds.prylabs.net:1985
build --bes_results_url=https://builds.prylabs.net/invocation/

# Disable flaky test detection for fuzzing.
test:fuzz --flaky_test_attempts=1

# Expose test environment variables in CI
test:fuzzit --test_env=GITHUB_REF
test:fuzzit --test_env=GITHUB_SHA
test:fuzzit --test_env=DOCKER_HOST
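The named configurations shown above (`e2e`, `release`, and the existing `kafka_enabled`, `blst_enabled`, `fuzz`, and `fuzzit` entries) follow Bazel's standard `--config=<name>` mechanism, so they would typically be selected as, for example, `bazel test --config=e2e //...` to pick up the debug gotag or `bazel build --config=release //...` to run the workspace status script. That is ordinary Bazel behaviour rather than anything specific to this change.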
.deepsource.toml (new file, 12)

@@ -0,0 +1,12 @@
version = 1

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm"]

[[analyzers]]
name = "test-coverage"
enabled = true
.policy.yml (new file, 91)

@@ -0,0 +1,91 @@
policy:
  approval:
    - or:
        - only test files are changed
        - only proto files are changed
        - touches consensus critical code
        - touches sync/blockchain/p2p code
        - large line count
        - is critical priority

approval_rules:
  - name: only test files are changed
    if:
      only_changed_files:
        paths:
          - "*_test.go"
          - "*.bazel"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 1
      teams:
        - "prysmaticlabs/core-team"
  - name: only proto files are changed
    if:
      only_changed_files:
        paths:
          - "*pb.go"
          - "*pb.gw.go"
          - "*.bazel"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 1
      teams:
        - "prysmaticlabs/core-team"
  - name: touches consensus critical code
    if:
      only_changed_files:
        paths:
          - "beacon-chain/core/*"
          - "beacon-chain/state/*"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 2
      teams:
        - "prysmaticlabs/core-team"
  - name: touches sync/blockchain/p2p code
    if:
      only_changed_files:
        paths:
          - "beacon-chain/blockchain/*"
          - "beacon-chain/sync/*"
          - "beacon-chain/p2p/*"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 2
      teams:
        - "prysmaticlabs/core-team"
  - name: large line count
    if:
      modified_lines:
        total: "> 1000"
      changed_files:
        ignore:
          - "*pb.go"
          - "*pb.gw.go"
          - "*.bazel"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 2
      teams:
        - "prysmaticlabs/core-team"
  - name: is critical priority
    if:
      has_labels:
        - "Priority: Critical"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 3
      teams:
        - "prysmaticlabs/core-team"
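Read together, the policy above approves a pull request once any one of the listed rules is satisfied: test-only or generated-proto-only changes need a single review from prysmaticlabs/core-team, changes limited to beacon-chain/core, beacon-chain/state, beacon-chain/blockchain, beacon-chain/sync, or beacon-chain/p2p (or any diff exceeding 1000 modified non-generated lines) need two, and anything labeled "Priority: Critical" needs three, with commits authored by bulldozer[bot] ignored when evaluating each rule.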
TERMS_OF_SERVICE.md (new file, 45)
@@ -0,0 +1,45 @@
## Terms of Use

Effective as of Oct 14, 2020

By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.

### About Prysm

Prysm is a client implementation for Ethereum 2.0 Phase 0 protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.

### Licensing Terms

Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.

### Risks of Operating Prysm

The use of Prysm and acting as a validator on the Ethereum 2.0 network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with local laws.
Some Internet plans will charge an additional amount for any excess upload bandwidth used that isn’t included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to such limitations and monitor your bandwidth use so that you can stop Prysm before you reach your upload limit.

### Warranty Disclaimer

PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. PRYSMATIC LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.

### Limitation of Liability

In no event will Prysmatic Labs or any of its contributors be liable, whether in contract, warranty, tort (including negligence, whether active, passive or imputed), product liability, strict liability or other theory, breach of statutory duty or otherwise arising out of, or in connection with, your use of Prysm, for any direct, indirect, incidental, special or consequential damages (including any loss of profits or data, business interruption or other pecuniary loss, or damage, loss or other compromise of data, in each case whether direct, indirect, incidental, special or consequential) arising out of use Prysm, even if we or other users have been advised of the possibility of such damages. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by applicable law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.

### Indemnification

To the maximum extent permitted by law, you will defend, indemnify and hold Prysmatic Labs and its contributors harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction on Prysm), as well as any and all losses, liabilities,
damages, costs, and expenses (including reasonable attorneys’ fees) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.

### Compliance with Laws and Tax Obligations

Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal,
regional, state, county, municipal or other governmental authority and you agree to comply with all such laws in your use of Prysm. The users of Prysm are solely responsible to determinate what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.

### Miscellaneous

These Terms will be construed and enforced in accordance with the laws of the state of Illinois as applied to agreements entered into and completely performed in Illinois. You agree to the personal jurisdiction by and venue in Illinois and waive any objection to such jurisdiction or venue.
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your continued use of Prysm constitutes acceptance of such revised Terms.
These Terms constitute the entire agreement between you and Prysmatic Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.
If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party’s right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.
WORKSPACE (23)

@@ -161,6 +161,10 @@ load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

gazelle_dependencies()

load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")

go_embed_data_dependencies()

load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")

gometalinter_dependencies()

@@ -215,8 +219,8 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "7e5f838e0f9110471ef8be9401ea687a8ed4d499664dc0eac34ecfdfd03c2ac3",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.12.3/general.tar.gz",
    sha256 = "6b3498001de98c477aa2c256beffc20a85ce1b12b8e0f8e88502a5c3a18c01de",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0-rc.0/general.tar.gz",
)

http_archive(

@@ -336,6 +340,21 @@ go_binary(
    urls = ["https://github.com/ferranbt/fastssz/archive/06015a5d84f9e4eefe2c21377ca678fa8f1a1b09.tar.gz"],
)

http_archive(
    name = "prysm_web_ui",
    build_file_content = """
filegroup(
    name = "site",
    srcs = glob(["**/*"]),
    visibility = ["//visibility:public"],
)
""",
    sha256 = "6bb16ff0dc9348090cc31a9ea453643d32b617e66ac6e7bb38985d530070631b",
    urls = [
        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/0.0.2-alpha/prysm-web-ui.tar.gz",
    ],
)

load("//:deps.bzl", "prysm_deps")

# gazelle:repository_macro deps.bzl%prysm_deps

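A note on the spec-tests hunk above: Bazel's http_archive verifies the downloaded tarball against the pinned sha256, so moving the URL from the v0.12.3 release to v1.0.0-rc.0 has to update the digest in the same change. If needed, the new value can be reproduced by downloading the release asset and running `sha256sum general.tar.gz` (a standard tool, not something this diff introduces).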
@@ -19,15 +19,16 @@ go_library(
        "//shared/cmd:go_default_library",
        "//shared/debug:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/journald:go_default_library",
        "//shared/logutil:go_default_library",
        "//shared/maxprocs:go_default_library",
        "//shared/tos:go_default_library",
        "//shared/version:go_default_library",
        "@com_github_ethereum_go_ethereum//log:go_default_library",
        "@com_github_ipfs_go_log_v2//:go_default_library",
        "@com_github_joonix_log//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
        "@com_github_urfave_cli_v2//altsrc:go_default_library",
        "@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
    ],
)

@@ -53,19 +54,20 @@ go_image(
    deps = [
        "//beacon-chain/flags:go_default_library",
        "//beacon-chain/node:go_default_library",
        "//shared/tos:go_default_library",
        "//shared/cmd:go_default_library",
        "//shared/debug:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/journald:go_default_library",
        "//shared/logutil:go_default_library",
        "//shared/maxprocs:go_default_library",
        "//shared/version:go_default_library",
        "@com_github_ethereum_go_ethereum//log:go_default_library",
        "@com_github_ipfs_go_log_v2//:go_default_library",
        "@com_github_joonix_log//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
        "@com_github_urfave_cli_v2//altsrc:go_default_library",
        "@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
        "//shared/maxprocs:go_default_library",
    ],
)

@@ -5,7 +5,6 @@ go_library(
    name = "go_default_library",
    srcs = [
        "chain_info.go",
        "checkpoint_info_cache.go",
        "head.go",
        "info.go",
        "init_sync_process_block.go",

@@ -51,13 +50,11 @@ go_library(
        "//shared/bls:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/params:go_default_library",
        "//shared/slotutil:go_default_library",
        "//shared/timeutils:go_default_library",
        "//shared/traceutil:go_default_library",
        "@com_github_emicklei_dot//:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",

@@ -80,9 +77,9 @@ go_test(
    size = "medium",
    srcs = [
        "chain_info_test.go",
        "checkpoint_info_cache_test.go",
        "head_test.go",
        "info_test.go",
        "metrics_test.go",
        "process_attestation_test.go",
        "process_block_test.go",
        "receive_attestation_test.go",
@@ -97,6 +97,9 @@ func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {

// HeadSlot returns the slot of the head of the chain.
func (s *Service) HeadSlot() uint64 {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    if !s.hasHeadState() {
        return 0
    }

@@ -106,6 +109,9 @@ func (s *Service) HeadSlot() uint64 {

// HeadRoot returns the root of the head of the chain.
func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    if s.headRoot() != params.BeaconConfig().ZeroHash {
        r := s.headRoot()
        return r[:], nil

@@ -131,6 +137,9 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
// If the head is nil from service struct,
// it will attempt to get the head block from DB.
func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    if s.hasHeadState() {
        return s.headBlock(), nil
    }

@@ -144,6 +153,8 @@ func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, erro
func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.HeadState")
    defer span.End()
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    ok := s.hasHeadState()
    span.AddAttributes(trace.BoolAttribute("cache_hit", ok))

@@ -152,11 +163,14 @@ func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
        return s.headState(ctx), nil
    }

    return s.beaconDB.HeadState(ctx)
    return s.stateGen.StateByRoot(ctx, s.headRoot())
}

// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    if !s.hasHeadState() {
        return []uint64{}, nil
    }

@@ -165,6 +179,9 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]ui

// HeadSeed returns the seed from the head view of a given epoch.
func (s *Service) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    if !s.hasHeadState() {
        return [32]byte{}, nil
    }

@@ -174,6 +191,9 @@ func (s *Service) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error)

// HeadGenesisValidatorRoot returns genesis validator root of the head state.
func (s *Service) HeadGenesisValidatorRoot() [32]byte {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    if !s.hasHeadState() {
        return [32]byte{}
    }

@@ -183,6 +203,9 @@ func (s *Service) HeadGenesisValidatorRoot() [32]byte {

// HeadETH1Data returns the eth1data of the current head state.
func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    if !s.hasHeadState() {
        return &ethpb.Eth1Data{}
    }

@@ -202,6 +225,9 @@ func (s *Service) GenesisTime() time.Time {
// GenesisValidatorRoot returns the genesis validator
// root of the chain.
func (s *Service) GenesisValidatorRoot() [32]byte {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    if !s.hasHeadState() {
        return [32]byte{}
    }

@@ -210,6 +236,9 @@ func (s *Service) GenesisValidatorRoot() [32]byte {

// CurrentFork retrieves the latest fork information of the beacon chain.
func (s *Service) CurrentFork() *pb.Fork {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    if !s.hasHeadState() {
        return &pb.Fork{
            PreviousVersion: params.BeaconConfig().GenesisForkVersion,

@@ -226,9 +255,6 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
        return true, nil
    }

    // If the block has not been finalized, the block must be recent. Check recent canonical roots
    // mapping which uses proto array fork choice.
    s.recentCanonicalBlocksLock.RLock()
    defer s.recentCanonicalBlocksLock.RUnlock()
    return s.recentCanonicalBlocks[blockRoot], nil
    // If the block has not been finalized, check fork choice store to see if the block is canonical
    return s.forkChoiceStore.IsCanonical(blockRoot), nil
}

@@ -18,9 +18,9 @@ import (
)

// Ensure Service implements chain info interface.
var _ = ChainInfoFetcher(&Service{})
var _ = TimeFetcher(&Service{})
var _ = ForkFetcher(&Service{})
var _ ChainInfoFetcher = (*Service)(nil)
var _ TimeFetcher = (*Service)(nil)
var _ ForkFetcher = (*Service)(nil)

func TestFinalizedCheckpt_Nil(t *testing.T) {
    db, sc := testDB.SetupDB(t)

@@ -103,13 +103,15 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
    s, err := state.InitializeFromProto(&pb.BeaconState{})
    require.NoError(t, err)
    c.head = &head{slot: 100, state: s}
    assert.Equal(t, uint64(100), c.headSlot())
    assert.Equal(t, uint64(100), c.HeadSlot())
}

func TestHeadRoot_CanRetrieve(t *testing.T) {
    c := &Service{}
    c.head = &head{root: [32]byte{'A'}}
    assert.Equal(t, [32]byte{'A'}, c.headRoot())
    r, err := c.HeadRoot(context.Background())
    require.NoError(t, err)
    assert.Equal(t, [32]byte{'A'}, bytesutil.ToBytes32(r))
}

func TestHeadBlock_CanRetrieve(t *testing.T) {

@@ -179,3 +181,23 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
        t.Error("Received incorrect eth1 data")
    }
}

func TestIsCanonical_Ok(t *testing.T) {
    ctx := context.Background()
    db, sc := testDB.SetupDB(t)
    c := setupBeaconChain(t, db, sc)

    blk := testutil.NewBeaconBlock()
    blk.Block.Slot = 0
    root, err := blk.Block.HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, db.SaveBlock(ctx, blk))
    require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
    can, err := c.IsCanonical(ctx, root)
    require.NoError(t, err)
    assert.Equal(t, true, can)

    can, err = c.IsCanonical(ctx, [32]byte{'a'})
    require.NoError(t, err)
    assert.Equal(t, false, can)
}

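The assertion hunk at the top of the section above swaps ChainInfoFetcher(&Service{}) for the (*Service)(nil) form of Go's compile-time interface check. A minimal, self-contained sketch of the idiom, with illustrative names rather than Prysm's:

package main

// Compile-time check: assigning a typed nil pointer to a blank interface
// variable fails to build unless *Service satisfies Fetcher, and unlike the
// older Fetcher(&Service{}) form it does not construct a value just to make
// the assertion.
type Fetcher interface {
    HeadSlot() uint64
}

type Service struct {
    slot uint64
}

func (s *Service) HeadSlot() uint64 { return s.slot }

var _ Fetcher = (*Service)(nil) // breaks the build if the method set drifts

func main() {}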
@@ -1,83 +0,0 @@
package blockchain

import (
    "sync"

    lru "github.com/hashicorp/golang-lru"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    "github.com/prysmaticlabs/prysm/shared/hashutil"
)

var (
    // This defines the max number of checkpoint info this cache can store.
    // Each cache is calculated at 3MB(30K validators), the total cache size is around 100MB.
    // Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
    maxInfoSize = 32

    // This tracks the number of check point info requests that aren't present in the cache.
    infoMiss = promauto.NewCounter(prometheus.CounterOpts{
        Name: "check_point_info_cache_miss",
        Help: "The number of check point info requests that aren't present in the cache.",
    })
    // This tracks the number of check point info requests that are in the cache.
    infoHit = promauto.NewCounter(prometheus.CounterOpts{
        Name: "check_point_info_cache_hit",
        Help: "The number of check point info requests that are present in the cache.",
    })
)

// checkPtInfoCache is a struct with 1 LRU cache for looking up check point info by checkpoint.
type checkPtInfoCache struct {
    cache *lru.Cache
    lock  sync.RWMutex
}

// newCheckPointInfoCache creates a new checkpoint info cache for storing/accessing processed check point info object.
func newCheckPointInfoCache() *checkPtInfoCache {
    cache, err := lru.New(maxInfoSize)
    if err != nil {
        panic(err)
    }
    return &checkPtInfoCache{
        cache: cache,
    }
}

// get fetches check point info by check point. Returns the reference of the CheckPtInfo, nil if doesn't exist.
func (c *checkPtInfoCache) get(cp *ethpb.Checkpoint) (*pb.CheckPtInfo, error) {
    c.lock.RLock()
    defer c.lock.RUnlock()
    h, err := hashutil.HashProto(cp)
    if err != nil {
        return nil, err
    }

    item, exists := c.cache.Get(h)

    if exists && item != nil {
        infoHit.Inc()
        // Copy here is unnecessary since the returned check point info object
        // will only be used to verify attestation signature.
        return item.(*pb.CheckPtInfo), nil
    }

    infoMiss.Inc()
    return nil, nil
}

// put adds CheckPtInfo object to the cache. This method also trims the least
// recently added CheckPtInfo object if the cache size has ready the max cache size limit.
func (c *checkPtInfoCache) put(cp *ethpb.Checkpoint, info *pb.CheckPtInfo) error {
    c.lock.Lock()
    defer c.lock.Unlock()
    h, err := hashutil.HashProto(cp)
    if err != nil {
        return err
    }

    c.cache.Add(h, info)
    return nil
}
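For the sizing comment in the removed cache above, the arithmetic works out as stated: 32 entries at roughly 3 MB each (one entry per checkpoint at about 30K validators) is roughly 96 MB, which is the "around 100MB" figure the comment cites.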
@@ -1,41 +0,0 @@
package blockchain

import (
    "testing"

    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestHotStateCache_RoundTrip(t *testing.T) {
    c := newCheckPointInfoCache()
    cp := &ethpb.Checkpoint{
        Epoch: 1,
        Root:  bytesutil.PadTo([]byte{'a'}, 32),
    }
    info, err := c.get(cp)
    require.NoError(t, err)
    require.Equal(t, (*pb.CheckPtInfo)(nil), info)

    i := &pb.CheckPtInfo{
        Seed:          bytesutil.PadTo([]byte{'c'}, 32),
        GenesisRoot:   bytesutil.PadTo([]byte{'d'}, 32),
        ActiveIndices: []uint64{0, 1, 2, 3},
    }

    require.NoError(t, c.put(cp, i))
    info, err = c.get(cp)
    require.NoError(t, err)
    require.DeepEqual(t, info, i)
}

func TestHotStateCache_CanPrune(t *testing.T) {
    c := newCheckPointInfoCache()
    for i := 0; i < maxInfoSize+1; i++ {
        cp := &ethpb.Checkpoint{Epoch: uint64(i), Root: make([]byte, 32)}
        require.NoError(t, c.put(cp, &pb.CheckPtInfo{}))
    }
    require.Equal(t, len(c.cache.Keys()), maxInfoSize)
}
@@ -11,7 +11,6 @@ import (
    statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/params"

@@ -24,7 +23,7 @@ type head struct {
    slot  uint64                   // current head slot.
    root  [32]byte                 // current head root.
    block *ethpb.SignedBeaconBlock // current head block.
    state *state.BeaconState       // current head state.
    state *stateTrie.BeaconState   // current head state.
}

// Determined the head from the fork choice service and saves its new data

@@ -52,12 +51,24 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
    if headStartRoot == params.BeaconConfig().ZeroHash {
        headStartRoot = s.genesisRoot
    }
    headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
    if err != nil {
        return err

    // In order to process head, fork choice store requires justified info.
    // If the fork choice store is missing justified block info, a node should
    // re-initiate fork choice store using the latest justified info.
    // This recovers a fatal condition and should not happen in run time.
    if !s.forkChoiceStore.HasNode(headStartRoot) {
        jb, err := s.beaconDB.Block(ctx, headStartRoot)
        if err != nil {
            return err
        }
        s.forkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
        if err := s.insertBlockToForkChoiceStore(ctx, jb.Block, headStartRoot, f, j); err != nil {
            return err
        }
    }

    if err := s.updateRecentCanonicalBlocks(ctx, headRoot); err != nil {
    headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
    if err != nil {
        return err
    }

@@ -72,7 +83,11 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
    defer span.End()

    // Do nothing if head hasn't changed.
    if headRoot == s.headRoot() {
    r, err := s.HeadRoot(ctx)
    if err != nil {
        return err
    }
    if headRoot == bytesutil.ToBytes32(r) {
        return nil
    }

@@ -101,16 +116,17 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
    }

    // A chain re-org occurred, so we fire an event notifying the rest of the services.
    if bytesutil.ToBytes32(newHeadBlock.Block.ParentRoot) != s.headRoot() {
    headSlot := s.HeadSlot()
    if bytesutil.ToBytes32(newHeadBlock.Block.ParentRoot) != bytesutil.ToBytes32(r) {
        log.WithFields(logrus.Fields{
            "newSlot": fmt.Sprintf("%d", newHeadBlock.Block.Slot),
            "oldSlot": fmt.Sprintf("%d", s.headSlot()),
            "oldSlot": fmt.Sprintf("%d", headSlot),
        }).Debug("Chain reorg occurred")
        s.stateNotifier.StateFeed().Send(&feed.Event{
            Type: statefeed.Reorg,
            Data: &statefeed.ReorgData{
                NewSlot: newHeadBlock.Block.Slot,
                OldSlot: s.headSlot(),
                OldSlot: headSlot,
            },
        })

@@ -131,7 +147,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte, hs *state.BeaconState) error {
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte, hs *stateTrie.BeaconState) error {
    cachedHeadRoot, err := s.HeadRoot(ctx)
    if err != nil {
        return errors.Wrap(err, "could not get head root from cache")

@@ -149,7 +165,7 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock,
}

// This sets head view object which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *state.BeaconState) {
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
    s.headLock.Lock()
    defer s.headLock.Unlock()

@@ -165,7 +181,7 @@ func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlock, state *state.BeaconState) {
func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
    s.headLock.Lock()
    defer s.headLock.Unlock()

@@ -179,89 +195,57 @@ func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlo
}

// This returns the head slot.
// This is a lock free version.
func (s *Service) headSlot() uint64 {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    return s.head.slot
}

// This returns the head root.
// It does a full copy on head root for immutability.
// This is a lock free version.
func (s *Service) headRoot() [32]byte {
    if s.head == nil {
        return params.BeaconConfig().ZeroHash
    }

    s.headLock.RLock()
    defer s.headLock.RUnlock()

    return s.head.root
}

// This returns the head block.
// It does a full copy on head block for immutability.
// This is a lock free version.
func (s *Service) headBlock() *ethpb.SignedBeaconBlock {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    return stateTrie.CopySignedBeaconBlock(s.head.block)
}

// This returns the head state.
// It does a full copy on head state for immutability.
// This is a lock free version.
func (s *Service) headState(ctx context.Context) *stateTrie.BeaconState {
    ctx, span := trace.StartSpan(ctx, "blockChain.headState")
    defer span.End()

    s.headLock.RLock()
    defer s.headLock.RUnlock()

    return s.head.state.Copy()
}

// This returns the genesis validator root of the head state.
// This is a lock free version.
func (s *Service) headGenesisValidatorRoot() [32]byte {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}

// Returns true if head state exists.
func (s *Service) hasHeadState() bool {
// HasHeadState returns true if head state exists.
func (s *Service) HasHeadState() bool {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    return s.head != nil && s.head.state != nil
    return s.hasHeadState()
}

// This updates recent canonical block mapping. It uses input head root and retrieves
// all the canonical block roots that are ancestor of the input head block root.
func (s *Service) updateRecentCanonicalBlocks(ctx context.Context, headRoot [32]byte) error {
    ctx, span := trace.StartSpan(ctx, "blockChain.updateRecentCanonicalBlocks")
    defer span.End()

    s.recentCanonicalBlocksLock.Lock()
    defer s.recentCanonicalBlocksLock.Unlock()

    s.recentCanonicalBlocks = make(map[[32]byte]bool)
    s.recentCanonicalBlocks[headRoot] = true
    nodes := s.forkChoiceStore.Nodes()
    node := s.forkChoiceStore.Node(headRoot)
    if node == nil {
        return nil
    }

    for node.Parent() != protoarray.NonExistentNode {
        if ctx.Err() != nil {
            return ctx.Err()
        }
        node = nodes[node.Parent()]
        s.recentCanonicalBlocks[node.Root()] = true
    }

    return nil
// Returns true if head state exists.
// This is the lock free version.
func (s *Service) hasHeadState() bool {
    return s.head != nil && s.head.state != nil
}

// This caches justified state balances to be used for fork choice.

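The exported HasHeadState wrapper introduced above follows a common Go pattern: the public method takes the read lock and then delegates to a lock-free private helper that code already holding the lock can call directly. A minimal sketch under that assumption, with illustrative names rather than Prysm's:

package main

import (
    "fmt"
    "sync"
)

type service struct {
    mu   sync.RWMutex
    head *int
}

// HasHead is the exported, locking entry point.
func (s *service) HasHead() bool {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.hasHead()
}

// hasHead is the lock-free version for callers that already hold mu.
func (s *service) hasHead() bool { return s.head != nil }

func main() {
    s := &service{}
    fmt.Println(s.HasHead()) // false until a head is set
}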
@@ -5,6 +5,7 @@ import (
    "context"
    "testing"

    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    "github.com/prysmaticlabs/prysm/shared/testutil"

@@ -93,39 +94,6 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
    require.LogsContain(t, hook, "Chain reorg occurred")
}

func TestUpdateRecentCanonicalBlocks_CanUpdateWithoutParent(t *testing.T) {
    db, sc := testDB.SetupDB(t)
    service := setupBeaconChain(t, db, sc)

    r := [32]byte{'a'}
    require.NoError(t, service.updateRecentCanonicalBlocks(context.Background(), r))
    canonical, err := service.IsCanonical(context.Background(), r)
    require.NoError(t, err)
    assert.Equal(t, true, canonical, "Block should be canonical")
}

func TestUpdateRecentCanonicalBlocks_CanUpdateWithParent(t *testing.T) {
    db, sc := testDB.SetupDB(t)
    service := setupBeaconChain(t, db, sc)
    oldHead := [32]byte{'a'}
    require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 1, oldHead, [32]byte{'g'}, [32]byte{}, 0, 0))
    currentHead := [32]byte{'b'}
    require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 3, currentHead, oldHead, [32]byte{}, 0, 0))
    forkedRoot := [32]byte{'c'}
    require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 2, forkedRoot, oldHead, [32]byte{}, 0, 0))

    require.NoError(t, service.updateRecentCanonicalBlocks(context.Background(), currentHead))
    canonical, err := service.IsCanonical(context.Background(), currentHead)
    require.NoError(t, err)
    assert.Equal(t, true, canonical, "Block should be canonical")
    canonical, err = service.IsCanonical(context.Background(), oldHead)
    require.NoError(t, err)
    assert.Equal(t, true, canonical, "Block should be canonical")
    canonical, err = service.IsCanonical(context.Background(), forkedRoot)
    require.NoError(t, err)
    assert.Equal(t, false, canonical, "Block should not be canonical")
}

func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
    db, sc := testDB.SetupDB(t)
    service := setupBeaconChain(t, db, sc)

@@ -137,3 +105,19 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
    require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
    require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
}

func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
    db, sc := testDB.SetupDB(t)
    service := setupBeaconChain(t, db, sc)

    b := testutil.NewBeaconBlock()
    require.NoError(t, service.beaconDB.SaveBlock(context.Background(), b))
    r, err := b.Block.HashTreeRoot()
    require.NoError(t, err)

    service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
    service.finalizedCheckpt = &ethpb.Checkpoint{}
    service.bestJustifiedCheckpt = &ethpb.Checkpoint{}

    require.NoError(t, service.updateHead(context.Background(), []uint64{}))
}

@@ -34,7 +34,12 @@ const template = `<html>

// TreeHandler is a handler to serve /tree page in metrics.
func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
    if s.headState(r.Context()) == nil {
    headState, err := s.HeadState(r.Context())
    if err != nil {
        log.WithError(err).Error("Could not get head state")
        return
    }
    if headState == nil {
        if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
            log.WithError(err).Error("Failed to render p2p info page")
        }

@@ -47,7 +52,7 @@ func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
    graph.Attr("labeljust", "l")

    dotNodes := make([]*dot.Node, len(nodes))
    avgBalance := uint64(averageBalance(s.headState(r.Context()).Balances()))
    avgBalance := uint64(averageBalance(headState.Balances()))

    for i := len(nodes) - 1; i >= 0; i-- {
        // Construct label for each node.

@@ -63,7 +68,7 @@ func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
            dotN = graph.Node(index).Box().Attr("label", label)
        }

        if nodes[i].Slot() == s.headSlot() &&
        if nodes[i].Slot() == s.HeadSlot() &&
            nodes[i].BestDescendant() == ^uint64(0) &&
            nodes[i].Parent() != ^uint64(0) {
            dotN = dotN.Attr("color", "green")

@@ -1,17 +1,15 @@
package blockchain

import (
    "time"
    "context"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/timeutils"
)

var (

@@ -100,13 +98,6 @@ var (
        Name: "beacon_reorg_total",
        Help: "Count the number of times beacon chain has a reorg",
    })
    sentBlockPropagationHistogram = promauto.NewHistogram(
        prometheus.HistogramOpts{
            Name:    "block_sent_latency_milliseconds",
            Help:    "Captures blocks broadcast time. Blocks sent in milliseconds distribution",
            Buckets: []float64{1000, 2000, 3000, 4000, 5000, 6000},
        },
    )
    attestationInclusionDelay = promauto.NewHistogram(
        prometheus.HistogramOpts{
            Name: "attestation_inclusion_delay_slots",

@@ -114,19 +105,10 @@ var (
            Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
        },
    )
    // TODO(7141): Remove deprecated metrics below.
    totalEligibleBalances = promauto.NewGauge(prometheus.GaugeOpts{
        Name: "total_eligible_balances",
        Help: "The total amount of ether, in gwei, that is eligible for voting of previous epoch",
    })
    totalVotedTargetBalances = promauto.NewGauge(prometheus.GaugeOpts{
        Name: "total_voted_target_balances",
        Help: "The total amount of ether, in gwei, that has been used in voting attestation target of previous epoch",
    })
)

// reportSlotMetrics reports slot related metrics.
func reportSlotMetrics(stateSlot uint64, headSlot uint64, clockSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
func reportSlotMetrics(stateSlot, headSlot, clockSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
    clockTimeSlot.Set(float64(clockSlot))
    beaconSlot.Set(float64(stateSlot))
    beaconHeadSlot.Set(float64(headSlot))

@@ -137,8 +119,8 @@ func reportSlotMetrics(stateSlot uint64, headSlot uint64, clockSlot uint64, fina
}

// reportEpochMetrics reports epoch related metrics.
func reportEpochMetrics(state *stateTrie.BeaconState) {
    currentEpoch := state.Slot() / params.BeaconConfig().SlotsPerEpoch
func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.BeaconState) error {
    currentEpoch := postState.Slot() / params.BeaconConfig().SlotsPerEpoch

    // Validator instances
    pendingInstances := 0

@@ -156,8 +138,8 @@ func reportEpochMetrics(state *stateTrie.BeaconState) {
    slashingBalance := uint64(0)
    slashingEffectiveBalance := uint64(0)

    for i, validator := range state.Validators() {
        bal, err := state.BalanceAtIndex(uint64(i))
    for i, validator := range postState.Validators() {
        bal, err := postState.BalanceAtIndex(uint64(i))
        if err != nil {
            log.Errorf("Could not load validator balance: %v", err)
            continue

@@ -206,41 +188,36 @@ func reportEpochMetrics(state *stateTrie.BeaconState) {
    validatorsEffectiveBalance.WithLabelValues("Slashing").Set(float64(slashingEffectiveBalance))

    // Last justified slot
    beaconCurrentJustifiedEpoch.Set(float64(state.CurrentJustifiedCheckpoint().Epoch))
    beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.CurrentJustifiedCheckpoint().Root)))
    beaconCurrentJustifiedEpoch.Set(float64(postState.CurrentJustifiedCheckpoint().Epoch))
    beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(postState.CurrentJustifiedCheckpoint().Root)))

    // Last previous justified slot
    beaconPrevJustifiedEpoch.Set(float64(state.PreviousJustifiedCheckpoint().Epoch))
    beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.PreviousJustifiedCheckpoint().Root)))
    beaconPrevJustifiedEpoch.Set(float64(postState.PreviousJustifiedCheckpoint().Epoch))
    beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(postState.PreviousJustifiedCheckpoint().Root)))

    // Last finalized slot
    beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpointEpoch()))
    beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint().Root)))
    beaconFinalizedEpoch.Set(float64(postState.FinalizedCheckpointEpoch()))
    beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(postState.FinalizedCheckpoint().Root)))
    currentEth1DataDepositCount.Set(float64(postState.Eth1Data().DepositCount))

    currentEth1DataDepositCount.Set(float64(state.Eth1Data().DepositCount))

    if precompute.Balances != nil {
        totalEligibleBalances.Set(float64(precompute.Balances.ActivePrevEpoch))
        totalVotedTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttested))
        prevEpochActiveBalances.Set(float64(precompute.Balances.ActivePrevEpoch))
        prevEpochSourceBalances.Set(float64(precompute.Balances.PrevEpochAttested))
        prevEpochTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttested))
        prevEpochHeadBalances.Set(float64(precompute.Balances.PrevEpochHeadAttested))
    }
    refMap := state.FieldReferencesCount()
    for name, val := range refMap {
        stateTrieReferences.WithLabelValues(name).Set(float64(val))
    }
}

// This captures metrics for block sent time by subtracts slot start time.
func captureSentTimeMetric(genesisTime uint64, currentSlot uint64) error {
    startTime, err := helpers.SlotToTime(genesisTime, currentSlot)
    // Validator participation should be viewed on the canonical chain.
    v, b, err := precompute.New(ctx, headState)
    if err != nil {
        return err
    }
    diffMs := timeutils.Now().Sub(startTime) / time.Millisecond
    sentBlockPropagationHistogram.Observe(float64(diffMs))
    _, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
    if err != nil {
        return err
    }
    prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
    prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
    prevEpochTargetBalances.Set(float64(b.PrevEpochTargetAttested))
    prevEpochHeadBalances.Set(float64(b.PrevEpochHeadAttested))

    refMap := postState.FieldReferencesCount()
    for name, val := range refMap {
        stateTrieReferences.WithLabelValues(name).Set(float64(val))
    }

    return nil
}

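The metrics above are registered through promauto from prometheus/client_golang. A minimal, standalone sketch of that pattern, with a made-up metric name for illustration:

package main

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

// promauto.NewHistogram registers the collector with the default registry,
// so declaring the package-level variable is enough to make it scrapeable.
var exampleInclusionDelay = promauto.NewHistogram(prometheus.HistogramOpts{
    Name:    "example_attestation_inclusion_delay_slots", // illustrative name, not Prysm's
    Help:    "Example histogram using the same bucket layout as the diff above.",
    Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
})

func main() {
    exampleInclusionDelay.Observe(2) // record an inclusion delay of two slots
}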
beacon-chain/blockchain/metrics_test.go (new file, 26)

@@ -0,0 +1,26 @@
package blockchain

import (
    "context"
    "testing"

    pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    "github.com/prysmaticlabs/prysm/shared/testutil"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestReportEpochMetrics_BadHeadState(t *testing.T) {
    s := testutil.NewBeaconState()
    h := testutil.NewBeaconState()
    require.NoError(t, h.SetValidators(nil))
    err := reportEpochMetrics(context.Background(), s, h)
    require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
}

func TestReportEpochMetrics_BadAttestation(t *testing.T) {
    s := testutil.NewBeaconState()
    h := testutil.NewBeaconState()
    require.NoError(t, h.SetCurrentEpochAttestations([]*pb.PendingAttestation{{InclusionDelay: 0}}))
    err := reportEpochMetrics(context.Background(), s, h)
    require.ErrorContains(t, "attestation with inclusion delay of 0", err)
}
@@ -8,10 +8,7 @@ import (
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/attestationutil"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/timeutils"
	"go.opencensus.io/trace"
@@ -69,53 +66,6 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]ui
		return nil, ErrTargetRootNotInDB
	}

	if featureconfig.Get().UseCheckPointInfoCache {
		c, err := s.AttestationCheckPtInfo(ctx, a)
		if err != nil {
			return nil, err
		}
		if err := s.verifyAttTargetEpoch(ctx, uint64(s.genesisTime.Unix()), uint64(timeutils.Now().Unix()), tgt); err != nil {
			return nil, err
		}
		if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
			return nil, errors.Wrap(err, "could not verify attestation beacon block")
		}
		if err := s.verifyLMDFFGConsistent(ctx, a.Data.Target.Epoch, a.Data.Target.Root, a.Data.BeaconBlockRoot); err != nil {
			return nil, errors.Wrap(err, "could not verify attestation beacon block")
		}
		if err := helpers.VerifySlotTime(uint64(s.genesisTime.Unix()), a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
			return nil, err
		}
		committee, err := helpers.BeaconCommittee(c.ActiveIndices, bytesutil.ToBytes32(c.Seed), a.Data.Slot, a.Data.CommitteeIndex)
		if err != nil {
			return nil, err
		}
		indexedAtt := attestationutil.ConvertToIndexed(ctx, a, committee)
		if err := attestationutil.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
			return nil, err
		}
		domain, err := helpers.Domain(c.Fork, indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, c.GenesisRoot)
		if err != nil {
			return nil, err
		}
		indices := indexedAtt.AttestingIndices
		pubkeys := []bls.PublicKey{}
		for i := 0; i < len(indices); i++ {
			pubkeyAtIdx := c.PubKeys[indices[i]]
			pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx)
			if err != nil {
				return nil, err
			}
			pubkeys = append(pubkeys, pk)
		}
		if err := attestationutil.VerifyIndexedAttestationSig(ctx, indexedAtt, pubkeys, domain); err != nil {
			return nil, err
		}
		s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)

		return indexedAtt.AttestingIndices, nil
	}

	// Retrieve attestation's data beacon block pre state. Advance pre state to latest epoch if necessary and
	// save it to the cache.
	baseState, err := s.getAttPreState(ctx, tgt)
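The block removed above verified attestations against cached checkpoint info by first converting the attestation and its committee into an indexed attestation. The core of that conversion is mapping aggregation bits onto committee positions. The sketch below is a simplified stand-in for attestationutil.ConvertToIndexed, not the Prysm implementation: the real code uses the go-bitfield Bitlist type (which carries a length bit), while this sketch treats the bits as a plain little-endian byte slice purely for illustration.

package attexample

// attestingIndices returns committee[i] for every set bit i in the aggregation
// bitfield, which is the committee-to-indices step performed in the code above.
func attestingIndices(bits []byte, committee []uint64) []uint64 {
	indices := make([]uint64, 0, len(committee))
	for i, validatorIdx := range committee {
		byteIdx, bitIdx := i/8, uint(i%8)
		if byteIdx < len(bits) && bits[byteIdx]&(1<<bitIdx) != 0 {
			indices = append(indices, validatorIdx)
		}
	}
	return indices
}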
@@ -7,11 +7,11 @@ import (

	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/attestationutil"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
@@ -19,10 +19,7 @@ import (

// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
	s.checkpointStateLock.Lock()
	defer s.checkpointStateLock.Unlock()

	cachedState, err := s.checkpointState.StateByCheckpoint(c)
	cachedState, err := s.checkpointStateCache.StateByCheckpoint(ctx, c)
	if err != nil {
		return nil, errors.Wrap(err, "could not get cached checkpoint state")
	}
@@ -30,103 +27,59 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
		return cachedState, nil
	}

	if err := s.checkpointStateCache.MarkInProgress(c); err != nil {
		if errors.Is(err, cache.ErrAlreadyInProgress) {
			cachedState, err = s.checkpointStateCache.StateByCheckpoint(ctx, c)
			if err != nil {
				return nil, errors.Wrap(err, "could not get cached checkpoint state")
			}
			if cachedState != nil {
				return cachedState, nil
			}
		} else {
			return nil, err
		}
	}
	defer func() {
		if err := s.checkpointStateCache.MarkNotInProgress(c); err != nil {
			log.WithError(err).Error("Failed to mark cache not in progress")
		}
	}()
	baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
	if err != nil {
		return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
	}

	epochStartSlot, err := helpers.StartSlot(c.Epoch)
	if err != nil {
		return nil, err
	}
	if epochStartSlot > baseState.Slot() {
		baseState = baseState.Copy()
		baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
		if err != nil {
			return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
		}
		if err := s.checkpointState.AddCheckpointState(c, baseState); err != nil {
		if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
			return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
		}
		return baseState, nil
	}

	has, err := s.stateGen.HasState(ctx, bytesutil.ToBytes32(c.Root))
	// To avoid sharing the same state across checkpoint state cache and hot state cache,
	// we don't add the state to check point cache.
	has, err := s.stateGen.HasStateInCache(ctx, bytesutil.ToBytes32(c.Root))
	if err != nil {
		return nil, err
	}
	if !has {
		if err := s.checkpointState.AddCheckpointState(c, baseState); err != nil {
		if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
			return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
		}
	}
	return baseState, nil

}

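The MarkInProgress / MarkNotInProgress calls introduced above implement a single-flight guard: the first caller computes the checkpoint state while concurrent callers either find it already cached or see the in-progress marker and re-check the cache. A minimal, generic sketch of that pattern follows; it is not Prysm's cache implementation, and the types and names are hypothetical.

package cacheexample

import (
	"errors"
	"sync"
)

var errAlreadyInProgress = errors.New("already in progress")

// inflightCache sketches the guard used by getAttPreState above: one goroutine
// marks a key in progress, computes the value, stores it, and unmarks it; others
// that hit the in-progress marker re-check the cache instead of recomputing.
type inflightCache struct {
	mu       sync.Mutex
	values   map[string][]byte
	inflight map[string]bool
}

func newInflightCache() *inflightCache {
	return &inflightCache{values: map[string][]byte{}, inflight: map[string]bool{}}
}

func (c *inflightCache) get(key string) ([]byte, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.values[key]
	return v, ok
}

func (c *inflightCache) markInProgress(key string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.inflight[key] {
		return errAlreadyInProgress
	}
	c.inflight[key] = true
	return nil
}

func (c *inflightCache) markDone(key string, val []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.values[key] = val
	delete(c.inflight, key)
}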
// getAttCheckPtInfo retrieves the check point info given a check point. Check point info enables the node
// to efficiently verify attestation signature without using beacon state. This function utilizes
// the checkpoint info cache and will update the check point info cache on miss.
func (s *Service) getAttCheckPtInfo(ctx context.Context, c *ethpb.Checkpoint, e uint64) (*pb.CheckPtInfo, error) {
	// Return checkpoint info if exists in cache.
	info, err := s.checkPtInfoCache.get(c)
	if err != nil {
		return nil, errors.Wrap(err, "could not get cached checkpoint state")
	}
	if info != nil {
		return info, nil
	}

	// Retrieve checkpoint state to compute checkpoint info.
	baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
	if err != nil {
		return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
	}
	epochStartSlot, err := helpers.StartSlot(c.Epoch)
	if err != nil {
		return nil, err
	}
	if epochStartSlot > baseState.Slot() {
		baseState = baseState.Copy()
		baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
		if err != nil {
			return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
		}
	}
	f := baseState.Fork()
	g := bytesutil.ToBytes32(baseState.GenesisValidatorRoot())
	seed, err := helpers.Seed(baseState, e, params.BeaconConfig().DomainBeaconAttester)
	if err != nil {
		return nil, err
	}
	indices, err := helpers.ActiveValidatorIndices(baseState, e)
	if err != nil {
		return nil, err
	}
	validators := baseState.ValidatorsReadOnly()
	pks := make([][]byte, len(validators))
	for i := 0; i < len(pks); i++ {
		pk := validators[i].PublicKey()
		pks[i] = pk[:]
	}

	// Cache and return the checkpoint info.
	info = &pb.CheckPtInfo{
		Fork:          f,
		GenesisRoot:   g[:],
		Seed:          seed[:],
		ActiveIndices: indices,
		PubKeys:       pks,
	}
	if err := s.checkPtInfoCache.put(c, info); err != nil {
		return nil, err
	}

	return info, nil
}

// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
func (s *Service) verifyAttTargetEpoch(ctx context.Context, genesisTime uint64, nowTime uint64, c *ethpb.Checkpoint) error {
func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
	currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
	currentEpoch := helpers.SlotToEpoch(currentSlot)
	var prevEpoch uint64
@@ -162,7 +115,7 @@ func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.Attestation
}

// verifyLMDFFGConsistent verifies LMD GHOST and FFG votes are consistent with each other.
func (s *Service) verifyLMDFFGConsistent(ctx context.Context, ffgEpoch uint64, ffgRoot []byte, lmdRoot []byte) error {
func (s *Service) verifyLMDFFGConsistent(ctx context.Context, ffgEpoch uint64, ffgRoot, lmdRoot []byte) error {
	ffgSlot, err := helpers.StartSlot(ffgEpoch)
	if err != nil {
		return err
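verifyAttTargetEpoch above accepts only attestations that target the current or previous epoch relative to wall-clock time. A self-contained sketch of that arithmetic follows; the slot and epoch constants are mainnet values assumed for illustration, and the helper name is hypothetical.

package epochexample

import "fmt"

const (
	secondsPerSlot = 12 // mainnet value, assumed here
	slotsPerEpoch  = 32 // mainnet value, assumed here
)

// verifyTargetEpoch mirrors the current-or-previous-epoch rule in
// verifyAttTargetEpoch above, using plain unix timestamps.
func verifyTargetEpoch(targetEpoch, genesisTime, nowTime uint64) error {
	currentSlot := (nowTime - genesisTime) / secondsPerSlot
	currentEpoch := currentSlot / slotsPerEpoch
	var prevEpoch uint64
	if currentEpoch > 1 {
		prevEpoch = currentEpoch - 1
	}
	if targetEpoch != currentEpoch && targetEpoch != prevEpoch {
		return fmt.Errorf("target epoch %d does not match current epoch %d or previous epoch %d",
			targetEpoch, currentEpoch, prevEpoch)
	}
	return nil
}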
@@ -2,11 +2,12 @@ package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
@@ -14,7 +15,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
@@ -130,118 +130,6 @@ func TestStore_OnAttestation(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_OnAttestationUsingCheckptCache(t *testing.T) {
|
||||
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{UseCheckPointInfoCache: true})
|
||||
defer resetCfg()
|
||||
|
||||
ctx := context.Background()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(db, sc),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = blockTree1(db, []byte{'g'})
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithOutState := testutil.NewBeaconBlock()
|
||||
BlkWithOutState.Block.Slot = 0
|
||||
require.NoError(t, db.SaveBlock(ctx, BlkWithOutState))
|
||||
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithStateBadAtt := testutil.NewBeaconBlock()
|
||||
BlkWithStateBadAtt.Block.Slot = 1
|
||||
require.NoError(t, db.SaveBlock(ctx, BlkWithStateBadAtt))
|
||||
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
s := testutil.NewBeaconState()
|
||||
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
|
||||
|
||||
BlkWithValidState := testutil.NewBeaconBlock()
|
||||
BlkWithValidState.Block.Slot = 2
|
||||
require.NoError(t, db.SaveBlock(ctx, BlkWithValidState))
|
||||
|
||||
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s = testutil.NewBeaconState()
|
||||
err = s.SetFork(&pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Attestation
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "attestation's data slot not aligned with target vote",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Root: make([]byte, 32)}}},
|
||||
wantedErr: "data slot is not in the same epoch as target 1 != 0",
|
||||
},
|
||||
{
|
||||
name: "attestation's target root not in db",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)}}},
|
||||
wantedErr: "target root does not exist in db",
|
||||
},
|
||||
{
|
||||
name: "no pre state for attestations's target block",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
|
||||
wantedErr: "could not get pre state for epoch 0",
|
||||
},
|
||||
{
|
||||
name: "process attestation doesn't match current epoch",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Epoch: 100,
|
||||
Root: BlkWithStateBadAttRoot[:]}}},
|
||||
wantedErr: "target epoch 100 does not match current epoch",
|
||||
},
|
||||
{
|
||||
name: "process nil attestation",
|
||||
a: nil,
|
||||
wantedErr: "nil attestation",
|
||||
},
|
||||
{
|
||||
name: "process nil field (a.Data) in attestation",
|
||||
a: ðpb.Attestation{},
|
||||
wantedErr: "nil attestation.Data field",
|
||||
},
|
||||
{
|
||||
name: "process nil field (a.Target) in attestation",
|
||||
a: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Target: nil,
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: make([]byte, 1),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
wantedErr: "nil attestation.Data.Target field",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := service.onAttestation(ctx, tt.a)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
@@ -292,11 +180,11 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
|
||||
|
||||
s1, err = service.checkpointState.StateByCheckpoint(cp1)
|
||||
s1, err = service.checkpointStateCache.StateByCheckpoint(ctx, cp1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
|
||||
|
||||
s2, err = service.checkpointState.StateByCheckpoint(cp2)
|
||||
s2, err = service.checkpointStateCache.StateByCheckpoint(ctx, cp2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
||||
|
||||
@@ -332,7 +220,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), checkpoint.Epoch*params.BeaconConfig().SlotsPerEpoch, "Incorrectly returned base state")
|
||||
|
||||
cached, err := service.checkpointState.StateByCheckpoint(checkpoint)
|
||||
cached, err := service.checkpointStateCache.StateByCheckpoint(ctx, checkpoint)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
|
||||
|
||||
@@ -347,13 +235,53 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), baseState.Slot(), "Incorrectly returned base state")
|
||||
|
||||
cached, err = service.checkpointState.StateByCheckpoint(newCheckpoint)
|
||||
cached, err = service.checkpointStateCache.StateByCheckpoint(ctx, newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(returned.InnerStateUnsafe(), cached.InnerStateUnsafe()) {
|
||||
t.Error("Incorrectly cached base state")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_CheckpointState_InProgress(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
StateGen: stategen.New(db, sc),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
epoch := uint64(1)
|
||||
baseState, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
require.NoError(t, service.checkpointStateCache.MarkInProgress(checkpoint))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
response, err := service.getAttPreState(ctx, checkpoint)
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(baseState.InnerStateUnsafe(), response.InnerStateUnsafe()) {
|
||||
t.Error("Expected equal responses from cache")
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
assert.NoError(t, service.checkpointStateCache.AddCheckpointState(checkpoint, baseState))
|
||||
assert.NoError(t, service.checkpointStateCache.MarkNotInProgress(checkpoint))
|
||||
}()
|
||||
|
||||
testutil.WaitTimeout(&wg, time.Second*2)
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
@@ -491,46 +419,86 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
assert.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
|
||||
}
|
||||
|
||||
func TestGetAttCheckptInfo(t *testing.T) {
|
||||
func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
cfg := &Config{BeaconDB: db, StateGen: stategen.New(db, cache.NewStateSummaryCache())}
|
||||
|
||||
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
baseState, _ := testutil.DeterministicGenesisState(t, 128)
|
||||
b := testutil.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, r))
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
|
||||
require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, r))
|
||||
checkpoint := ðpb.Checkpoint{Root: r[:]}
|
||||
|
||||
returned, err := service.getAttCheckPtInfo(ctx, checkpoint, 0)
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
seed, err := helpers.Seed(baseState, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
indices, err := helpers.ActiveValidatorIndices(baseState, 0)
|
||||
require.NoError(t, err)
|
||||
validators := baseState.ValidatorsReadOnly()
|
||||
pks := make([][]byte, len(validators))
|
||||
for i := 0; i < len(pks); i++ {
|
||||
pk := validators[i].PublicKey()
|
||||
pks[i] = pk[:]
|
||||
}
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1}
|
||||
|
||||
wanted := &pb.CheckPtInfo{
|
||||
Fork: baseState.Fork(),
|
||||
GenesisRoot: baseState.GenesisValidatorRoot(),
|
||||
Seed: seed[:],
|
||||
ActiveIndices: indices,
|
||||
PubKeys: pks,
|
||||
}
|
||||
require.DeepEqual(t, wanted, returned)
|
||||
|
||||
cached, err := service.checkPtInfoCache.get(checkpoint)
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.ErrorContains(t, "Root and finalized store are not consistent", err)
|
||||
}
|
||||
|
||||
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: r32[:]}
|
||||
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: r32[:]}
|
||||
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
|
||||
|
||||
_, err = service.forkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, wanted, cached)
|
||||
}
|
||||
|
||||
@@ -121,12 +121,26 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
		}

		// Update deposit cache.
		s.depositCache.InsertFinalizedDeposits(ctx, int64(postState.Eth1DepositIndex()))
		finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
		if err != nil {
			return errors.Wrap(err, "could not fetch finalized state")
		}
		// We update the cache up to the last deposit index in the finalized block's state.
		// We can be confident that these deposits will be included in some block
		// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
		eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
		s.depositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
		if featureconfig.Get().EnablePruningDepositProofs {
			// Deposit proofs are only used during state transition and can be safely removed to save space.
			if err = s.depositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
				return errors.Wrap(err, "could not prune deposit proofs")
			}
		}
	}

	defer reportAttestationInclusion(b)

	return s.handleEpochBoundary(postState)
	return s.handleEpochBoundary(ctx, postState)
}
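The new code above derives the finalized deposit index as DepositCount - 1 from the finalized state and uses it both to mark deposits finalized and to prune their proofs. A tiny sketch of that conversion follows; the explicit guard against a zero count is a defensive assumption added for the sketch, not part of the diff, which can rely on the genesis deposits being present.

package depositexample

// finalizedDepositIndex mirrors the eth1DepositIndex computation above:
// the last deposit known to the finalized state sits at index DepositCount-1.
func finalizedDepositIndex(depositCount uint64) (int64, bool) {
	if depositCount == 0 {
		return 0, false // nothing to finalize or prune yet
	}
	return int64(depositCount - 1), true
}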
|
||||
// onBlockInitialSyncStateTransition is called when an initial sync block is received.
|
||||
@@ -161,12 +175,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
|
||||
return nil
|
||||
}
|
||||
|
||||
var postState *stateTrie.BeaconState
|
||||
if featureconfig.Get().InitSyncNoVerify {
|
||||
postState, err = state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
|
||||
} else {
|
||||
postState, err = state.ExecuteStateTransition(ctx, preState, signed)
|
||||
}
|
||||
postState, err := state.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
@@ -200,7 +209,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
|
||||
}
|
||||
}
|
||||
|
||||
return s.handleEpochBoundary(postState)
|
||||
return s.handleEpochBoundary(ctx, postState)
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock,
@@ -245,8 +254,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
		// Save potential boundary states.
		if helpers.IsEpochStart(preState.Slot()) {
			boundaries[blockRoots[i]] = preState.Copy()
			if err := s.handleEpochBoundary(preState); err != nil {
				return nil, nil, fmt.Errorf("could not handle epoch boundary state")
			if err := s.handleEpochBoundary(ctx, preState); err != nil {
				return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
			}
		}
		jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
@@ -280,7 +289,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
// handles a block after the block's batch has been verified, where we can save blocks
// their state summaries and split them off to relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb.SignedBeaconBlock,
	blockRoot [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
	blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
	b := signed.Block

	s.saveInitSyncBlock(blockRoot, signed)
@@ -313,9 +322,11 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
}

// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(postState *stateTrie.BeaconState) error {
func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.BeaconState) error {
	if postState.Slot() >= s.nextEpochBoundarySlot {
		reportEpochMetrics(postState)
		if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
			return err
		}
		var err error
		s.nextEpochBoundarySlot, err = helpers.StartSlot(helpers.NextEpoch(postState))
		if err != nil {
@@ -354,7 +365,7 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
}

func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock,
	root [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
	root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
	if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
		return err
	}
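handleEpochBoundary above only reports metrics when the post-state slot reaches the stored next-boundary slot, then advances the marker to the first slot of the following epoch. The sketch below isolates that bookkeeping; the type, field names, and the epoch length constant are illustrative assumptions, not Prysm code.

package boundaryexample

const slotsPerEpochExample = 32 // mainnet value, assumed here

// epochTracker sketches the nextEpochBoundarySlot bookkeeping used by
// handleEpochBoundary above: report at most once per epoch, then advance.
type epochTracker struct {
	nextBoundarySlot uint64
	report           func(slot uint64) error
}

func (t *epochTracker) onSlot(slot uint64) error {
	if slot < t.nextBoundarySlot {
		return nil // still inside the current epoch, nothing to do
	}
	if err := t.report(slot); err != nil {
		return err
	}
	// The next boundary is the first slot of the following epoch.
	t.nextBoundarySlot = (slot/slotsPerEpochExample + 1) * slotsPerEpochExample
	return nil
}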
@@ -25,7 +25,7 @@ func (s *Service) CurrentSlot() uint64 {
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "forkChoice.getBlockPreState")
	ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
	defer span.End()

	// Verify incoming block has a valid pre state.
@@ -56,7 +56,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*

// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "chainService.verifyBlkPreState")
	ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
	defer span.End()

	parentRoot := bytesutil.ToBytes32(b.ParentRoot)
@@ -87,7 +87,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) e
// VerifyBlkDescendant validates input block root is a descendant of the
// current finalized block root.
func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "forkChoice.VerifyBlkDescendant")
	ctx, span := trace.StartSpan(ctx, "blockChain.VerifyBlkDescendant")
	defer span.End()
	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.finalizedCheckpt.Root))
	finalizedBlkSigned, err := s.beaconDB.Block(ctx, fRoot)
@@ -255,7 +255,39 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
//    # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
//    return root
func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "forkChoice.ancestor")
	ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
	defer span.End()

	r := bytesutil.ToBytes32(root)
	// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
	// This is most optimal outcome.
	ar, err := s.ancestorByForkChoiceStore(ctx, r, slot)
	if err != nil {
		// Try getting ancestor root from DB when failed to retrieve from fork choice store.
		// This is the second line of defense for retrieving ancestor root.
		ar, err = s.ancestorByDB(ctx, r, slot)
		if err != nil {
			return nil, err
		}
	}

	return ar, nil
}

// This retrieves an ancestor root using fork choice store. The look up is looping through the a flat array structure.
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByForkChoiceStore")
	defer span.End()

	if !s.forkChoiceStore.HasParent(r) {
		return nil, errors.New("could not find root in fork choice store")
	}
	return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
}

// This retrieves an ancestor root using DB. The look up is recursively looking up DB. Slower than `ancestorByForkChoiceStore`.
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByDB")
	defer span.End()

	// Stop recursive ancestry lookup if context is cancelled.
@@ -263,13 +295,6 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
		return nil, ctx.Err()
	}

	r := bytesutil.ToBytes32(root)
	// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
	// This is most optimal outcome.
	if s.forkChoiceStore.HasParent(r) {
		return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
	}

	signed, err := s.beaconDB.Block(ctx, r)
	if err != nil {
		return nil, errors.Wrap(err, "could not get ancestor block")
@@ -284,10 +309,10 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
	}
	b := signed.Block
	if b.Slot == slot || b.Slot < slot {
		return root, nil
		return r[:], nil
	}

	return s.ancestor(ctx, b.ParentRoot, slot)
	return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot), slot)
}

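The refactor above splits ancestor lookup into a fast path over the in-memory fork choice store and a fallback that walks the database by parent root. The sketch below shows the shape of that walk in isolation, written iteratively rather than recursively; the blockInfo type and the map-backed store are hypothetical stand-ins, not Prysm types.

package ancestorexample

import "errors"

// blockInfo is a stand-in for the data the lookup needs from a stored block:
// its slot and its parent root.
type blockInfo struct {
	Slot       uint64
	ParentRoot [32]byte
}

// ancestorRoot mirrors the walk in ancestorByDB above: follow parent roots
// until reaching a block at or below the requested slot. For a skip slot this
// returns the most recent root prior to the slot, matching the comment in the
// diff. The fast path in ancestorByForkChoiceStore performs the same walk over
// an in-memory flat array instead of hitting the database.
func ancestorRoot(store map[[32]byte]blockInfo, root [32]byte, slot uint64) ([32]byte, error) {
	for {
		b, ok := store[root]
		if !ok {
			return [32]byte{}, errors.New("ancestor block not found")
		}
		if b.Slot <= slot {
			return root, nil
		}
		root = b.ParentRoot
	}
}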
// This updates justified check point in store, if the new justified is later than stored justified or
|
||||
@@ -310,10 +335,7 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *state
|
||||
if !attestationutil.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
|
||||
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
|
||||
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
|
||||
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
}
|
||||
|
||||
// Update justified if store justified is not in chain with finalized check point.
|
||||
@@ -339,7 +361,7 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *state
|
||||
// This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
|
||||
// This is useful for block tree visualizer and additional vote accounting.
|
||||
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock,
|
||||
fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
pendingNodes := make([]*ethpb.BeaconBlock, 0)
|
||||
|
||||
parentRoot := bytesutil.ToBytes32(blk.ParentRoot)
|
||||
|
||||
@@ -146,8 +146,8 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
|
||||
bState := st.Copy()
|
||||
|
||||
blks := []*ethpb.SignedBeaconBlock{}
|
||||
blkRoots := [][32]byte{}
|
||||
var blks []*ethpb.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var firstState *stateTrie.BeaconState
|
||||
for i := 1; i < 10; i++ {
|
||||
b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), uint64(i))
|
||||
@@ -319,7 +319,6 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
service.bestJustifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
st := testutil.NewBeaconState()
|
||||
service.initSyncState[r] = st.Copy()
|
||||
require.NoError(t, db.SaveState(ctx, st.Copy(), r))
|
||||
|
||||
// Could update
|
||||
@@ -516,6 +515,15 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
|
||||
slot := svc.CurrentSlot()
|
||||
require.Equal(t, uint64(0), slot, "Unexpected slot")
|
||||
}
|
||||
func TestAncestorByDB_CtxErr(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
service, err := NewService(ctx, &Config{})
|
||||
require.NoError(t, err)
|
||||
|
||||
cancel()
|
||||
_, err = service.ancestorByDB(ctx, [32]byte{}, 0)
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
}
|
||||
|
||||
func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
@@ -562,6 +570,82 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestAncestor_CanUseForkchoice(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := testutil.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
|
||||
r1, err := b1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b100 := testutil.NewBeaconBlock()
|
||||
b100.Block.Slot = 100
|
||||
b100.Block.ParentRoot = r1[:]
|
||||
r100, err := b100.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b200 := testutil.NewBeaconBlock()
|
||||
b200.Block.Slot = 200
|
||||
b200.Block.ParentRoot = r100[:]
|
||||
r200, err := b200.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
|
||||
beaconBlock := testutil.NewBeaconBlock()
|
||||
beaconBlock.Block.Slot = b.Block.Slot
|
||||
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
|
||||
}
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
require.NoError(t, err)
|
||||
if bytesutil.ToBytes32(r) != r100 {
|
||||
t.Error("Did not get correct root")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAncestor_CanUseDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := testutil.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
|
||||
r1, err := b1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b100 := testutil.NewBeaconBlock()
|
||||
b100.Block.Slot = 100
|
||||
b100.Block.ParentRoot = r1[:]
|
||||
r100, err := b100.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b200 := testutil.NewBeaconBlock()
|
||||
b200.Block.Slot = 200
|
||||
b200.Block.ParentRoot = r100[:]
|
||||
r200, err := b200.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
|
||||
beaconBlock := testutil.NewBeaconBlock()
|
||||
beaconBlock.Block.Slot = b.Block.Slot
|
||||
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
|
||||
require.NoError(t, db.SaveBlock(context.Background(), beaconBlock)) // Saves blocks to DB.
|
||||
}
|
||||
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
require.NoError(t, err)
|
||||
if bytesutil.ToBytes32(r) != r100 {
|
||||
t.Error("Did not get correct root")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureRootNotZeroHashes(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
@@ -752,3 +836,15 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
|
||||
}
|
||||
|
||||
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := testutil.NewBeaconState()
|
||||
require.NoError(t, s.SetSlot(1))
|
||||
service.head = &head{}
|
||||
require.ErrorContains(t, "failed to initialize precompute: nil inner state", service.handleEpochBoundary(ctx, s))
|
||||
}
|
||||
|
||||
@@ -1,21 +1,18 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/slotutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -23,10 +20,9 @@ import (
|
||||
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
|
||||
type AttestationReceiver interface {
|
||||
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
|
||||
IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool
|
||||
AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error)
|
||||
AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error)
|
||||
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
|
||||
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
|
||||
}
|
||||
|
||||
// ReceiveAttestationNoPubsub is a function that defines the operations that are performed on
|
||||
@@ -43,35 +39,14 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
|
||||
return errors.Wrap(err, "could not process attestation")
|
||||
}
|
||||
|
||||
if !featureconfig.Get().DisableUpdateHeadPerAttestation {
|
||||
// This updates fork choice head, if a new head could not be updated due to
|
||||
// long range or intermediate forking. It simply logs a warning and returns nil
|
||||
// as that's more appropriate than returning errors.
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.Warnf("Resolving fork due to new attestation: %v", err)
|
||||
return nil
|
||||
}
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.Warnf("Resolving fork due to new attestation: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsValidAttestation returns true if the attestation can be verified against its pre-state.
|
||||
func (s *Service) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
|
||||
baseState, err := s.AttestationPreState(ctx, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get attestation pre state")
|
||||
return false
|
||||
}
|
||||
|
||||
if err := blocks.VerifyAttestationSignature(ctx, baseState, att); err != nil {
|
||||
log.WithError(err).Error("Failed to validate attestation")
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// AttestationPreState returns the pre state of attestation.
|
||||
func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error) {
|
||||
ss, err := helpers.StartSlot(att.Data.Target.Epoch)
|
||||
@@ -84,24 +59,37 @@ func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestatio
|
||||
return s.getAttPreState(ctx, att.Data.Target)
|
||||
}
|
||||
|
||||
// AttestationCheckPtInfo returns the check point info of attestation that can be used to verify the attestation
|
||||
// contents and signatures.
|
||||
func (s *Service) AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
|
||||
ss, err := helpers.StartSlot(att.Data.Target.Epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := helpers.ValidateSlotClock(ss, uint64(s.genesisTime.Unix())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.getAttCheckPtInfo(ctx, att.Data.Target, helpers.SlotToEpoch(att.Data.Slot))
|
||||
}
|
||||
|
||||
// VerifyLmdFfgConsistency verifies that attestation's LMD and FFG votes are consistency to each other.
|
||||
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
|
||||
return s.verifyLMDFFGConsistent(ctx, a.Data.Target.Epoch, a.Data.Target.Root, a.Data.BeaconBlockRoot)
|
||||
}
|
||||
|
||||
// VerifyFinalizedConsistency verifies input root is consistent with finalized store.
|
||||
// When the input root is not be consistent with finalized store then we know it is not
|
||||
// on the finalized check point that leads to current canonical chain and should be rejected accordingly.
|
||||
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
|
||||
// A canonical root implies the root to has an ancestor that aligns with finalized check point.
|
||||
// In this case, we could exit early to save on additional computation.
|
||||
if s.forkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
f := s.FinalizedCheckpt()
|
||||
ss, err := helpers.StartSlot(f.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r, err := s.ancestor(ctx, root, ss)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(f.Root, r) {
|
||||
return errors.New("Root and finalized store are not consistent")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This processes attestations from the attestation pool to account for validator votes and fork choice.
|
||||
func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
|
||||
// Wait for state to be initialized.
|
||||
@@ -138,7 +126,7 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
|
||||
log.WithError(err).Error("Could not delete fork choice attestation in pool")
|
||||
}
|
||||
|
||||
if !s.verifyCheckpointEpoch(a.Data.Target) {
|
||||
if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -155,23 +143,3 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This verifies the epoch of input checkpoint is within current epoch and previous epoch
|
||||
// with respect to current time. Returns true if it's within, false if it's not.
|
||||
func (s *Service) verifyCheckpointEpoch(c *ethpb.Checkpoint) bool {
|
||||
now := uint64(timeutils.Now().Unix())
|
||||
genesisTime := uint64(s.genesisTime.Unix())
|
||||
currentSlot := (now - genesisTime) / params.BeaconConfig().SecondsPerSlot
|
||||
currentEpoch := helpers.SlotToEpoch(currentSlot)
|
||||
|
||||
var prevEpoch uint64
|
||||
if currentEpoch > 1 {
|
||||
prevEpoch = currentEpoch - 1
|
||||
}
|
||||
|
||||
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -9,34 +9,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, db, sc)
|
||||
chainService.genesisTime = time.Now()
|
||||
|
||||
assert.Equal(t, true, chainService.verifyCheckpointEpoch(ðpb.Checkpoint{Root: make([]byte, 32)}))
|
||||
assert.Equal(t, false, chainService.verifyCheckpointEpoch(ðpb.Checkpoint{Epoch: 1}))
|
||||
}
|
||||
|
||||
func TestAttestationPreState_FarFutureSlot(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, db, sc)
|
||||
chainService.genesisTime = time.Now()
|
||||
|
||||
e := helpers.MaxSlotBuffer/params.BeaconConfig().SlotsPerEpoch + 1
|
||||
_, err := chainService.AttestationCheckPtInfo(context.Background(), ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Epoch: e}}})
|
||||
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
|
||||
}
|
||||
|
||||
func TestAttestationCheckPtInfo_FarFutureSlot(t *testing.T) {
|
||||
func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
|
||||
|
||||
@@ -7,12 +7,16 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
|
||||
var epochsSinceFinalitySaveHotStateDB = 100
|
||||
|
||||
// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
|
||||
@@ -58,8 +62,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlo
|
||||
return err
|
||||
}
|
||||
|
||||
// Have we been finalizing? Should we start saving hot states to db?
|
||||
if err := s.checkSaveHotStateDB(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
|
||||
// Log block sync status.
|
||||
logBlockSyncStatus(blockCopy.Block, blockRoot, s.finalizedCheckpt)
|
||||
@@ -95,7 +104,7 @@ func (s *Service) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.Sign
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
|
||||
// Log state transition data.
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -139,10 +148,12 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedB
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
}
|
||||
|
||||
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
|
||||
// log.Fatalf will prevent defer from being called
|
||||
span.End()
|
||||
// Exit run time if the node failed to verify weak subjectivity checkpoint.
|
||||
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
|
||||
}
|
||||
@@ -177,3 +188,21 @@ func (s *Service) handlePostBlockOperations(b *ethpb.BeaconBlock) error {
	}
	return nil
}

// This checks whether it's time to start saving hot state to DB.
// It's time when there's `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
	currentEpoch := helpers.SlotToEpoch(s.CurrentSlot())
	// Prevent `sinceFinality` going underflow.
	var sinceFinality uint64
	if currentEpoch > s.finalizedCheckpt.Epoch {
		sinceFinality = currentEpoch - s.finalizedCheckpt.Epoch
	}

	if sinceFinality >= uint64(epochsSinceFinalitySaveHotStateDB) {
		s.stateGen.EnableSaveHotStateToDB(ctx)
		return nil
	}

	return s.stateGen.DisableSaveHotStateToDB(ctx)
}

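checkSaveHotStateDB above toggles hot-state persistence based on how many epochs have elapsed since finality, guarding the subtraction against underflow. The sketch below isolates just that decision; the function and constant names are illustrative, with the threshold mirroring epochsSinceFinalitySaveHotStateDB from the diff.

package hotstateexample

// epochsSinceFinalityThreshold mirrors epochsSinceFinalitySaveHotStateDB above.
const epochsSinceFinalityThreshold = 100

// shouldSaveHotStates reproduces the decision in checkSaveHotStateDB: persist
// hot states to the DB only after a long stretch without finality, guarding
// the subtraction so a finalized epoch ahead of the clock (as in the overflow
// test above) cannot underflow.
func shouldSaveHotStates(currentEpoch, finalizedEpoch uint64) bool {
	var sinceFinality uint64
	if currentEpoch > finalizedEpoch {
		sinceFinality = currentEpoch - finalizedEpoch
	}
	return sinceFinality >= epochsSinceFinalityThreshold
}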
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
blockchainTesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestService_ReceiveBlock(t *testing.T) {
|
||||
@@ -362,7 +364,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{})
|
||||
s, err := NewService(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{'a'}
|
||||
if s.HasInitSyncBlock(r) {
|
||||
@@ -373,3 +375,41 @@ func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
t.Error("Should have block")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
db, stateSummaryCache := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(db, stateSummaryCache)})
|
||||
require.NoError(t, err)
|
||||
st := params.BeaconConfig().SlotsPerEpoch * uint64(epochsSinceFinalitySaveHotStateDB)
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{}
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
db, stateSummaryCache := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(db, stateSummaryCache)})
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{}
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
assert.LogsContain(t, hook, "Exiting mode to save hot states in DB")
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
db, stateSummaryCache := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(db, stateSummaryCache)})
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Epoch: 10000000}
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
|
||||
}
|
||||
|
||||
@@ -34,50 +34,46 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/slotutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type Service struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.HeadAccessDatabase
|
||||
depositCache *depositcache.DepositCache
|
||||
chainStartFetcher powchain.ChainStartFetcher
|
||||
attPool attestations.Pool
|
||||
slashingPool *slashings.Pool
|
||||
exitPool *voluntaryexits.Pool
|
||||
genesisTime time.Time
|
||||
p2p p2p.Broadcaster
|
||||
maxRoutines int
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
stateNotifier statefeed.Notifier
|
||||
genesisRoot [32]byte
|
||||
forkChoiceStore f.ForkChoicer
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
prevJustifiedCheckpt *ethpb.Checkpoint
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
prevFinalizedCheckpt *ethpb.Checkpoint
|
||||
nextEpochBoundarySlot uint64
|
||||
initSyncState map[[32]byte]*stateTrie.BeaconState
|
||||
boundaryRoots [][32]byte
|
||||
checkpointState *cache.CheckpointStateCache
|
||||
checkpointStateLock sync.Mutex
|
||||
stateGen *stategen.State
|
||||
opsService *attestations.Service
|
||||
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
recentCanonicalBlocks map[[32]byte]bool
|
||||
recentCanonicalBlocksLock sync.RWMutex
|
||||
justifiedBalances []uint64
|
||||
justifiedBalancesLock sync.RWMutex
|
||||
checkPtInfoCache *checkPtInfoCache
|
||||
wsEpoch uint64
|
||||
wsRoot []byte
|
||||
wsVerified bool
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.HeadAccessDatabase
|
||||
depositCache *depositcache.DepositCache
|
||||
chainStartFetcher powchain.ChainStartFetcher
|
||||
attPool attestations.Pool
|
||||
slashingPool *slashings.Pool
|
||||
exitPool *voluntaryexits.Pool
|
||||
genesisTime time.Time
|
||||
p2p p2p.Broadcaster
|
||||
maxRoutines int
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
stateNotifier statefeed.Notifier
|
||||
genesisRoot [32]byte
|
||||
forkChoiceStore f.ForkChoicer
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
prevJustifiedCheckpt *ethpb.Checkpoint
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
prevFinalizedCheckpt *ethpb.Checkpoint
|
||||
nextEpochBoundarySlot uint64
|
||||
boundaryRoots [][32]byte
|
||||
checkpointStateCache *cache.CheckpointStateCache
|
||||
stateGen *stategen.State
|
||||
opsService *attestations.Service
|
||||
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
justifiedBalances []uint64
|
||||
justifiedBalancesLock sync.RWMutex
|
||||
wsEpoch uint64
|
||||
wsRoot []byte
|
||||
wsVerified bool
|
||||
}
|
||||
|
||||
// Config options for the service.
|
||||
@@ -104,39 +100,31 @@ type Config struct {
|
||||
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
depositCache: cfg.DepositCache,
|
||||
chainStartFetcher: cfg.ChainStartFetcher,
|
||||
attPool: cfg.AttPool,
|
||||
exitPool: cfg.ExitPool,
|
||||
slashingPool: cfg.SlashingPool,
|
||||
p2p: cfg.P2p,
|
||||
maxRoutines: cfg.MaxRoutines,
|
||||
stateNotifier: cfg.StateNotifier,
|
||||
forkChoiceStore: cfg.ForkChoiceStore,
|
||||
initSyncState: make(map[[32]byte]*stateTrie.BeaconState),
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointState: cache.NewCheckpointStateCache(),
|
||||
opsService: cfg.OpsService,
|
||||
stateGen: cfg.StateGen,
|
||||
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
|
||||
recentCanonicalBlocks: make(map[[32]byte]bool),
|
||||
justifiedBalances: make([]uint64, 0),
|
||||
checkPtInfoCache: newCheckPointInfoCache(),
|
||||
wsEpoch: cfg.WspEpoch,
|
||||
wsRoot: cfg.WspBlockRoot,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
depositCache: cfg.DepositCache,
|
||||
chainStartFetcher: cfg.ChainStartFetcher,
|
||||
attPool: cfg.AttPool,
|
||||
exitPool: cfg.ExitPool,
|
||||
slashingPool: cfg.SlashingPool,
|
||||
p2p: cfg.P2p,
|
||||
maxRoutines: cfg.MaxRoutines,
|
||||
stateNotifier: cfg.StateNotifier,
|
||||
forkChoiceStore: cfg.ForkChoiceStore,
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointStateCache: cache.NewCheckpointStateCache(),
|
||||
opsService: cfg.OpsService,
|
||||
stateGen: cfg.StateGen,
|
||||
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
|
||||
justifiedBalances: make([]uint64, 0),
|
||||
wsEpoch: cfg.WspEpoch,
|
||||
wsRoot: cfg.WspBlockRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start a blockchain service's main event loop.
|
||||
func (s *Service) Start() {
|
||||
beaconState, err := s.beaconDB.HeadState(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state: %v", err)
|
||||
}
|
||||
|
||||
// For running initial sync with state cache, in an event of restart, we use
|
||||
// last finalized check point as start point to sync instead of head
|
||||
// state. This is because we no longer save state every slot during sync.
|
||||
@@ -145,27 +133,25 @@ func (s *Service) Start() {
|
||||
log.Fatalf("Could not fetch finalized cp: %v", err)
|
||||
}
|
||||
|
||||
if beaconState == nil {
|
||||
r := bytesutil.ToBytes32(cp.Root)
|
||||
// Before the first finalized epoch, in the current epoch,
|
||||
// the finalized root is defined as zero hashes instead of genesis root hash.
|
||||
// We want to use genesis root to retrieve for state.
|
||||
if r == params.BeaconConfig().ZeroHash {
|
||||
genesisBlock, err := s.beaconDB.GenesisBlock(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch finalized cp: %v", err)
|
||||
}
|
||||
if genesisBlock != nil {
|
||||
r, err = genesisBlock.Block.HashTreeRoot()
|
||||
if err != nil {
|
||||
log.Fatalf("Could not tree hash genesis block: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
beaconState, err = s.stateGen.StateByRoot(s.ctx, r)
|
||||
r := bytesutil.ToBytes32(cp.Root)
|
||||
// Before the first finalized epoch, in the current epoch,
|
||||
// the finalized root is defined as zero hashes instead of genesis root hash.
|
||||
// We want to use genesis root to retrieve for state.
|
||||
if r == params.BeaconConfig().ZeroHash {
|
||||
genesisBlock, err := s.beaconDB.GenesisBlock(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state by root: %v", err)
|
||||
log.Fatalf("Could not fetch finalized cp: %v", err)
|
||||
}
|
||||
if genesisBlock != nil {
|
||||
r, err = genesisBlock.Block.HashTreeRoot()
|
||||
if err != nil {
|
||||
log.Fatalf("Could not tree hash genesis block: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
beaconState, err := s.stateGen.StateByRoot(s.ctx, r)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state by root: %v", err)
|
||||
}
|
||||
|
||||
// Make sure that attestation processor is subscribed and ready for state initializing event.
|
||||
@@ -207,6 +193,19 @@ func (s *Service) Start() {
|
||||
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
|
||||
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)
|
||||
|
||||
ss, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not get start slot of finalized epoch: %v", err)
|
||||
}
|
||||
h := s.headBlock().Block
|
||||
log.WithFields(logrus.Fields{
|
||||
"startSlot": ss,
|
||||
"endSlot": h.Slot,
|
||||
}).Info("Loading blocks to fork choice store, this may take a while.")
|
||||
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
|
||||
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
|
||||
}
|
||||
|
||||
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
|
||||
// Exit run time if the node failed to verify weak subjectivity checkpoint.
|
||||
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
|
||||
@@ -330,11 +329,7 @@ func (s *Service) Stop() error {
|
||||
}
|
||||
|
||||
// Save initial sync cached blocks to the DB before stop.
|
||||
if err := s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
|
||||
}
|
||||
|
||||
// Status always returns nil unless there is an error condition that causes
|
||||
@@ -346,12 +341,6 @@ func (s *Service) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClearCachedStates removes all stored caches states. This is done after the node
|
||||
// is synced.
|
||||
func (s *Service) ClearCachedStates() {
|
||||
s.initSyncState = map[[32]byte]*stateTrie.BeaconState{}
|
||||
}
|
||||
|
||||
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
|
||||
func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.BeaconState) error {
|
||||
stateRoot, err := genesisState.HashTreeRoot(ctx)
|
||||
@@ -429,7 +418,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
}
|
||||
s.genesisRoot = genesisBlkRoot
|
||||
|
||||
if flags.Get().UnsafeSync {
|
||||
if flags.Get().HeadSync {
|
||||
headBlock, err := s.beaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head block")
|
||||
@@ -438,7 +427,13 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash head block")
|
||||
}
|
||||
headState, err := s.beaconDB.HeadState(ctx)
|
||||
finalizedState, err := s.stateGen.Resume(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
}
|
||||
log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d."+
|
||||
"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block.Slot)
|
||||
headState, err := s.stateGen.StateByRoot(ctx, headRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state")
|
||||
}
|
||||
@@ -478,7 +473,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
|
||||
// This is called when a client starts from non-genesis slot. This passes last justified and finalized
|
||||
// information to fork choice service to initializes fork choice store.
|
||||
func (s *Service) resumeForkChoice(justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
|
||||
s.forkChoiceStore = store
|
||||
}
|
||||
|
||||
@@ -67,7 +67,7 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ = p2p.Broadcaster(&mockBroadcaster{})
|
||||
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
|
||||
|
||||
func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummaryCache) *Service {
|
||||
endpoint := "http://127.0.0.1"
|
||||
@@ -218,7 +218,9 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
if headBlk == nil {
|
||||
t.Error("Head state can't be nil after initialize beacon chain")
|
||||
}
|
||||
if bc.headRoot() == params.BeaconConfig().ZeroHash {
|
||||
r, err := bc.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
if bytesutil.ToBytes32(r) == params.BeaconConfig().ZeroHash {
|
||||
t.Error("Canonical root for slot 0 can't be zeros after initialize beacon chain")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ type ChainService struct {
|
||||
Balance *precompute.Balance
|
||||
Genesis time.Time
|
||||
ValidatorsRoot [32]byte
|
||||
CanonicalRoots map[[32]byte]bool
|
||||
Fork *pb.Fork
|
||||
ETH1Data *ethpb.Eth1Data
|
||||
DB db.Database
|
||||
@@ -80,7 +81,8 @@ func (msn *MockBlockNotifier) BlockFeed() *event.Feed {
|
||||
|
||||
// MockStateNotifier mocks the state notifier.
|
||||
type MockStateNotifier struct {
|
||||
feed *event.Feed
|
||||
feed *event.Feed
|
||||
feedLock sync.Mutex
|
||||
|
||||
recv []*feed.Event
|
||||
recvLock sync.Mutex
|
||||
@@ -98,6 +100,9 @@ func (msn *MockStateNotifier) ReceivedEvents() []*feed.Event {
|
||||
|
||||
// StateFeed returns a state feed.
|
||||
func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
msn.feedLock.Lock()
|
||||
defer msn.feedLock.Unlock()
|
||||
|
||||
if msn.feed == nil && msn.recvCh == nil {
|
||||
msn.feed = new(event.Feed)
|
||||
if msn.RecordEvents {
|
||||
@@ -141,7 +146,7 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
}
|
||||
|
||||
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
}
|
||||
@@ -168,7 +173,7 @@ func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethp
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, roots [][32]byte) error {
|
||||
func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, _ [][32]byte) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
}
|
||||
@@ -197,7 +202,7 @@ func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.Sig
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
}
|
||||
@@ -232,7 +237,7 @@ func (ms *ChainService) HeadSlot() uint64 {
|
||||
}
|
||||
|
||||
// HeadRoot mocks HeadRoot method in chain service.
|
||||
func (ms *ChainService) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
func (ms *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
|
||||
if len(ms.Root) > 0 {
|
||||
return ms.Root, nil
|
||||
}
|
||||
@@ -270,7 +275,7 @@ func (ms *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
}
|
||||
|
||||
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
|
||||
func (ms *ChainService) ReceiveAttestation(context.Context, *ethpb.Attestation) error {
|
||||
func (ms *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -280,12 +285,12 @@ func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attes
|
||||
}
|
||||
|
||||
// AttestationPreState mocks AttestationPreState method in chain service.
|
||||
func (ms *ChainService) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*stateTrie.BeaconState, error) {
|
||||
func (ms *ChainService) AttestationPreState(_ context.Context, _ *ethpb.Attestation) (*stateTrie.BeaconState, error) {
|
||||
return ms.State, nil
|
||||
}
|
||||
|
||||
// HeadValidatorsIndices mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
|
||||
func (ms *ChainService) HeadValidatorsIndices(_ context.Context, epoch uint64) ([]uint64, error) {
|
||||
if ms.State == nil {
|
||||
return []uint64{}, nil
|
||||
}
|
||||
@@ -293,7 +298,7 @@ func (ms *ChainService) HeadValidatorsIndices(ctx context.Context, epoch uint64)
|
||||
}
|
||||
|
||||
// HeadSeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
|
||||
func (ms *ChainService) HeadSeed(_ context.Context, epoch uint64) ([32]byte, error) {
|
||||
return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
@@ -323,26 +328,27 @@ func (ms *ChainService) CurrentSlot() uint64 {
|
||||
}
|
||||
|
||||
// Participation mocks the same method in the chain service.
|
||||
func (ms *ChainService) Participation(epoch uint64) *precompute.Balance {
|
||||
func (ms *ChainService) Participation(_ uint64) *precompute.Balance {
|
||||
return ms.Balance
|
||||
}
|
||||
|
||||
// IsValidAttestation always returns true.
|
||||
func (ms *ChainService) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
|
||||
func (ms *ChainService) IsValidAttestation(_ context.Context, _ *ethpb.Attestation) bool {
|
||||
return ms.ValidAttestation
|
||||
}
|
||||
|
||||
// IsCanonical returns and determines whether a block with the provided root is part of
|
||||
// the canonical chain.
|
||||
func (ms *ChainService) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
|
||||
func (ms *ChainService) IsCanonical(_ context.Context, r [32]byte) (bool, error) {
|
||||
if ms.CanonicalRoots != nil {
|
||||
_, ok := ms.CanonicalRoots[r]
|
||||
return ok, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// ClearCachedStates does nothing.
|
||||
func (ms *ChainService) ClearCachedStates() {}
|
||||
|
||||
// HasInitSyncBlock mocks the same method in the chain service.
|
||||
func (ms *ChainService) HasInitSyncBlock(root [32]byte) bool {
|
||||
func (ms *ChainService) HasInitSyncBlock(_ [32]byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -352,17 +358,28 @@ func (ms *ChainService) HeadGenesisValidatorRoot() [32]byte {
|
||||
}
|
||||
|
||||
// VerifyBlkDescendant mocks VerifyBlkDescendant and always returns nil.
|
||||
func (ms *ChainService) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
|
||||
func (ms *ChainService) VerifyBlkDescendant(_ context.Context, _ [32]byte) error {
|
||||
return ms.VerifyBlkDescendantErr
|
||||
}
|
||||
|
||||
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
|
||||
func (ms *ChainService) VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error {
|
||||
func (ms *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
|
||||
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
|
||||
return errors.New("LMD and FFG miss matched")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyFinalizedConsistency mocks VerifyFinalizedConsistency and always returns nil.
|
||||
func (ms *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) error {
|
||||
if !bytes.Equal(r, ms.FinalizedCheckPoint.Root) {
|
||||
return errors.New("Root and finalized store are not consistent")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AttestationCheckPtInfo mocks AttestationCheckPtInfo and always returns nil.
|
||||
func (ms *ChainService) AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
|
||||
func (ms *ChainService) AttestationCheckPtInfo(_ context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
|
||||
f := ms.State.Fork()
|
||||
g := bytesutil.ToBytes32(ms.State.GenesisValidatorRoot())
|
||||
seed, err := helpers.Seed(ms.State, helpers.SlotToEpoch(att.Data.Slot), params.BeaconConfig().DomainBeaconAttester)
|
||||
|
||||
beacon-chain/cache/BUILD.bazel (5 changed lines, vendored)
@@ -2,6 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_test")
|
||||
|
||||
# gazelle:ignore committee_disabled.go
|
||||
# gazelle:ignore proposer_indices_disabled.go
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
@@ -14,12 +15,15 @@ go_library(
|
||||
"skip_slot_cache.go",
|
||||
"state_summary.go",
|
||||
"subnet_ids.go",
|
||||
"proposer_indices_type.go",
|
||||
] + select({
|
||||
"//fuzz:fuzzing_enabled": [
|
||||
"committee_disabled.go",
|
||||
"proposer_indices_disabled.go"
|
||||
],
|
||||
"//conditions:default": [
|
||||
"committee.go",
|
||||
"proposer_indices.go",
|
||||
],
|
||||
}),
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
|
||||
@@ -57,6 +61,7 @@ go_test(
|
||||
"hot_state_cache_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
"subnet_ids_test.go",
|
||||
"proposer_indices_test.go"
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
|
||||
beacon-chain/cache/attestation_data.go (13 changed lines, vendored)
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
@@ -99,10 +98,7 @@ func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRe
|
||||
|
||||
if exists && item != nil && item.(*attestationReqResWrapper).res != nil {
|
||||
attestationCacheHit.Inc()
|
||||
if featureconfig.Get().ReduceAttesterStateCopy {
|
||||
return state.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
|
||||
}
|
||||
return item.(*attestationReqResWrapper).res, nil
|
||||
return state.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
|
||||
}
|
||||
attestationCacheMiss.Inc()
|
||||
return nil, nil
|
||||
@@ -138,7 +134,7 @@ func (c *AttestationCache) MarkNotInProgress(req *ethpb.AttestationDataRequest)
|
||||
}
|
||||
|
||||
// Put the response in the cache.
|
||||
func (c *AttestationCache) Put(ctx context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
|
||||
func (c *AttestationCache) Put(_ context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
|
||||
data := &attestationReqResWrapper{
|
||||
req,
|
||||
res,
|
||||
@@ -167,10 +163,7 @@ func wrapperToKey(i interface{}) (string, error) {
|
||||
}
|
||||
|
||||
func reqToKey(req *ethpb.AttestationDataRequest) (string, error) {
|
||||
if featureconfig.Get().ReduceAttesterStateCopy {
|
||||
return fmt.Sprintf("%d", req.Slot), nil
|
||||
}
|
||||
return fmt.Sprintf("%d-%d", req.CommitteeIndex, req.Slot), nil
|
||||
return fmt.Sprintf("%d", req.Slot), nil
|
||||
}
|
||||
|
||||
type attestationReqResWrapper struct {
|
||||
|
||||
beacon-chain/cache/checkpoint_state.go (89 changed lines, vendored)
@@ -1,8 +1,10 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"context"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@@ -13,10 +15,6 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotCheckpointState will be returned when a cache object is not a pointer to
|
||||
// a CheckpointState struct.
|
||||
ErrNotCheckpointState = errors.New("object is not a state by check point struct")
|
||||
|
||||
// maxCheckpointStateSize defines the max number of entries check point to state cache can contain.
|
||||
// Choosing 10 to account for multiple forks, this allows 5 forks per epoch boundary with 2 epochs
|
||||
// window to accept attestation based on latest spec.
|
||||
@@ -35,8 +33,9 @@ var (
|
||||
|
||||
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
|
||||
type CheckpointStateCache struct {
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
inProgress map[string]bool
|
||||
}
|
||||
|
||||
// NewCheckpointStateCache creates a new checkpoint state cache for storing/accessing processed state.
|
||||
@@ -46,22 +45,45 @@ func NewCheckpointStateCache() *CheckpointStateCache {
|
||||
panic(err)
|
||||
}
|
||||
return &CheckpointStateCache{
|
||||
cache: cache,
|
||||
cache: cache,
|
||||
inProgress: map[string]bool{},
|
||||
}
|
||||
}
|
||||
|
||||
// StateByCheckpoint fetches state by checkpoint. Returns true with a
|
||||
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
h, err := hashutil.HashProto(cp)
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(ctx context.Context, cp *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
|
||||
k, err := checkpointKey(cp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
item, exists := c.cache.Get(h)
|
||||
delay := minDelay
|
||||
|
||||
// Another identical request may be in progress already. Let's wait until
|
||||
// any in progress request resolves or our timeout is exceeded.
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
c.lock.RLock()
|
||||
if !c.inProgress[k] {
|
||||
c.lock.RUnlock()
|
||||
break
|
||||
}
|
||||
c.lock.RUnlock()
|
||||
|
||||
// This increasing backoff is to decrease the CPU cycles while waiting
|
||||
// for the in progress boolean to flip to false.
|
||||
time.Sleep(time.Duration(delay) * time.Nanosecond)
|
||||
delay *= delayFactor
|
||||
delay = math.Min(delay, maxDelay)
|
||||
}
|
||||
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
item, exists := c.cache.Get(k)
|
||||
if exists && item != nil {
|
||||
checkpointStateHit.Inc()
|
||||
// Copy here is unnecessary since the return will only be used to verify attestation signature.
|
||||
@@ -77,10 +99,47 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTr
|
||||
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s *stateTrie.BeaconState) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
h, err := hashutil.HashProto(cp)
|
||||
k, err := checkpointKey(cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.cache.Add(h, s)
|
||||
c.cache.ContainsOrAdd(k, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkInProgress a request so that any other similar requests will block on
|
||||
// Get until MarkNotInProgress is called.
|
||||
func (c *CheckpointStateCache) MarkInProgress(cp *ethpb.Checkpoint) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
k, err := checkpointKey(cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.inProgress[k] {
|
||||
return ErrAlreadyInProgress
|
||||
}
|
||||
c.inProgress[k] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkNotInProgress will release the lock on a given request. This should be
|
||||
// called after put.
|
||||
func (c *CheckpointStateCache) MarkNotInProgress(cp *ethpb.Checkpoint) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
k, err := checkpointKey(cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
delete(c.inProgress, k)
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkpointKey(cp *ethpb.Checkpoint) (string, error) {
|
||||
h, err := hashutil.HashProto(cp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(h[:]), nil
|
||||
}
|
||||
|
||||
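A minimal sketch of how a caller might drive the reworked CheckpointStateCache above: StateByCheckpoint now takes a context and waits out any in-progress producer, while MarkInProgress and MarkNotInProgress fence off duplicate work. The produce callback and the stateForCheckpoint wrapper are assumptions for illustration; the cache methods, ErrAlreadyInProgress, and the import paths are taken from this diff.

package example

import (
	"context"
	"errors"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// stateForCheckpoint returns a cached checkpoint state, or produces and caches
// one while holding the in-progress marker so concurrent callers wait instead
// of duplicating the work.
func stateForCheckpoint(
	ctx context.Context,
	c *cache.CheckpointStateCache,
	cp *ethpb.Checkpoint,
	produce func(context.Context, *ethpb.Checkpoint) (*stateTrie.BeaconState, error),
) (*stateTrie.BeaconState, error) {
	// Fast path: StateByCheckpoint blocks while an identical request is in progress.
	if st, err := c.StateByCheckpoint(ctx, cp); err != nil || st != nil {
		return st, err
	}
	// Claim the checkpoint so other goroutines wait on our result.
	if err := c.MarkInProgress(cp); err != nil {
		if errors.Is(err, cache.ErrAlreadyInProgress) {
			return c.StateByCheckpoint(ctx, cp)
		}
		return nil, err
	}
	defer func() {
		_ = c.MarkNotInProgress(cp)
	}()

	st, err := produce(ctx, cp)
	if err != nil {
		return nil, err
	}
	return st, c.AddCheckpointState(cp, st)
}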
beacon-chain/cache/checkpoint_state_test.go (9 changed lines, vendored)
@@ -1,6 +1,7 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
@@ -23,13 +24,13 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
state, err := cache.StateByCheckpoint(cp1)
|
||||
state, err := cache.StateByCheckpoint(context.Background(), cp1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Expected state not to exist in empty cache")
|
||||
|
||||
require.NoError(t, cache.AddCheckpointState(cp1, st))
|
||||
|
||||
state, err = cache.StateByCheckpoint(cp1)
|
||||
state, err = cache.StateByCheckpoint(context.Background(), cp1)
|
||||
require.NoError(t, err)
|
||||
|
||||
if !proto.Equal(state.InnerStateUnsafe(), st.InnerStateUnsafe()) {
|
||||
@@ -43,11 +44,11 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, cache.AddCheckpointState(cp2, st2))
|
||||
|
||||
state, err = cache.StateByCheckpoint(cp2)
|
||||
state, err = cache.StateByCheckpoint(context.Background(), cp2)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, st2.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
|
||||
|
||||
state, err = cache.StateByCheckpoint(cp1)
|
||||
state, err = cache.StateByCheckpoint(context.Background(), cp1)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, st.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
|
||||
}
|
||||
|
||||
beacon-chain/cache/committee.go (53 changed lines, vendored)
@@ -104,35 +104,6 @@ func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddProposerIndicesList updates the committee shuffled list with proposer indices.
|
||||
func (c *CommitteeCache) AddProposerIndicesList(seed [32]byte, indices []uint64) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
committees := &Committees{ProposerIndices: indices}
|
||||
if err := c.CommitteeCache.Add(committees); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
committees, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return ErrNotCommittee
|
||||
}
|
||||
committees.ProposerIndices = indices
|
||||
if err := c.CommitteeCache.Add(committees); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
trim(c.CommitteeCache, maxCommitteesCacheSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ActiveIndices returns the active indices of a given seed stored in cache.
|
||||
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
@@ -181,30 +152,6 @@ func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
|
||||
return len(item.SortedIndices), nil
|
||||
}
|
||||
|
||||
// ProposerIndices returns the proposer indices of a given seed.
|
||||
func (c *CommitteeCache) ProposerIndices(seed [32]byte) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
CommitteeCacheHit.Inc()
|
||||
} else {
|
||||
CommitteeCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return nil, ErrNotCommittee
|
||||
}
|
||||
|
||||
return item.ProposerIndices, nil
|
||||
}
|
||||
|
||||
// HasEntry returns true if the committee cache has a value.
|
||||
func (c *CommitteeCache) HasEntry(seed string) bool {
|
||||
_, ok, err := c.CommitteeCache.GetByKey(seed)
|
||||
|
||||
beacon-chain/cache/committee_test.go (30 changed lines, vendored)
@@ -87,35 +87,6 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
|
||||
assert.Equal(t, len(item.SortedIndices), count)
|
||||
}
|
||||
|
||||
func TestCommitteeCache_AddProposerIndicesList(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
seed := [32]byte{'A'}
|
||||
indices, err := cache.ProposerIndices(seed)
|
||||
require.NoError(t, err)
|
||||
if indices != nil {
|
||||
t.Error("Expected committee count not to exist in empty cache")
|
||||
}
|
||||
require.NoError(t, cache.AddProposerIndicesList(seed, indices))
|
||||
|
||||
received, err := cache.ProposerIndices(seed)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, received, indices)
|
||||
|
||||
item := &Committees{Seed: [32]byte{'B'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
|
||||
indices, err = cache.ProposerIndices(item.Seed)
|
||||
require.NoError(t, err)
|
||||
if indices != nil {
|
||||
t.Error("Expected committee count not to exist in empty cache")
|
||||
}
|
||||
require.NoError(t, cache.AddProposerIndicesList(item.Seed, indices))
|
||||
|
||||
received, err = cache.ProposerIndices(item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, received, indices)
|
||||
}
|
||||
|
||||
func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
@@ -150,7 +121,6 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
|
||||
Seed: seed,
|
||||
ShuffledIndices: []uint64{0},
|
||||
SortedIndices: []uint64{},
|
||||
ProposerIndices: []uint64{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
beacon-chain/cache/committees.go (1 changed line, vendored)
@@ -12,5 +12,4 @@ type Committees struct {
|
||||
Seed [32]byte
|
||||
ShuffledIndices []uint64
|
||||
SortedIndices []uint64
|
||||
ProposerIndices []uint64
|
||||
}
|
||||
|
||||
beacon-chain/cache/common.go (2 changed lines, vendored)
@@ -24,6 +24,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
|
||||
}
|
||||
|
||||
// popProcessNoopFunc is a no-op function that never returns an error.
|
||||
func popProcessNoopFunc(obj interface{}) error {
|
||||
func popProcessNoopFunc(_ interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -50,17 +50,17 @@ type FinalizedDeposits struct {
|
||||
// stores all the deposit related data that is required by the beacon-node.
|
||||
type DepositCache struct {
|
||||
// Beacon chain deposits in memory.
|
||||
pendingDeposits []*dbpb.DepositContainer
|
||||
deposits []*dbpb.DepositContainer
|
||||
finalizedDeposits *FinalizedDeposits
|
||||
depositsLock sync.RWMutex
|
||||
chainStartDeposits []*ethpb.Deposit
|
||||
chainStartPubkeys map[string]bool
|
||||
pendingDeposits []*dbpb.DepositContainer
|
||||
deposits []*dbpb.DepositContainer
|
||||
finalizedDeposits *FinalizedDeposits
|
||||
depositsLock sync.RWMutex
|
||||
chainStartPubkeys map[string]bool
|
||||
chainStartPubkeysLock sync.RWMutex
|
||||
}
|
||||
|
||||
// New instantiates a new deposit cache
|
||||
func New() (*DepositCache, error) {
|
||||
finalizedDepositsTrie, err := trieutil.NewTrie(int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
finalizedDepositsTrie, err := trieutil.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -68,11 +68,10 @@ func New() (*DepositCache, error) {
|
||||
// finalizedDeposits.MerkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
|
||||
// Inserting the first item into the trie will set the value of the index to 0.
|
||||
return &DepositCache{
|
||||
pendingDeposits: []*dbpb.DepositContainer{},
|
||||
deposits: []*dbpb.DepositContainer{},
|
||||
finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
|
||||
chainStartPubkeys: make(map[string]bool),
|
||||
chainStartDeposits: make([]*ethpb.Deposit, 0),
|
||||
pendingDeposits: []*dbpb.DepositContainer{},
|
||||
deposits: []*dbpb.DepositContainer{},
|
||||
finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
|
||||
chainStartPubkeys: make(map[string]bool),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -119,7 +118,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
depositTrie := dc.finalizedDeposits.Deposits
|
||||
insertIndex := dc.finalizedDeposits.MerkleTrieIndex + 1
|
||||
insertIndex := int(dc.finalizedDeposits.MerkleTrieIndex + 1)
|
||||
for _, d := range dc.deposits {
|
||||
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
|
||||
continue
|
||||
@@ -132,7 +131,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
|
||||
log.WithError(err).Error("Could not hash deposit data. Finalized deposit cache not updated.")
|
||||
return
|
||||
}
|
||||
depositTrie.Insert(depHash[:], int(insertIndex))
|
||||
depositTrie.Insert(depHash[:], insertIndex)
|
||||
insertIndex++
|
||||
}
|
||||
|
||||
@@ -156,6 +155,8 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.Deposi
|
||||
func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey string) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.MarkPubkeyForChainstart")
|
||||
defer span.End()
|
||||
dc.chainStartPubkeysLock.Lock()
|
||||
defer dc.chainStartPubkeysLock.Unlock()
|
||||
dc.chainStartPubkeys[pubkey] = true
|
||||
}
|
||||
|
||||
@@ -163,6 +164,8 @@ func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey stri
|
||||
func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) bool {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PubkeyInChainstart")
|
||||
defer span.End()
|
||||
dc.chainStartPubkeysLock.RLock()
|
||||
defer dc.chainStartPubkeysLock.RUnlock()
|
||||
if dc.chainStartPubkeys != nil {
|
||||
return dc.chainStartPubkeys[pubkey]
|
||||
}
|
||||
@@ -258,3 +261,28 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
|
||||
|
||||
return deposits
|
||||
}
|
||||
|
||||
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
|
||||
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
if untilDepositIndex > int64(len(dc.deposits)) {
|
||||
untilDepositIndex = int64(len(dc.deposits) - 1)
|
||||
}
|
||||
|
||||
for i := untilDepositIndex; i >= 0; i-- {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
|
||||
if dc.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
dc.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
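PruneProofs above walks backward from untilDepositIndex, clearing Merkle proofs until it reaches one that was already pruned. A hedged sketch of a call site follows; the proofPruner type and its bookkeeping are assumptions for illustration, the depositcache import path is assumed, and PruneProofs itself is the method added in this diff.

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
)

// proofPruner remembers the highest deposit index whose proofs were already
// dropped, so repeated finalization events do not re-walk the cache.
type proofPruner struct {
	prunedUpTo int64
}

// prune drops proofs for all deposits at or below finalizedEth1DepositIndex.
// Proofs are only needed until a deposit is finalized, so this frees memory
// without affecting block production.
func (p *proofPruner) prune(ctx context.Context, dc *depositcache.DepositCache, finalizedEth1DepositIndex int64) error {
	if finalizedEth1DepositIndex <= p.prunedUpTo {
		return nil // nothing newly finalized since the last prune
	}
	if err := dc.PruneProofs(ctx, finalizedEth1DepositIndex); err != nil {
		return err
	}
	p.prunedUpTo = finalizedEth1DepositIndex
	return nil
}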
@@ -19,7 +19,7 @@ import (
|
||||
|
||||
const nilDepositErr = "Ignoring nil deposit insertion"
|
||||
|
||||
var _ = DepositFetcher(&DepositCache{})
|
||||
var _ DepositFetcher = (*DepositCache)(nil)
|
||||
|
||||
func TestInsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
@@ -387,7 +387,7 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
|
||||
require.NoError(t, err, "Could not hash deposit data")
|
||||
deps = append(deps, hash[:])
|
||||
}
|
||||
trie, err := trieutil.GenerateTrieFromItems(deps, int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
trie, err := trieutil.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate deposit trie")
|
||||
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
|
||||
}
|
||||
@@ -445,7 +445,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
|
||||
require.NoError(t, err, "Could not hash deposit data")
|
||||
deps = append(deps, hash[:])
|
||||
}
|
||||
trie, err := trieutil.GenerateTrieFromItems(deps, int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
trie, err := trieutil.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate deposit trie")
|
||||
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
|
||||
}
|
||||
@@ -573,3 +573,137 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
|
||||
assert.Equal(t, 1, len(deps))
|
||||
}
|
||||
|
||||
func TestPruneProofs_Ok(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 1))
|
||||
|
||||
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[2].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: nil},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: nil},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 2))
|
||||
|
||||
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[2].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 99))
|
||||
|
||||
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func makeDepositProof() [][]byte {
|
||||
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
|
||||
for i := range proof {
|
||||
proof[i] = make([]byte, 32)
|
||||
}
|
||||
return proof
|
||||
}
|
||||
|
||||
@@ -9,11 +9,10 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
)
|
||||
|
||||
var _ = PendingDepositsFetcher(&DepositCache{})
|
||||
var _ PendingDepositsFetcher = (*DepositCache)(nil)
|
||||
|
||||
func TestInsertPendingDeposit_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
@@ -29,14 +28,6 @@ func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
|
||||
assert.Equal(t, 0, len(dc.pendingDeposits))
|
||||
}
|
||||
|
||||
func makeDepositProof() [][]byte {
|
||||
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
|
||||
for i := range proof {
|
||||
proof[i] = make([]byte, 32)
|
||||
}
|
||||
return proof
|
||||
}
|
||||
|
||||
func TestRemovePendingDeposit_OK(t *testing.T) {
|
||||
db := DepositCache{}
|
||||
proof1 := makeDepositProof()
|
||||
|
||||
beacon-chain/cache/feature_flag_test.go (5 changed lines, vendored)
@@ -10,5 +10,8 @@ import (
|
||||
func TestMain(m *testing.M) {
|
||||
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
|
||||
defer resetCfg()
|
||||
os.Exit(m.Run())
|
||||
code := m.Run()
|
||||
// os.Exit will prevent defer from being called
|
||||
resetCfg()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
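The TestMain change above exists because os.Exit terminates the process without running deferred functions, so a deferred resetCfg() would silently be skipped. A generic sketch of the pattern, with a hypothetical setup helper standing in for the feature-flag reset:

package example

import (
	"os"
	"testing"
)

// setup stands in for per-package test initialization and returns its teardown.
func setup() func() { return func() {} }

// TestMain captures the exit code, runs the teardown explicitly, and only then
// exits; calling os.Exit(m.Run()) directly would skip any deferred cleanup.
func TestMain(m *testing.M) {
	teardown := setup()
	code := m.Run()
	teardown()
	os.Exit(code)
}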
beacon-chain/cache/proposer_indices.go (new file, 88 lines, vendored)
@@ -0,0 +1,88 @@
|
||||
// +build !libfuzzer
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// maxProposerIndicesCacheSize defines the max number of proposer indices on per block root basis can cache.
|
||||
// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
|
||||
maxProposerIndicesCacheSize = uint64(8)
|
||||
|
||||
// ProposerIndicesCacheMiss tracks the number of proposerIndices requests that aren't present in the cache.
|
||||
ProposerIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "proposer_indices_cache_miss",
|
||||
Help: "The number of proposer indices requests that aren't present in the cache.",
|
||||
})
|
||||
// ProposerIndicesCacheHit tracks the number of proposerIndices requests that are in the cache.
|
||||
ProposerIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "proposer_indices_cache_hit",
|
||||
Help: "The number of proposer indices requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// ProposerIndicesCache is a struct with 1 queue for looking up proposer indices by root.
|
||||
type ProposerIndicesCache struct {
|
||||
ProposerIndicesCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// proposerIndicesKeyFn takes the block root as the key to retrieve proposer indices in a given epoch.
|
||||
func proposerIndicesKeyFn(obj interface{}) (string, error) {
|
||||
info, ok := obj.(*ProposerIndices)
|
||||
if !ok {
|
||||
return "", ErrNotProposerIndices
|
||||
}
|
||||
|
||||
return key(info.BlockRoot), nil
|
||||
}
|
||||
|
||||
// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
|
||||
func NewProposerIndicesCache() *ProposerIndicesCache {
|
||||
return &ProposerIndicesCache{
|
||||
ProposerIndicesCache: cache.NewFIFO(proposerIndicesKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// AddProposerIndices adds ProposerIndices object to the cache.
|
||||
// This method also trims the least recently list if the cache size has ready the max cache size limit.
|
||||
func (c *ProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if err := c.ProposerIndicesCache.AddIfNotPresent(p); err != nil {
|
||||
return err
|
||||
}
|
||||
trim(c.ProposerIndicesCache, maxProposerIndicesCacheSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProposerIndices returns the proposer indices of a block root seed.
|
||||
func (c *ProposerIndicesCache) ProposerIndices(r [32]byte) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.ProposerIndicesCache.GetByKey(key(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
ProposerIndicesCacheHit.Inc()
|
||||
} else {
|
||||
ProposerIndicesCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*ProposerIndices)
|
||||
if !ok {
|
||||
return nil, ErrNotProposerIndices
|
||||
}
|
||||
|
||||
return item.ProposerIndices, nil
|
||||
}
|
||||
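The new ProposerIndicesCache above is keyed by block root and trimmed to a small fixed size. A minimal sketch of the intended lookup-or-compute pattern follows; the compute callback and the wrapper function are assumptions, while ProposerIndices, AddProposerIndices, and the ProposerIndices struct are the ones introduced in this diff.

package example

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

// proposerIndicesForRoot returns cached proposer indices for a block root,
// computing and storing them on a cache miss.
func proposerIndicesForRoot(c *cache.ProposerIndicesCache, root [32]byte, compute func([32]byte) ([]uint64, error)) ([]uint64, error) {
	indices, err := c.ProposerIndices(root)
	if err != nil {
		return nil, err
	}
	if indices != nil {
		return indices, nil // cache hit
	}
	indices, err = compute(root)
	if err != nil {
		return nil, err
	}
	if err := c.AddProposerIndices(&cache.ProposerIndices{BlockRoot: root, ProposerIndices: indices}); err != nil {
		return nil, err
	}
	return indices, nil
}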
beacon-chain/cache/proposer_indices_disabled.go (new file, 24 lines, vendored)
@@ -0,0 +1,24 @@
|
||||
// +build libfuzzer
|
||||
|
||||
// This file is used in fuzzer builds to bypass proposer indices caches.
|
||||
package cache
|
||||
|
||||
// FakeProposerIndicesCache is a struct with 1 queue for looking up proposer indices by root.
|
||||
type FakeProposerIndicesCache struct {
|
||||
}
|
||||
|
||||
// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
|
||||
func NewProposerIndicesCache() *FakeProposerIndicesCache {
|
||||
return &FakeProposerIndicesCache{}
|
||||
}
|
||||
|
||||
// AddProposerIndices adds ProposerIndices object to the cache.
|
||||
// This method also trims the least recently list if the cache size has ready the max cache size limit.
|
||||
func (c *FakeProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProposerIndices returns the proposer indices of a block root seed.
|
||||
func (c *FakeProposerIndicesCache) ProposerIndices(r [32]byte) ([]uint64, error) {
|
||||
return nil, nil
|
||||
}
|
||||
beacon-chain/cache/proposer_indices_test.go (new file, 63 lines, vendored)
@@ -0,0 +1,63 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestProposerKeyFn_OK(t *testing.T) {
|
||||
item := &ProposerIndices{
|
||||
BlockRoot: [32]byte{'A'},
|
||||
ProposerIndices: []uint64{1, 2, 3, 4, 5},
|
||||
}
|
||||
|
||||
k, err := proposerIndicesKeyFn(item)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, key(item.BlockRoot), k)
|
||||
}
|
||||
|
||||
func TestProposerKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := proposerIndicesKeyFn("bad")
|
||||
assert.Equal(t, ErrNotProposerIndices, err)
|
||||
}
|
||||
|
||||
func TestProposerCache_AddProposerIndicesList(t *testing.T) {
|
||||
cache := NewProposerIndicesCache()
|
||||
bRoot := [32]byte{'A'}
|
||||
indices, err := cache.ProposerIndices(bRoot)
|
||||
require.NoError(t, err)
|
||||
if indices != nil {
|
||||
t.Error("Expected committee count not to exist in empty cache")
|
||||
}
|
||||
require.NoError(t, cache.AddProposerIndices(&ProposerIndices{
|
||||
ProposerIndices: indices,
|
||||
BlockRoot: bRoot,
|
||||
}))
|
||||
|
||||
received, err := cache.ProposerIndices(bRoot)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, received, indices)
|
||||
|
||||
item := &ProposerIndices{BlockRoot: [32]byte{'B'}, ProposerIndices: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
require.NoError(t, cache.AddProposerIndices(item))
|
||||
|
||||
received, err = cache.ProposerIndices(item.BlockRoot)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, item.ProposerIndices, received)
|
||||
}
|
||||
|
||||
func TestProposerCache_CanRotate(t *testing.T) {
|
||||
cache := NewProposerIndicesCache()
|
||||
for i := 0; i < int(maxProposerIndicesCacheSize)+1; i++ {
|
||||
s := []byte(strconv.Itoa(i))
|
||||
item := &ProposerIndices{BlockRoot: bytesutil.ToBytes32(s)}
|
||||
require.NoError(t, cache.AddProposerIndices(item))
|
||||
}
|
||||
|
||||
k := cache.ProposerIndicesCache.ListKeys()
|
||||
assert.Equal(t, maxProposerIndicesCacheSize, uint64(len(k)))
|
||||
}
|
||||
beacon-chain/cache/proposer_indices_type.go (new file, 13 lines, vendored)
@@ -0,0 +1,13 @@
|
||||
package cache
|
||||
|
||||
import "errors"
|
||||
|
||||
// ErrNotProposerIndices will be returned when a cache object is not a pointer to
|
||||
// a ProposerIndices struct.
|
||||
var ErrNotProposerIndices = errors.New("object is not a proposer indices struct")
|
||||
|
||||
// ProposerIndices defines the cached struct for proposer indices.
|
||||
type ProposerIndices struct {
|
||||
BlockRoot [32]byte
|
||||
ProposerIndices []uint64
|
||||
}
|
||||
beacon-chain/cache/skip_slot_cache.go (24 changed lines, vendored)
@@ -30,7 +30,7 @@ type SkipSlotCache struct {
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
disabled bool // Allow for programmatic toggling of the cache, useful during initial sync.
|
||||
inProgress map[uint64]bool
|
||||
inProgress map[[32]byte]bool
|
||||
}
|
||||
|
||||
// NewSkipSlotCache initializes the map and underlying cache.
|
||||
@@ -41,7 +41,7 @@ func NewSkipSlotCache() *SkipSlotCache {
|
||||
}
|
||||
return &SkipSlotCache{
|
||||
cache: cache,
|
||||
inProgress: make(map[uint64]bool),
|
||||
inProgress: make(map[[32]byte]bool),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ func (c *SkipSlotCache) Disable() {
|
||||
|
||||
// Get waits for any in progress calculation to complete before returning a
|
||||
// cached response, if any.
|
||||
func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.BeaconState, error) {
|
||||
func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (*stateTrie.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "skipSlotCache.Get")
|
||||
defer span.End()
|
||||
if c.disabled {
|
||||
@@ -77,7 +77,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
|
||||
}
|
||||
|
||||
c.lock.RLock()
|
||||
if !c.inProgress[slot] {
|
||||
if !c.inProgress[r] {
|
||||
c.lock.RUnlock()
|
||||
break
|
||||
}
|
||||
@@ -92,7 +92,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
|
||||
}
|
||||
span.AddAttributes(trace.BoolAttribute("inProgress", inProgress))
|
||||
|
||||
item, exists := c.cache.Get(slot)
|
||||
item, exists := c.cache.Get(r)
|
||||
|
||||
if exists && item != nil {
|
||||
skipSlotCacheHit.Inc()
|
||||
@@ -106,7 +106,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
|
||||
|
||||
// MarkInProgress a request so that any other similar requests will block on
|
||||
// Get until MarkNotInProgress is called.
|
||||
func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
|
||||
func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
|
||||
if c.disabled {
|
||||
return nil
|
||||
}
|
||||
@@ -114,16 +114,16 @@ func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.inProgress[slot] {
|
||||
if c.inProgress[r] {
|
||||
return ErrAlreadyInProgress
|
||||
}
|
||||
c.inProgress[slot] = true
|
||||
c.inProgress[r] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkNotInProgress will release the lock on a given request. This should be
|
||||
// called after put.
|
||||
func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
|
||||
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) error {
|
||||
if c.disabled {
|
||||
return nil
|
||||
}
|
||||
@@ -131,18 +131,18 @@ func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
delete(c.inProgress, slot)
|
||||
delete(c.inProgress, r)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put the response in the cache.
|
||||
func (c *SkipSlotCache) Put(ctx context.Context, slot uint64, state *stateTrie.BeaconState) error {
|
||||
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state *stateTrie.BeaconState) error {
|
||||
if c.disabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy state so cached value is not mutated.
|
||||
c.cache.Add(slot, state.Copy())
|
||||
c.cache.Add(r, state.Copy())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
beacon-chain/cache/skip_slot_cache_test.go (11 changed lines, vendored)
@@ -15,21 +15,22 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := cache.NewSkipSlotCache()
|
||||
|
||||
state, err := c.Get(ctx, 5)
|
||||
r := [32]byte{'a'}
|
||||
state, err := c.Get(ctx, r)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Empty cache returned an object")
|
||||
|
||||
require.NoError(t, c.MarkInProgress(5))
|
||||
require.NoError(t, c.MarkInProgress(r))
|
||||
|
||||
state, err = stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 10,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, c.Put(ctx, 5, state))
|
||||
require.NoError(t, c.MarkNotInProgress(5))
|
||||
require.NoError(t, c.Put(ctx, r, state))
|
||||
require.NoError(t, c.MarkNotInProgress(r))
|
||||
|
||||
res, err := c.Get(ctx, 5)
|
||||
res, err := c.Get(ctx, r)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, res.CloneInnerState(), state.CloneInnerState(), "Expected equal protos to return from cache")
|
||||
}
|
||||
|
||||
beacon-chain/cache/state_summary.go (6 changed lines, vendored)
@@ -50,11 +50,11 @@ func (s *StateSummaryCache) GetAll() []*pb.StateSummary {
|
||||
s.initSyncStateSummariesLock.RLock()
|
||||
defer s.initSyncStateSummariesLock.RUnlock()
|
||||
|
||||
blks := make([]*pb.StateSummary, 0, len(s.initSyncStateSummaries))
|
||||
summaries := make([]*pb.StateSummary, 0, len(s.initSyncStateSummaries))
|
||||
for _, b := range s.initSyncStateSummaries {
|
||||
blks = append(blks, b)
|
||||
summaries = append(summaries, b)
|
||||
}
|
||||
return blks
|
||||
return summaries
|
||||
}
|
||||
|
||||
// Clear clears out the initial sync state summaries cache.
|
||||
|
||||
beacon-chain/cache/subnet_ids.go (12 changed lines, vendored)
@@ -25,12 +25,12 @@ var SubnetIDs = newSubnetIDs()
|
||||
func newSubnetIDs() *subnetIDs {
|
||||
// Given a node can calculate committee assignments of current epoch and next epoch.
|
||||
// Max size is set to 2 epoch length.
|
||||
cacheSize := params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2
|
||||
attesterCache, err := lru.New(int(cacheSize))
|
||||
cacheSize := int(params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2)
|
||||
attesterCache, err := lru.New(cacheSize)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
aggregatorCache, err := lru.New(int(cacheSize))
|
||||
aggregatorCache, err := lru.New(cacheSize)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -41,7 +41,7 @@ func newSubnetIDs() *subnetIDs {
|
||||
}
|
||||
|
||||
// AddAttesterSubnetID adds the subnet index for subscribing subnet for the attester of a given slot.
|
||||
func (c *subnetIDs) AddAttesterSubnetID(slot uint64, subnetID uint64) {
|
||||
func (c *subnetIDs) AddAttesterSubnetID(slot, subnetID uint64) {
|
||||
c.attesterLock.Lock()
|
||||
defer c.attesterLock.Unlock()
|
||||
|
||||
@@ -69,7 +69,7 @@ func (c *subnetIDs) GetAttesterSubnetIDs(slot uint64) []uint64 {
|
||||
}
|
||||
|
||||
// AddAggregatorSubnetID adds the subnet ID for subscribing subnet for the aggregator of a given slot.
|
||||
func (c *subnetIDs) AddAggregatorSubnetID(slot uint64, subnetID uint64) {
|
||||
func (c *subnetIDs) AddAggregatorSubnetID(slot, subnetID uint64) {
|
||||
c.aggregatorLock.Lock()
|
||||
defer c.aggregatorLock.Unlock()
|
||||
|
||||
@@ -113,7 +113,7 @@ func (c *subnetIDs) GetAllSubnets() []uint64 {
|
||||
defer c.subnetsLock.RUnlock()
|
||||
|
||||
itemsMap := c.persistentSubnets.Items()
|
||||
committees := []uint64{}
|
||||
var committees []uint64
|
||||
|
||||
for _, v := range itemsMap {
|
||||
if v.Expired() {
|
||||
|
||||
2
beacon-chain/cache/subnet_ids_test.go
vendored
2
beacon-chain/cache/subnet_ids_test.go
vendored
@@ -42,12 +42,10 @@ func TestSubnetIDsCache_RoundTrip(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
|
||||
pubkeySet := [][48]byte{}
|
||||
c := newSubnetIDs()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
pubkey := [48]byte{byte(i)}
|
||||
pubkeySet = append(pubkeySet, pubkey)
|
||||
c.AddPersistentCommittee(pubkey[:], []uint64{uint64(i)}, 0)
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ go_library(
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//fuzz:__pkg__",
|
||||
"//shared/testutil:__pkg__",
|
||||
"//validator/accounts:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
@@ -49,6 +50,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"attestation_regression_test.go",
|
||||
"attestation_test.go",
|
||||
"attester_slashing_test.go",
|
||||
"block_operations_fuzz_test.go",
|
||||
@@ -61,6 +63,7 @@ go_test(
|
||||
"proposer_slashing_test.go",
|
||||
"randao_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
shard_count = 2,
|
||||
deps = [
|
||||
|
||||
@@ -158,7 +158,7 @@ func ProcessAttestationNoVerifySignature(
|
||||
return nil, err
|
||||
}
|
||||
c := helpers.SlotCommitteeCount(activeValidatorCount)
|
||||
if att.Data.CommitteeIndex > c {
|
||||
if att.Data.CommitteeIndex >= c {
|
||||
return nil, fmt.Errorf("committee index %d >= committee count %d", att.Data.CommitteeIndex, c)
|
||||
}
|
||||
|
||||
@@ -314,7 +314,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState *stateTrie.Beacon
|
||||
return err
|
||||
}
|
||||
indices := indexedAtt.AttestingIndices
|
||||
pubkeys := []bls.PublicKey{}
|
||||
var pubkeys []bls.PublicKey
|
||||
for i := 0; i < len(indices); i++ {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(indices[i])
|
||||
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
|
||||
@@ -365,7 +365,7 @@ func VerifyAttSigUseCheckPt(ctx context.Context, c *pb.CheckPtInfo, att *ethpb.A
|
||||
return err
|
||||
}
|
||||
indices := indexedAtt.AttestingIndices
|
||||
pubkeys := []bls.PublicKey{}
|
||||
var pubkeys []bls.PublicKey
|
||||
for i := 0; i < len(indices); i++ {
|
||||
pubkeyAtIdx := c.PubKeys[indices[i]]
|
||||
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx)
|
||||
|
||||
44
beacon-chain/core/blocks/attestation_regression_test.go
Normal file
44
beacon-chain/core/blocks/attestation_regression_test.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
// Beaconfuzz discovered an off by one issue where an attestation could be produced which would pass
|
||||
// validation when att.Data.CommitteeIndex is 1 and the committee count per slot is also 1. The only
|
||||
// valid att.Data.Committee index would be 0, so this is an off by one error.
|
||||
// See: https://github.com/sigp/beacon-fuzz/issues/78
|
||||
func TestProcessAttestationNoVerifySignature_BeaconFuzzIssue78(t *testing.T) {
|
||||
attData, err := ioutil.ReadFile("testdata/beaconfuzz_78_attestation.ssz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
att := ðpb.Attestation{}
|
||||
if err := att.UnmarshalSSZ(attData); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
stateData, err := ioutil.ReadFile("testdata/beaconfuzz_78_beacon.ssz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
spb := &pb.BeaconState{}
|
||||
if err := spb.UnmarshalSSZ(stateData); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
st, err := state.InitializeFromProtoUnsafe(spb)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
_, err = blocks.ProcessAttestationNoVerifySignature(ctx, st, att)
|
||||
require.ErrorContains(t, "committee index 1 >= committee count 1", err)
|
||||
}
|
||||
@@ -639,7 +639,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
|
||||
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
|
||||
list := bitfield.Bitlist{0b11111111}
|
||||
atts := []*ethpb.Attestation{}
|
||||
var atts []*ethpb.Attestation
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
|
||||
@@ -115,7 +115,7 @@ func VerifyAttesterSlashing(ctx context.Context, beaconState *stateTrie.BeaconSt
|
||||
// # Surround vote
|
||||
// (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch)
|
||||
// )
|
||||
func IsSlashableAttestationData(data1 *ethpb.AttestationData, data2 *ethpb.AttestationData) bool {
|
||||
func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
|
||||
if data1 == nil || data2 == nil || data1.Target == nil || data2.Target == nil || data1.Source == nil || data2.Source == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
@@ -57,7 +56,7 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
registry := []*ethpb.Validator{}
|
||||
var registry []*ethpb.Validator
|
||||
currentSlot := uint64(0)
|
||||
|
||||
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
@@ -71,13 +70,12 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
want := fmt.Sprint("attestations are not slashable")
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b)
|
||||
assert.ErrorContains(t, want, err)
|
||||
assert.ErrorContains(t, "attestations are not slashable", err)
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
|
||||
registry := []*ethpb.Validator{}
|
||||
var registry []*ethpb.Validator
|
||||
currentSlot := uint64(0)
|
||||
|
||||
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
@@ -116,9 +114,8 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
|
||||
},
|
||||
}
|
||||
|
||||
want := fmt.Sprint("validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE")
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b)
|
||||
assert.ErrorContains(t, want, err)
|
||||
assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
|
||||
@@ -6,9 +6,7 @@ import (
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
@@ -17,13 +15,13 @@ import (
|
||||
func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ctx := context.Background()
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
att := ð.Attestation{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(att)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
_, err = ProcessAttestationNoVerifySignature(ctx, s, att)
|
||||
_ = err
|
||||
@@ -32,14 +30,14 @@ func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
block := ð.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
_, err = ProcessBlockHeader(context.Background(), s, block)
|
||||
_ = err
|
||||
@@ -48,13 +46,13 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ba := []byte{}
|
||||
var ba []byte
|
||||
pubkey := [48]byte{}
|
||||
sig := [96]byte{}
|
||||
domain := [4]byte{}
|
||||
p := []byte{}
|
||||
s := []byte{}
|
||||
d := []byte{}
|
||||
var p []byte
|
||||
var s []byte
|
||||
var d []byte
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(&ba)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
@@ -100,11 +98,11 @@ func TestFuzzareEth1DataEqual_10000(t *testing.T) {
|
||||
func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
eth1data := ð.Eth1Data{}
|
||||
stateVotes := []*eth.Eth1Data{}
|
||||
var stateVotes []*eth.Eth1Data
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(&stateVotes)
|
||||
s, err := beaconstate.InitializeFromProto(ðereum_beacon_p2p_v1.BeaconState{
|
||||
s, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Eth1DataVotes: stateVotes,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -116,13 +114,13 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
block := ð.BeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
_, err = ProcessBlockHeaderNoVerify(s, block)
|
||||
_ = err
|
||||
@@ -131,13 +129,13 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessRandao_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessRandao(context.Background(), s, b)
|
||||
if err != nil && r != nil {
|
||||
@@ -148,13 +146,13 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessRandaoNoVerify(s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
@@ -165,13 +163,13 @@ func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessProposerSlashings(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
@@ -182,12 +180,12 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
proposerSlashing := ð.ProposerSlashing{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(proposerSlashing)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyProposerSlashing(s, proposerSlashing)
|
||||
_ = err
|
||||
@@ -196,13 +194,13 @@ func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttesterSlashings(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
@@ -213,13 +211,13 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
attesterSlashing := ð.AttesterSlashing{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyAttesterSlashing(ctx, s, attesterSlashing)
|
||||
_ = err
|
||||
@@ -250,13 +248,13 @@ func TestFuzzslashableAttesterIndices_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessAttestations_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttestations(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
@@ -267,13 +265,13 @@ func TestFuzzProcessAttestations_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttestationsNoVerifySignature(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
@@ -284,13 +282,13 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
attestation := ð.Attestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attestation)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttestation(ctx, s, attestation)
|
||||
if err != nil && r != nil {
|
||||
@@ -301,13 +299,13 @@ func TestFuzzProcessAttestation_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
idxAttestation := ð.IndexedAttestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(idxAttestation)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyIndexedAttestation(ctx, s, idxAttestation)
|
||||
_ = err
|
||||
@@ -316,13 +314,13 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzVerifyAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
attestation := ð.Attestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attestation)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyAttestationSignature(ctx, s, attestation)
|
||||
_ = err
|
||||
@@ -331,13 +329,13 @@ func TestFuzzVerifyAttestation_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessDeposits(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
@@ -348,14 +346,14 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
deposit := ð.Deposit{}
|
||||
ctx := context.Background()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessPreGenesisDeposits(ctx, s, []*eth.Deposit{deposit})
|
||||
if err != nil && r != nil {
|
||||
@@ -366,13 +364,13 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
deposit := ð.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessDeposit(s, deposit, true)
|
||||
if err != nil && r != nil {
|
||||
@@ -383,12 +381,12 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
deposit := ð.Deposit{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = verifyDeposit(s, deposit)
|
||||
_ = err
|
||||
@@ -397,13 +395,13 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessVoluntaryExits(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
@@ -414,12 +412,12 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessVoluntaryExits(context.Background(), s, b)
|
||||
if err != nil && r != nil {
|
||||
@@ -431,7 +429,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
|
||||
func TestFuzzVerifyExit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ve := ð.SignedVoluntaryExit{}
|
||||
val := &beaconstate.ReadOnlyValidator{}
|
||||
val := &stateTrie.ReadOnlyValidator{}
|
||||
fork := &pb.Fork{}
|
||||
var slot uint64
|
||||
|
||||
|
||||
@@ -50,7 +50,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
aggSigs := []bls.Signature{}
|
||||
var aggSigs []bls.Signature
|
||||
for _, index := range setA {
|
||||
sig := privKeys[index].Sign(signingRoot[:])
|
||||
aggSigs = append(aggSigs, sig)
|
||||
|
||||
@@ -158,8 +158,7 @@ func ProcessDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depositSig := deposit.Data.Signature
|
||||
if err := verifyDepositDataSigningRoot(deposit.Data, pubKey, depositSig, domain); err != nil {
|
||||
if err := verifyDepositDataSigningRoot(deposit.Data, domain); err != nil {
|
||||
// Ignore this error as in the spec pseudo code.
|
||||
log.Debugf("Skipping deposit: could not verify deposit data signature: %v", err)
|
||||
return beaconState, nil
|
||||
@@ -223,7 +222,7 @@ func verifyDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit) e
|
||||
}
|
||||
|
||||
// Deprecated: This method uses deprecated ssz.SigningRoot.
|
||||
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature []byte, domain []byte) error {
|
||||
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, domain []byte) error {
|
||||
return depositutil.VerifyDepositSignature(obj, domain)
|
||||
}
|
||||
|
||||
|
||||
@@ -67,7 +67,7 @@ func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// We then create a merkle branch for the test.
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate trie")
|
||||
proof, err := depositTrie.MerkleProof(0)
|
||||
require.NoError(t, err, "Could not generate proof")
|
||||
@@ -132,7 +132,8 @@ func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) {
|
||||
sk := bls.RandKey()
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
deposit := ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
@@ -149,7 +150,7 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We then create a merkle branch for the test.
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate trie")
|
||||
proof, err := depositTrie.MerkleProof(0)
|
||||
require.NoError(t, err, "Could not generate proof")
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
// state.eth1_data_votes.append(body.eth1_data)
|
||||
// if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH:
|
||||
// state.latest_eth1_data = body.eth1_data
|
||||
func ProcessEth1DataInBlock(ctx context.Context, beaconState *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
|
||||
func ProcessEth1DataInBlock(_ context.Context, beaconState *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
|
||||
block := b.Block
|
||||
if beaconState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
|
||||
@@ -13,6 +13,13 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// ValidatorAlreadyExitedMsg defines a message saying that a validator has already exited.
|
||||
var ValidatorAlreadyExitedMsg = "validator has already submitted an exit, which will take place at epoch"
|
||||
|
||||
// ValidatorCannotExitYetMsg defines a message saying that a validator cannot exit
|
||||
// because it has not been active long enough.
|
||||
var ValidatorCannotExitYetMsg = "validator has not been active long enough to exit"
|
||||
|
||||
// ProcessVoluntaryExits is one of the operations performed
|
||||
// on each processed beacon block to determine which validators
|
||||
// should exit the state's validator registry.
|
||||
@@ -37,7 +44,7 @@ import (
|
||||
// # Initiate exit
|
||||
// initiate_validator_exit(state, exit.validator_index)
|
||||
func ProcessVoluntaryExits(
|
||||
ctx context.Context,
|
||||
_ context.Context,
|
||||
beaconState *stateTrie.BeaconState,
|
||||
b *ethpb.SignedBeaconBlock,
|
||||
) (*stateTrie.BeaconState, error) {
|
||||
@@ -162,9 +169,7 @@ func verifyExitConditions(validator *stateTrie.ReadOnlyValidator, currentSlot ui
|
||||
}
|
||||
// Verify the validator has not yet submitted an exit.
|
||||
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
|
||||
return fmt.Errorf(
|
||||
"validator has already submitted an exit, which will take place at epoch: %v",
|
||||
validator.ExitEpoch())
|
||||
return fmt.Errorf("%s: %v", ValidatorAlreadyExitedMsg, validator.ExitEpoch())
|
||||
}
|
||||
// Exits must specify an epoch when they become valid; they are not valid before then.
|
||||
if currentEpoch < exit.Epoch {
|
||||
@@ -173,7 +178,8 @@ func verifyExitConditions(validator *stateTrie.ReadOnlyValidator, currentSlot ui
|
||||
// Verify the validator has been active long enough.
|
||||
if currentEpoch < validator.ActivationEpoch()+params.BeaconConfig().ShardCommitteePeriod {
|
||||
return fmt.Errorf(
|
||||
"validator has not been active long enough to exit: %d epochs vs required %d epochs",
|
||||
"%s: %d epochs vs required %d epochs",
|
||||
ValidatorCannotExitYetMsg,
|
||||
currentEpoch,
|
||||
validator.ActivationEpoch()+params.BeaconConfig().ShardCommitteePeriod,
|
||||
)
|
||||
|
||||
@@ -171,7 +171,9 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
|
||||
err = state.SetSlot(state.Slot() + (params.BeaconConfig().ShardCommitteePeriod * params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, err)
|
||||
|
||||
priv := bls.RandKey()
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
val, err := state.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
val.PublicKey = priv.PublicKey().Marshal()
|
||||
|
||||
@@ -37,7 +37,7 @@ import (
|
||||
// # Verify proposer signature
|
||||
// assert bls_verify(proposer.pubkey, signing_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER))
|
||||
func ProcessBlockHeader(
|
||||
ctx context.Context,
|
||||
_ context.Context,
|
||||
beaconState *stateTrie.BeaconState,
|
||||
block *ethpb.SignedBeaconBlock,
|
||||
) (*stateTrie.BeaconState, error) {
|
||||
|
||||
@@ -48,7 +48,8 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
priv := bls.RandKey()
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(state)
|
||||
require.NoError(t, err)
|
||||
block := testutil.NewBeaconBlock()
|
||||
@@ -126,7 +127,8 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
|
||||
priv := bls.RandKey()
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, []byte("hello"), params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
validators[5896].PublicKey = priv.PublicKey().Marshal()
|
||||
@@ -164,7 +166,8 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
|
||||
bh.Slot = 9
|
||||
require.NoError(t, state.SetLatestBlockHeader(bh))
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
priv := bls.RandKey()
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, []byte("hello"), params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
validators[5896].PublicKey = priv.PublicKey().Marshal()
|
||||
@@ -202,7 +205,8 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
|
||||
parentRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
priv := bls.RandKey()
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, []byte("hello"), params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -247,7 +251,8 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
priv := bls.RandKey()
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(state)
|
||||
require.NoError(t, err)
|
||||
block := testutil.NewBeaconBlock()
|
||||
@@ -307,7 +312,8 @@ func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
priv := bls.RandKey()
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(state)
|
||||
require.NoError(t, err)
|
||||
block := testutil.NewBeaconBlock()
|
||||
|
||||
@@ -36,7 +36,7 @@ import (
|
||||
//
|
||||
// slash_validator(state, proposer_slashing.proposer_index)
|
||||
func ProcessProposerSlashings(
|
||||
ctx context.Context,
|
||||
_ context.Context,
|
||||
beaconState *stateTrie.BeaconState,
|
||||
b *ethpb.SignedBeaconBlock,
|
||||
) (*stateTrie.BeaconState, error) {
|
||||
|
||||
@@ -194,6 +194,13 @@ func TestVerifyProposerSlashing(t *testing.T) {
|
||||
beaconState, sks := testutil.DeterministicGenesisState(t, 2)
|
||||
currentSlot := uint64(0)
|
||||
require.NoError(t, beaconState.SetSlot(currentSlot))
|
||||
rand1, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
sig1 := rand1.Sign([]byte("foo")).Marshal()
|
||||
|
||||
rand2, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
sig2 := rand2.Sign([]byte("bar")).Marshal()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -239,7 +246,7 @@ func TestVerifyProposerSlashing(t *testing.T) {
|
||||
BodyRoot: bytesutil.PadTo([]byte{}, 32),
|
||||
ParentRoot: bytesutil.PadTo([]byte{}, 32),
|
||||
},
|
||||
Signature: bls.RandKey().Sign([]byte("foo")).Marshal(),
|
||||
Signature: sig1,
|
||||
},
|
||||
Header_2: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
@@ -249,7 +256,7 @@ func TestVerifyProposerSlashing(t *testing.T) {
|
||||
BodyRoot: bytesutil.PadTo([]byte{}, 32),
|
||||
ParentRoot: bytesutil.PadTo([]byte{}, 32),
|
||||
},
|
||||
Signature: bls.RandKey().Sign([]byte("bar")).Marshal(),
|
||||
Signature: sig2,
|
||||
},
|
||||
},
|
||||
beaconState: beaconState,
|
||||
|
||||
@@ -26,7 +26,7 @@ import (
|
||||
// mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal))
|
||||
// state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix
|
||||
func ProcessRandao(
|
||||
ctx context.Context,
|
||||
_ context.Context,
|
||||
beaconState *stateTrie.BeaconState,
|
||||
b *ethpb.SignedBeaconBlock,
|
||||
) (*stateTrie.BeaconState, error) {
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
// retrieves the signature set from the raw data, public key,signature and domain provided.
|
||||
func retrieveSignatureSet(signedData []byte, pub []byte, signature []byte, domain []byte) (*bls.SignatureSet, error) {
|
||||
func retrieveSignatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, error) {
|
||||
publicKey, err := bls.PublicKeyFromBytes(pub)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
@@ -36,7 +36,7 @@ func retrieveSignatureSet(signedData []byte, pub []byte, signature []byte, domai
|
||||
}
|
||||
|
||||
// verifies the signature from the raw data, public key and domain provided.
|
||||
func verifySignature(signedData []byte, pub []byte, signature []byte, domain []byte) error {
|
||||
func verifySignature(signedData, pub, signature, domain []byte) error {
|
||||
set, err := retrieveSignatureSet(signedData, pub, signature, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
package spectest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
@@ -26,9 +24,7 @@ func runDepositTest(t *testing.T, config string) {
|
||||
require.NoError(t, deposit.UnmarshalSSZ(depositFile), "Failed to unmarshal")
|
||||
|
||||
body := ðpb.BeaconBlockBody{Deposits: []*ethpb.Deposit{deposit}}
|
||||
testutil.RunBlockOperationTest(t, folderPath, body, func(ctx context.Context, state *state.BeaconState, b *ethpb.SignedBeaconBlock) (*state.BeaconState, error) {
|
||||
return blocks.ProcessDeposits(ctx, state, b)
|
||||
})
|
||||
testutil.RunBlockOperationTest(t, folderPath, body, blocks.ProcessDeposits)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
BIN
beacon-chain/core/blocks/testdata/beaconfuzz_78_attestation.ssz
vendored
Normal file
BIN
beacon-chain/core/blocks/testdata/beaconfuzz_78_attestation.ssz
vendored
Normal file
Binary file not shown.
BIN
beacon-chain/core/blocks/testdata/beaconfuzz_78_beacon.ssz
vendored
Normal file
BIN
beacon-chain/core/blocks/testdata/beaconfuzz_78_beacon.ssz
vendored
Normal file
Binary file not shown.
@@ -419,7 +419,7 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func buildState(t testing.TB, slot uint64, validatorCount uint64) *state.BeaconState {
|
||||
func buildState(t testing.TB, slot, validatorCount uint64) *state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -14,10 +14,6 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Balances stores balances such as prev/current total validator balances, attested balances and more.
|
||||
// It's used for metrics reporting.
|
||||
var Balances *Balance
|
||||
|
||||
// ProcessAttestations process the attestations in state and update individual validator's pre computes,
|
||||
// it also tracks and updates epoch attesting balances.
|
||||
func ProcessAttestations(
|
||||
@@ -56,7 +52,6 @@ func ProcessAttestations(
|
||||
}
|
||||
|
||||
pBal = UpdateBalance(vp, pBal)
|
||||
Balances = pBal
|
||||
|
||||
return vp, pBal, nil
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func AttestationsDelta(state *stateTrie.BeaconState, pBal *Balance, vp []*Valida
|
||||
return rewards, penalties, nil
|
||||
}
|
||||
|
||||
func attestationDelta(pBal *Balance, v *Validator, prevEpoch uint64, finalizedEpoch uint64) (uint64, uint64) {
|
||||
func attestationDelta(pBal *Balance, v *Validator, prevEpoch, finalizedEpoch uint64) (uint64, uint64) {
|
||||
eligible := v.IsActivePrevEpoch || (v.IsSlashed && !v.IsWithdrawableCurrentEpoch)
|
||||
if !eligible || pBal.ActiveCurrentEpoch == 0 {
|
||||
return 0, 0
|
||||
@@ -182,7 +182,7 @@ func ProposersDelta(state *stateTrie.BeaconState, pBal *Balance, vp []*Validator
|
||||
// Spec code:
|
||||
// def is_in_inactivity_leak(state: BeaconState) -> bool:
|
||||
// return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY
|
||||
func isInInactivityLeak(prevEpoch uint64, finalizedEpoch uint64) bool {
|
||||
func isInInactivityLeak(prevEpoch, finalizedEpoch uint64) bool {
|
||||
return finalityDelay(prevEpoch, finalizedEpoch) > params.BeaconConfig().MinEpochsToInactivityPenalty
|
||||
}
|
||||
|
||||
@@ -191,6 +191,6 @@ func isInInactivityLeak(prevEpoch uint64, finalizedEpoch uint64) bool {
|
||||
// Spec code:
|
||||
// def get_finality_delay(state: BeaconState) -> uint64:
|
||||
// return get_previous_epoch(state) - state.finalized_checkpoint.epoch
|
||||
func finalityDelay(prevEpoch uint64, finalizedEpoch uint64) uint64 {
|
||||
func finalityDelay(prevEpoch, finalizedEpoch uint64) uint64 {
|
||||
return prevEpoch - finalizedEpoch
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
|
||||
base, err := epoch.BaseReward(state, i)
|
||||
assert.NoError(t, err, "Could not get base reward")
|
||||
assert.Equal(t, uint64(0), rewards[i], "Unexpected slashed indices reward balance")
|
||||
assert.Equal(t, uint64(3*base), penalties[i], "Unexpected slashed indices penalty balance")
|
||||
assert.Equal(t, 3*base, penalties[i], "Unexpected slashed indices penalty balance")
|
||||
}
|
||||
|
||||
nonAttestedIndices := []uint64{434, 677, 872, 791}
|
||||
@@ -254,7 +254,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
|
||||
}
|
||||
}
|
||||
|
||||
func buildState(slot uint64, validatorCount uint64) *pb.BeaconState {
|
||||
func buildState(slot, validatorCount uint64) *pb.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package precompute
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
@@ -21,8 +23,27 @@ func ProcessSlashingsPrecompute(state *stateTrie.BeaconState, pBal *Balance) err
|
||||
totalSlashing += slashing
|
||||
}
|
||||
|
||||
minSlashing := mathutil.Min(totalSlashing*3, pBal.ActiveCurrentEpoch)
|
||||
minSlashing := mathutil.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
|
||||
epochToWithdraw := currentEpoch + exitLength/2
|
||||
|
||||
vs := state.ValidatorsReadOnly()
|
||||
var hasSlashing bool
|
||||
// Iterate through validator list in state, stop until a validator satisfies slashing condition of current epoch.
|
||||
for _, v := range vs {
|
||||
if v == nil {
|
||||
return errors.New("nil validator in state")
|
||||
}
|
||||
correctEpoch := epochToWithdraw == v.WithdrawableEpoch()
|
||||
if v.Slashed() && correctEpoch {
|
||||
hasSlashing = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// Exit early if there's no meaningful slashing to process.
|
||||
if !hasSlashing {
|
||||
return nil
|
||||
}
|
||||
|
||||
increment := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
validatorFunc := func(idx int, val *ethpb.Validator) (bool, error) {
|
||||
correctEpoch := epochToWithdraw == val.WithdrawableEpoch
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestProcessSlashingsPrecompute_NotSlashed(t *testing.T) {
|
||||
func TestProcessSlashingsPrecompute_NotSlashedWithSlashedTrue(t *testing.T) {
|
||||
s, err := beaconstate.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 0,
|
||||
Validators: []*ethpb.Validator{{Slashed: true}},
|
||||
@@ -28,6 +28,21 @@ func TestProcessSlashingsPrecompute_NotSlashed(t *testing.T) {
|
||||
assert.Equal(t, wanted, s.Balances()[0], "Unexpected slashed balance")
|
||||
}
|
||||
|
||||
func TestProcessSlashingsPrecompute_NotSlashedWithSlashedFalse(t *testing.T) {
|
||||
s, err := beaconstate.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 0,
|
||||
Validators: []*ethpb.Validator{{}},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance},
|
||||
Slashings: []uint64{0, 1e9},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
pBal := &precompute.Balance{ActiveCurrentEpoch: params.BeaconConfig().MaxEffectiveBalance}
|
||||
require.NoError(t, precompute.ProcessSlashingsPrecompute(s, pBal))
|
||||
|
||||
wanted := params.BeaconConfig().MaxEffectiveBalance
|
||||
assert.Equal(t, wanted, s.Balances()[0], "Unexpected slashed balance")
|
||||
}
|
||||
|
||||
func TestProcessSlashingsPrecompute_SlashedLess(t *testing.T) {
|
||||
tests := []struct {
|
||||
state *pb.BeaconState
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
package spectest
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRewardsAndPenaltiesMainnet(t *testing.T) {
|
||||
runRewardsAndPenaltiesTests(t, "mainnet")
|
||||
}
|
||||
@@ -36,7 +36,6 @@ go_library(
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
|
||||
@@ -6,34 +6,14 @@ import (
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
)
|
||||
|
||||
// SlotSignature returns the signed signature of the hash tree root of input slot.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
|
||||
// domain = get_domain(state, DOMAIN_SELECTION_PROOF, compute_epoch_at_slot(slot))
|
||||
// signing_root = compute_signing_root(slot, domain)
|
||||
// return bls.Sign(privkey, signing_root)
|
||||
func SlotSignature(state *stateTrie.BeaconState, slot uint64, privKey bls.SecretKey) (bls.Signature, error) {
|
||||
d, err := Domain(state.Fork(), CurrentEpoch(state), params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s, err := ComputeSigningRoot(slot, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return privKey.Sign(s[:]), nil
|
||||
}
|
||||
|
||||
// IsAggregator returns true if the signature is from the input validator. The committee
|
||||
// count is provided as an argument rather than direct implementation from spec. Having
|
||||
// count is provided as an argument rather than imported implementation from spec. Having
|
||||
// committee count as an argument allows cheaper computation at run time.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
@@ -164,3 +144,23 @@ func ValidateAttestationTime(attSlot uint64, genesisTime time.Time) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyCheckpointEpoch is within current epoch and previous epoch
|
||||
// with respect to current time. Returns true if it's within, false if it's not.
|
||||
func VerifyCheckpointEpoch(c *ethpb.Checkpoint, genesis time.Time) bool {
|
||||
now := uint64(timeutils.Now().Unix())
|
||||
genesisTime := uint64(genesis.Unix())
|
||||
currentSlot := (now - genesisTime) / params.BeaconConfig().SecondsPerSlot
|
||||
currentEpoch := SlotToEpoch(currentSlot)
|
||||
|
||||
var prevEpoch uint64
|
||||
if currentEpoch > 1 {
|
||||
prevEpoch = currentEpoch - 1
|
||||
}
|
||||
|
||||
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -18,27 +18,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
)
|
||||
|
||||
func TestAttestation_SlotSignature(t *testing.T) {
|
||||
priv := bls.RandKey()
|
||||
pub := priv.PublicKey()
|
||||
state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
|
||||
Validators: []*ethpb.Validator{{PublicKey: pub.Marshal()}},
|
||||
Fork: &pb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
},
|
||||
Slot: 100,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
slot := uint64(101)
|
||||
|
||||
sig, err := helpers.SlotSignature(state, slot, priv)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, helpers.ComputeDomainVerifySigningRoot(state, 0, helpers.CurrentEpoch(state), slot,
|
||||
params.BeaconConfig().DomainBeaconAttester, sig.Marshal()))
|
||||
}
|
||||
|
||||
func TestAttestation_IsAggregator(t *testing.T) {
|
||||
t.Run("aggregator", func(t *testing.T) {
|
||||
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
|
||||
@@ -70,7 +49,8 @@ func TestAttestation_AggregateSignature(t *testing.T) {
|
||||
atts := make([]*ethpb.Attestation, 0, 100)
|
||||
msg := bytesutil.ToBytes32([]byte("hello"))
|
||||
for i := 0; i < 100; i++ {
|
||||
priv := bls.RandKey()
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pub := priv.PublicKey()
|
||||
sig := priv.Sign(msg[:])
|
||||
pubkeys = append(pubkeys, pub)
|
||||
@@ -87,7 +67,8 @@ func TestAttestation_AggregateSignature(t *testing.T) {
|
||||
atts := make([]*ethpb.Attestation, 0, 100)
|
||||
msg := []byte("hello")
|
||||
for i := 0; i < 100; i++ {
|
||||
priv := bls.RandKey()
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pub := priv.PublicKey()
|
||||
sig := priv.Sign(msg)
|
||||
pubkeys = append(pubkeys, pub)
|
||||
@@ -235,3 +216,12 @@ func Test_ValidateAttestationTime(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
|
||||
// Genesis was 6 epochs ago exactly.
|
||||
genesis := time.Now().Add(-1 * time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*params.BeaconConfig().SlotsPerEpoch*6))
|
||||
assert.Equal(t, true, helpers.VerifyCheckpointEpoch(ðpb.Checkpoint{Epoch: 6}, genesis))
|
||||
assert.Equal(t, true, helpers.VerifyCheckpointEpoch(ðpb.Checkpoint{Epoch: 5}, genesis))
|
||||
assert.Equal(t, false, helpers.VerifyCheckpointEpoch(ðpb.Checkpoint{Epoch: 4}, genesis))
|
||||
assert.Equal(t, false, helpers.VerifyCheckpointEpoch(ðpb.Checkpoint{Epoch: 2}, genesis))
|
||||
}
|
||||
|
||||
@@ -23,6 +23,15 @@ func BlockRootAtSlot(state *stateTrie.BeaconState, slot uint64) ([]byte, error)
|
||||
return state.BlockRootAtIndex(slot % params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
}
|
||||
|
||||
// StateRootAtSlot returns the cached state root at that particular slot. If no state
|
||||
// root has been cached it will return a zero-hash.
|
||||
func StateRootAtSlot(state *stateTrie.BeaconState, slot uint64) ([]byte, error) {
|
||||
if slot >= state.Slot() || state.Slot() > slot+params.BeaconConfig().SlotsPerHistoricalRoot {
|
||||
return []byte{}, errors.Errorf("slot %d out of bounds", slot)
|
||||
}
|
||||
return state.StateRootAtIndex(slot % params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
}
|
||||
|
||||
// BlockRoot returns the block root stored in the BeaconState for epoch start slot.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
@@ -12,16 +13,16 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
)
|
||||
|
||||
var committeeCache = cache.NewCommitteesCache()
|
||||
var proposerIndicesCache = cache.NewProposerIndicesCache()
|
||||
|
||||
// SlotCommitteeCount returns the number of crosslink committees of a slot. The
|
||||
// active validator count is provided as an argument rather than a direct implementation
|
||||
// active validator count is provided as an argument rather than a imported implementation
|
||||
// from the spec definition. Having the active validator count as an argument allows for
|
||||
// cheaper computation, instead of retrieving head state, one can retrieve the validator
|
||||
// count.
|
||||
@@ -66,7 +67,7 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
|
||||
// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index,
|
||||
// count=committees_per_slot * SLOTS_PER_EPOCH,
|
||||
// )
|
||||
func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot uint64, committeeIndex uint64) ([]uint64, error) {
|
||||
func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot, committeeIndex uint64) ([]uint64, error) {
|
||||
epoch := SlotToEpoch(slot)
|
||||
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
@@ -90,9 +91,9 @@ func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot uint64, committ
|
||||
}
|
||||
|
||||
// BeaconCommittee returns the crosslink committee of a given slot and committee index. The
|
||||
// validator indices and seed are provided as an argument rather than a direct implementation
|
||||
// validator indices and seed are provided as an argument rather than a imported implementation
|
||||
// from the spec definition. Having them as an argument allows for cheaper computation run time.
|
||||
func BeaconCommittee(validatorIndices []uint64, seed [32]byte, slot uint64, committeeIndex uint64) ([]uint64, error) {
|
||||
func BeaconCommittee(validatorIndices []uint64, seed [32]byte, slot, committeeIndex uint64) ([]uint64, error) {
|
||||
indices, err := committeeCache.Committee(slot, seed, committeeIndex)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not interface with committee cache")
|
||||
@@ -126,8 +127,7 @@ func BeaconCommittee(validatorIndices []uint64, seed [32]byte, slot uint64, comm
|
||||
func ComputeCommittee(
|
||||
indices []uint64,
|
||||
seed [32]byte,
|
||||
index uint64,
|
||||
count uint64,
|
||||
index, count uint64,
|
||||
) ([]uint64, error) {
|
||||
validatorCount := uint64(len(indices))
|
||||
start := sliceutil.SplitOffset(validatorCount, count, index)
|
||||
@@ -148,25 +148,6 @@ func ComputeCommittee(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// This updates the cache on a miss.
|
||||
if featureconfig.Get().UseCheckPointInfoCache {
|
||||
sortedIndices := make([]uint64, len(indices))
|
||||
copy(sortedIndices, indices)
|
||||
sort.Slice(sortedIndices, func(i, j int) bool {
|
||||
return sortedIndices[i] < sortedIndices[j]
|
||||
})
|
||||
|
||||
count = SlotCommitteeCount(uint64(len(shuffledIndices)))
|
||||
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
|
||||
ShuffledIndices: shuffledList,
|
||||
CommitteeCount: count * params.BeaconConfig().SlotsPerEpoch,
|
||||
Seed: seed,
|
||||
SortedIndices: sortedIndices,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return shuffledList[start:end], nil
|
||||
}

@@ -205,7 +186,9 @@ func CommitteeAssignments(
return nil, nil, err
}
proposerIndexToSlots := make(map[uint64][]uint64, params.BeaconConfig().SlotsPerEpoch)
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Proposal epochs do not have a look ahead, so we skip them over here.
validProposalEpoch := epoch < nextEpoch
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch && validProposalEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
@@ -348,6 +331,12 @@ func UpdateCommitteeCache(state *stateTrie.BeaconState, epoch uint64) error {

// UpdateProposerIndicesInCache updates proposer indices entry of the committee cache.
func UpdateProposerIndicesInCache(state *stateTrie.BeaconState, epoch uint64) error {
// The cache uses the state root at the (current epoch - 2)'s slot as key. (e.g. for epoch 2, the key is root at slot 31)
// Which is the reason why we skip genesis epoch.
if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
return nil
}

indices, err := ActiveValidatorIndices(state, epoch)
if err != nil {
return err
@@ -356,21 +345,33 @@ func UpdateProposerIndicesInCache(state *stateTrie.BeaconState, epoch uint64) er
if err != nil {
return err
}
// The committee cache uses attester domain seed as key.
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
// Use state root from (current_epoch - 1 - lookahead))
wantedEpoch := PrevEpoch(state)
if wantedEpoch >= params.BeaconConfig().MinSeedLookahead {
wantedEpoch -= params.BeaconConfig().MinSeedLookahead
}
s, err := EndSlot(wantedEpoch)
if err != nil {
return err
}
if err := committeeCache.AddProposerIndicesList(seed, proposerIndices); err != nil {
r, err := StateRootAtSlot(state, s)
if err != nil {
return err
}

return nil
// Skip Cache if we have an invalid key
if r == nil || bytes.Equal(r, params.BeaconConfig().ZeroHash[:]) {
return nil
}
return proposerIndicesCache.AddProposerIndices(&cache.ProposerIndices{
BlockRoot: bytesutil.ToBytes32(r),
ProposerIndices: proposerIndices,
})
}
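The key derivation above (previous epoch minus MIN_SEED_LOOKAHEAD, then that epoch's last slot) is easier to see outside diff form. A small stand-alone sketch of the same arithmetic, with SlotsPerEpoch and MinSeedLookahead hard-coded to assumed mainnet values and a hypothetical wantedKeySlot helper name:

package main

import "fmt"

const (
	slotsPerEpoch    = 32
	minSeedLookahead = 1
)

// wantedKeySlot returns the slot whose state root keys the proposer indices
// cache for a state whose previous epoch is prevEpoch, mirroring the logic above.
func wantedKeySlot(prevEpoch uint64) uint64 {
	wantedEpoch := prevEpoch
	if wantedEpoch >= minSeedLookahead {
		wantedEpoch -= minSeedLookahead
	}
	return (wantedEpoch+1)*slotsPerEpoch - 1 // EndSlot(wantedEpoch)
}

func main() {
	// For a state in epoch 2 (previous epoch 1), the key is the root at slot 31.
	fmt.Println(wantedKeySlot(1)) // 31
}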

// ClearCache clears the committee cache
func ClearCache() {
committeeCache = cache.NewCommitteesCache()
proposerIndicesCache = cache.NewProposerIndicesCache()
}

// This computes proposer indices of the current epoch and returns a list of proposer indices,

@@ -203,6 +203,36 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}
}

func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
// First 2 epochs only half validators are activated.
var activationEpoch uint64
if i >= len(validators)/2 {
activationEpoch = 3
}
validators[i] = &ethpb.Validator{
ActivationEpoch: activationEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}

state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndxs, err := CommitteeAssignments(state, CurrentEpoch(state))
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")

_, proposerIndxs, err = CommitteeAssignments(state, CurrentEpoch(state)+1)
require.NoError(t, err)
require.Equal(t, 0, len(proposerIndxs), "wanted empty proposer index set")
}

func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -630,7 +660,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
}

func TestUpdateProposerIndicesInCache_CouldNotGetActiveIndices(t *testing.T) {
err := UpdateProposerIndicesInCache(&beaconstate.BeaconState{}, 0)
err := UpdateProposerIndicesInCache(&beaconstate.BeaconState{}, 2)
want := "nil inner state"
require.ErrorContains(t, want, err)
}

@@ -64,7 +64,7 @@ func TotalActiveBalance(state *stateTrie.BeaconState) (uint64, error) {
// Increase the validator balance at index ``index`` by ``delta``.
// """
// state.balances[index] += delta
func IncreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) error {
func IncreaseBalance(state *stateTrie.BeaconState, idx, delta uint64) error {
balAtIdx, err := state.BalanceAtIndex(idx)
if err != nil {
return err
@@ -82,7 +82,7 @@ func IncreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) err
// Increase the validator balance at index ``index`` by ``delta``.
// """
// state.balances[index] += delta
func IncreaseBalanceWithVal(currBalance uint64, delta uint64) uint64 {
func IncreaseBalanceWithVal(currBalance, delta uint64) uint64 {
return currBalance + delta
}

@@ -94,7 +94,7 @@ func IncreaseBalanceWithVal(currBalance uint64, delta uint64) uint64 {
// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
// """
// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
func DecreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) error {
func DecreaseBalance(state *stateTrie.BeaconState, idx, delta uint64) error {
balAtIdx, err := state.BalanceAtIndex(idx)
if err != nil {
return err
@@ -112,7 +112,7 @@ func DecreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) err
// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
// """
// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
func DecreaseBalanceWithVal(currBalance uint64, delta uint64) uint64 {
func DecreaseBalanceWithVal(currBalance, delta uint64) uint64 {
if delta > currBalance {
return 0
}
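The balance helpers touched above implement saturating Gwei arithmetic. A compact, self-contained version of the same behaviour (local names, not the package's exported functions):

package main

import "fmt"

// increase adds delta to a Gwei balance.
func increase(balance, delta uint64) uint64 { return balance + delta }

// decrease subtracts delta with underflow protection, matching
// state.balances[index] = 0 if delta > balance else balance - delta.
func decrease(balance, delta uint64) uint64 {
	if delta > balance {
		return 0
	}
	return balance - delta
}

func main() {
	fmt.Println(increase(32_000_000_000, 5)) // 32000000005
	fmt.Println(decrease(3, 5))              // 0, never wraps around
}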

@@ -34,13 +34,13 @@ func SplitIndices(l []uint64, n uint64) [][]uint64 {
// We utilize 'swap or not' shuffling in this implementation; we are allocating the memory with the seed that stays
// constant between iterations instead of reallocating it each iteration as in the spec. This implementation is based
// on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle
func ShuffledIndex(index uint64, indexCount uint64, seed [32]byte) (uint64, error) {
func ShuffledIndex(index, indexCount uint64, seed [32]byte) (uint64, error) {
return ComputeShuffledIndex(index, indexCount, seed, true /* shuffle */)
}

// UnShuffledIndex returns the inverse of ShuffledIndex. This implementation is based
// on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle
func UnShuffledIndex(index uint64, indexCount uint64, seed [32]byte) (uint64, error) {
func UnShuffledIndex(index, indexCount uint64, seed [32]byte) (uint64, error) {
return ComputeShuffledIndex(index, indexCount, seed, false /* un-shuffle */)
}
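Because UnShuffledIndex is documented as the inverse of ShuffledIndex, a round-trip check makes the relationship concrete. This is a hypothetical test snippet written against the helpers package shown in the diff (it assumes the package's existing testing and require imports), not code from the changeset:

// Round-trip property: un-shuffling a shuffled index returns the original.
func TestShuffledIndex_RoundTrip(t *testing.T) {
	seed := [32]byte{1, 2, 3}
	const count = uint64(64)
	for i := uint64(0); i < count; i++ {
		shuffled, err := ShuffledIndex(i, count, seed)
		require.NoError(t, err)
		original, err := UnShuffledIndex(shuffled, count, seed)
		require.NoError(t, err)
		require.Equal(t, i, original)
	}
}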

@@ -64,7 +64,7 @@ func UnShuffledIndex(index uint64, indexCount uint64, seed [32]byte) (uint64, er
// index = flip if bit else index
//
// return ValidatorIndex(index)
func ComputeShuffledIndex(index uint64, indexCount uint64, seed [32]byte, shuffle bool) (uint64, error) {
func ComputeShuffledIndex(index, indexCount uint64, seed [32]byte, shuffle bool) (uint64, error) {
if params.BeaconConfig().ShuffleRoundCount == 0 {
return index, nil
}

@@ -60,7 +60,7 @@ func TestSplitIndices_OK(t *testing.T) {
}

func TestShuffleList_Vs_ShuffleIndex(t *testing.T) {
list := []uint64{}
var list []uint64
listSize := uint64(1000)
seed := [32]byte{123, 42}
for i := uint64(0); i < listSize; i++ {
@@ -124,7 +124,7 @@ func BenchmarkShuffleList(b *testing.B) {
}

func TestShuffledIndex(t *testing.T) {
list := []uint64{}
var list []uint64
listSize := uint64(399)
for i := uint64(0); i < listSize; i++ {
list = append(list, i)

@@ -6,7 +6,6 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -66,7 +65,7 @@ func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, er
if err != nil {
return [32]byte{}, err
}
container := &p2ppb.SigningData{
container := &pb.SigningData{
ObjectRoot: objRoot[:],
Domain: domain,
}
@@ -74,7 +73,7 @@ func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, er
}

// ComputeDomainVerifySigningRoot computes domain and verifies signing root of an object given the beacon state, validator index and signature.
func ComputeDomainVerifySigningRoot(state *state.BeaconState, index uint64, epoch uint64, obj interface{}, domain [4]byte, sig []byte) error {
func ComputeDomainVerifySigningRoot(state *state.BeaconState, index, epoch uint64, obj interface{}, domain [4]byte, sig []byte) error {
v, err := state.ValidatorAtIndex(index)
if err != nil {
return err
@@ -87,7 +86,7 @@ func ComputeDomainVerifySigningRoot(state *state.BeaconState, index uint64, epoc
}

// VerifySigningRoot verifies the signing root of an object given it's public key, signature and domain.
func VerifySigningRoot(obj interface{}, pub []byte, signature []byte, domain []byte) error {
func VerifySigningRoot(obj interface{}, pub, signature, domain []byte) error {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return errors.Wrap(err, "could not convert bytes to public key")
@@ -107,7 +106,7 @@ func VerifySigningRoot(obj interface{}, pub []byte, signature []byte, domain []b
}

// VerifyBlockSigningRoot verifies the signing root of a block given it's public key, signature and domain.
func VerifyBlockSigningRoot(blk *ethpb.BeaconBlock, pub []byte, signature []byte, domain []byte) error {
func VerifyBlockSigningRoot(blk *ethpb.BeaconBlock, pub, signature, domain []byte) error {
set, err := RetrieveBlockSignatureSet(blk, pub, signature, domain)
if err != nil {
return err
@@ -129,15 +128,13 @@ func VerifyBlockSigningRoot(blk *ethpb.BeaconBlock, pub []byte, signature []byte

// RetrieveBlockSignatureSet retrieves the relevant signature, message and pubkey data from a block and collating it
// into a signature set object.
func RetrieveBlockSignatureSet(blk *ethpb.BeaconBlock, pub []byte, signature []byte, domain []byte) (*bls.SignatureSet, error) {
func RetrieveBlockSignatureSet(blk *ethpb.BeaconBlock, pub, signature, domain []byte) (*bls.SignatureSet, error) {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
root, err := signingData(func() ([32]byte, error) {
// utilize custom block hashing function
return blk.HashTreeRoot()
}, domain)
// utilize custom block hashing function
root, err := signingData(blk.HashTreeRoot, domain)
if err != nil {
return nil, errors.Wrap(err, "could not compute signing root")
}
@@ -149,7 +146,7 @@ func RetrieveBlockSignatureSet(blk *ethpb.BeaconBlock, pub []byte, signature []b
}

// VerifyBlockHeaderSigningRoot verifies the signing root of a block header given it's public key, signature and domain.
func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub []byte, signature []byte, domain []byte) error {
func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signature, domain []byte) error {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return errors.Wrap(err, "could not convert bytes to public key")
@@ -158,9 +155,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub []byte, s
if err != nil {
return errors.Wrap(err, "could not convert bytes to signature")
}
root, err := signingData(func() ([32]byte, error) {
return blkHdr.HashTreeRoot()
}, domain)
root, err := signingData(blkHdr.HashTreeRoot, domain)
if err != nil {
return errors.Wrap(err, "could not compute signing root")
}
@@ -183,7 +178,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub []byte, s
// genesis_validators_root = Root() # all bytes zero by default
// fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root)
// return Domain(domain_type + fork_data_root[:28])
func ComputeDomain(domainType [DomainByteLength]byte, forkVersion []byte, genesisValidatorsRoot []byte) ([]byte, error) {
func ComputeDomain(domainType [DomainByteLength]byte, forkVersion, genesisValidatorsRoot []byte) ([]byte, error) {
if forkVersion == nil {
forkVersion = params.BeaconConfig().GenesisForkVersion
}
@@ -203,7 +198,7 @@ func ComputeDomain(domainType [DomainByteLength]byte, forkVersion []byte, genesi

// This returns the bls domain given by the domain type and fork data root.
func domain(domainType [DomainByteLength]byte, forkDataRoot []byte) []byte {
b := []byte{}
var b []byte
b = append(b, domainType[:4]...)
b = append(b, forkDataRoot[:28]...)
return b
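The domain helper above is plain byte concatenation: 4 bytes of domain type followed by the first 28 bytes of the fork data root, producing a 32-byte BLS domain. A stand-alone sketch of that layout (illustrative names only):

package main

import "fmt"

// blsDomain concatenates a 4-byte domain type with the first 28 bytes of the
// fork data root, yielding the 32-byte domain used for signing.
func blsDomain(domainType [4]byte, forkDataRoot [32]byte) []byte {
	var b []byte
	b = append(b, domainType[:]...)
	b = append(b, forkDataRoot[:28]...)
	return b
}

func main() {
	d := blsDomain([4]byte{0, 0, 0, 0}, [32]byte{0xaa})
	fmt.Println(len(d)) // 32
}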
@@ -222,7 +217,7 @@ func domain(domainType [DomainByteLength]byte, forkDataRoot []byte) []byte {
// current_version=current_version,
// genesis_validators_root=genesis_validators_root,
// ))
func computeForkDataRoot(version []byte, root []byte) ([32]byte, error) {
func computeForkDataRoot(version, root []byte) ([32]byte, error) {
r, err := (&pb.ForkData{
CurrentVersion: version,
GenesisValidatorsRoot: root,
@@ -243,7 +238,7 @@ func computeForkDataRoot(version []byte, root []byte) ([32]byte, error) {
// 4-bytes suffices for practical separation of forks/chains.
// """
// return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4])
func ComputeForkDigest(version []byte, genesisValidatorsRoot []byte) ([4]byte, error) {
func ComputeForkDigest(version, genesisValidatorsRoot []byte) ([4]byte, error) {
dataRoot, err := computeForkDataRoot(version, genesisValidatorsRoot)
if err != nil {
return [4]byte{}, err

@@ -63,9 +63,9 @@ func TestFuzzverifySigningRoot_10000(t *testing.T) {
pubkey := [48]byte{}
sig := [96]byte{}
domain := [4]byte{}
p := []byte{}
s := []byte{}
d := []byte{}
var p []byte
var s []byte
var d []byte
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(&pubkey)

@@ -2,6 +2,7 @@ package helpers

import (
"fmt"
"math"
"time"

"github.com/pkg/errors"
@@ -82,6 +83,19 @@ func StartSlot(epoch uint64) (uint64, error) {
return slot, nil
}

// EndSlot returns the last slot number of the
// current epoch.
func EndSlot(epoch uint64) (uint64, error) {
if epoch == math.MaxUint64 {
return 0, errors.New("start slot calculation overflows")
}
slot, err := StartSlot(epoch + 1)
if err != nil {
return 0, err
}
return slot - 1, nil
}
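EndSlot is defined via StartSlot of the next epoch, so with 32 slots per epoch, epoch e ends at 32*(e+1)-1. A quick self-contained check of that arithmetic, with SlotsPerEpoch hard-coded and the overflow guards of the real helper omitted:

package main

import "fmt"

const slotsPerEpoch = 32

func startSlot(epoch uint64) uint64 { return epoch * slotsPerEpoch }
func endSlot(epoch uint64) uint64   { return startSlot(epoch+1) - 1 }

func main() {
	fmt.Println(startSlot(2), endSlot(2)) // 64 95
	fmt.Println(endSlot(0))               // 31, the last slot of the genesis epoch
}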

// IsEpochStart returns true if the given slot number is an epoch starting slot
// number.
func IsEpochStart(slot uint64) bool {
@@ -100,7 +114,7 @@ func SlotsSinceEpochStarts(slot uint64) uint64 {
}

// VerifySlotTime validates the input slot is not from the future.
func VerifySlotTime(genesisTime uint64, slot uint64, timeTolerance time.Duration) error {
func VerifySlotTime(genesisTime, slot uint64, timeTolerance time.Duration) error {
slotTime, err := SlotToTime(genesisTime, slot)
if err != nil {
return err
@@ -122,14 +136,14 @@ func VerifySlotTime(genesisTime uint64, slot uint64, timeTolerance time.Duration
}

// SlotToTime takes the given slot and genesis time to determine the start time of the slot.
func SlotToTime(genesisTimeSec uint64, slot uint64) (time.Time, error) {
func SlotToTime(genesisTimeSec, slot uint64) (time.Time, error) {
timeSinceGenesis, err := mathutil.Mul64(slot, params.BeaconConfig().SecondsPerSlot)
if err != nil {
return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %v", slot, err)
return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %w", slot, err)
}
sTime, err := mathutil.Add64(genesisTimeSec, timeSinceGenesis)
if err != nil {
return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %v", slot, err)
return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %w", slot, err)
}
return time.Unix(int64(sTime), 0), nil
}
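SlotToTime is genesis_time + slot * SECONDS_PER_SLOT with overflow-checked arithmetic. A minimal sketch without the overflow guards, assuming 12-second slots and an example genesis timestamp:

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12

// slotToTime returns the wall-clock start of a slot; the real helper also
// guards the multiplication and addition against uint64 overflow.
func slotToTime(genesisTimeSec, slot uint64) time.Time {
	return time.Unix(int64(genesisTimeSec+slot*secondsPerSlot), 0)
}

func main() {
	genesis := uint64(1600000000) // an example genesis timestamp
	fmt.Println(slotToTime(genesis, 32).UTC())
}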
@@ -153,7 +167,7 @@ func CurrentSlot(genesisTimeSec uint64) uint64 {
// ValidateSlotClock validates a provided slot against the local
// clock to ensure slots that are unreasonable are returned with
// an error.
func ValidateSlotClock(slot uint64, genesisTimeSec uint64) error {
func ValidateSlotClock(slot, genesisTimeSec uint64) error {
maxPossibleSlot := CurrentSlot(genesisTimeSec) + MaxSlotBuffer
// Defensive check to ensure that we only process slots up to a hard limit
// from our local clock.
@@ -204,3 +218,13 @@ func WeakSubjectivityCheckptEpoch(valCount uint64) (uint64, error) {
}
return wsp, nil
}

// VotingPeriodStartTime returns the current voting period's start time
// depending on the provided genesis and current slot.
func VotingPeriodStartTime(genesis, slot uint64) uint64 {
startTime := genesis
startTime +=
(slot - (slot % (params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch))) *
params.BeaconConfig().SecondsPerSlot
return startTime
}
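VotingPeriodStartTime rounds the slot down to the start of the current eth1 voting period before converting to seconds. A worked example with assumed mainnet-style constants (64 epochs per voting period, 32 slots per epoch, 12-second slots):

package main

import "fmt"

const (
	epochsPerEth1VotingPeriod = 64
	slotsPerEpoch             = 32
	secondsPerSlot            = 12
)

func votingPeriodStartTime(genesis, slot uint64) uint64 {
	periodSlots := uint64(epochsPerEth1VotingPeriod * slotsPerEpoch) // 2048 slots per voting period
	return genesis + (slot-(slot%periodSlots))*secondsPerSlot
}

func main() {
	// Slot 2500 is inside the second voting period, which began at slot 2048.
	fmt.Println(votingPeriodStartTime(0, 2500)) // 2048 * 12 = 24576
}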

@@ -96,11 +96,34 @@ func TestEpochStartSlot_OK(t *testing.T) {
{epoch: 1 << 60, startSlot: 1 << 63, error: true},
}
for _, tt := range tests {
state := &pb.BeaconState{Slot: tt.epoch}
ss, err := StartSlot(tt.epoch)
if !tt.error {
require.NoError(t, err)
assert.Equal(t, tt.startSlot, ss, "StartSlot(%d)", state.Slot)
assert.Equal(t, tt.startSlot, ss, "StartSlot(%d)", tt.epoch)
} else {
require.ErrorContains(t, "start slot calculation overflow", err)
}
}
}

func TestEpochEndSlot_OK(t *testing.T) {
tests := []struct {
epoch uint64
startSlot uint64
error bool
}{
{epoch: 0, startSlot: 1*params.BeaconConfig().SlotsPerEpoch - 1, error: false},
{epoch: 1, startSlot: 2*params.BeaconConfig().SlotsPerEpoch - 1, error: false},
{epoch: 10, startSlot: 11*params.BeaconConfig().SlotsPerEpoch - 1, error: false},
{epoch: 1 << 59, startSlot: 1 << 63, error: true},
{epoch: 1 << 60, startSlot: 1 << 63, error: true},
{epoch: math.MaxUint64, startSlot: 0, error: true},
}
for _, tt := range tests {
ss, err := EndSlot(tt.epoch)
if !tt.error {
require.NoError(t, err)
assert.Equal(t, tt.startSlot, ss, "StartSlot(%d)", tt.epoch)
} else {
require.ErrorContains(t, "start slot calculation overflow", err)
}

@@ -1,6 +1,8 @@
package helpers

import (
"bytes"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -29,7 +31,7 @@ func IsActiveValidatorUsingTrie(validator *stateTrie.ReadOnlyValidator, epoch ui
return checkValidatorActiveStatus(validator.ActivationEpoch(), validator.ExitEpoch(), epoch)
}

func checkValidatorActiveStatus(activationEpoch uint64, exitEpoch uint64, epoch uint64) bool {
func checkValidatorActiveStatus(activationEpoch, exitEpoch, epoch uint64) bool {
return activationEpoch <= epoch && epoch < exitEpoch
}

@@ -42,7 +44,7 @@ func checkValidatorActiveStatus(activationEpoch uint64, exitEpoch uint64, epoch
// Check if ``validator`` is slashable.
// """
// return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch)
func IsSlashableValidator(activationEpoch uint64, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
func IsSlashableValidator(activationEpoch, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
return checkValidatorSlashable(activationEpoch, withdrawableEpoch, slashed, epoch)
}

@@ -51,7 +53,7 @@ func IsSlashableValidatorUsingTrie(val *stateTrie.ReadOnlyValidator, epoch uint6
return checkValidatorSlashable(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), epoch)
}

func checkValidatorSlashable(activationEpoch uint64, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
func checkValidatorSlashable(activationEpoch, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
active := activationEpoch <= epoch
beforeWithdrawable := epoch < withdrawableEpoch
return beforeWithdrawable && active && !slashed
@@ -175,20 +177,39 @@ func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) {
// return compute_proposer_index(state, indices, seed)
func BeaconProposerIndex(state *stateTrie.BeaconState) (uint64, error) {
e := CurrentEpoch(state)

seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return 0, errors.Wrap(err, "could not generate seed")
}
proposerIndices, err := committeeCache.ProposerIndices(seed)
if err != nil {
return 0, errors.Wrap(err, "could not interface with committee cache")
}
if proposerIndices != nil {
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
// For simplicity, the node will skip caching of genesis epoch.
if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
wantedEpoch := PrevEpoch(state)
if wantedEpoch >= params.BeaconConfig().MinSeedLookahead {
wantedEpoch -= params.BeaconConfig().MinSeedLookahead
}
s, err := EndSlot(wantedEpoch)
if err != nil {
return 0, err
}
r, err := StateRootAtSlot(state, s)
if err != nil {
return 0, err
}
if r != nil && !bytes.Equal(r, params.BeaconConfig().ZeroHash[:]) {
proposerIndices, err := proposerIndicesCache.ProposerIndices(bytesutil.ToBytes32(r))
if err != nil {
return 0, errors.Wrap(err, "could not interface with committee cache")
}
if proposerIndices != nil {
if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
return 0, errors.Errorf("length of proposer indices is not equal %d to slots per epoch", len(proposerIndices))
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
if err := UpdateProposerIndicesInCache(state, e); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
}
}

seed, err = Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return 0, errors.Wrap(err, "could not generate seed")
}
@@ -201,19 +222,11 @@ func BeaconProposerIndex(state *stateTrie.BeaconState) (uint64, error) {
return 0, errors.Wrap(err, "could not get active indices")
}

if err := UpdateProposerIndicesInCache(state, e); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}

return ComputeProposerIndex(state, indices, seedWithSlotHash)
}

// ComputeProposerIndex returns the index sampled by effective balance, which is used to calculate proposer.
//
// This method is more efficient than ComputeProposerIndexWithValidators as it uses the read only validator
// abstraction to retrieve validator related data. Whereas the other method requires a whole copy of the validator
// set.
//
// Spec pseudocode definition:
// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Hash) -> ValidatorIndex:
// """
@@ -260,57 +273,6 @@ func ComputeProposerIndex(bState *stateTrie.BeaconState, activeIndices []uint64,
}
}

// ComputeProposerIndexWithValidators returns the index sampled by effective balance, which is used to calculate proposer.
//
// Note: This method signature deviates slightly from the spec recommended definition. The full
// state object is not required to compute the proposer index.
//
// Spec pseudocode definition:
// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Hash) -> ValidatorIndex:
// """
// Return from ``indices`` a random index sampled by effective balance.
// """
// assert len(indices) > 0
// MAX_RANDOM_BYTE = 2**8 - 1
// i = 0
// while True:
// candidate_index = indices[compute_shuffled_index(ValidatorIndex(i % len(indices)), len(indices), seed)]
// random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
// effective_balance = state.validators[candidate_index].effective_balance
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
// return ValidatorIndex(candidate_index)
// i += 1
// Deprecated: Prefer using the beacon state with ComputeProposerIndex to avoid an unnecessary copy of the validator set.
func ComputeProposerIndexWithValidators(validators []*ethpb.Validator, activeIndices []uint64, seed [32]byte) (uint64, error) {
length := uint64(len(activeIndices))
if length == 0 {
return 0, errors.New("empty active indices list")
}
maxRandomByte := uint64(1<<8 - 1)
hashFunc := hashutil.CustomSHA256Hasher()

for i := uint64(0); ; i++ {
candidateIndex, err := ComputeShuffledIndex(i%length, length, seed, true /* shuffle */)
if err != nil {
return 0, err
}
candidateIndex = activeIndices[candidateIndex]
if candidateIndex >= uint64(len(validators)) {
return 0, errors.New("active index out of range")
}
b := append(seed[:], bytesutil.Bytes8(i/32)...)
randomByte := hashFunc(b)[i%32]
v := validators[candidateIndex]
var effectiveBal uint64
if v != nil {
effectiveBal = v.EffectiveBalance
}
if effectiveBal*maxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
return candidateIndex, nil
}
}
}
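The deprecated helper above is the spec's rejection sampling of a proposer weighted by effective balance: a candidate is accepted when effective_balance * 255 >= MAX_EFFECTIVE_BALANCE * random_byte. A stripped-down, self-contained illustration of that acceptance rule, where plain modular selection and SHA-256 stand in for the shuffle and Prysm's hasher:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

const maxEffectiveBalance = 32_000_000_000 // Gwei

// sampleProposer keeps drawing candidates until one passes the
// balance-weighted acceptance test, mirroring the shape of the spec loop.
func sampleProposer(balances []uint64, seed [32]byte) int {
	for i := uint64(0); ; i++ {
		candidate := int(i % uint64(len(balances))) // the real code shuffles here
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, i/32)
		h := sha256.Sum256(append(seed[:], buf...))
		randomByte := uint64(h[i%32])
		if balances[candidate]*255 >= maxEffectiveBalance*randomByte {
			return candidate
		}
	}
}

func main() {
	balances := []uint64{32_000_000_000, 16_000_000_000, 31_000_000_000}
	fmt.Println(sampleProposer(balances, [32]byte{7}))
}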

// Domain returns the domain version for BLS private key to sign and verify.
//
// Spec pseudocode definition:
@@ -362,7 +324,7 @@ func IsEligibleForActivationQueueUsingTrie(validator *stateTrie.ReadOnlyValidato
}

// isEligibleForActivationQueue carries out the logic for IsEligibleForActivationQueue*
func isEligibileForActivationQueue(activationEligibilityEpoch uint64, effectiveBalance uint64) bool {
func isEligibileForActivationQueue(activationEligibilityEpoch, effectiveBalance uint64) bool {
return activationEligibilityEpoch == params.BeaconConfig().FarFutureEpoch &&
effectiveBalance == params.BeaconConfig().MaxEffectiveBalance
}
@@ -395,7 +357,7 @@ func IsEligibleForActivationUsingTrie(state *stateTrie.BeaconState, validator *s
}

// isEligibleForActivation carries out the logic for IsEligibleForActivation*
func isEligibleForActivation(activationEligibilityEpoch uint64, activationEpoch uint64, finalizedEpoch uint64) bool {
func isEligibleForActivation(activationEligibilityEpoch, activationEpoch, finalizedEpoch uint64) bool {
return activationEligibilityEpoch <= finalizedEpoch &&
activationEpoch == params.BeaconConfig().FarFutureEpoch
}

@@ -1,18 +1,18 @@
package helpers

import (
"errors"
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestIsActiveValidator_OK(t *testing.T) {
@@ -275,6 +275,39 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
}
}

func TestBeaconProposerIndex_BadState(t *testing.T) {
params.SetupTestConfigCleanup(t)
ClearCache()
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
roots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := uint64(0); i < params.BeaconConfig().SlotsPerHistoricalRoot; i++ {
roots[i] = make([]byte, 32)
}

state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
Validators: validators,
Slot: 0,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: roots,
StateRoots: roots,
})
require.NoError(t, err)
// Set a very high slot, so that retrieved block root will be
// non existent for the proposer cache.
require.NoError(t, state.SetSlot(100))
_, err = BeaconProposerIndex(state)
require.NoError(t, err)
assert.Equal(t, 0, len(proposerIndicesCache.ProposerIndicesCache.ListKeys()))
}

func TestComputeProposerIndex_Compatibility(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
@@ -309,7 +342,7 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
seedWithSlotHash := hashutil.Hash(seedWithSlot)
index, err := ComputeProposerIndexWithValidators(state.Validators(), indices, seedWithSlotHash)
index, err := computeProposerIndexWithValidators(state.Validators(), indices, seedWithSlotHash)
require.NoError(t, err)
wantedProposerIndices = append(wantedProposerIndices, index)
}
@@ -743,3 +776,33 @@ func TestIsIsEligibleForActivation(t *testing.T) {
})
}
}

func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeIndices []uint64, seed [32]byte) (uint64, error) {
length := uint64(len(activeIndices))
if length == 0 {
return 0, errors.New("empty active indices list")
}
maxRandomByte := uint64(1<<8 - 1)
hashFunc := hashutil.CustomSHA256Hasher()

for i := uint64(0); ; i++ {
candidateIndex, err := ComputeShuffledIndex(i%length, length, seed, true /* shuffle */)
if err != nil {
return 0, err
}
candidateIndex = activeIndices[candidateIndex]
if candidateIndex >= uint64(len(validators)) {
return 0, errors.New("active index out of range")
}
b := append(seed[:], bytesutil.Bytes8(i/32)...)
randomByte := hashFunc(b)[i%32]
v := validators[candidateIndex]
var effectiveBal uint64
if v != nil {
effectiveBal = v.EffectiveBalance
}
if effectiveBal*maxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
return candidateIndex, nil
}
}
}

@@ -32,6 +32,8 @@ go_library(
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/mathutil:go_default_library",
"//shared/params:go_default_library",
"//shared/traceutil:go_default_library",

@@ -37,7 +37,6 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
block, err := benchutil.PreGenFullBlock()
require.NoError(b, err)

b.N = runAmount
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := state.ExecuteStateTransition(context.Background(), cleanStates[i], block)
@@ -64,7 +63,6 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
_, err = state.ExecuteStateTransition(context.Background(), beaconState, block)
require.NoError(b, err, "Failed to process block, benchmarks will fail")

b.N = runAmount
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := state.ExecuteStateTransition(context.Background(), cleanStates[i], block)
@@ -97,7 +95,6 @@ func BenchmarkHashTreeRoot_FullState(b *testing.B) {
beaconState, err := benchutil.PreGenState2FullEpochs()
require.NoError(b, err)

b.N = 50
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := beaconState.HashTreeRoot(context.Background())
@@ -115,7 +112,6 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
_, err = beaconState.HashTreeRoot(ctx)
require.NoError(b, err)

b.N = 50
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := beaconState.HashTreeRoot(ctx)
@@ -131,7 +127,6 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
b.Run("Proto_Marshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
_, err := proto.Marshal(natState)
require.NoError(b, err)
@@ -141,7 +136,6 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
b.Run("Fast_SSZ_Marshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
_, err := natState.MarshalSSZ()
require.NoError(b, err)
@@ -161,7 +155,6 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
b.Run("Proto_Unmarshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
require.NoError(b, proto.Unmarshal(protoObject, &pb.BeaconState{}))
}
@@ -170,7 +163,6 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
b.Run("Fast_SSZ_Unmarshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.N = 1000
for i := 0; i < b.N; i++ {
sszState := &pb.BeaconState{}
require.NoError(b, sszState.UnmarshalSSZ(sszObject))

@@ -1,7 +1,12 @@
package state

import (
"context"

"github.com/prysmaticlabs/prysm/beacon-chain/cache"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil"
)

// SkipSlotCache exists for the unlikely scenario that is a large gap between the head state and
@@ -9,3 +14,14 @@ import (
// difficult or impossible to compute the appropriate beacon state for assignments within a
// reasonable amount of time.
var SkipSlotCache = cache.NewSkipSlotCache()

// The key for skip slot cache is mixed between state root and state slot.
// state root is in the mix to defend against different forks with same skip slots
// to hit the same cache. We don't want beacon states mixed up between different chains.
func cacheKey(ctx context.Context, state *beaconstate.BeaconState) ([32]byte, error) {
r, err := state.HashTreeRoot(ctx)
if err != nil {
return [32]byte{}, err
}
return hashutil.Hash(append(bytesutil.Bytes32(state.Slot()), r[:]...)), nil
}
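The new cacheKey mixes the state slot into the hash of the state root so two forks that skip the same slots cannot collide in the skip-slot cache. A self-contained sketch of the same mixing, with SHA-256 standing in for Prysm's hashutil and an 8-byte little-endian slot standing in for bytesutil.Bytes32:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// skipSlotKey derives a cache key from (slot, state root) so that identical
// roots at different slots, or different forks at the same slot, get
// different keys.
func skipSlotKey(slot uint64, stateRoot [32]byte) [32]byte {
	buf := make([]byte, 8, 8+32)
	binary.LittleEndian.PutUint64(buf, slot)
	return sha256.Sum256(append(buf, stateRoot[:]...))
}

func main() {
	root := [32]byte{1}
	fmt.Printf("%x\n%x\n", skipSlotKey(10, root), skipSlotKey(11, root))
}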

@@ -1,7 +1,9 @@
package state_test

import (
"bytes"
"context"
"sync"
"testing"

"github.com/prysmaticlabs/go-ssz"
@@ -36,3 +38,123 @@ func TestSkipSlotCache_OK(t *testing.T) {
t.Fatal("Skipped slots cache leads to different states")
}
}

func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
bState, privs := testutil.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
originalState, err := beaconstate.InitializeFromProto(bState.CloneInnerState())
require.NoError(t, err)

blkCfg := testutil.DefaultBlockGenConfig()
blkCfg.NumAttestations = 1

state.SkipSlotCache.Disable()

// First transition will be with an empty cache, so the cache becomes populated
// with the state
blk, err := testutil.GenerateFullBlock(bState, privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
originalState, err = state.ExecuteStateTransition(context.Background(), originalState, blk)
require.NoError(t, err, "Could not run state transition")

// Create two shallow but different forks
var state1, state2 *beaconstate.BeaconState
{
blk, err := testutil.GenerateFullBlock(originalState.Copy(), privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
copy(blk.Block.Body.Graffiti, "block 1")
signature, err := testutil.BlockSignature(originalState, blk.Block, privs)
require.NoError(t, err)
blk.Signature = signature.Marshal()
state1, err = state.ExecuteStateTransition(context.Background(), originalState.Copy(), blk)
require.NoError(t, err, "Could not run state transition")
}

{
blk, err := testutil.GenerateFullBlock(originalState.Copy(), privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
copy(blk.Block.Body.Graffiti, "block 2")
signature, err := testutil.BlockSignature(originalState, blk.Block, privs)
require.NoError(t, err)
blk.Signature = signature.Marshal()
state2, err = state.ExecuteStateTransition(context.Background(), originalState.Copy(), blk)
require.NoError(t, err, "Could not run state transition")
}

r1, err := state1.HashTreeRoot(context.Background())
require.NoError(t, err)
r2, err := state2.HashTreeRoot(context.Background())
require.NoError(t, err)
if r1 == r2 {
t.Fatalf("need different starting states, got: %x", r1)
}

if state1.Slot() != state2.Slot() {
t.Fatalf("expecting different chains, but states at same slot")
}

// prepare copies for both states
var setups []*beaconstate.BeaconState
for i := uint64(0); i < 300; i++ {
var st *beaconstate.BeaconState
if i%2 == 0 {
st = state1
} else {
st = state2
}
setups = append(setups, st.Copy())
}

problemSlot := state1.Slot() + 2
expected1, err := state.ProcessSlots(context.Background(), state1.Copy(), problemSlot)
require.NoError(t, err)
expectedRoot1, err := expected1.HashTreeRoot(context.Background())
require.NoError(t, err)
t.Logf("chain 1 (even i) expected root %x at slot %d", expectedRoot1[:], problemSlot)

tmp1, err := state.ProcessSlots(context.Background(), expected1.Copy(), problemSlot+1)
require.NoError(t, err)
if gotRoot := tmp1.StateRoots()[problemSlot]; !bytes.Equal(gotRoot, expectedRoot1[:]) {
t.Fatalf("state roots for chain 1 are bad, expected root doesn't match: %x <> %x", gotRoot, expectedRoot1[:])
}

expected2, err := state.ProcessSlots(context.Background(), state2.Copy(), problemSlot)
require.NoError(t, err)
expectedRoot2, err := expected2.HashTreeRoot(context.Background())
require.NoError(t, err)
t.Logf("chain 2 (odd i) expected root %x at slot %d", expectedRoot2[:], problemSlot)

tmp2, err := state.ProcessSlots(context.Background(), expected2.Copy(), problemSlot+1)
require.NoError(t, err)
if gotRoot := tmp2.StateRoots()[problemSlot]; !bytes.Equal(gotRoot, expectedRoot2[:]) {
t.Fatalf("state roots for chain 2 are bad, expected root doesn't match %x <> %x", gotRoot, expectedRoot2[:])
}

var wg sync.WaitGroup
wg.Add(len(setups))

step := func(i int, setup *beaconstate.BeaconState) {
// go at least 1 past problemSlot, to ensure problem slot state root is available
outState, err := state.ProcessSlots(context.Background(), setup, problemSlot+1+uint64(i)) // keep increasing, to hit and extend the cache
require.NoError(t, err, "Could not process state transition")
roots := outState.StateRoots()
gotRoot := roots[problemSlot]
if i%2 == 0 {
if !bytes.Equal(gotRoot, expectedRoot1[:]) {
t.Errorf("unexpected root on chain 1, item %3d: %x", i, gotRoot)
}
} else {
if !bytes.Equal(gotRoot, expectedRoot2[:]) {
t.Errorf("unexpected root on chain 2, item %3d: %x", i, gotRoot)
}
}
wg.Done()
}

state.SkipSlotCache.Enable()
// now concurrently apply the blocks (alternating between states, and increasing skip slots)
for i, setup := range setups {
go step(i, setup)
}
// Wait for all transitions to finish
wg.Wait()
}

@@ -64,7 +64,7 @@ func GenesisBeaconState(deposits []*ethpb.Deposit, genesisTime uint64, eth1Data
return nil, err
}
// Process initial deposits.
leaves := [][]byte{}
var leaves [][]byte
for _, deposit := range deposits {
if deposit == nil || deposit.Data == nil {
return nil, fmt.Errorf("nil deposit or deposit with nil data cannot be processed: %v", deposit)
@@ -77,12 +77,12 @@ func GenesisBeaconState(deposits []*ethpb.Deposit, genesisTime uint64, eth1Data
}
var trie *trieutil.SparseMerkleTrie
if len(leaves) > 0 {
trie, err = trieutil.GenerateTrieFromItems(leaves, int(params.BeaconConfig().DepositContractTreeDepth))
trie, err = trieutil.GenerateTrieFromItems(leaves, params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
} else {
trie, err = trieutil.NewTrie(int(params.BeaconConfig().DepositContractTreeDepth))
trie, err = trieutil.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
@@ -249,7 +249,7 @@ func EmptyGenesisState() (*stateTrie.BeaconState, error) {
// return True
// This method has been modified from the spec to allow whole states not to be saved
// but instead only cache the relevant information.
func IsValidGenesisState(chainStartDepositCount uint64, currentTime uint64) bool {
func IsValidGenesisState(chainStartDepositCount, currentTime uint64) bool {
if currentTime < params.BeaconConfig().MinGenesisTime {
return false
}