Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: lock_read_...blob-verif (132 commits)
Commit SHAs in this comparison:
a01a824b57, f1a1bffea6, 71c04ef6a2, 2d5e214086, 41696e0e8e, 98e5278f32, 977e5bafc7, d25b7d27ed, e3300d1c59, b0aa26b5f8,
29f00e61d2, 3b33a95c96, 632010665f, c324242121, d8d34fc4ff, 4c381938e1, d4726f2866, 4e3419e870, ac06362baf, 28aa11c976,
798d5ec585, 9b97f3fd92, 0946b5853f, 1530d17977, e46f9c5631, 3097601530, 4a515c36e6, afaeff9d4c, f663f605d2, 12f7143c4f,
1f250f7e89, 0f65e51d1e, d1dd8471a3, 7a6487b746, daa6d2e741, 8a743a6430, c0fb16a96f, 57eda1de63, a54e61ecb0, 27b4e32e1c,
b56bf00682, b24b60dbd8, dc9d34b41b, 2ef0b3526d, 047613069e, 159a5dd69d, 470ea6d717, b441f20e6a, 2ea5bff9c0, c2433ff854,
82640b3d88, f925aded66, 10a89fef13, 56c65b8527, 022ee17af9, 203dc5f63b, 6f941b8138, ac412259eb, 3d78a52980, 5de8ec4600,
7e88eefc60, cabf3476e7, 5a01eecc50, b608c9f711, 671bf00c98, cbf6a2752d, 642458f037, 2a067d5d03, a2f60364ae, 45f68fa8d5,
f55708b995, 00826e8858, 76fec1799e, 9c938d354d, 83932d8e05, beebb56c8e, 0920fb1f61, 29f8880638, f91efafe24, 9387a36b66,
65ce27292c, 823f8ee3a2, 88e1b9edb3, c7e28908f5, 7143fe80bc, e231d88ca0, 0486b64dcc, b4847ac9ad, bc125a95ae, f592bf7f07,
bcc23d2ded, 6e0715e92a, 71fa70ce40, 4809da62cc, e10dbaa8b4, 71b08a50b7, cb5ce74a23, cc81444e13, bfae7f3c9f, 2fc5011091,
493a7179d7, b52baba2f1, 2f378a045a, 58cdb29ef3, cc2b4db582, be9b6ea837, 806a394c89, 97a99874e8, 945b087ca9, b57effd096,
867db1aeee, 99843688cd, a536612c39, c5501f8775, 55e4c6e1db, 2806326155, d7318ea485, e183d1dff4, a3868e7fc6, af70677778,
8eb82dd378, 0bd232667b, 39072e1b74, 66011d5d9c, 419dbd57f7, da6ae3c204, 44973b0bb3, de0c7e6256, c1c0cd040c, 734eb98941,
ffaef83634, f9a40ef111
.bazelrc (1 change)
@@ -27,6 +27,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
# Release flags
build:release --compilation_mode=opt
build:release --stamp
build:release --define pgo_enabled=1

# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --copt=-g
@@ -144,6 +144,11 @@ config_setting(
    values = {"define": "coverage_enabled=1"},
)

config_setting(
    name = "pgo_enabled",
    values = {"define": "pgo_enabled=1"},
)

common_files = {
    "//:LICENSE.md": "LICENSE.md",
    "//:README.md": "README.md",
@@ -1,45 +1,53 @@
## Terms of Use
# Terms of Use
Effective as of November 2, 2023

Effective as of Oct 14, 2020
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Offchain Labs, Inc. (as successor in interest to Prysmatic Labs LLC) (referenced herein as “Offchain Labs”, “we” or “us”). If you do not agree to the Terms, do not download or use Prysm. Additionally, the Terms of Use available at https://arbitrum.io/tos (or any successor site, the “OCL Terms of Use”) are hereby incorporated by reference into these Terms. In the event of any conflict between provisions set forth herein and those set forth in the OCL Terms of Use, the provisions set forth herein shall control.
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.
## About Prysm

### About Prysm
Prysm is a client implementation for Ethereum consensus protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
Prysm is a client implementation for the Ethereum blockchain’s consensus protocol. To participate in the network, a user must send ETH from the Ethereum mainnet blockchain to a validator deposit smart contract on Ethereum mainnet. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
### Licensing Terms
Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.
## Licensing Terms
Prysm is an open-source software program licensed pursuant to the GNU General Public License v3.0.
The Offchain Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Offchain Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.
PLEASE READ THESE TERMS CAREFULLY, AS THE OCL TERMS OF USE INCORPORATED BY REFERENCE HEREIN CONTAIN AN AGREEMENT TO ARBITRATE AND OTHER IMPORTANT INFORMATION REGARDING YOUR LEGAL RIGHTS, REMEDIES, AND OBLIGATIONS. THE AGREEMENT TO ARBITRATE REQUIRES (WITH LIMITED EXCEPTION) THAT YOU SUBMIT CLAIMS YOU HAVE AGAINST US TO BINDING AND FINAL ARBITRATION, AND FURTHER (1) YOU WILL ONLY BE PERMITTED TO PURSUE CLAIMS AGAINST OFFCHAIN LABS ON AN INDIVIDUAL BASIS, NOT AS A PLAINTIFF OR CLASS MEMBER IN ANY CLASS OR REPRESENTATIVE ACTION OR PROCEEDING, (2) YOU WILL ONLY BE PERMITTED TO SEEK RELIEF (INCLUDING MONETARY, INJUNCTIVE, AND DECLARATORY RELIEF) ON AN INDIVIDUAL BASIS, AND (3) YOU MAY NOT BE ABLE TO HAVE ANY CLAIMS YOU HAVE AGAINST US RESOLVED BY A JURY OR IN A COURT OF LAW.
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.
## Risks of Operating Prysm

### Risks of Operating Prysm
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money, tokens and value. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm, including the security of your ETH and meeting any applicable minimum system requirements.
Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with local laws.
YOU ACKNOWLEDGE THAT WE ARE NOT RESPONSIBLE FOR ANY RISKS ASSOCIATED WITH YOUR USE OF PRYSM, AND CANNOT BE HELD LIABLE FOR ANY RESULTING LOSSES THAT YOU EXPERIENCE WHILE ACCESSING OR USING PRYSM.
Some Internet plans will charge an additional amount for any excess upload bandwidth used that isn’t included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to such limitations and monitor your bandwidth use so that you can stop Prysm before you reach your upload limit.
BY ACCESSING AND USING PRYSM, YOU REPRESENT AND WARRANT THAT YOU UNDERSTAND THE INHERENT RISKS ASSOCIATED WITH USING CRYPTOGRAPHIC AND BLOCKCHAIN-BASED SYSTEMS, AND THAT YOU HAVE A WORKING KNOWLEDGE OF THE USAGE AND INTRICACIES OF DIGITAL ASSETS, SUCH AS THOSE FOLLOWING THE ETHEREUM TOKEN STANDARD (ERC-20). YOU FURTHER UNDERSTAND THAT THE MARKETS FOR DIGITAL ASSETS ARE HIGHLY VOLATILE DUE TO VARIOUS FACTORS, INCLUDING ADOPTION, SPECULATION, TECHNOLOGY, SECURITY, AND REGULATION. YOU ACKNOWLEDGE AND ACCEPT THAT THE COST AND SPEED OF TRANSACTING WITH CRYPTOGRAPHIC AND BLOCKCHAIN-BASED SYSTEMS SUCH AS ETHEREUM ARE VARIABLE AND MAY INCREASE DRAMATICALLY AT ANY TIME. YOU UNDERSTAND THAT ANYONE CAN CREATE A TOKEN, INCLUDING FAKE VERSIONS OF EXISTING TOKENS AND TOKENS THAT FALSELY CLAIM TO REPRESENT PROJECTS, AND ACKNOWLEDGE AND ACCEPT THE RISK THAT YOU MAY MISTAKENLY INTERACT WITH THOSE OR OTHER TOKENS. YOU FURTHER ACKNOWLEDGE THAT WE ARE NOT RESPONSIBLE FOR ANY OF THE VARIABLES OR RISKS DESCRIBED IN THESE TERMS. YOU UNDERSTAND AND AGREE TO ASSUME FULL RESPONSIBILITY FOR ALL OF THE RISKS OF ACCESSING AND USING PRYSM. YOU ARE SOLELY RESPONSIBLE FOR YOUR WALLETS, FOR SAFEGUARDING THE ASSOCIATED PRIVATE KEY AND FOR ANY ACTIVITY THAT OCCURS USING YOUR WALLET. WITHOUT LIMITING THE FOREGOING, YOU ALSO UNDERSTAND THAT THERE MAY BE TAX AND REGULATORY RISKS RELATED TO USING PRYSM. IT IS YOUR SOLE RESPONSIBILITY TO DETERMINE WHETHER, AND TO WHAT EXTENT, ANY TAXES APPLY TO ANY TRANSACTIONS YOU CONDUCT IN CONNECTION WITH YOUR USE OF PRYSM, AND TO WITHHOLD, COLLECT, REPORT AND REMIT THE CORRECT AMOUNTS OF TAXES TO THE APPROPRIATE TAX AUTHORITIES. DIGITAL ASSETS, BLOCKCHAIN TECHNOLOGY, AND ANY RELATED SOFTWARE AND SERVICES ARE ALSO SUBJECT TO LEGAL AND REGULATORY UNCERTAINTY IN THE UNITED STATES AND OTHER JURISDICTIONS. YOU UNDERSTAND THAT LEGISLATIVE AND REGULATORY CHANGES OR ACTIONS MAY ADVERSELY AFFECT THE USAGE, TRANSFERABILITY, TRANSACTABILITY AND ACCESSIBILITY RELATED TO PRYSM.

### Warranty Disclaimer
PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. PRYSMATIC LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with all Applicable Law (as defined below), including, without limitation, for the avoidance of doubt, local laws.

### Limitation of Liability
In no event will Prysmatic Labs or any of its contributors be liable, whether in contract, warranty, tort (including negligence, whether active, passive or imputed), product liability, strict liability or other theory, breach of statutory duty or otherwise arising out of, or in connection with, your use of Prysm, for any direct, indirect, incidental, special or consequential damages (including any loss of profits or data, business interruption or other pecuniary loss, or damage, loss or other compromise of data, in each case whether direct, indirect, incidental, special or consequential) arising out of use Prysm, even if we or other users have been advised of the possibility of such damages. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by applicable law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
Some Internet plans will charge additional amounts for bandwidth or any excess upload bandwidth used that isn’t included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to any such limitations and monitor your bandwidth use and upload volumes.

### Indemnification
To the maximum extent permitted by law, you will defend, indemnify and hold Prysmatic Labs and its contributors harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction on Prysm), as well as any and all losses, liabilities,
damages, costs, and expenses (including reasonable attorneys’ fees) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.

## Warranty Disclaimer

### Compliance with Laws and Tax Obligations
Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal,
regional, state, county, municipal or other governmental authority and you agree to comply with all such laws in your use of Prysm. The users of Prysm are solely responsible to determinate what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.
PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. WITHOUT LIMITING ANYTHING SET FORTH ELSEWHERE IN THESE TERMS, OFFCHAIN LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE OR OTHER GOVERNMENTAL AUTHORITY. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.

### Miscellaneous
These Terms will be construed and enforced in accordance with the laws of the state of Illinois as applied to agreements entered into and completely performed in Illinois. You agree to the personal jurisdiction by and venue in Illinois and waive any objection to such jurisdiction or venue.

## Limitation of Liability
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your continued use of Prysm constitutes acceptance of such revised Terms.
IN NO EVENT WILL OFFCHAIN LABS OR ANY OF ITS AFFILIATES OR ITS OR ANY SUCH AFFILIATE’S DIRECTORS, OFFICERS, EMPLOYEES, AGENTS, OR REPRESENTATIVES OR ANY CONTRIBUTORS (COLLECTIVELY, THE “OCL PARTIES”) BE LIABLE, WHETHER IN CONTRACT, WARRANTY, TORT (INCLUDING NEGLIGENCE, WHETHER ACTIVE, PASSIVE OR IMPUTED), PRODUCT LIABILITY, STRICT LIABILITY OR OTHER THEORY, BREACH OF STATUTORY DUTY OR OTHERWISE ARISING OUT OF, OR IN CONNECTION WITH, YOUR USE OF PRYSM, FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES (INCLUDING ANY LOSS OF PROFITS OR DATA, BUSINESS INTERRUPTION OR OTHER PECUNIARY LOSS, OR DAMAGE, LOSS OR OTHER COMPROMISE OF DATA, IN EACH CASE WHETHER DIRECT, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL) ARISING OUT OF USE PRYSM, EVEN IF WE OR OTHER USERS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by Applicable Law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
These Terms constitute the entire agreement between you and Prysmatic Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.

## Indemnification
To the maximum extent permitted by Applicable Law, you will defend, indemnify and hold each OCL Party harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction or other activity on Prysm), as well as any and all losses, liabilities, damages, costs, and expenses (including reasonable attorneys’ fees and costs) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.

## Compliance with Laws
Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal, regional, state, county, municipal or other governmental authority (collectively, “Applicable Law”) and you agree to comply with all such Applicable Law in your use of Prysm. The users of Prysm are solely responsible to determinate what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.

## Miscellaneous
These Terms will be governed by the laws of the State of Delaware without regard to its conflict of law provisions. With respect to any disputes or claims not subject to arbitration, as set forth in the OCL Terms of Use, you and Offchain Labs submit to the personal and exclusive jurisdiction of the state and federal courts located within New York, New York and waive any objection to such jurisdiction and venue. The failure of Offchain Labs to exercise or enforce any right or provision of these Terms will not constitute a waiver of such right or provision.
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your use of Prysm following any such revision to these Terms constitutes acceptance of such revised Terms.
These Terms constitute the entire agreement between you and Offchain Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.
If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party’s right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.
If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party’s right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.
WORKSPACE (66 changes)
@@ -27,7 +27,23 @@ http_archive(

load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")

zig_toolchains()
# Temporarily use a nightly build until 0.12.0 is released.
# See: https://github.com/prysmaticlabs/prysm/issues/13130
zig_toolchains(
    host_platform_sha256 = {
        "linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
        "linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
        "macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
        "macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
        "windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
    },
    url_formats = [
        "https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
        "https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
        "https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
    ],
    version = "0.12.0-dev.1349+fa022d1ec",
)

# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc

@@ -65,10 +81,10 @@ bazel_skylib_workspace()

http_archive(
    name = "bazel_gazelle",
    sha256 = "29d5dafc2a5582995488c6735115d1d366fcd6a0fc2e2a153f02988706349825",
    sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
    ],
)

@@ -110,10 +126,10 @@ http_archive(
        # Expose internals of go_test for custom build transitions.
        "//third_party:io_bazel_rules_go_test.patch",
    ],
    sha256 = "bfc5ce70b9d1634ae54f4e7b495657a18a04e0d596785f672d35d5f505ab491a",
    sha256 = "91585017debb61982f7054c9688857a2ad1fd823fc3f9cb05048b0025c47d023",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
        "https://github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
        "https://github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
    ],
)

@@ -206,7 +222,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
    go_version = "1.20.7",
    go_version = "1.20.10",
    nogo = "@//:nogo",
)

@@ -247,9 +263,7 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_test_version = "v1.4.0-beta.2-hotfix"

consensus_spec_version = "v1.4.0-beta.2"
consensus_spec_version = "v1.4.0-beta.3"

bls_test_version = "v0.1.1"

@@ -265,8 +279,8 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "99770a001189f66204a4ef79161c8002bcbbcbd8236f1c6479bd5b83a3c68d42",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
    sha256 = "67ae5b8fc368853da23d4297e480a4b7f4722fb970d1c7e2b6a5b7faef9cb907",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

http_archive(
@@ -281,8 +295,8 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "56763f6492ee137108271007d62feef60d8e3f1698e53dee4bc4b07e55f7326b",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
    sha256 = "82474f29fff4abd09fb1e71bafa98827e2573cf0ad02cf119610961831dc3bb5",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

http_archive(
@@ -297,8 +311,8 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "bc1cac1a991cdc7426efea14385dcf215df85ed3f0572b824ad6a1d7ca0c89ad",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
    sha256 = "60e4b6eb6c341daab7ee5614a8e3f28567247c504c593b951bfe919622c8ef8f",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

http_archive(
@@ -312,7 +326,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "c5898001aaab2a5bb38a39ff9d17a52f1f9befcc26e63752cbf556040f0c884e",
    sha256 = "fdab9756c93a250219ff6a10d5a9faee1e2e6878a14508410409e307362c6991",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

@@ -374,6 +388,22 @@ http_archive(
)

# External dependencies
http_archive(
    name = "googleapis",
    sha256 = "9d1a930e767c93c825398b8f8692eca3fe353b9aaadedfbcf1fca2282c85df88",
    strip_prefix = "googleapis-64926d52febbf298cb82a8f472ade4a3969ba922",
    urls = [
        "https://github.com/googleapis/googleapis/archive/64926d52febbf298cb82a8f472ade4a3969ba922.zip",
    ],
)

load("@googleapis//:repository_rules.bzl", "switched_rules_by_language")

switched_rules_by_language(
    name = "com_google_googleapis_imports",
    go = True,
)

load("//:deps.bzl", "prysm_deps")

# gazelle:repository_macro deps.bzl%prysm_deps
@@ -13,6 +13,8 @@ go_library(
        "//api/client:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/rpc/apimiddleware:go_default_library",
        "//beacon-chain/rpc/eth/beacon:go_default_library",
        "//beacon-chain/rpc/eth/config:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//consensus-types/interfaces:go_default_library",
@@ -21,7 +23,6 @@ go_library(
        "//encoding/ssz/detect:go_default_library",
        "//io/file:go_default_library",
        "//network/forks:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",

@@ -13,16 +13,16 @@ import (
    "strconv"
    "text/template"

    "github.com/prysmaticlabs/prysm/v4/api/client"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
    "github.com/prysmaticlabs/prysm/v4/network/forks"
    v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/api/client"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v4/network/forks"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    log "github.com/sirupsen/logrus"
)

@@ -178,12 +178,12 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
}

// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
func (c *Client) GetConfigSpec(ctx context.Context) (*config.GetSpecResponse, error) {
    body, err := c.Get(ctx, getConfigSpecPath)
    if err != nil {
        return nil, errors.Wrap(err, "error requesting configSpecPath")
    }
    fsr := &v1.SpecResponse{}
    fsr := &config.GetSpecResponse{}
    err = json.Unmarshal(body, fsr)
    if err != nil {
        return nil, err
@@ -284,7 +284,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData

// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shared.SignedBLSToExecutionChange) error {
    u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
    body, err := json.Marshal(request)
    if err != nil {
@@ -323,12 +323,12 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim

// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*beacon.BLSToExecutionChangesPoolResponse, error) {
    body, err := c.Get(ctx, changeBLStoExecutionPath)
    if err != nil {
        return nil, err
    }
    poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
    poolResponse := &beacon.BLSToExecutionChangesPoolResponse{}
    err = json.Unmarshal(body, poolResponse)
    if err != nil {
        return nil, err

@@ -876,7 +876,7 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
        },
        SyncAggregate: &eth.SyncAggregate{
            SyncCommitteeSignature: make([]byte, 96),
            SyncCommitteeBits:      bitfield.Bitvector512(ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458")),
            SyncCommitteeBits:      ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458"),
        },
        ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
            ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
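The hunks above move the beacon API client from the apimiddleware JSON types to typed response structs (config, beacon, shared), but the decoding pattern itself stays plain encoding/json. A minimal standalone sketch of that pattern, using a hypothetical response type rather than Prysm's:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// specResponse is a stand-in for a typed spec response; it is not Prysm's struct.
type specResponse struct {
	Data map[string]string `json:"data"`
}

func main() {
	// Body as it might come back from a /eth/v1/config/spec-style endpoint (illustrative).
	body := []byte(`{"data":{"SECONDS_PER_SLOT":"12"}}`)
	resp := &specResponse{}
	if err := json.Unmarshal(body, resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Data["SECONDS_PER_SLOT"]) // 12
}
```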
@@ -7,7 +7,7 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//api/client:go_default_library",
        "//validator/rpc/apimiddleware:go_default_library",
        "//validator/rpc:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

@@ -8,7 +8,7 @@ import (

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/api/client"
    "github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
    "github.com/prysmaticlabs/prysm/v4/validator/rpc"
)

const (

@@ -41,17 +41,17 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
    if err != nil {
        return nil, err
    }
    if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
    if len(jsonlocal.Data) == 0 && len(jsonremote.Data) == 0 {
        return nil, errors.New("there are no local keys or remote keys on the validator")
    }

    hexKeys := make(map[string]bool)

    for index := range jsonlocal.Keystores {
        hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
    for index := range jsonlocal.Data {
        hexKeys[jsonlocal.Data[index].ValidatingPubkey] = true
    }
    for index := range jsonremote.Keystores {
        hexKeys[jsonremote.Keystores[index].Pubkey] = true
    for index := range jsonremote.Data {
        hexKeys[jsonremote.Data[index].Pubkey] = true
    }
    keys := make([]string, 0)
    for k := range hexKeys {
@@ -61,12 +61,12 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
}

// GetLocalValidatorKeys calls the keymanager APIs for local validator keys
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.ListKeystoresResponseJson, error) {
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*rpc.ListKeystoresResponse, error) {
    localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
    if err != nil {
        return nil, err
    }
    jsonlocal := &apimiddleware.ListKeystoresResponseJson{}
    jsonlocal := &rpc.ListKeystoresResponse{}
    if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
        return nil, errors.Wrap(err, "failed to parse local keystore list")
    }
@@ -74,14 +74,14 @@ func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.List
}

// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*rpc.ListRemoteKeysResponse, error) {
    remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
    if err != nil {
        if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
            return nil, err
        }
    }
    jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
    jsonremote := &rpc.ListRemoteKeysResponse{}
    if len(remoteBytes) != 0 {
        if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
            return nil, errors.Wrap(err, "failed to parse remote keystore list")
@@ -107,13 +107,13 @@ func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []stri
}

// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*rpc.GetFeeRecipientByPubkeyResponse, error) {
    path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
    b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
    if err != nil {
        return nil, err
    }
    feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
    feejson := &rpc.GetFeeRecipientByPubkeyResponse{}
    if err := json.Unmarshal(b, feejson); err != nil {
        return nil, errors.Wrap(err, "failed to parse fee recipient")
    }
@@ -24,6 +24,7 @@ go_library(
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//connectivity:go_default_library",
        "@org_golang_google_grpc//credentials:go_default_library",
        "@org_golang_google_grpc//credentials/insecure:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

@@ -19,6 +19,7 @@ import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/connectivity"
    "google.golang.org/grpc/credentials"
    "google.golang.org/grpc/credentials/insecure"
)

var _ runtime.Service = (*Gateway)(nil)
@@ -211,19 +212,21 @@ func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientC
// dialTCP creates a client connection via TCP.
// "addr" must be a valid TCP address with a port number.
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
    security := grpc.WithInsecure()
    var security grpc.DialOption
    if len(g.cfg.remoteCert) > 0 {
        creds, err := credentials.NewClientTLSFromFile(g.cfg.remoteCert, "")
        if err != nil {
            return nil, err
        }
        security = grpc.WithTransportCredentials(creds)
    } else {
        // Use insecure credentials when there's no remote cert provided.
        security = grpc.WithTransportCredentials(insecure.NewCredentials())
    }
    opts := []grpc.DialOption{
        security,
        grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
    }

    return grpc.DialContext(ctx, addr, opts...)
}

@@ -240,7 +243,7 @@ func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn,
        return d(addr, 0)
    }
    opts := []grpc.DialOption{
        grpc.WithInsecure(),
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithContextDialer(f),
        grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
    }
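The dial changes above replace the deprecated grpc.WithInsecure() with explicit transport credentials. A minimal standalone sketch of the same pattern, with an illustrative target address and certificate path (this is not Prysm's gateway code):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// dial uses TLS credentials when a certificate file is configured and
// falls back to explicit insecure credentials otherwise.
func dial(ctx context.Context, addr, certFile string) (*grpc.ClientConn, error) {
	var security grpc.DialOption
	if certFile != "" {
		creds, err := credentials.NewClientTLSFromFile(certFile, "")
		if err != nil {
			return nil, err
		}
		security = grpc.WithTransportCredentials(creds)
	} else {
		security = grpc.WithTransportCredentials(insecure.NewCredentials())
	}
	return grpc.DialContext(ctx, addr, security)
}

func main() {
	// "localhost:4000" is an example target; no connection is attempted eagerly.
	conn, err := dial(context.Background(), "localhost:4000", "")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```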
@@ -4,6 +4,7 @@ const (
    VersionHeader                 = "Eth-Consensus-Version"
    ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
    ExecutionPayloadValueHeader   = "Eth-Execution-Payload-Value"
    ConsensusBlockValueHeader     = "Eth-Consensus-Block-Value"
    JsonMediaType                 = "application/json"
    OctetStreamMediaType          = "application/octet-stream"
)
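These constants name the consensus-version and payload metadata headers used by the beacon API. A small hedged sketch of setting and reading such a header on an http.Header value (the fork name "deneb" is only an example):

```go
package main

import (
	"fmt"
	"net/http"
)

// Mirrors the VersionHeader constant above.
const versionHeader = "Eth-Consensus-Version"

func main() {
	h := make(http.Header)
	// A block-publishing client or server would set the fork name here.
	h.Set(versionHeader, "deneb")
	fmt.Println(h.Get(versionHeader)) // deneb
}
```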
api/server/BUILD.bazel (new file, 24 lines)
@@ -0,0 +1,24 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "middleware.go",
        "util.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/api/server",
    visibility = ["//visibility:public"],
)

go_test(
    name = "go_default_test",
    srcs = [
        "middleware_test.go",
        "util_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
@@ -1,15 +1,13 @@
package node
package server

import (
    "net/http"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
)

func middleware(next http.Handler) http.Handler {
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        query := r.URL.Query()
        helpers.NormalizeQueryValues(query)
        NormalizeQueryValues(query)
        r.URL.RawQuery = query.Encode()

        next.ServeHTTP(w, r)
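The renamed NormalizeQueryValuesHandler is intended to wrap an HTTP router so comma-separated query values arrive as repeated keys. A usage sketch under that assumption; the route and listen address are illustrative, not taken from Prysm:

```go
package main

import (
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/api/server"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/eth/v1/example", func(w http.ResponseWriter, r *http.Request) {
		// After the middleware runs, "id=a,b" arrives as "id=a&id=b".
		_, _ = w.Write([]byte(r.URL.RawQuery))
	})
	// Wrap the whole mux so every route sees normalized query values.
	handler := server.NormalizeQueryValuesHandler(mux)
	_ = http.ListenAndServe("127.0.0.1:8080", handler)
}
```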
api/server/middleware_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package server

import (
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestNormalizeQueryValuesHandler(t *testing.T) {
    nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        _, err := w.Write([]byte("next handler"))
        require.NoError(t, err)
    })

    handler := NormalizeQueryValuesHandler(nextHandler)

    tests := []struct {
        name          string
        inputQuery    string
        expectedQuery string
    }{
        {
            name:          "3 values",
            inputQuery:    "key=value1,value2,value3",
            expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
            if err != nil {
                t.Fatal(err)
            }

            rr := httptest.NewRecorder()
            handler.ServeHTTP(rr, req)

            if rr.Code != http.StatusOK {
                t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
            }

            if req.URL.RawQuery != test.expectedQuery {
                t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
            }

            if rr.Body.String() != "next handler" {
                t.Errorf("next handler was not executed")
            }
        })
    }
}
@@ -1,4 +1,4 @@
package helpers
package server

import (
    "net/url"
@@ -1,4 +1,4 @@
package helpers
package server

import (
    "testing"
@@ -1,19 +0,0 @@
load("//tools:target_migration.bzl", "moved_targets")

moved_targets(
    [
        ":push_images_debug",
        ":push_images_alpine",
        ":push_images",
        ":image_bundle_debug",
        ":image_debug",
        ":image_bundle_alpine",
        ":image_bundle",
        ":image_with_creation_time",
        ":image_alpine",
        ":image",
        ":go_default_test",
        ":beacon-chain",
    ],
    "//cmd/beacon-chain",
)
@@ -50,6 +50,7 @@ go_library(
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
        "//beacon-chain/execution:go_default_library",
@@ -69,6 +70,7 @@ go_library(
        "//config/params:go_default_library",
        "//consensus-types:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/forkchoice:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/payload-attribute:go_default_library",
        "//consensus-types/primitives:go_default_library",

@@ -12,10 +12,10 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/time/slots"
    "go.opencensus.io/trace"
@@ -45,7 +45,7 @@ type ForkchoiceFetcher interface {
    HighestReceivedBlockSlot() primitives.Slot
    ReceivedBlocksLastEpoch() (uint64, error)
    InsertNode(context.Context, state.BeaconState, [32]byte) error
    ForkChoiceDump(context.Context) (*ethpbv1.ForkChoiceDump, error)
    ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
    NewSlot(context.Context, primitives.Slot) error
    ProposerBoost() [32]byte
}

@@ -4,8 +4,8 @@ import (
    "context"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)

// CachedHeadRoot returns the corresponding value from Forkchoice
@@ -51,7 +51,7 @@ func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32
}

// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
func (s *Service) ForkChoiceDump(ctx context.Context) (*forkchoice.Dump, error) {
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)

@@ -70,11 +70,8 @@ func IsInvalidBlock(e error) bool {
    if e == nil {
        return false
    }
    _, ok := e.(invalidBlockError)
    if !ok {
        return IsInvalidBlock(errors.Unwrap(e))
    }
    return true
    var d invalidBlockError
    return errors.As(e, &d)
}

// InvalidBlockLVH returns the invalid block last valid hash root. If the error
@@ -83,7 +80,8 @@ func InvalidBlockLVH(e error) [32]byte {
    if e == nil {
        return [32]byte{}
    }
    d, ok := e.(invalidBlockError)
    var d invalidBlockError
    ok := errors.As(e, &d)
    if !ok {
        return [32]byte{}
    }
@@ -96,7 +94,8 @@ func InvalidBlockRoot(e error) [32]byte {
    if e == nil {
        return [32]byte{}
    }
    d, ok := e.(invalidBlockError)
    var d invalidBlockError
    ok := errors.As(e, &d)
    if !ok {
        return [32]byte{}
    }
@@ -108,7 +107,8 @@ func InvalidAncestorRoots(e error) [][32]byte {
    if e == nil {
        return [][32]byte{}
    }
    d, ok := e.(invalidBlockError)
    var d invalidBlockError
    ok := errors.As(e, &d)
    if !ok {
        return [][32]byte{}
    }
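The hunks above replace manual type assertions and an explicit unwrap loop with errors.As, which walks the wrap chain itself. A standalone sketch of that idiom with a hypothetical error type (not Prysm's invalidBlockError):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// badBlockError is a stand-in for an error type that carries extra data.
type badBlockError struct{ root [32]byte }

func (e badBlockError) Error() string { return "bad block" }

// isBadBlock reports whether err, or anything it wraps, is a badBlockError.
func isBadBlock(err error) bool {
	var target badBlockError
	return errors.As(err, &target) // errors.As unwraps the chain for us
}

func main() {
	base := badBlockError{root: [32]byte{'a'}}
	wrapped := errors.Wrap(base, "processing failed")
	fmt.Println(isBadBlock(wrapped)) // true
}
```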
@@ -24,6 +24,9 @@ func TestInvalidBlockRoot(t *testing.T) {
    err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
    require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
    require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))

    newErr := errors.Wrap(err, "wrap me")
    require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
}

func TestInvalidRoots(t *testing.T) {
@@ -33,4 +36,9 @@ func TestInvalidRoots(t *testing.T) {
    require.Equal(t, true, IsInvalidBlock(err))
    require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
    require.DeepEqual(t, roots, InvalidAncestorRoots(err))

    newErr := errors.Wrap(err, "wrap me")
    require.Equal(t, true, IsInvalidBlock(err))
    require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
    require.DeepEqual(t, roots, InvalidAncestorRoots(newErr))
}

@@ -157,6 +157,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
    if hasAttr && payloadID != nil {
        var pId [8]byte
        copy(pId[:], payloadID[:])
        log.WithFields(logrus.Fields{
            "blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
            "headSlot":  headBlk.Slot(),
            "payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
        }).Info("Forkchoice updated with payload attributes for proposal")
        s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
    } else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
        log.WithFields(logrus.Fields{
@@ -390,7 +395,7 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
        return err
    }
    // No op if the sidecar does not exist.
    if err := s.cfg.BeaconDB.DeleteBlobSidecar(ctx, root); err != nil {
    if err := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err != nil {
        return err
    }
}
File diff suppressed because one or more lines are too long
@@ -12,7 +12,7 @@ import (
// - Expected KZG commitments match the number of blobs in the block
// - That the number of proofs match the number of blobs
// - That the proofs are verified against the KZG commitments
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.BlobSidecar) error {
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.DeprecatedBlobSidecar) error {
    if len(commitments) != len(sidecars) {
        return fmt.Errorf("could not check data availability, expected %d commitments, obtained %d",
            len(commitments), len(sidecars))

@@ -9,7 +9,7 @@ import (
)

func TestIsDataAvailable(t *testing.T) {
    sidecars := make([]*ethpb.BlobSidecar, 0)
    sidecars := make([]*ethpb.DeprecatedBlobSidecar, 0)
    commitments := make([][]byte, 0)
    require.NoError(t, IsDataAvailable(commitments, sidecars))
}

@@ -8,7 +8,6 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
    ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
@@ -63,7 +62,7 @@ func NewLightClientOptimisticUpdateFromBeaconState(
    attestedState state.BeaconState) (*ethpbv2.LightClientUpdate, error) {
    // assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
    attestedEpoch := slots.ToEpoch(attestedState.Slot())
    if attestedEpoch < types.Epoch(params.BeaconConfig().AltairForkEpoch) {
    if attestedEpoch < params.BeaconConfig().AltairForkEpoch {
        return nil, fmt.Errorf("invalid attested epoch %d", attestedEpoch)
    }

@@ -147,15 +147,3 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
    log.WithFields(fields).Debug("Synced new payload")
    return nil
}

func logBlobSidecar(scs []*ethpb.BlobSidecar, startTime time.Time) {
    if len(scs) == 0 {
        return
    }
    log.WithFields(logrus.Fields{
        "count":          len(scs),
        "slot":           scs[0].Slot,
        "block":          hex.EncodeToString(scs[0].BlockRoot),
        "validationTime": time.Since(startTime),
    }).Debug("Synced new blob sidecars")
}
@@ -16,7 +16,7 @@ func TestReportEpochMetrics_BadHeadState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.SetValidators(nil))
|
||||
err = reportEpochMetrics(context.Background(), s, h)
|
||||
require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
|
||||
require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
|
||||
}
|
||||
|
||||
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
@@ -164,6 +165,8 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithClockSychronizer sets the ClockSetter/ClockWaiter values to be used by services that need to block until
|
||||
// the genesis timestamp is known (ClockWaiter) or which determine the genesis timestamp (ClockSetter).
|
||||
func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
|
||||
return func(s *Service) error {
|
||||
s.clockSetter = gs
|
||||
@@ -172,9 +175,18 @@ func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithSyncComplete sets a channel that is used to notify blockchain service that the node has synced to head.
|
||||
func WithSyncComplete(c chan struct{}) Option {
|
||||
return func(s *Service) error {
|
||||
s.syncComplete = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlobStorage sets the blob storage backend for the blockchain service.
|
||||
func WithBlobStorage(b *filesystem.BlobStorage) Option {
|
||||
return func(s *Service) error {
|
||||
s.blobStorage = b
|
||||
return nil
|
||||
}
|
||||
}
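The new `WithBlobStorage` option plugs the filesystem-backed blob store into the blockchain service. A minimal sketch of how it might be wired up, assuming a hypothetical `newChainService` helper and `dataDir` path; a real node passes many more options than the one shown.

```go
package main

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
)

// newChainService is a hypothetical helper showing only the blob-storage wiring.
func newChainService(ctx context.Context, dataDir string) (*blockchain.Service, error) {
	// NewBlobStorage claims the on-disk blob directory; create it once per beacon node.
	blobs, err := filesystem.NewBlobStorage(dataDir)
	if err != nil {
		return nil, err
	}
	// In practice many more options are required; only the blob-storage option is shown here.
	return blockchain.NewService(ctx, blockchain.WithBlobStorage(blobs))
}
```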
|
||||
|
||||
@@ -6,14 +6,13 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
@@ -353,11 +352,15 @@ func (s *Service) databaseDACheck(ctx context.Context, b consensusblocks.ROBlock
if len(commitments) == 0 {
return nil
}
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, b.Root())
missing, err := missingIndices(s.blobStorage, b.Root(), len(commitments))
if err != nil {
return errors.Wrap(err, "could not get blob sidecars")
return err
}
return kzg.IsDataAvailable(commitments, sidecars)
if len(missing) == 0 {
return nil
}
// TODO: don't worry that this error isn't informative, it will be superseded by a detailed sidecar cache error.
return errors.New("not all kzg commitments are available")
}

func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
|
||||
@@ -529,11 +532,24 @@ func (s *Service) runLateBlockTasks() {
}
}

func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected int) (map[uint64]struct{}, error) {
indices, err := bs.Indices(root)
if err != nil {
return nil, err
}
missing := make(map[uint64]struct{}, expected)
for i := uint64(0); i < uint64(expected); i++ {
if !indices[i] {
missing[i] = struct{}{}
}
}
return missing, nil
}
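A hedged sketch of how `missingIndices` relates to the new storage layer, written as a hypothetical in-package test; `testSidecars` (verified sidecars for a block with two commitments) is an assumption, while `NewEphemeralBlobStorage`, `Save`, and `BlockRoot` come from this diff.

```go
func TestMissingIndices_Sketch(t *testing.T) {
	// Hypothetical: testSidecars holds verified sidecars for a block with 2 KZG commitments.
	bs := filesystem.NewEphemeralBlobStorage(t)
	require.NoError(t, bs.Save(testSidecars[0])) // only index 0 is written to storage
	missing, err := missingIndices(bs, testSidecars[0].BlockRoot(), 2)
	require.NoError(t, err)
	require.Equal(t, 1, len(missing)) // index 1 has not been stored yet
	_, stillMissing := missing[1]
	require.Equal(t, true, stillMissing)
}
```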
|
||||
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
t := time.Now()
|
||||
|
||||
block := signed.Block()
|
||||
if block == nil {
|
||||
@@ -556,47 +572,23 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
if expected == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read first from db in case we have the blobs
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
switch {
|
||||
case err == nil:
|
||||
if len(sidecars) >= expected {
|
||||
s.blobNotifiers.delete(root)
|
||||
if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
|
||||
return err
|
||||
}
|
||||
logBlobSidecar(sidecars, t)
|
||||
return nil
|
||||
}
|
||||
case errors.Is(err, db.ErrNotFound):
|
||||
// If the blob sidecars haven't arrived yet, the subsequent code will wait for them.
|
||||
// Note: The system will not exit with an error in this scenario.
|
||||
default:
|
||||
log.WithError(err).Error("could not get blob sidecars from DB")
|
||||
missing, err := missingIndices(s.blobStorage, root, expected)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
found := map[uint64]struct{}{}
|
||||
for _, sc := range sidecars {
|
||||
found[sc.Index] = struct{}{}
|
||||
}
|
||||
nc := s.blobNotifiers.forRoot(root)
|
||||
for {
|
||||
select {
|
||||
case idx := <-nc:
|
||||
found[idx] = struct{}{}
|
||||
if len(found) != expected {
|
||||
delete(missing, idx)
|
||||
if len(missing) > 0 {
|
||||
continue
|
||||
}
|
||||
s.blobNotifiers.delete(root)
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob sidecars")
|
||||
}
|
||||
if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
|
||||
return err
|
||||
}
|
||||
logBlobSidecar(sidecars, t)
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
|
||||
|
||||
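The rewritten `isDataAvailable` above replaces the old count-based check with a missing-set that is drained by index notifications. A condensed sketch of that control flow, with `missing`, `nc`, and `ctx` as in the function above:

```go
// Condensed sketch of the wait implemented above (not part of the diff).
for len(missing) > 0 {
	select {
	case idx := <-nc:
		delete(missing, idx) // deleting an index that was never missing is a no-op
	case <-ctx.Done():
		return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
	}
}
// Every expected index has been seen: reload the sidecars from the DB and run
// kzg.IsDataAvailable(kzgCommitments, sidecars) before returning nil, as above.
```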
@@ -1103,12 +1103,15 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(4)
|
||||
var lock sync.Mutex
|
||||
go func() {
|
||||
preState, err := service.getBlockPreState(ctx, wsb1.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb1)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
@@ -1116,7 +1119,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb2)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
@@ -1124,7 +1129,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb3)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
@@ -1132,7 +1139,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb4)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
@@ -3,21 +3,21 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
)
|
||||
|
||||
// sendNewBlobEvent sends a message to the BlobNotifier channel that the blob
// for the block root `root` is ready in the database.
func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
|
||||
s.blobNotifiers.forRoot(root) <- index
|
||||
s.blobNotifiers.notifyIndex(root, index)
|
||||
}
|
||||
|
||||
// ReceiveBlob saves the blob to database and sends the new event
|
||||
func (s *Service) ReceiveBlob(ctx context.Context, b *ethpb.BlobSidecar) error {
|
||||
if err := s.cfg.BeaconDB.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{b}); err != nil {
|
||||
func (s *Service) ReceiveBlob(ctx context.Context, b blocks.VerifiedROBlob) error {
|
||||
if err := s.blobStorage.Save(b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.sendNewBlobEvent([32]byte(b.BlockRoot), b.Index)
|
||||
s.sendNewBlobEvent(b.BlockRoot(), b.Index)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package blockchain
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -42,7 +43,7 @@ type BlockReceiver interface {
|
||||
// BlobReceiver interface defines the methods of chain service for receiving new
|
||||
// blobs
|
||||
type BlobReceiver interface {
|
||||
ReceiveBlob(context.Context, *ethpb.BlobSidecar) error
|
||||
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
@@ -58,6 +59,11 @@ type SlashingReceiver interface {
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
|
||||
defer span.End()
|
||||
// Return early if the block has been synced
|
||||
if s.InForkchoice(blockRoot) {
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
|
||||
return nil
|
||||
}
|
||||
receivedTime := time.Now()
|
||||
s.blockBeingSynced.set(blockRoot)
|
||||
defer s.blockBeingSynced.unset(blockRoot)
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
f "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
@@ -63,6 +64,7 @@ type Service struct {
|
||||
syncComplete chan struct{}
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
blobStorage *filesystem.BlobStorage
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -94,6 +96,35 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
|
||||
type blobNotifierMap struct {
|
||||
sync.RWMutex
|
||||
notifiers map[[32]byte]chan uint64
|
||||
seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
|
||||
}
|
||||
|
||||
// notifyIndex notifies a blob by its index for a given root.
|
||||
// It uses internal maps to keep track of seen indices and notifier channels.
|
||||
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return
|
||||
}
|
||||
|
||||
bn.Lock()
|
||||
seen := bn.seenIndex[root]
|
||||
if seen[idx] {
|
||||
bn.Unlock()
|
||||
return
|
||||
}
|
||||
seen[idx] = true
|
||||
bn.seenIndex[root] = seen
|
||||
|
||||
// Retrieve or create the notifier channel for the given root.
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
|
||||
bn.Unlock()
|
||||
|
||||
c <- idx
|
||||
}
|
||||
|
||||
func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
|
||||
@@ -110,6 +141,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
|
||||
func (bn *blobNotifierMap) delete(root [32]byte) {
|
||||
bn.Lock()
|
||||
defer bn.Unlock()
|
||||
delete(bn.seenIndex, root)
|
||||
delete(bn.notifiers, root)
|
||||
}
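`blobNotifierMap` gives each block root a buffered channel plus a seen-index bitmap, so duplicate notifications for the same index are dropped. A small sketch of the intended producer/consumer flow; the `root` value is illustrative and the struct literal mirrors the initialization in `NewService`.

```go
// Sketch only (not part of the diff).
bn := &blobNotifierMap{
	notifiers: make(map[[32]byte]chan uint64),
	seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
}
var root [32]byte
bn.notifyIndex(root, 0)   // e.g. via sendNewBlobEvent after a sidecar is saved
bn.notifyIndex(root, 0)   // duplicate for the same index; dropped via seenIndex
idx := <-bn.forRoot(root) // a waiter receives index 0 exactly once (channel is buffered)
_ = idx
bn.delete(root) // clean up once data availability for this root is settled
```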
|
||||
|
||||
@@ -126,6 +158,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifierMap{
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
@@ -445,11 +446,10 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
blk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
blk := util.NewBeaconBlock()
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
bs := ðpb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}}
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(bs)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
|
||||
|
||||
@@ -514,3 +514,48 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
|
||||
s.G = g
|
||||
return s.Err
|
||||
}
|
||||
|
||||
func TestNotifyIndex(t *testing.T) {
|
||||
// Initialize a blobNotifierMap
|
||||
bn := &blobNotifierMap{
|
||||
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
}
|
||||
|
||||
// Sample root and index
|
||||
var root [32]byte
|
||||
copy(root[:], "exampleRoot")
|
||||
|
||||
// Test notifying a new index
|
||||
bn.notifyIndex(root, 1)
|
||||
if !bn.seenIndex[root][1] {
|
||||
t.Errorf("Index was not marked as seen")
|
||||
}
|
||||
|
||||
// Test that a new channel is created
|
||||
if _, ok := bn.notifiers[root]; !ok {
|
||||
t.Errorf("Notifier channel was not created")
|
||||
}
|
||||
|
||||
// Test notifying an already seen index
|
||||
bn.notifyIndex(root, 1)
|
||||
if len(bn.notifiers[root]) > 1 {
|
||||
t.Errorf("Notifier channel should not receive multiple messages for the same index")
|
||||
}
|
||||
|
||||
// Test notifying a new index again
|
||||
bn.notifyIndex(root, 2)
|
||||
if !bn.seenIndex[root][2] {
|
||||
t.Errorf("Index was not marked as seen")
|
||||
}
|
||||
|
||||
// Test that the notifier channel receives the index
|
||||
select {
|
||||
case idx := <-bn.notifiers[root]:
|
||||
if idx != 1 {
|
||||
t.Errorf("Received index on channel is incorrect")
|
||||
}
|
||||
default:
|
||||
t.Errorf("Notifier channel did not receive the index")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/async/event"
|
||||
@@ -23,10 +24,13 @@ import (
|
||||
|
||||
type mockBeaconNode struct {
|
||||
stateFeed *event.Feed
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// StateFeed mocks the same method in the beacon node.
|
||||
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
|
||||
mbn.mu.Lock()
|
||||
defer mbn.mu.Unlock()
|
||||
if mbn.stateFeed == nil {
|
||||
mbn.stateFeed = new(event.Feed)
|
||||
}
|
||||
|
||||
@@ -24,11 +24,11 @@ go_library(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
|
||||
@@ -23,11 +23,11 @@ import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -72,6 +72,7 @@ type ChainService struct {
|
||||
OptimisticRoots map[[32]byte]bool
|
||||
BlockSlot primitives.Slot
|
||||
SyncingRoot [32]byte
|
||||
Blobs []blocks.VerifiedROBlob
|
||||
}
|
||||
|
||||
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
|
||||
@@ -573,7 +574,7 @@ func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, roo
|
||||
}
|
||||
|
||||
// ForkChoiceDump mocks the same method in the chain service
|
||||
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
|
||||
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.ForkChoiceDump(ctx)
|
||||
}
|
||||
@@ -612,6 +613,7 @@ func (c *ChainService) BlockBeingSynced(root [32]byte) bool {
|
||||
}
|
||||
|
||||
// ReceiveBlob implements the same method in the chain service
|
||||
func (*ChainService) ReceiveBlob(_ context.Context, _ *ethpb.BlobSidecar) error {
|
||||
func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) error {
|
||||
c.Blobs = append(c.Blobs, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
beacon-chain/cache/checkpoint_state.go
@@ -42,7 +42,7 @@ func NewCheckpointStateCache() *CheckpointStateCache {
|
||||
// StateByCheckpoint fetches state by checkpoint. Returns true with a
|
||||
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
h, err := hash.HashProto(cp)
|
||||
h, err := hash.Proto(cp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -62,7 +62,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
|
||||
// AddCheckpointState adds a CheckpointState object to the cache. This method also trims the least
// recently added CheckpointState object if the cache size has reached the max cache size limit.
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.ReadOnlyBeaconState) error {
|
||||
h, err := hash.HashProto(cp)
|
||||
h, err := hash.Proto(cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
|
||||
return
|
||||
}
|
||||
|
||||
depRoot, err := hash.HashProto(d)
|
||||
depRoot, err := hash.Proto(d)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not remove deposit")
|
||||
return
|
||||
@@ -109,7 +109,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
|
||||
|
||||
idx := -1
|
||||
for i, ctnr := range dc.pendingDeposits {
|
||||
h, err := hash.HashProto(ctnr.Deposit)
|
||||
h, err := hash.Proto(ctnr.Deposit)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not hash deposit")
|
||||
continue
|
||||
|
||||
@@ -769,7 +769,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
ctrs := []*ethpb.DepositContainer{}
|
||||
var ctrs []*ethpb.DepositContainer
|
||||
for i := 0; i < 2000; i++ {
|
||||
ctrs = append(ctrs, generateCtr(uint64(10+(i/2)), int64(i)))
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
|
||||
valIdx: 2,
|
||||
st: genState(1),
|
||||
want: 0,
|
||||
errString: "index 2 out of range",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 32eth",
|
||||
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
|
||||
valIdx: 2,
|
||||
activeBalance: 1,
|
||||
want: 0,
|
||||
errString: "index 2 out of range",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 1",
|
||||
|
||||
@@ -28,10 +28,10 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
}
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoAltair(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
return st
|
||||
}
|
||||
|
||||
@@ -69,6 +69,15 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no active validators, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, 0), // Regression test for divide by zero. Issue #13051.
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: true,
|
||||
errString: "no active validator indices",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -42,7 +42,6 @@ go_library(
|
||||
"//math:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/slashings:go_default_library",
|
||||
@@ -100,7 +99,6 @@ go_test(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
|
||||
|
||||
@@ -55,7 +55,7 @@ func ProcessVoluntaryExits(
|
||||
if len(exits) == 0 {
|
||||
return beaconState, nil
|
||||
}
|
||||
maxExitEpoch, churn := v.ValidatorsMaxExitEpochAndChurn(beaconState)
|
||||
maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
|
||||
var exitEpoch primitives.Epoch
|
||||
for idx, exit := range exits {
|
||||
if exit == nil || exit.Exit == nil {
|
||||
|
||||
@@ -82,7 +82,7 @@ func ProcessRandaoNoVerify(
|
||||
for i, x := range blockRandaoReveal {
|
||||
latestMixSlice[i] ^= x
|
||||
}
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), latestMixSlice); err != nil {
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), [32]byte(latestMixSlice)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
@@ -236,7 +235,7 @@ func BLSChangesSignatureBatch(
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
batch.PublicKeys[i] = publicKey
|
||||
htr, err := signing.SigningData(change.Message.HashTreeRoot, domain)
|
||||
htr, err := signing.Data(change.Message.HashTreeRoot, domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute BLSToExecutionChange signing data")
|
||||
}
|
||||
@@ -251,7 +250,7 @@ func BLSChangesSignatureBatch(
|
||||
// is from a previous fork.
|
||||
func VerifyBLSChangeSignature(
|
||||
st state.ReadOnlyBeaconState,
|
||||
change *ethpbv2.SignedBLSToExecutionChange,
|
||||
change *ethpb.SignedBLSToExecutionChange,
|
||||
) error {
|
||||
c := params.BeaconConfig()
|
||||
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
@@ -152,7 +151,7 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessBLSToExecutionChange(st, signed)
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
})
|
||||
|
||||
t.Run("signature does not verify", func(t *testing.T) {
|
||||
@@ -1209,8 +1208,7 @@ func TestBLSChangesSignatureBatch(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
@@ -1274,8 +1272,7 @@ func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
require.Equal(t, false, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
@@ -1362,7 +1359,6 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
params.OverrideBeaconConfig(savedConfig)
|
||||
}
|
||||
|
||||
@@ -112,7 +112,7 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
if isActive && belowEjectionBalance {
|
||||
// Here is fine to do a quadratic loop since this should
|
||||
// barely happen
|
||||
maxExitEpoch, churn := validators.ValidatorsMaxExitEpochAndChurn(state)
|
||||
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(state)
|
||||
state, _, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx), maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, validators.ValidatorAlreadyExitedErr) {
|
||||
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
|
||||
@@ -349,7 +349,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), mix); err != nil {
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), [32]byte(mix)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -99,7 +99,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
|
||||
require.ErrorContains(t, "index 12390192 out of range", err)
|
||||
require.ErrorContains(t, "validator index 12390192 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -182,7 +182,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
|
||||
require.ErrorContains(t, "index 120391029 out of range", err)
|
||||
require.ErrorContains(t, "validator index 120391029 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -282,7 +282,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
|
||||
require.ErrorContains(t, "index 129301923 out of range", err)
|
||||
require.ErrorContains(t, "validator index 129301923 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
@@ -367,7 +367,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
|
||||
require.ErrorContains(t, "index 21093019 out of range", err)
|
||||
require.ErrorContains(t, "validator index 21093019 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
|
||||
@@ -136,6 +136,10 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(indices) == 0 {
|
||||
return nil, errors.New("no active validator indices")
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
|
||||
@@ -560,12 +560,24 @@ func TestActiveValidatorIndices(t *testing.T) {
|
||||
},
|
||||
want: []primitives.ValidatorIndex{0, 2, 3},
|
||||
},
|
||||
{
|
||||
name: "impossible_zero_validators", // Regression test for issue #13051
|
||||
args: args{
|
||||
state: ðpb.BeaconState{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
Validators: make([]*ethpb.Validator, 0),
|
||||
},
|
||||
epoch: 10,
|
||||
},
|
||||
wantedErr: "no active validator indices",
|
||||
},
|
||||
}
|
||||
defer ClearCache()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators(tt.args.state.Validators))
|
||||
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
|
||||
@@ -92,12 +92,12 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
|
||||
// domain=domain,
|
||||
// ))
|
||||
func ComputeSigningRoot(object fssz.HashRoot, domain []byte) ([32]byte, error) {
|
||||
return SigningData(object.HashTreeRoot, domain)
|
||||
return Data(object.HashTreeRoot, domain)
|
||||
}
|
||||
|
||||
// SigningData computes the signing data by utilising the provided root function and then
|
||||
// Data computes the signing data by utilising the provided root function and then
|
||||
// returning the signing data of the container object.
|
||||
func SigningData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
|
||||
func Data(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
|
||||
objRoot, err := rootFunc()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
@@ -152,7 +152,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
root, err := SigningData(blkHdr.HashTreeRoot, domain)
|
||||
root, err := Data(blkHdr.HashTreeRoot, domain)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute signing root")
|
||||
}
|
||||
@@ -191,7 +191,7 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
// utilize custom block hashing function
|
||||
root, err := SigningData(rootFunc, domain)
|
||||
root, err := Data(rootFunc, domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute signing root")
|
||||
}
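With `SigningData` renamed to `Data`, call sites pass a hash-tree-root function plus a domain, and `ComputeSigningRoot` remains the convenience wrapper. A small caller sketch, assuming `blkHdr` (an `*ethpb.BeaconBlockHeader`) and a `domain` byte slice are in scope:

```go
// Sketch only (not part of the diff); blkHdr and domain are assumed to be in scope.
root, err := signing.Data(blkHdr.HashTreeRoot, domain)
if err != nil {
	return errors.Wrap(err, "could not compute signing root")
}
// Equivalent result through the wrapper shown above:
root2, err := signing.ComputeSigningRoot(blkHdr, domain)
if err != nil {
	return errors.Wrap(err, "could not compute signing root")
}
_ = root
_ = root2
```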
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
@@ -221,6 +222,18 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
|
||||
// EmptyGenesisState returns an empty beacon state object.
|
||||
func EmptyGenesisState() (state.BeaconState, error) {
|
||||
blockRoots := make([][]byte, fieldparams.BlockRootsLength)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
stateRoots := make([][]byte, fieldparams.StateRootsLength)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
mixes := make([][]byte, fieldparams.RandaoMixesLength)
|
||||
for i := range mixes {
|
||||
mixes[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
st := ðpb.BeaconState{
|
||||
// Misc fields.
|
||||
Slot: 0,
|
||||
@@ -229,6 +242,9 @@ func EmptyGenesisState() (state.BeaconState, error) {
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
},
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: mixes,
|
||||
// Validator registry fields.
|
||||
Validators: []*ethpb.Validator{},
|
||||
Balances: []uint64{},
|
||||
|
||||
@@ -103,8 +103,8 @@ func TestGenesisState_HashEquality(t *testing.T) {
|
||||
pbstate, err := state_native.ProtobufBeaconStatePhase0(state.ToProto())
|
||||
require.NoError(t, err)
|
||||
|
||||
root1, err1 := hash.HashProto(pbState1)
|
||||
root2, err2 := hash.HashProto(pbstate)
|
||||
root1, err1 := hash.Proto(pbState1)
|
||||
root2, err2 := hash.Proto(pbstate)
|
||||
|
||||
if err1 != nil || err2 != nil {
|
||||
t.Fatalf("Failed to marshal state to bytes: %v %v", err1, err2)
|
||||
|
||||
@@ -382,10 +382,18 @@ func TestProcessEpochPrecompute_CanProcess(t *testing.T) {
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinDepositAmount,
|
||||
},
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators([]*ethpb.Validator{}))
|
||||
newState, err := transition.ProcessEpochPrecompute(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
|
||||
|
||||
@@ -21,9 +21,9 @@ import (
|
||||
// an already exited validator
|
||||
var ValidatorAlreadyExitedErr = errors.New("validator already exited")
|
||||
|
||||
// ValidatorsMaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
|
||||
// MaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
|
||||
// epoch and the number of them
|
||||
func ValidatorsMaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
|
||||
func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
|
||||
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
|
||||
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
e := val.ExitEpoch()
|
||||
@@ -134,7 +134,7 @@ func SlashValidator(
|
||||
slashedIdx primitives.ValidatorIndex,
|
||||
penaltyQuotient uint64,
|
||||
proposerRewardQuotient uint64) (state.BeaconState, error) {
|
||||
maxExitEpoch, churn := ValidatorsMaxExitEpochAndChurn(s)
|
||||
maxExitEpoch, churn := MaxExitEpochAndChurn(s)
|
||||
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, ValidatorAlreadyExitedErr) {
|
||||
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
|
||||
|
||||
@@ -410,7 +410,7 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.state)
|
||||
require.NoError(t, err)
|
||||
epoch, churn := ValidatorsMaxExitEpochAndChurn(s)
|
||||
epoch, churn := MaxExitEpochAndChurn(s)
|
||||
require.Equal(t, tt.wantedEpoch, epoch)
|
||||
require.Equal(t, tt.wantedChurn, churn)
|
||||
}
|
||||
|
||||
@@ -13,9 +13,9 @@ func NewDB(ctx context.Context, dirPath string) (Database, error) {
|
||||
return kv.NewKVStore(ctx, dirPath)
|
||||
}
|
||||
|
||||
// NewDBFilename uses the KVStoreDatafilePath so that if this layer of
|
||||
// NewFileName uses the KVStoreDatafilePath so that if this layer of
|
||||
// indirection between db.NewDB->kv.NewKVStore ever changes, it will be easy to remember
|
||||
// to also change this filename indirection at the same time.
|
||||
func NewDBFilename(dirPath string) string {
|
||||
return kv.KVStoreDatafilePath(dirPath)
|
||||
func NewFileName(dirPath string) string {
|
||||
return kv.StoreDatafilePath(dirPath)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
package db
|
||||
|
||||
import "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||
)
|
||||
|
||||
// ErrNotFound can be used to determine if an error from a method in the database package
|
||||
// represents a "not found" error. These often require different handling than a low-level
|
||||
@@ -19,3 +24,9 @@ var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
|
||||
|
||||
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
|
||||
var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot
|
||||
|
||||
// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
|
||||
// as equivalent to db.ErrNotFound.
|
||||
func IsNotFound(err error) bool {
|
||||
return errors.Is(err, ErrNotFound) || os.IsNotExist(err)
|
||||
}
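`IsNotFound` lets callers treat a missing flat file the same way as a missing bolt record. A hedged sketch of a caller, where `beaconDB`, `ctx`, and `root` are assumed to be in scope:

```go
// Sketch only (not part of the diff).
sidecars, err := beaconDB.BlobSidecarsByRoot(ctx, root)
switch {
case db.IsNotFound(err):
	// Nothing stored for this root yet, whether the backend is bolt or flat files;
	// fall through to the wait/fetch path instead of treating this as a failure.
case err != nil:
	return err
}
_ = sidecars
```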
|
||||
|
||||
beacon-chain/db/filesystem/BUILD.bazel (new file)
@@ -0,0 +1,35 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"blob.go",
|
||||
"ephemeral.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["blob_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/db/filesystem/blob.go (new file)
@@ -0,0 +1,176 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
var (
|
||||
errIndexOutOfBounds = errors.New("blob index in file name > MaxBlobsPerBlock")
|
||||
)
|
||||
|
||||
const (
|
||||
sszExt = "ssz"
|
||||
partExt = "part"
|
||||
blobLockPath = "blob.lock"
|
||||
)
|
||||
|
||||
// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
|
||||
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
|
||||
// initialized once per beacon node.
|
||||
func NewBlobStorage(base string) (*BlobStorage, error) {
|
||||
base = path.Clean(base)
|
||||
if err := os.MkdirAll(base, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs := afero.NewBasePathFs(afero.NewOsFs(), base)
|
||||
return &BlobStorage{fs: fs}, nil
|
||||
}
|
||||
|
||||
// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
|
||||
type BlobStorage struct {
|
||||
fs afero.Fs
|
||||
}
|
||||
|
||||
// Save saves blobs given a list of sidecars.
|
||||
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
fname := namerForSidecar(sidecar)
|
||||
sszPath := fname.ssz()
|
||||
exists, err := afero.Exists(bs.fs, sszPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
// TODO: should it be an error to save a blob that already exists?
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serialize the ethpb.BlobSidecar to binary data using SSZ.
|
||||
sidecarData, err := ssz.MarshalSSZ(sidecar)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to serialize sidecar data")
|
||||
}
|
||||
if err := bs.fs.Mkdir(fname.dir(), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
partPath := fname.partial()
|
||||
// Create a partial file and write the serialized data to it.
|
||||
partialFile, err := bs.fs.Create(partPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create partial file")
|
||||
}
|
||||
|
||||
_, err = partialFile.Write(sidecarData)
|
||||
if err != nil {
|
||||
closeErr := partialFile.Close()
|
||||
if closeErr != nil {
|
||||
return closeErr
|
||||
}
|
||||
return errors.Wrap(err, "failed to write to partial file")
|
||||
}
|
||||
err = partialFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Atomically rename the partial file to its final name.
|
||||
err = bs.fs.Rename(partPath, sszPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to rename partial file to final name")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves a single BlobSidecar by its root and index.
|
||||
// Since BlobStorage only writes blobs that have undergone full verification, the return
|
||||
// value is always a VerifiedROBlob.
|
||||
func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, error) {
|
||||
expected := blobNamer{root: root, index: idx}
|
||||
encoded, err := afero.ReadFile(bs.fs, expected.ssz())
|
||||
var v blocks.VerifiedROBlob
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
s := ðpb.BlobSidecar{}
|
||||
if err := s.UnmarshalSSZ(encoded); err != nil {
|
||||
return v, err
|
||||
}
|
||||
ro, err := blocks.NewROBlobWithRoot(s, root)
|
||||
if err != nil {
|
||||
return blocks.VerifiedROBlob{}, err
|
||||
}
|
||||
return verification.BlobSidecarNoop(ro)
|
||||
}
|
||||
|
||||
// Indices generates a bitmap representing which BlobSidecar.Index values are present on disk for a given root.
|
||||
// This value can be compared to the commitments observed in a block to determine which indices need to be found
|
||||
// on the network to confirm data availability.
|
||||
func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]bool, error) {
|
||||
var mask [fieldparams.MaxBlobsPerBlock]bool
|
||||
rootDir := blobNamer{root: root}.dir()
|
||||
entries, err := afero.ReadDir(bs.fs, rootDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return mask, nil
|
||||
}
|
||||
return mask, err
|
||||
}
|
||||
for i := range entries {
|
||||
if entries[i].IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entries[i].Name()
|
||||
if !strings.HasSuffix(name, sszExt) {
|
||||
continue
|
||||
}
|
||||
parts := strings.Split(name, ".")
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
u, err := strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
|
||||
}
|
||||
if u > fieldparams.MaxBlobsPerBlock {
|
||||
return mask, errIndexOutOfBounds
|
||||
}
|
||||
mask[u] = true
|
||||
}
|
||||
return mask, nil
|
||||
}
|
||||
|
||||
type blobNamer struct {
|
||||
root [32]byte
|
||||
index uint64
|
||||
}
|
||||
|
||||
func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
|
||||
return blobNamer{root: sc.BlockRoot(), index: sc.Index}
|
||||
}
|
||||
|
||||
func (p blobNamer) dir() string {
|
||||
return fmt.Sprintf("%#x", p.root)
|
||||
}
|
||||
|
||||
func (p blobNamer) fname(ext string) string {
|
||||
return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, ext))
|
||||
}
|
||||
|
||||
func (p blobNamer) partial() string {
|
||||
return p.fname(partExt)
|
||||
}
|
||||
|
||||
func (p blobNamer) ssz() string {
|
||||
return p.fname(sszExt)
|
||||
}
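The `blobNamer` helpers above define the on-disk layout: one directory per block root and one `<index>.ssz` file per sidecar, with a `.part` file used until the atomic rename in `Save`. A small sketch, using a zero `root` purely for illustration:

```go
// Sketch only (not part of the diff).
var root [32]byte
n := blobNamer{root: root, index: 2}
fmt.Println(n.dir())     // "0x0000…0000"        – one directory per block root
fmt.Println(n.partial()) // "0x0000…0000/2.part" – written first by Save
fmt.Println(n.ssz())     // "0x0000…0000/2.ssz"  – final name after the atomic rename
```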
|
||||
beacon-chain/db/filesystem/blob_test.go (new file)
@@ -0,0 +1,70 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/spf13/afero"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
)
|
||||
|
||||
func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock)
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("no error for duplicate", func(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
existingSidecar := testSidecars[0]
|
||||
|
||||
blobPath := namerForSidecar(existingSidecar).ssz()
|
||||
// Serialize the existing BlobSidecar to binary data.
|
||||
existingSidecarData, err := ssz.MarshalSSZ(existingSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, bs.Save(existingSidecar))
|
||||
// No error when attempting to write twice.
|
||||
require.NoError(t, bs.Save(existingSidecar))
|
||||
|
||||
content, err := afero.ReadFile(fs, blobPath)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(existingSidecarData, content))
|
||||
|
||||
// Deserialize the BlobSidecar from the saved file data.
|
||||
savedSidecar := ðpb.BlobSidecar{}
|
||||
err = savedSidecar.UnmarshalSSZ(content)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compare the original Sidecar and the saved Sidecar.
|
||||
require.DeepSSZEqual(t, existingSidecar.BlobSidecar, savedSidecar)
|
||||
|
||||
})
|
||||
t.Run("indices", func(t *testing.T) {
|
||||
bs := NewEphemeralBlobStorage(t)
|
||||
sc := testSidecars[2]
|
||||
require.NoError(t, bs.Save(sc))
|
||||
actualSc, err := bs.Get(sc.BlockRoot(), sc.Index)
|
||||
require.NoError(t, err)
|
||||
expectedIdx := [fieldparams.MaxBlobsPerBlock]bool{false, false, true}
|
||||
actualIdx, err := bs.Indices(actualSc.BlockRoot())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedIdx, actualIdx)
|
||||
})
|
||||
|
||||
t.Run("round trip write then read", func(t *testing.T) {
|
||||
bs := NewEphemeralBlobStorage(t)
|
||||
err := bs.Save(testSidecars[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := testSidecars[0]
|
||||
actual, err := bs.Get(expected.BlockRoot(), expected.Index)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
beacon-chain/db/filesystem/ephemeral.go (new file)
@@ -0,0 +1,21 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
// NewEphemeralBlobStorage should only be used for tests.
|
||||
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
|
||||
// improving test performance and simplifying cleanup.
|
||||
func NewEphemeralBlobStorage(_ testing.TB) *BlobStorage {
|
||||
return &BlobStorage{fs: afero.NewMemMapFs()}
|
||||
}
|
||||
|
||||
// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
|
||||
// in order to interact with it outside the parameters of the BlobStorage api.
|
||||
func NewEphemeralBlobStorageWithFs(_ testing.TB) (afero.Fs, *BlobStorage) {
|
||||
fs := afero.NewMemMapFs()
|
||||
return fs, &BlobStorage{fs: fs}
|
||||
}
|
||||
@@ -56,8 +56,8 @@ type ReadOnlyDatabase interface {
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
|
||||
// Blob operations.
|
||||
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)
|
||||
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)
|
||||
|
||||
// origin checkpoint sync support
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
@@ -95,8 +95,8 @@ type NoHeadAccessDatabase interface {
|
||||
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
|
||||
|
||||
// Blob operations.
|
||||
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
|
||||
DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error
|
||||
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.DeprecatedBlobSidecar) error
|
||||
DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
}
|
||||
@@ -170,7 +170,7 @@ type SlasherDatabase interface {
|
||||
// Database interface with full access.
|
||||
type Database interface {
|
||||
io.Closer
|
||||
backup.BackupExporter
|
||||
backup.Exporter
|
||||
HeadAccessDatabase
|
||||
|
||||
DatabasePath() string
|
||||
|
||||
@@ -124,6 +124,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
|
||||
@@ -48,14 +48,14 @@ func (rk blobRotatingKey) BlockRoot() []byte {
|
||||
|
||||
// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
|
||||
//
|
||||
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
|
||||
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_EPOCHS_TO_PERSIST_BLOBS*SLOTS_PER_EPOCH
|
||||
//
|
||||
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||
//
|
||||
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
|
||||
// in the rotating keys buffer, we overwrite all elements for that slot. Otherwise, we merge the blob with an existing one.
|
||||
// Trying to replace a newer blob with an older one is an error.
|
||||
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
|
||||
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.DeprecatedBlobSidecar) error {
|
||||
if len(scs) == 0 {
|
||||
return errEmptySidecar
|
||||
}
|
||||
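The save comment above maps each slot into a rotating window of `MAX_EPOCHS_TO_PERSIST_BLOBS * SLOTS_PER_EPOCH` slots. A minimal sketch of that mapping; the function name and parameters are illustrative, not the actual kv helper:

```go
// Sketch only (not part of the diff): slot_to_rotating_buffer from the comment above.
func slotToRotatingBuffer(slot, slotsPerEpoch, maxEpochsToPersistBlobs uint64) uint64 {
	maxSlots := maxEpochsToPersistBlobs * slotsPerEpoch
	return slot % maxSlots // always in [0, maxSlots)
}
```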
@@ -121,8 +121,9 @@ func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) e
|
||||
})
|
||||
}
|
||||
|
||||
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and no more than MAX_BLOB_EPOCHS.
|
||||
func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
|
||||
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and
|
||||
// there are no more than MAX_BLOBS_PER_BLOCK sidecars.
|
||||
func validUniqueSidecars(scs []*ethpb.DeprecatedBlobSidecar) ([]*ethpb.DeprecatedBlobSidecar, error) {
|
||||
if len(scs) == 0 {
|
||||
return nil, errEmptySidecar
|
||||
}
|
||||
@@ -166,7 +167,7 @@ func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error)
|
||||
}
|
||||
|
||||
// sortSidecars sorts the sidecars by their index.
|
||||
func sortSidecars(scs []*ethpb.BlobSidecar) {
|
||||
func sortSidecars(scs []*ethpb.DeprecatedBlobSidecar) {
|
||||
sort.Slice(scs, func(i, j int) bool {
|
||||
return scs[i].Index < scs[j].Index
|
||||
})
|
||||
@@ -177,7 +178,7 @@ func sortSidecars(scs []*ethpb.BlobSidecar) {
|
||||
// Otherwise, the result will be filtered to only include the specified indices.
|
||||
// An error will result if an invalid index is specified.
|
||||
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
|
||||
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
|
||||
defer span.End()
|
||||
|
||||
@@ -206,7 +207,7 @@ func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices .
|
||||
return filterForIndices(sc, indices...)
|
||||
}
|
||||
|
||||
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
|
||||
if len(indices) == 0 {
|
||||
return sc.Sidecars, nil
|
||||
}
|
||||
@@ -214,7 +215,7 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
|
||||
// in ascending order from eg 0..3, without gaps. This allows us to assume the indices argument
|
||||
// maps 1:1 with indices in the BlobSidecars storage object.
|
||||
maxIdx := uint64(len(sc.Sidecars)) - 1
|
||||
sidecars := make([]*ethpb.BlobSidecar, len(indices))
|
||||
sidecars := make([]*ethpb.DeprecatedBlobSidecar, len(indices))
|
||||
for i, idx := range indices {
|
||||
if idx > maxIdx {
|
||||
return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
|
||||
@@ -225,11 +226,11 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
|
||||
}
|
||||

// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// If the `indices` argument is omitted, all blobs for the slot will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
defer span.End()

@@ -260,8 +261,8 @@ func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices
return filterForIndices(sc, indices...)
}
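The doc comment above spells out the read path: with no indices argument, every sidecar stored for the slot is returned; with explicit indices the result is filtered, and an index that was never stored surfaces ErrNotFound. A hedged usage sketch follows; store, ctx, and the slot value are placeholders supplied by the caller.

// Usage sketch only; store is an assumed *kv.Store and ctx a context.Context.
all, err := store.BlobSidecarsBySlot(ctx, 100)
if err != nil {
	return err
}
// Filter to indices 0 and 3; an out-of-range index yields ErrNotFound.
subset, err := store.BlobSidecarsBySlot(ctx, 100, 0, 3)
if err != nil {
	return err
}
// ... use all and subset ...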

// DeleteBlobSidecar returns true if the blobs are in the db.
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
// DeleteBlobSidecars returns true if the blobs are in the db.
func (s *Store) DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
@@ -280,7 +281,7 @@ func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte)

// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobSidecar) blobRotatingKey {
func blobSidecarKey(blob *ethpb.DeprecatedBlobSidecar) blobRotatingKey {
key := slotKey(blob.Slot)
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
key = append(key, blob.BlockRoot...)

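The key layout described above is what gives the store its rotating-buffer behaviour: the leading slot % MAX_SLOTS_TO_PERSIST_BLOBS prefix means a sidecar written one full retention window later lands on the same prefix and replaces the old entry, while the full slot bytes and the block root keep keys unique inside the window. A rough sketch of the construction, using hypothetical helper names, the 131072 bound mentioned in the comments above, and encoding/binary:

// Sketch only; the constant and helpers stand in for the package's real
// slotKey / bytesutil.SlotToBytesBigEndian and MAX_SLOTS_TO_PERSIST_BLOBS.
const maxSlotsToPersistBlobsSketch = 131072

func rotatingBlobKeySketch(slot uint64, blockRoot [32]byte) []byte {
	// slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS
	key := slotBytesSketch(slot % maxSlotsToPersistBlobsSketch)
	key = append(key, slotBytesSketch(slot)...)
	return append(key, blockRoot[:]...)
}

func slotBytesSketch(s uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, s)
	return b
}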
@@ -22,7 +22,7 @@ import (
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
|
||||
func equalBlobSlices(expect []*ethpb.DeprecatedBlobSidecar, got []*ethpb.DeprecatedBlobSidecar) error {
|
||||
if len(expect) != len(got) {
|
||||
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
@@ -91,10 +91,10 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
expect := make([]*ethpb.DeprecatedBlobSidecar, 2)
|
||||
expect[0] = scs[0]
|
||||
expect[1] = scs[3]
|
||||
|
||||
@@ -108,7 +108,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
@@ -127,7 +127,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
@@ -136,7 +136,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
|
||||
@@ -147,10 +147,10 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
expect := make([]*ethpb.DeprecatedBlobSidecar, 2)
|
||||
expect[0] = scs[0]
|
||||
expect[1] = scs[3]
|
||||
|
||||
@@ -165,7 +165,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
@@ -175,11 +175,11 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
|
||||
require.NoError(t, db.DeleteBlobSidecars(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
|
||||
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.ErrorIs(t, ErrNotFound, err)
|
||||
require.Equal(t, 0, len(got))
|
||||
@@ -191,11 +191,11 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
for i := 0; i < fieldparams.MaxBlobsPerBlock; i++ {
|
||||
scs[i].Slot = primitives.Slot(i)
|
||||
scs[i].BlockRoot = bytesutil.PadTo([]byte{byte(i)}, 32)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{scs[i]}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{scs[i]}))
|
||||
br := bytesutil.ToBytes32(scs[i].BlockRoot)
|
||||
saved, err := db.BlobSidecarsByRoot(ctx, br)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices([]*ethpb.BlobSidecar{scs[i]}, saved))
|
||||
require.NoError(t, equalBlobSlices([]*ethpb.DeprecatedBlobSidecar{scs[i]}, saved))
|
||||
}
|
||||
})
|
||||
t.Run("saving a new blob for rotation (batch)", func(t *testing.T) {
|
||||
@@ -226,7 +226,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
@@ -238,7 +238,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
sc.Slot = sc.Slot + newRetentionSlot
|
||||
}
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
|
||||
_, err = db.BlobSidecarsBySlot(ctx, 100)
|
||||
@@ -264,7 +264,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
sc.Slot = sc.Slot + newRetentionSlot
|
||||
}
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
|
||||
_, err = db.BlobSidecarsBySlot(ctx, 100)
|
||||
@@ -278,7 +278,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
@@ -304,8 +304,8 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
eScs := generateEquivocatingBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2)
|
||||
|
||||
for i, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{eScs[i]}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{eScs[i]}))
|
||||
}
|
||||
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
@@ -318,15 +318,15 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
|
||||
blobSidecars := make([]*ethpb.BlobSidecar, n)
|
||||
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar {
|
||||
blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
blobSidecars[i] = generateBlobSidecar(t, i)
|
||||
}
|
||||
return blobSidecars
|
||||
}
|
||||
|
||||
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar {
|
||||
blob := make([]byte, 131072)
|
||||
_, err := rand.Read(blob)
|
||||
require.NoError(t, err)
|
||||
@@ -336,7 +336,7 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
kzgProof := make([]byte, 48)
|
||||
_, err = rand.Read(kzgProof)
|
||||
require.NoError(t, err)
|
||||
return ðpb.BlobSidecar{
|
||||
return ðpb.DeprecatedBlobSidecar{
|
||||
BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
|
||||
Index: index,
|
||||
Slot: 100,
|
||||
@@ -348,15 +348,15 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
}
|
||||
}
|
||||
|
||||
func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
|
||||
blobSidecars := make([]*ethpb.BlobSidecar, n)
|
||||
func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar {
|
||||
blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
blobSidecars[i] = generateEquivocatingBlobSidecar(t, i)
|
||||
}
|
||||
return blobSidecars
|
||||
}
|
||||
|
||||
func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar {
|
||||
blob := make([]byte, 131072)
|
||||
_, err := rand.Read(blob)
|
||||
require.NoError(t, err)
|
||||
@@ -367,7 +367,7 @@ func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSide
|
||||
_, err = rand.Read(kzgProof)
|
||||
require.NoError(t, err)
|
||||
|
||||
return ðpb.BlobSidecar{
|
||||
return ðpb.DeprecatedBlobSidecar{
|
||||
BlockRoot: bytesutil.PadTo([]byte{'c'}, 32),
|
||||
Index: index,
|
||||
Slot: 100,
|
||||
@@ -382,16 +382,16 @@ func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSide
|
||||
func Test_validUniqueSidecars_validation(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
scs []*ethpb.BlobSidecar
|
||||
scs []*ethpb.DeprecatedBlobSidecar
|
||||
err error
|
||||
}{
|
||||
{name: "empty", scs: []*ethpb.BlobSidecar{}, err: errEmptySidecar},
|
||||
{name: "empty", scs: []*ethpb.DeprecatedBlobSidecar{}, err: errEmptySidecar},
|
||||
{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), err: errBlobSidecarLimit},
|
||||
{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch},
|
||||
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch},
|
||||
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch},
|
||||
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch},
|
||||
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}},
|
||||
{name: "invalid slot", scs: []*ethpb.DeprecatedBlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch},
|
||||
{name: "invalid proposer index", scs: []*ethpb.DeprecatedBlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch},
|
||||
{name: "invalid root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch},
|
||||
{name: "invalid parent root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch},
|
||||
{name: "happy path", scs: []*ethpb.DeprecatedBlobSidecar{{Index: 0}, {Index: 1}}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -408,43 +408,43 @@ func Test_validUniqueSidecars_validation(t *testing.T) {
|
||||
func Test_validUniqueSidecars_dedup(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
scs []*ethpb.BlobSidecar
|
||||
expected []*ethpb.BlobSidecar
|
||||
scs []*ethpb.DeprecatedBlobSidecar
|
||||
expected []*ethpb.DeprecatedBlobSidecar
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "duplicate sidecar",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
|
||||
},
|
||||
{
|
||||
name: "single sidecar",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
|
||||
},
|
||||
{
|
||||
name: "multiple duplicates",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
},
|
||||
{
|
||||
name: "ok number after de-dupe, > 6 before",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
},
|
||||
{
|
||||
name: "max unique, no dupes",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
},
|
||||
{
|
||||
name: "too many unique",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
err: errBlobSidecarLimit,
|
||||
},
|
||||
{
|
||||
name: "too many unique with dupes",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
err: errBlobSidecarLimit,
|
||||
},
|
||||
}
|
||||
@@ -462,7 +462,7 @@ func Test_validUniqueSidecars_dedup(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStore_sortSidecars(t *testing.T) {
|
||||
scs := []*ethpb.BlobSidecar{
|
||||
scs := []*ethpb.DeprecatedBlobSidecar{
|
||||
{Index: 6},
|
||||
{Index: 4},
|
||||
{Index: 2},
|
||||
@@ -480,7 +480,7 @@ func TestStore_sortSidecars(t *testing.T) {
|
||||
func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
|
||||
s := setupDB(b)
|
||||
ctx := context.Background()
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{
|
||||
{BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0},
|
||||
}))
|
||||
|
||||
@@ -490,7 +490,7 @@ func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
|
||||
r := make([]byte, 32)
|
||||
_, err := rand.Read(r)
|
||||
require.NoError(b, err)
|
||||
scs := []*ethpb.BlobSidecar{
|
||||
scs := []*ethpb.DeprecatedBlobSidecar{
|
||||
{BlockRoot: r, Slot: primitives.Slot(i)},
|
||||
}
|
||||
k := blobSidecarKey(scs[0])
|
||||
@@ -502,7 +502,7 @@ func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
|
||||
})
|
||||
require.NoError(b, err)
|
||||
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{
|
||||
{BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071},
|
||||
}))
|
||||
|
||||
@@ -529,7 +529,7 @@ func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlobRotatingKey(t *testing.T) {
|
||||
k := blobSidecarKey(ðpb.BlobSidecar{
|
||||
k := blobSidecarKey(ðpb.DeprecatedBlobSidecar{
|
||||
Slot: 1,
|
||||
BlockRoot: []byte{2},
|
||||
})
|
||||
|
||||
@@ -35,5 +35,5 @@ func TestConfigureBlobRetentionEpoch(t *testing.T) {
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(minEpochsForSidecarRequest-1, 10)))
|
||||
cliCtx = cli.NewContext(&app, set, nil)
|
||||
err := ConfigureBlobRetentionEpoch(cliCtx)
|
||||
require.ErrorContains(t, "extend-blob-retention-epoch smaller than spec default", err)
|
||||
require.ErrorContains(t, "blob-retention-epochs smaller than spec default", err)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,12 @@
package kv

import (
"io"
"os"
"testing"

"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/sirupsen/logrus"
)

func init() {
@@ -10,3 +15,9 @@ func init() {
panic(err)
}
}

func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
os.Exit(m.Run())
}

@@ -92,10 +92,10 @@ type Store struct {
ctx context.Context
}

// KVStoreDatafilePath is the canonical construction of a full
// StoreDatafilePath is the canonical construction of a full
// database file path from the directory path, so that code outside
// this package can find the full path in a consistent way.
func KVStoreDatafilePath(dirPath string) string {
func StoreDatafilePath(dirPath string) string {
return path.Join(dirPath, DatabaseFileName)
}

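Since the renamed StoreDatafilePath exists precisely so that code outside this package can locate the database file, a call site simply joins it off the data directory. The snippet below is illustrative only; the directory path is a made-up example.

// Illustrative only; the data directory is a hypothetical example.
datafile := kv.StoreDatafilePath("/var/lib/prysm/beaconchaindata")
fmt.Println("beacon node DB expected at", datafile)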
@@ -146,7 +146,7 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
return nil, err
}
}
datafile := KVStoreDatafilePath(dirPath)
datafile := StoreDatafilePath(dirPath)
log.Infof("Opening Bolt DB at %s", datafile)
boltDB, err := bolt.Open(
datafile,

@@ -22,7 +22,7 @@ func Restore(cliCtx *cli.Context) error {
targetDir := cliCtx.String(cmd.RestoreTargetDirFlag.Name)

restoreDir := path.Join(targetDir, kv.BeaconNodeDbDirName)
if file.FileExists(path.Join(restoreDir, kv.DatabaseFileName)) {
if file.Exists(path.Join(restoreDir, kv.DatabaseFileName)) {
resp, err := prompt.ValidatePrompt(
os.Stdin, dbExistsYesNoPrompt, prompt.ValidateYesOrNo,
)

@@ -5,7 +5,6 @@ go_library(
|
||||
srcs = [
|
||||
"block_cache.go",
|
||||
"block_reader.go",
|
||||
"check_transition_config.go",
|
||||
"deposit.go",
|
||||
"engine_client.go",
|
||||
"errors.go",
|
||||
@@ -82,7 +81,6 @@ go_test(
|
||||
srcs = [
|
||||
"block_cache_test.go",
|
||||
"block_reader_test.go",
|
||||
"check_transition_config_test.go",
|
||||
"deposit_test.go",
|
||||
"engine_client_fuzz_test.go",
|
||||
"engine_client_test.go",
|
||||
@@ -96,7 +94,6 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
@@ -140,6 +137,5 @@ go_test(
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -48,6 +48,8 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
testAcc.Backend.Commit()
|
||||
|
||||
tickerChan := make(chan time.Time)
|
||||
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
|
||||
exitRoutine := make(chan bool)
|
||||
|
||||
go func() {
|
||||
@@ -58,8 +60,6 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
header, err := web3Service.HeaderByNumber(web3Service.ctx, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
tickerChan := make(chan time.Time)
|
||||
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
|
||||
tickerChan <- time.Now()
|
||||
web3Service.cancel()
|
||||
exitRoutine <- true
|
||||
|
||||
@@ -1,168 +0,0 @@
|
||||
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
checkTransitionPollingInterval = time.Second * 10
|
||||
logTtdInterval = time.Minute
|
||||
configMismatchLog = "Configuration mismatch between your execution client and Prysm. " +
|
||||
"Please check your execution client and restart it with the proper configuration. If this is not done, " +
|
||||
"your node will not be able to complete the proof-of-stake transition"
|
||||
needsEnginePortLog = "Could not check execution client configuration. " +
|
||||
"You are probably connecting to your execution client on the wrong port. For the Ethereum " +
|
||||
"merge, you will need to connect to your " +
|
||||
"execution client on port 8551 rather than 8545. This is known as the 'engine API' port and needs to be " +
|
||||
"authenticated if connecting via HTTP. See our documentation on how to set up this up here " +
|
||||
"https://docs.prylabs.network/docs/execution-node/authentication"
|
||||
)
|
||||
|
||||
// Checks the transition configuration between Prysm and the connected execution node to ensure
|
||||
// there are no differences in terminal block difficulty and block hash.
|
||||
// If there are any discrepancies, we must log errors to ensure users can resolve
|
||||
// the problem and be ready for the merge transition.
|
||||
func (s *Service) checkTransitionConfiguration(
|
||||
ctx context.Context, blockNotifications chan *feed.Event,
|
||||
) {
|
||||
// If Bellatrix fork epoch is not set, we do not run this check.
|
||||
if params.BeaconConfig().BellatrixForkEpoch == math.MaxUint64 {
|
||||
return
|
||||
}
|
||||
i := new(big.Int)
|
||||
i.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
ttd := new(uint256.Int)
|
||||
ttd.SetFromBig(i)
|
||||
cfg := &pb.TransitionConfiguration{
|
||||
TerminalTotalDifficulty: ttd.Hex(),
|
||||
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
|
||||
TerminalBlockNumber: big.NewInt(0).Bytes(), // A value of 0 is recommended in the request.
|
||||
}
|
||||
err := s.ExchangeTransitionConfiguration(ctx, cfg)
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Is(err, ErrConfigMismatch):
|
||||
log.WithError(err).Fatal(configMismatchLog)
|
||||
case errors.Is(err, ErrMethodNotFound):
|
||||
log.WithError(err).Error(needsEnginePortLog)
|
||||
default:
|
||||
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
|
||||
}
|
||||
}
|
||||
|
||||
// We poll the execution client to see if the transition configuration has changed.
|
||||
// This serves as a heartbeat to ensure the execution client and Prysm are ready for the
|
||||
// Bellatrix hard-fork transition.
|
||||
ticker := time.NewTicker(checkTransitionPollingInterval)
|
||||
logTtdTicker := time.NewTicker(logTtdInterval)
|
||||
hasTtdReached := false
|
||||
defer ticker.Stop()
|
||||
defer logTtdTicker.Stop()
|
||||
sub := s.cfg.stateNotifier.StateFeed().Subscribe(blockNotifications)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-sub.Err():
|
||||
return
|
||||
case ev := <-blockNotifications:
|
||||
data, ok := ev.Data.(*statefeed.BlockProcessedData)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
isExecutionBlock, err := blocks.IsExecutionBlock(data.SignedBlock.Block().Body())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not check whether signed block is execution block")
|
||||
continue
|
||||
}
|
||||
if isExecutionBlock {
|
||||
log.Debug("PoS transition is complete, no longer checking for configuration changes")
|
||||
return
|
||||
}
|
||||
case tm := <-ticker.C:
|
||||
ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
|
||||
err = s.ExchangeTransitionConfiguration(ctx, cfg)
|
||||
s.handleExchangeConfigurationError(err)
|
||||
cancel()
|
||||
case <-logTtdTicker.C:
|
||||
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.chainStartData.GetGenesisTime()))
|
||||
if currentEpoch >= params.BeaconConfig().BellatrixForkEpoch && !hasTtdReached {
|
||||
hasTtdReached, err = s.logTtdStatus(ctx, ttd)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not log ttd status")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We check if there is a configuration mismatch error between the execution client
|
||||
// and the Prysm beacon node. If so, we need to log errors in the node as it cannot successfully
|
||||
// complete the merge transition for the Bellatrix hard fork.
|
||||
func (s *Service) handleExchangeConfigurationError(err error) {
|
||||
if err == nil {
|
||||
// If there is no error in checking the exchange configuration error, we clear
|
||||
// the run error of the service if we had previously set it to ErrConfigMismatch.
|
||||
if errors.Is(s.runError, ErrConfigMismatch) {
|
||||
s.runError = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
// If the error is a configuration mismatch, we set a runtime error in the service.
|
||||
if errors.Is(err, ErrConfigMismatch) {
|
||||
s.runError = err
|
||||
log.WithError(err).Error(configMismatchLog)
|
||||
return
|
||||
} else if errors.Is(err, ErrMethodNotFound) {
|
||||
log.WithError(err).Error(needsEnginePortLog)
|
||||
return
|
||||
}
|
||||
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
|
||||
}
|
||||
|
||||
// Logs the terminal total difficulty status.
|
||||
func (s *Service) logTtdStatus(ctx context.Context, ttd *uint256.Int) (bool, error) {
|
||||
latest, err := s.LatestExecutionBlock(ctx)
|
||||
switch {
|
||||
case errors.Is(err, hexutil.ErrEmptyString):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, err
|
||||
case latest == nil:
|
||||
return false, errors.New("latest block is nil")
|
||||
case latest.TotalDifficulty == "":
|
||||
return false, nil
|
||||
default:
|
||||
}
|
||||
latestTtd, err := hexutil.DecodeBig(latest.TotalDifficulty)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if latestTtd.Cmp(ttd.ToBig()) >= 0 {
|
||||
return true, nil
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"latestDifficulty": latestTtd.String(),
|
||||
"terminalDifficulty": ttd.ToBig().String(),
|
||||
"network": params.BeaconConfig().ConfigName,
|
||||
}).Info("Ready for The Merge")
|
||||
|
||||
totalTerminalDifficulty.Set(float64(latestTtd.Uint64()))
|
||||
return false, nil
|
||||
}
|
||||
@@ -1,260 +0,0 @@
|
||||
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func Test_checkTransitionConfiguration(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
t.Run("context canceled", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := &mocks.EngineClient{}
|
||||
m.Err = errors.New("something went wrong")
|
||||
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.cfg.stateNotifier = &mockChain.MockStateNotifier{}
|
||||
checkTransitionPollingInterval = time.Millisecond
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
go srv.checkTransitionConfiguration(ctx, make(chan *feed.Event, 1))
|
||||
<-time.After(100 * time.Millisecond)
|
||||
cancel()
|
||||
require.LogsContain(t, hook, "Could not check configuration values")
|
||||
})
|
||||
|
||||
t.Run("block containing execution payload exits routine", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := &mocks.EngineClient{}
|
||||
m.Err = errors.New("something went wrong")
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.cfg.stateNotifier = &mockChain.MockStateNotifier{}
|
||||
|
||||
checkTransitionPollingInterval = time.Millisecond
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
exit := make(chan bool)
|
||||
notification := make(chan *feed.Event)
|
||||
go func() {
|
||||
srv.checkTransitionConfiguration(ctx, notification)
|
||||
exit <- true
|
||||
}()
|
||||
payload := emptyPayload()
|
||||
payload.GasUsed = 21000
|
||||
wrappedBlock, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: payload,
|
||||
},
|
||||
}},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
notification <- &feed.Event{
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
SignedBlock: wrappedBlock,
|
||||
},
|
||||
Type: statefeed.BlockProcessed,
|
||||
}
|
||||
<-exit
|
||||
cancel()
|
||||
require.LogsContain(t, hook, "PoS transition is complete, no longer checking")
|
||||
})
|
||||
}
|
||||
|
||||
func TestService_handleExchangeConfigurationError(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
t.Run("clears existing service error", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
srv.runError = ErrConfigMismatch
|
||||
srv.handleExchangeConfigurationError(nil)
|
||||
require.Equal(t, true, srv.Status() == nil)
|
||||
})
|
||||
t.Run("does not clear existing service error if wrong kind", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
err := errors.New("something else went wrong")
|
||||
srv.runError = err
|
||||
srv.handleExchangeConfigurationError(nil)
|
||||
require.ErrorIs(t, err, srv.Status())
|
||||
})
|
||||
t.Run("sets service error on config mismatch", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
srv.handleExchangeConfigurationError(ErrConfigMismatch)
|
||||
require.Equal(t, ErrConfigMismatch, srv.Status())
|
||||
require.LogsContain(t, hook, configMismatchLog)
|
||||
})
|
||||
t.Run("does not set service error if unrelated problem", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
srv.handleExchangeConfigurationError(errors.New("foo"))
|
||||
require.Equal(t, true, srv.Status() == nil)
|
||||
require.LogsContain(t, hook, "Could not check configuration values")
|
||||
})
|
||||
}
|
||||
|
||||
func setupTransitionConfigTest(t testing.TB) *Service {
|
||||
fix := fixtures()
|
||||
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
// Change the terminal block hash.
|
||||
h := common.BytesToHash([]byte("foo"))
|
||||
resp.TerminalBlockHash = h[:]
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
return service
|
||||
}
|
||||
|
||||
func TestService_logTtdStatus(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := &pb.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.Hash{},
|
||||
UncleHash: common.Hash{},
|
||||
Coinbase: common.Address{},
|
||||
Root: common.Hash{},
|
||||
TxHash: common.Hash{},
|
||||
ReceiptHash: common.Hash{},
|
||||
Bloom: gethtypes.Bloom{},
|
||||
Difficulty: big.NewInt(1),
|
||||
Number: big.NewInt(2),
|
||||
GasLimit: 3,
|
||||
GasUsed: 4,
|
||||
Time: 5,
|
||||
Extra: nil,
|
||||
MixDigest: common.Hash{},
|
||||
Nonce: gethtypes.BlockNonce{},
|
||||
BaseFee: big.NewInt(6),
|
||||
},
|
||||
TotalDifficulty: "0x12345678",
|
||||
}
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
ttd := new(uint256.Int)
|
||||
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, reached)
|
||||
|
||||
reached, err = service.logTtdStatus(context.Background(), ttd.SetUint64(323423484))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := (*pb.ExecutionBlock)(nil) // Nil response when a client is not synced
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
ttd := new(uint256.Int)
|
||||
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
|
||||
require.ErrorContains(t, "missing required field 'parentHash' for Header", err)
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func emptyPayload() *pb.ExecutionPayload {
|
||||
return &pb.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
}
|
||||
}
|
||||
@@ -39,7 +39,6 @@ var (
|
||||
ForkchoiceUpdatedMethodV2,
|
||||
GetPayloadMethod,
|
||||
GetPayloadMethodV2,
|
||||
ExchangeTransitionConfigurationMethod,
|
||||
GetPayloadBodiesByHashV1,
|
||||
GetPayloadBodiesByRangeV1,
|
||||
}
|
||||
@@ -62,12 +61,10 @@ const (
|
||||
// GetPayloadMethodV2 v2 request string for JSON-RPC.
|
||||
GetPayloadMethodV2 = "engine_getPayloadV2"
|
||||
GetPayloadMethodV3 = "engine_getPayloadV3"
|
||||
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
|
||||
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
|
||||
// ExecutionBlockByHashMethod request string for JSON-RPC.
|
||||
ExecutionBlockByHashMethod = "eth_getBlockByHash"
|
||||
// ExecutionBlockByNumberMethod request string for JSON-RPC.
|
||||
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
|
||||
// BlockByHashMethod request string for JSON-RPC.
|
||||
BlockByHashMethod = "eth_getBlockByHash"
|
||||
// BlockByNumberMethod request string for JSON-RPC.
|
||||
BlockByNumberMethod = "eth_getBlockByNumber"
|
||||
// GetPayloadBodiesByHashV1 v1 request string for JSON-RPC.
|
||||
GetPayloadBodiesByHashV1 = "engine_getPayloadBodiesByHashV1"
|
||||
// GetPayloadBodiesByRangeV1 v1 request string for JSON-RPC.
|
||||
@@ -89,7 +86,7 @@ type ForkchoiceUpdatedResponse struct {
// ExecutionPayloadReconstructor defines a service that can reconstruct a full beacon
// block with an execution payload from a signed beacon block and a connection
// to an execution client's engine API.
type ExecutionPayloadReconstructor interface {
type PayloadReconstructor interface {
ReconstructFullBlock(
ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.SignedBeaconBlock, error)
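The renamed PayloadReconstructor is the interface callers use to turn a blinded block back into a full block through the engine API. A minimal, hedged example of consuming it, with the reconstructor and blinded block assumed to come from the caller and the usual context, execution, and interfaces imports:

// Sketch of a call site; r and blinded are supplied by the caller.
func unblindSketch(ctx context.Context, r execution.PayloadReconstructor, blinded interfaces.ReadOnlySignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
	// ReconstructFullBlock fetches the execution payload from the engine API
	// and splices it back into the blinded block.
	return r.ReconstructFullBlock(ctx, blinded)
}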
@@ -106,9 +103,6 @@ type EngineCaller interface {
|
||||
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
|
||||
) (*pb.PayloadIDBytes, []byte, error)
|
||||
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error)
|
||||
ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) error
|
||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
|
||||
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
|
||||
}
|
||||
@@ -299,51 +293,6 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
|
||||
return ed, nil, false, nil
|
||||
}
|
||||
|
||||
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
|
||||
func (s *Service) ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeTransitionConfiguration")
|
||||
defer span.End()
|
||||
|
||||
// We set terminal block number to 0 as the parameter is not set on the consensus layer.
|
||||
zeroBigNum := big.NewInt(0)
|
||||
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
|
||||
d := time.Now().Add(defaultEngineTimeout)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
result := &pb.TransitionConfiguration{}
|
||||
if err := s.rpcClient.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
|
||||
return handleRPCError(err)
|
||||
}
|
||||
|
||||
// We surface an error to the user if local configuration settings mismatch
|
||||
// according to the response from the execution node.
|
||||
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
|
||||
if !bytes.Equal(cfgTerminalHash, result.TerminalBlockHash) {
|
||||
return errors.Wrapf(
|
||||
ErrConfigMismatch,
|
||||
"got %#x from execution node, wanted %#x",
|
||||
result.TerminalBlockHash,
|
||||
cfgTerminalHash,
|
||||
)
|
||||
}
|
||||
ttdCfg := params.BeaconConfig().TerminalTotalDifficulty
|
||||
ttdResult, err := hexutil.DecodeBig(result.TerminalTotalDifficulty)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not decode received terminal total difficulty")
|
||||
}
|
||||
if ttdResult.String() != ttdCfg {
|
||||
return errors.Wrapf(
|
||||
ErrConfigMismatch,
|
||||
"got %s from execution node, wanted %s",
|
||||
ttdResult.String(),
|
||||
ttdCfg,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
|
||||
defer span.End()
|
||||
@@ -463,7 +412,7 @@ func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock,
|
||||
err := s.rpcClient.CallContext(
|
||||
ctx,
|
||||
result,
|
||||
ExecutionBlockByNumberMethod,
|
||||
BlockByNumberMethod,
|
||||
"latest",
|
||||
false, /* no full transaction objects */
|
||||
)
|
||||
@@ -476,7 +425,7 @@ func (s *Service) ExecutionBlockByHash(ctx context.Context, hash common.Hash, wi
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExecutionBlockByHash")
|
||||
defer span.End()
|
||||
result := &pb.ExecutionBlock{}
|
||||
err := s.rpcClient.CallContext(ctx, result, ExecutionBlockByHashMethod, hash, withTxs)
|
||||
err := s.rpcClient.CallContext(ctx, result, BlockByHashMethod, hash, withTxs)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
@@ -495,7 +444,7 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
|
||||
blk := &pb.ExecutionBlock{}
|
||||
newH := h
|
||||
elems = append(elems, gethRPC.BatchElem{
|
||||
Method: ExecutionBlockByHashMethod,
|
||||
Method: BlockByHashMethod,
|
||||
Args: []interface{}{newH, withTxs},
|
||||
Result: blk,
|
||||
Error: error(nil),
|
||||
@@ -517,7 +466,7 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
|
||||
// HeaderByHash returns the relevant header details for the provided block hash.
|
||||
func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.HeaderInfo, error) {
|
||||
var hdr *types.HeaderInfo
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, ExecutionBlockByHashMethod, hash, false /* no transactions */)
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, BlockByHashMethod, hash, false /* no transactions */)
|
||||
if err == nil && hdr == nil {
|
||||
err = ethereum.NotFound
|
||||
}
|
||||
@@ -527,7 +476,7 @@ func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.He
|
||||
// HeaderByNumber returns the relevant header details for the provided block number.
|
||||
func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.HeaderInfo, error) {
|
||||
var hdr *types.HeaderInfo
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, ExecutionBlockByNumberMethod, toBlockNumArg(number), false /* no transactions */)
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, BlockByNumberMethod, toBlockNumArg(number), false /* no transactions */)
|
||||
if err == nil && hdr == nil {
|
||||
err = ethereum.NotFound
|
||||
}
|
||||
@@ -622,9 +571,9 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
|
||||
if len(blindedBlocks) == 0 {
|
||||
return []interfaces.SignedBeaconBlock{}, nil
|
||||
}
|
||||
executionHashes := []common.Hash{}
|
||||
validExecPayloads := []int{}
|
||||
zeroExecPayloads := []int{}
|
||||
var executionHashes []common.Hash
|
||||
var validExecPayloads []int
|
||||
var zeroExecPayloads []int
|
||||
for i, b := range blindedBlocks {
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot reconstruct bellatrix block from nil data")
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/engine"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
@@ -72,47 +71,6 @@ func FuzzForkChoiceResponse(f *testing.F) {
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExchangeTransitionConfiguration(f *testing.F) {
|
||||
valHash := common.Hash([32]byte{0xFF, 0x01})
|
||||
ttd := hexutil.Big(*big.NewInt(math.MaxInt))
|
||||
seed := &engine.TransitionConfigurationV1{
|
||||
TerminalTotalDifficulty: &ttd,
|
||||
TerminalBlockHash: valHash,
|
||||
TerminalBlockNumber: hexutil.Uint64(math.MaxUint64),
|
||||
}
|
||||
|
||||
output, err := json.Marshal(seed)
|
||||
assert.NoError(f, err)
|
||||
f.Add(output)
|
||||
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
|
||||
gethResp := &engine.TransitionConfigurationV1{}
|
||||
prysmResp := &pb.TransitionConfiguration{}
|
||||
gethErr := json.Unmarshal(jsonBlob, gethResp)
|
||||
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
// Nothing to marshal if we have an error.
|
||||
if gethErr != nil {
|
||||
return
|
||||
}
|
||||
gethBlob, gethErr := json.Marshal(gethResp)
|
||||
prysmBlob, prysmErr := json.Marshal(prysmResp)
|
||||
if gethErr != nil {
|
||||
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
|
||||
}
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
if gethErr != nil {
|
||||
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
|
||||
}
|
||||
newGethResp := &engine.TransitionConfigurationV1{}
|
||||
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
newGethResp2 := &engine.TransitionConfigurationV1{}
|
||||
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExecutionPayload(f *testing.F) {
|
||||
logsBloom := [256]byte{'j', 'u', 'n', 'k'}
|
||||
execData := &engine.ExecutionPayloadEnvelope{
|
||||
|
||||
@@ -33,13 +33,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = ExecutionPayloadReconstructor(&Service{})
|
||||
_ = PayloadReconstructor(&Service{})
|
||||
_ = EngineCaller(&Service{})
|
||||
_ = ExecutionPayloadReconstructor(&Service{})
|
||||
_ = PayloadReconstructor(&Service{})
|
||||
_ = EngineCaller(&mocks.EngineClient{})
|
||||
)
|
||||
|
||||
@@ -135,20 +134,14 @@ func TestClient_IPC(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
|
||||
})
|
||||
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
|
||||
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
err := srv.ExchangeTransitionConfiguration(ctx, want)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
|
||||
t.Run(BlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
resp, err := srv.LatestExecutionBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
|
||||
t.Run(BlockByHashMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
@@ -644,7 +637,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
|
||||
t.Run(BlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -674,45 +667,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
|
||||
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
encodedReq, err := json.Marshal(want)
|
||||
require.NoError(t, err)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(encodedReq),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": want,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
client := &Service{}
|
||||
client.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
err = client.ExchangeTransitionConfiguration(ctx, want)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
|
||||
t.Run(BlockByHashMethod, func(t *testing.T) {
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -1190,78 +1145,6 @@ func Test_tDStringToUint256(t *testing.T) {
|
||||
require.ErrorContains(t, "hex number > 256 bits", err)
|
||||
}
|
||||
|
||||
func TestExchangeTransitionConfiguration(t *testing.T) {
|
||||
fix := fixtures()
|
||||
ctx := context.Background()
|
||||
t.Run("wrong terminal block hash", func(t *testing.T) {
|
||||
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
// Change the terminal block hash.
|
||||
h := common.BytesToHash([]byte("foo"))
|
||||
resp.TerminalBlockHash = h[:]
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
err = service.ExchangeTransitionConfiguration(ctx, request)
|
||||
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
|
||||
})
|
||||
t.Run("wrong terminal total difficulty", func(t *testing.T) {
|
||||
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
// Change the terminal block hash.
|
||||
resp.TerminalTotalDifficulty = "0x1"
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

service := &Service{}
service.rpcClient = rpcClient

err = service.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
})
}

type customError struct {
code int
timeout bool
@@ -1549,13 +1432,6 @@ func fixtures() map[string]interface{} {
},
PayloadId: &id,
}
b, _ := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
ttd, _ := uint256.FromBig(b)
transitionCfg := &pb.TransitionConfiguration{
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
TerminalTotalDifficulty: ttd.Hex(),
TerminalBlockNumber: big.NewInt(0).Bytes(),
}
validStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_VALID,
LatestValidHash: foo[:],
@@ -1598,7 +1474,6 @@ func fixtures() map[string]interface{} {
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
}
}

@@ -1905,17 +1780,6 @@ func (*testEngineService) GetPayloadV2(
return item
}

func (*testEngineService) ExchangeTransitionConfigurationV1(
_ context.Context, _ *pb.TransitionConfiguration,
) *pb.TransitionConfiguration {
fix := fixtures()
item, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
if !ok {
panic("not found")
}
return item
}

func (*testEngineService) ForkchoiceUpdatedV1(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) *ForkchoiceUpdatedResponse {

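The engine-client tests above all follow the same pattern: stand up an httptest server that answers with a canned JSON-RPC envelope, dial it with go-ethereum's rpc.DialHTTP, and hand the resulting client to the service under test. A condensed, hedged sketch of that round trip, written as a standalone program (it does not touch Prysm's unexported Service fields; the echoed request ID and the "engine_exampleMethod" name are illustrative only):

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Fake engine endpoint: decode the incoming JSON-RPC request and echo its
	// id back with a canned result so the client accepts the response.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		var req struct {
			ID     json.RawMessage `json:"id"`
			Method string          `json:"method"`
		}
		_ = json.NewDecoder(r.Body).Decode(&req)
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(map[string]interface{}{
			"jsonrpc": "2.0",
			"id":      req.ID,
			"result":  "0x1", // canned payload for the placeholder method below
		})
	}))
	defer srv.Close()

	rpcClient, err := rpc.DialHTTP(srv.URL)
	if err != nil {
		panic(err)
	}
	defer rpcClient.Close()

	var result string
	// "engine_exampleMethod" is a placeholder; the real tests exercise methods
	// such as engine_exchangeTransitionConfigurationV1 through the Service.
	if err := rpcClient.CallContext(context.Background(), &result, "engine_exampleMethod"); err != nil {
		panic(err)
	}
	fmt.Println(result) // prints 0x1
}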
@@ -32,7 +32,7 @@ import (
)

var (
depositEventSignature = hash.HashKeccak256([]byte("DepositEvent(bytes,bytes,bytes,bytes,bytes)"))
depositEventSignature = hash.Keccak256([]byte("DepositEvent(bytes,bytes,bytes,bytes,bytes)"))
)

const eth1DataSavingInterval = 1000

@@ -6,10 +6,6 @@ import (
)

var (
totalTerminalDifficulty = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_terminal_difficulty",
Help: "The total terminal difficulty of the execution chain before merge",
})
newPayloadLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "new_payload_v1_latency_milliseconds",

@@ -22,7 +22,6 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
@@ -208,13 +207,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
}
}

if err := s.ensureValidPowchainData(ctx); err != nil {
return nil, errors.Wrap(err, "unable to validate powchain data")
}

eth1Data, err := s.cfg.beaconDB.ExecutionChainData(ctx)
eth1Data, err := s.validPowchainData(ctx)
if err != nil {
return nil, errors.Wrap(err, "unable to retrieve eth1 data")
return nil, errors.Wrap(err, "unable to validate powchain data")
}
if err := s.initializeEth1Data(ctx, eth1Data); err != nil {
return nil, err
@@ -246,9 +241,6 @@ func (s *Service) Start() {
// Poll the execution client connection and fallback if errors occur.
s.pollConnectionStatus(s.ctx)

// Check transition configuration for the engine API client in the background.
go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))

go s.run(s.ctx.Done())
}

@@ -758,6 +750,10 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
}
}
} else {
if eth1DataInDB.Trie == nil && eth1DataInDB.DepositSnapshot != nil {
return errors.Errorf("trying to use old deposit trie after migration to the new trie. "+
"Run with the --%s flag to resume normal operations.", features.EnableEIP4881.Name)
}
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
}
if err != nil {
@@ -822,23 +818,22 @@ func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {

// Validates the current powchain data is saved and makes sure that any
// embedded genesis state is correctly accounted for.
func (s *Service) ensureValidPowchainData(ctx context.Context) error {
func (s *Service) validPowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error) {
genState, err := s.cfg.beaconDB.GenesisState(ctx)
if err != nil {
return err
}
// Exit early if no genesis state is saved.
if genState == nil || genState.IsNil() {
return nil
return nil, err
}
eth1Data, err := s.cfg.beaconDB.ExecutionChainData(ctx)
if err != nil {
return errors.Wrap(err, "unable to retrieve eth1 data")
return nil, errors.Wrap(err, "unable to retrieve eth1 data")
}
if genState == nil || genState.IsNil() {
return eth1Data, nil
}
if eth1Data == nil || !eth1Data.ChainstartData.Chainstarted || !validateDepositContainers(eth1Data.DepositContainers) {
pbState, err := native.ProtobufBeaconStatePhase0(s.preGenesisState.ToProtoUnsafe())
if err != nil {
return err
return nil, err
}
s.chainStartData = &ethpb.ChainStartData{
Chainstarted: true,
@@ -856,22 +851,24 @@ func (s *Service) ensureValidPowchainData(ctx context.Context) error {
if features.Get().EnableEIP4881 {
trie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
if !ok {
return errors.New("deposit trie was not EIP4881 DepositTree")
return nil, errors.New("deposit trie was not EIP4881 DepositTree")
}
eth1Data.DepositSnapshot, err = trie.ToProto()
if err != nil {
return err
return nil, err
}
} else {
trie, ok := s.depositTrie.(*trie.SparseMerkleTrie)
if !ok {
return errors.New("deposit trie was not SparseMerkleTrie")
return nil, errors.New("deposit trie was not SparseMerkleTrie")
}
eth1Data.Trie = trie.ToProto()
}
return s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data)
if err := s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data); err != nil {
return nil, err
}
}
return nil
return eth1Data, nil
}

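Taken together, these hunks fold the old ensureValidPowchainData check and the separate beaconDB.ExecutionChainData lookup into a single validPowchainData call that validates the persisted powchain data and returns it. Shown as a fragment rather than a standalone program (all names come from the hunks above), the new call-site shape in NewService is:

	// validPowchainData replaces the old two-step of ensureValidPowchainData
	// followed by beaconDB.ExecutionChainData.
	eth1Data, err := s.validPowchainData(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "unable to validate powchain data")
	}
	if err := s.initializeEth1Data(ctx, eth1Data); err != nil {
		return nil, err
	}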
func dedupEndpoints(endpoints []string) []string {

@@ -571,7 +571,8 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
assert.NoError(t, genState.SetSlot(1000))

require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState))
require.NoError(t, s1.ensureValidPowchainData(context.Background()))
_, err = s1.validPowchainData(context.Background())
require.NoError(t, err)

eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
assert.NoError(t, err)
@@ -601,7 +602,8 @@ func TestService_InitializeCorrectly(t *testing.T) {
assert.NoError(t, genState.SetSlot(1000))

require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState))
require.NoError(t, s1.ensureValidPowchainData(context.Background()))
_, err = s1.validPowchainData(context.Background())
require.NoError(t, err)

eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
assert.NoError(t, err)
@@ -636,7 +638,8 @@ func TestService_EnsureValidPowchainData(t *testing.T) {
DepositContainers: []*ethpb.DepositContainer{{Index: 1}},
})
require.NoError(t, err)
require.NoError(t, s1.ensureValidPowchainData(context.Background()))
_, err = s1.validPowchainData(context.Background())
require.NoError(t, err)

eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
assert.NoError(t, err)

@@ -83,11 +83,6 @@ func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slo
return p, nil, e.BuilderOverride, e.ErrGetPayload
}

// ExchangeTransitionConfiguration --
func (e *EngineClient) ExchangeTransitionConfiguration(_ context.Context, _ *pb.TransitionConfiguration) error {
return e.Err
}

// LatestExecutionBlock --
func (e *EngineClient) LatestExecutionBlock(_ context.Context) (*pb.ExecutionBlock, error) {
return e.ExecutionBlock, e.ErrLatestExecBlock

@@ -17,8 +17,8 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/eth/v1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

@@ -31,9 +31,9 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
@@ -69,11 +69,11 @@ go_test(
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

@@ -12,9 +12,9 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -554,24 +554,24 @@ func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
}

// ForkChoiceDump returns a full dump of forkchoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, error) {
jc := &v1.Checkpoint{
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
jc := &ethpb.Checkpoint{
Epoch: f.store.justifiedCheckpoint.Epoch,
Root: f.store.justifiedCheckpoint.Root[:],
}
ujc := &v1.Checkpoint{
ujc := &ethpb.Checkpoint{
Epoch: f.store.unrealizedJustifiedCheckpoint.Epoch,
Root: f.store.unrealizedJustifiedCheckpoint.Root[:],
}
fc := &v1.Checkpoint{
fc := &ethpb.Checkpoint{
Epoch: f.store.finalizedCheckpoint.Epoch,
Root: f.store.finalizedCheckpoint.Root[:],
}
ufc := &v1.Checkpoint{
ufc := &ethpb.Checkpoint{
Epoch: f.store.unrealizedFinalizedCheckpoint.Epoch,
Root: f.store.unrealizedFinalizedCheckpoint.Root[:],
}
nodes := make([]*v1.ForkChoiceNode, 0, f.NodeCount())
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
var err error
if f.store.treeRootNode != nil {
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
@@ -583,7 +583,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, er
if f.store.headNode != nil {
headRoot = f.store.headNode.root
}
resp := &v1.ForkChoiceDump{
resp := &forkchoice2.Dump{
JustifiedCheckpoint: jc,
UnrealizedJustifiedCheckpoint: ujc,
FinalizedCheckpoint: fc,

@@ -6,8 +6,8 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)

@@ -151,7 +151,7 @@ func (n *Node) arrivedAfterOrphanCheck(genesisTime uint64) (bool, error) {
}

// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]*v1.ForkChoiceNode, error) {
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -159,7 +159,7 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
if n.parent != nil {
parentRoot = n.parent.root
}
thisNode := &v1.ForkChoiceNode{
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
@@ -174,9 +174,9 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
Timestamp: n.timestamp,
}
if n.optimistic {
thisNode.Validity = v1.ForkChoiceNodeValidity_OPTIMISTIC
thisNode.Validity = forkchoice2.Optimistic
} else {
thisNode.Validity = v1.ForkChoiceNodeValidity_VALID
thisNode.Validity = forkchoice2.Valid
}

nodes = append(nodes, thisNode)

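These hunks migrate the forkchoice dump API from the protobuf eth/v1 types to the native consensus-types/forkchoice types. A hedged sketch of constructing the new types directly, using only the fields and constants visible in the diff above (other fields of Node and Dump are omitted, and the package layout is assumed to match this revision of the tree):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

func main() {
	var root [32]byte

	// Node now lives in consensus-types/forkchoice instead of proto/eth/v1,
	// and validity is a package constant rather than a protobuf enum value.
	node := &forkchoice.Node{
		Slot:       primitives.Slot(12),
		BlockRoot:  root[:],
		ParentRoot: root[:],
		Validity:   forkchoice.Optimistic,
	}

	// Dump replaces v1.ForkChoiceDump; its checkpoints are v1alpha1 protobufs.
	dump := &forkchoice.Dump{
		JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 3, Root: root[:]},
		FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 2, Root: root[:]},
	}

	fmt.Println(node.Slot, dump.JustifiedCheckpoint.Epoch)
}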
@@ -5,8 +5,8 @@ import (
"testing"

"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
@@ -237,7 +237,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, err)
require.Equal(t, false, opt)

respNodes := make([]*v1.ForkChoiceNode, 0)
respNodes := make([]*forkchoice.Node, 0)
respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
require.NoError(t, err)
require.Equal(t, len(respNodes), f.NodeCount())

@@ -6,8 +6,8 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)

// BalancesByRooter is a handler to obtain the effective balances of the state
@@ -62,7 +62,7 @@ type Getter interface {
NodeCount() int
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
ForkChoiceDump(context.Context) (*v1.ForkChoiceDump, error)
ForkChoiceDump(context.Context) (*forkchoice2.Dump, error)
Weight(root [32]byte) (uint64, error)
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)

@@ -52,14 +52,9 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
}
if flags.EnableHTTPEthAPI(httpModules) {
ethRegistrations := []gateway.PbHandlerRegistration{
ethpbservice.RegisterBeaconNodeHandler,
ethpbservice.RegisterBeaconChainHandler,
ethpbservice.RegisterBeaconValidatorHandler,
ethpbservice.RegisterEventsHandler,
}
if enableDebugRPCEndpoints {
ethRegistrations = append(ethRegistrations, ethpbservice.RegisterBeaconDebugHandler)
}
ethMux := gwruntime.NewServeMux(
gwruntime.WithMarshalerOption(gwruntime.MIMEWildcard, &gwruntime.HTTPBodyMarshaler{
Marshaler: &gwruntime.JSONPb{

@@ -9,39 +9,12 @@ import (
)

func TestDefaultConfig(t *testing.T) {
t.Run("Without debug endpoints", func(t *testing.T) {
cfg := DefaultConfig(false, "eth,prysm")
assert.NotNil(t, cfg.EthPbMux.Mux)
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
assert.Equal(t, "/eth/v1alpha2/", cfg.V1AlphaPbMux.Patterns[1])
assert.Equal(t, 4, len(cfg.V1AlphaPbMux.Registrations))
})

t.Run("With debug endpoints", func(t *testing.T) {
cfg := DefaultConfig(true, "eth,prysm")
assert.NotNil(t, cfg.EthPbMux.Mux)
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
assert.Equal(t, 5, len(cfg.EthPbMux.Registrations))
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
assert.Equal(t, "/eth/v1alpha2/", cfg.V1AlphaPbMux.Patterns[1])
assert.Equal(t, 5, len(cfg.V1AlphaPbMux.Registrations))
})
t.Run("Without Prysm API", func(t *testing.T) {
cfg := DefaultConfig(true, "eth")
assert.NotNil(t, cfg.EthPbMux.Mux)
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
assert.Equal(t, 5, len(cfg.EthPbMux.Registrations))
assert.Equal(t, 2, len(cfg.EthPbMux.Registrations))
assert.Equal(t, (*gateway.PbMux)(nil), cfg.V1AlphaPbMux)
})
t.Run("Without Eth API", func(t *testing.T) {

@@ -8,7 +8,6 @@ go_library(
"node.go",
"options.go",
"prometheus.go",
"router.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/node",
visibility = [
@@ -17,6 +16,7 @@ go_library(
],
deps = [
"//api/gateway:go_default_library",
"//api/server:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",
@@ -24,6 +24,7 @@ go_library(
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/db/slasherkv:go_default_library",
"//beacon-chain/deterministic-genesis:go_default_library",
@@ -41,7 +42,6 @@ go_library(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",

@@ -18,6 +18,7 @@ import (
"github.com/gorilla/mux"
"github.com/pkg/errors"
apigateway "github.com/prysmaticlabs/prysm/v4/api/gateway"
"github.com/prysmaticlabs/prysm/v4/api/server"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
@@ -25,6 +26,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/slasherkv"
interopcoldstart "github.com/prysmaticlabs/prysm/v4/beacon-chain/deterministic-genesis"
@@ -111,6 +113,8 @@ type BeaconNode struct {
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
BlobStoragePath string
BlobStorage *filesystem.BlobStorage
}

// New creates a new node instance, sets up configuration options, and registers
@@ -199,6 +203,14 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
if err != nil {
return nil, err
}

log.Debugln("Starting Blob Storage")
blobStorage, err := filesystem.NewBlobStorage(beacon.BlobStoragePath)
if err != nil {
return nil, err
}
beacon.BlobStorage = blobStorage

log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
@@ -271,7 +283,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {

log.Debugln("Registering RPC Service")
router := mux.NewRouter()
router.Use(middleware)
router.Use(server.NormalizeQueryValuesHandler)
if err := beacon.registerRPCService(router); err != nil {
return nil, err
}
@@ -294,9 +306,9 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
}

// db.DatabasePath is the path to the containing directory
// db.NewDBFilename expands that to the canonical full path using
// db.NewFileName expands that to the canonical full path using
// the same construction as NewDB()
c, err := newBeaconNodePromCollector(db.NewDBFilename(beacon.db.DatabasePath()))
c, err := newBeaconNodePromCollector(db.NewFileName(beacon.db.DatabasePath()))
if err != nil {
return nil, err
}
@@ -513,7 +525,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
}

func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
opts := []stategen.StateGenOption{stategen.WithBackfillStatus(bfs)}
opts := []stategen.Option{stategen.WithBackfillStatus(bfs)}
sg := stategen.New(b.db, fc, opts...)

cp, err := b.db.FinalizedCheckpoint(ctx)
@@ -638,6 +650,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithProposerIdsCache(b.proposerIdsCache),
blockchain.WithClockSynchronizer(gs),
blockchain.WithSyncComplete(syncComplete),
blockchain.WithBlobStorage(b.BlobStorage),
)

blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -712,9 +725,11 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) erro
regularsync.WithStateGen(b.stateGen),
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
regularsync.WithExecutionPayloadReconstructor(web3Service),
regularsync.WithPayloadReconstructor(web3Service),
regularsync.WithClockWaiter(b.clockWaiter),
regularsync.WithInitialSyncComplete(initialSyncComplete),
regularsync.WithStateNotifier(b),
regularsync.WithBlobStorage(b.BlobStorage),
)
return b.services.RegisterService(rs)
}

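The node wiring above introduces blob storage as a first-class dependency: New constructs a *filesystem.BlobStorage from BlobStoragePath and threads it into both the blockchain and regular-sync services through their WithBlobStorage options. A hedged, standalone sketch of just the construction step (the import path and one-argument constructor are taken from this revision of the tree and may differ in later versions):

package main

import (
	"fmt"
	"os"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
)

func main() {
	dir, err := os.MkdirTemp("", "blobs")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// NewBlobStorage is the constructor used in node.New above; it takes the
	// base path under which blob sidecars will be persisted.
	bs, err := filesystem.NewBlobStorage(dir)
	if err != nil {
		panic(err)
	}
	fmt.Printf("blob storage ready: %T\n", bs)

	// The resulting *filesystem.BlobStorage is then threaded into the services:
	//   blockchain.WithBlobStorage(bs)
	//   regularsync.WithBlobStorage(bs)
}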
@@ -32,7 +32,6 @@ go_test(
name = "go_default_test",
srcs = [
"aggregated_test.go",
"benchmark_test.go",
"block_test.go",
"forkchoice_test.go",
"seen_bits_test.go",

@@ -1,20 +0,0 @@
package kv_test

import (
"testing"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations/kv"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
)

func BenchmarkAttCaches(b *testing.B) {
ac := kv.NewAttCaches()

att := &ethpb.Attestation{}

for i := 0; i < b.N; i++ {
assert.NoError(b, ac.SaveUnaggregatedAttestation(att))
assert.NoError(b, ac.DeleteAggregatedAttestation(att))
}
}
@@ -13,7 +13,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

var hashFn = hash.HashProto
var hashFn = hash.Proto

// AttCaches defines the caches used to satisfy attestation pool interface.
// These caches are KV store for various attestations

@@ -119,7 +119,7 @@ func (s *Service) aggregateAndSaveForkChoiceAtts(atts []*ethpb.Attestation) erro
// This checks if the attestation has previously been aggregated for fork choice
// return true if yes, false if no.
func (s *Service) seen(att *ethpb.Attestation) (bool, error) {
attRoot, err := hash.HashProto(att.Data)
attRoot, err := hash.Proto(att.Data)
if err != nil {
return false, err
}

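The last hunks, like the deposit-log change earlier, are part of a broader rename in the crypto/hash package: HashKeccak256 becomes Keccak256 and HashProto becomes Proto, with unchanged behavior. A small hedged sketch of the renamed helpers (the import path is inferred from the //crypto/hash Bazel target above, and the return types are assumed from how the call sites use them):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/crypto/hash"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

func main() {
	// hash.Keccak256 replaces hash.HashKeccak256; used above to derive the
	// DepositEvent log signature.
	sig := hash.Keccak256([]byte("DepositEvent(bytes,bytes,bytes,bytes,bytes)"))
	fmt.Printf("deposit event signature: %x\n", sig)

	// hash.Proto replaces hash.HashProto for hashing protobuf messages, as in
	// the attestation pool's seen() check.
	root, err := hash.Proto(&ethpb.AttestationData{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("attestation data root: %x\n", root)
}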