Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: update-spe ... blob-verif (55 commits)
Commits (SHA1):

- a01a824b57
- f1a1bffea6
- 71c04ef6a2
- 2d5e214086
- 41696e0e8e
- 98e5278f32
- 977e5bafc7
- d25b7d27ed
- e3300d1c59
- b0aa26b5f8
- 29f00e61d2
- 3b33a95c96
- 632010665f
- c324242121
- d8d34fc4ff
- 4c381938e1
- d4726f2866
- 4e3419e870
- ac06362baf
- 28aa11c976
- 798d5ec585
- 9b97f3fd92
- 0946b5853f
- 1530d17977
- e46f9c5631
- 3097601530
- 4a515c36e6
- afaeff9d4c
- f663f605d2
- 12f7143c4f
- 1f250f7e89
- 0f65e51d1e
- d1dd8471a3
- 7a6487b746
- daa6d2e741
- 8a743a6430
- c0fb16a96f
- 57eda1de63
- a54e61ecb0
- 27b4e32e1c
- b56bf00682
- b24b60dbd8
- dc9d34b41b
- 2ef0b3526d
- 047613069e
- 159a5dd69d
- 470ea6d717
- b441f20e6a
- 2ea5bff9c0
- c2433ff854
- 82640b3d88
- f925aded66
- 10a89fef13
- 56c65b8527
- 022ee17af9
.bazelrc (1 change)
@@ -27,6 +27,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
# Release flags
build:release --compilation_mode=opt
build:release --stamp
build:release --define pgo_enabled=1

# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --copt=-g
@@ -144,6 +144,11 @@ config_setting(
    values = {"define": "coverage_enabled=1"},
)

config_setting(
    name = "pgo_enabled",
    values = {"define": "pgo_enabled=1"},
)

common_files = {
    "//:LICENSE.md": "LICENSE.md",
    "//:README.md": "README.md",
@@ -1,45 +1,53 @@
## Terms of Use

# Terms of Use

Effective as of November 2, 2023

Effective as of Oct 14, 2020

By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Offchain Labs, Inc. (as successor in interest to Prysmatic Labs LLC) (referenced herein as “Offchain Labs”, “we” or “us”). If you do not agree to the Terms, do not download or use Prysm. Additionally, the Terms of Use available at https://arbitrum.io/tos (or any successor site, the “OCL Terms of Use”) are hereby incorporated by reference into these Terms. In the event of any conflict between provisions set forth herein and those set forth in the OCL Terms of Use, the provisions set forth herein shall control.
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.

## About Prysm

### About Prysm

Prysm is a client implementation for Ethereum consensus protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
Prysm is a client implementation for the Ethereum blockchain’s consensus protocol. To participate in the network, a user must send ETH from the Ethereum mainnet blockchain to a validator deposit smart contract on Ethereum mainnet. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.

### Licensing Terms

Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.

## Licensing Terms

Prysm is an open-source software program licensed pursuant to the GNU General Public License v3.0.

The Offchain Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Offchain Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.

PLEASE READ THESE TERMS CAREFULLY, AS THE OCL TERMS OF USE INCORPORATED BY REFERENCE HEREIN CONTAIN AN AGREEMENT TO ARBITRATE AND OTHER IMPORTANT INFORMATION REGARDING YOUR LEGAL RIGHTS, REMEDIES, AND OBLIGATIONS. THE AGREEMENT TO ARBITRATE REQUIRES (WITH LIMITED EXCEPTION) THAT YOU SUBMIT CLAIMS YOU HAVE AGAINST US TO BINDING AND FINAL ARBITRATION, AND FURTHER (1) YOU WILL ONLY BE PERMITTED TO PURSUE CLAIMS AGAINST OFFCHAIN LABS ON AN INDIVIDUAL BASIS, NOT AS A PLAINTIFF OR CLASS MEMBER IN ANY CLASS OR REPRESENTATIVE ACTION OR PROCEEDING, (2) YOU WILL ONLY BE PERMITTED TO SEEK RELIEF (INCLUDING MONETARY, INJUNCTIVE, AND DECLARATORY RELIEF) ON AN INDIVIDUAL BASIS, AND (3) YOU MAY NOT BE ABLE TO HAVE ANY CLAIMS YOU HAVE AGAINST US RESOLVED BY A JURY OR IN A COURT OF LAW.
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.

## Risks of Operating Prysm

### Risks of Operating Prysm

The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money, tokens and value. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm, including the security of your ETH and meeting any applicable minimum system requirements.
Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with local laws.
YOU ACKNOWLEDGE THAT WE ARE NOT RESPONSIBLE FOR ANY RISKS ASSOCIATED WITH YOUR USE OF PRYSM, AND CANNOT BE HELD LIABLE FOR ANY RESULTING LOSSES THAT YOU EXPERIENCE WHILE ACCESSING OR USING PRYSM.
Some Internet plans will charge an additional amount for any excess upload bandwidth used that isn’t included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to such limitations and monitor your bandwidth use so that you can stop Prysm before you reach your upload limit.
BY ACCESSING AND USING PRYSM, YOU REPRESENT AND WARRANT THAT YOU UNDERSTAND THE INHERENT RISKS ASSOCIATED WITH USING CRYPTOGRAPHIC AND BLOCKCHAIN-BASED SYSTEMS, AND THAT YOU HAVE A WORKING KNOWLEDGE OF THE USAGE AND INTRICACIES OF DIGITAL ASSETS, SUCH AS THOSE FOLLOWING THE ETHEREUM TOKEN STANDARD (ERC-20). YOU FURTHER UNDERSTAND THAT THE MARKETS FOR DIGITAL ASSETS ARE HIGHLY VOLATILE DUE TO VARIOUS FACTORS, INCLUDING ADOPTION, SPECULATION, TECHNOLOGY, SECURITY, AND REGULATION. YOU ACKNOWLEDGE AND ACCEPT THAT THE COST AND SPEED OF TRANSACTING WITH CRYPTOGRAPHIC AND BLOCKCHAIN-BASED SYSTEMS SUCH AS ETHEREUM ARE VARIABLE AND MAY INCREASE DRAMATICALLY AT ANY TIME. YOU UNDERSTAND THAT ANYONE CAN CREATE A TOKEN, INCLUDING FAKE VERSIONS OF EXISTING TOKENS AND TOKENS THAT FALSELY CLAIM TO REPRESENT PROJECTS, AND ACKNOWLEDGE AND ACCEPT THE RISK THAT YOU MAY MISTAKENLY INTERACT WITH THOSE OR OTHER TOKENS. YOU FURTHER ACKNOWLEDGE THAT WE ARE NOT RESPONSIBLE FOR ANY OF THE VARIABLES OR RISKS DESCRIBED IN THESE TERMS. YOU UNDERSTAND AND AGREE TO ASSUME FULL RESPONSIBILITY FOR ALL OF THE RISKS OF ACCESSING AND USING PRYSM. YOU ARE SOLELY RESPONSIBLE FOR YOUR WALLETS, FOR SAFEGUARDING THE ASSOCIATED PRIVATE KEY AND FOR ANY ACTIVITY THAT OCCURS USING YOUR WALLET. WITHOUT LIMITING THE FOREGOING, YOU ALSO UNDERSTAND THAT THERE MAY BE TAX AND REGULATORY RISKS RELATED TO USING PRYSM. IT IS YOUR SOLE RESPONSIBILITY TO DETERMINE WHETHER, AND TO WHAT EXTENT, ANY TAXES APPLY TO ANY TRANSACTIONS YOU CONDUCT IN CONNECTION WITH YOUR USE OF PRYSM, AND TO WITHHOLD, COLLECT, REPORT AND REMIT THE CORRECT AMOUNTS OF TAXES TO THE APPROPRIATE TAX AUTHORITIES. DIGITAL ASSETS, BLOCKCHAIN TECHNOLOGY, AND ANY RELATED SOFTWARE AND SERVICES ARE ALSO SUBJECT TO LEGAL AND REGULATORY UNCERTAINTY IN THE UNITED STATES AND OTHER JURISDICTIONS. YOU UNDERSTAND THAT LEGISLATIVE AND REGULATORY CHANGES OR ACTIONS MAY ADVERSELY AFFECT THE USAGE, TRANSFERABILITY, TRANSACTABILITY AND ACCESSIBILITY RELATED TO PRYSM.

### Warranty Disclaimer

PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. PRYSMATIC LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with all Applicable Law (as defined below), including, without limitation, for the avoidance of doubt, local laws.

### Limitation of Liability

In no event will Prysmatic Labs or any of its contributors be liable, whether in contract, warranty, tort (including negligence, whether active, passive or imputed), product liability, strict liability or other theory, breach of statutory duty or otherwise arising out of, or in connection with, your use of Prysm, for any direct, indirect, incidental, special or consequential damages (including any loss of profits or data, business interruption or other pecuniary loss, or damage, loss or other compromise of data, in each case whether direct, indirect, incidental, special or consequential) arising out of use Prysm, even if we or other users have been advised of the possibility of such damages. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by applicable law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
Some Internet plans will charge additional amounts for bandwidth or any excess upload bandwidth used that isn’t included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to any such limitations and monitor your bandwidth use and upload volumes.

### Indemnification

To the maximum extent permitted by law, you will defend, indemnify and hold Prysmatic Labs and its contributors harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction on Prysm), as well as any and all losses, liabilities,
damages, costs, and expenses (including reasonable attorneys’ fees) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.

## Warranty Disclaimer

### Compliance with Laws and Tax Obligations

Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal,
regional, state, county, municipal or other governmental authority and you agree to comply with all such laws in your use of Prysm. The users of Prysm are solely responsible to determinate what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.
PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. WITHOUT LIMITING ANYTHING SET FORTH ELSEWHERE IN THESE TERMS, OFFCHAIN LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE OR OTHER GOVERNMENTAL AUTHORITY. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.

### Miscellaneous

These Terms will be construed and enforced in accordance with the laws of the state of Illinois as applied to agreements entered into and completely performed in Illinois. You agree to the personal jurisdiction by and venue in Illinois and waive any objection to such jurisdiction or venue.

## Limitation of Liability

We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your continued use of Prysm constitutes acceptance of such revised Terms.
IN NO EVENT WILL OFFCHAIN LABS OR ANY OF ITS AFFILIATES OR ITS OR ANY SUCH AFFILIATE’S DIRECTORS, OFFICERS, EMPLOYEES, AGENTS, OR REPRESENTATIVES OR ANY CONTRIBUTORS (COLLECTIVELY, THE “OCL PARTIES”) BE LIABLE, WHETHER IN CONTRACT, WARRANTY, TORT (INCLUDING NEGLIGENCE, WHETHER ACTIVE, PASSIVE OR IMPUTED), PRODUCT LIABILITY, STRICT LIABILITY OR OTHER THEORY, BREACH OF STATUTORY DUTY OR OTHERWISE ARISING OUT OF, OR IN CONNECTION WITH, YOUR USE OF PRYSM, FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES (INCLUDING ANY LOSS OF PROFITS OR DATA, BUSINESS INTERRUPTION OR OTHER PECUNIARY LOSS, OR DAMAGE, LOSS OR OTHER COMPROMISE OF DATA, IN EACH CASE WHETHER DIRECT, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL) ARISING OUT OF USE PRYSM, EVEN IF WE OR OTHER USERS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by Applicable Law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
These Terms constitute the entire agreement between you and Prysmatic Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.

## Indemnification

To the maximum extent permitted by Applicable Law, you will defend, indemnify and hold each OCL Party harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction or other activity on Prysm), as well as any and all losses, liabilities, damages, costs, and expenses (including reasonable attorneys’ fees and costs) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.

## Compliance with Laws

Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal, regional, state, county, municipal or other governmental authority (collectively, “Applicable Law”) and you agree to comply with all such Applicable Law in your use of Prysm. The users of Prysm are solely responsible to determinate what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.

## Miscellaneous

These Terms will be governed by the laws of the State of Delaware without regard to its conflict of law provisions. With respect to any disputes or claims not subject to arbitration, as set forth in the OCL Terms of Use, you and Offchain Labs submit to the personal and exclusive jurisdiction of the state and federal courts located within New York, New York and waive any objection to such jurisdiction and venue. The failure of Offchain Labs to exercise or enforce any right or provision of these Terms will not constitute a waiver of such right or provision.
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your use of Prysm following any such revision to these Terms constitutes acceptance of such revised Terms.
These Terms constitute the entire agreement between you and Offchain Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.
If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party’s right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.
WORKSPACE (36 changes)
@@ -27,7 +27,23 @@ http_archive(
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")

zig_toolchains()
# Temporarily use a nightly build until 0.12.0 is released.
# See: https://github.com/prysmaticlabs/prysm/issues/13130
zig_toolchains(
    host_platform_sha256 = {
        "linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
        "linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
        "macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
        "macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
        "windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
    },
    url_formats = [
        "https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
        "https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
        "https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
    ],
    version = "0.12.0-dev.1349+fa022d1ec",
)

# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
@@ -247,9 +263,7 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_test_version = "v1.4.0-beta.2-hotfix"

consensus_spec_version = "v1.4.0-beta.2"
consensus_spec_version = "v1.4.0-beta.3"

bls_test_version = "v0.1.1"
@@ -265,8 +279,8 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "99770a001189f66204a4ef79161c8002bcbbcbd8236f1c6479bd5b83a3c68d42",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
    sha256 = "67ae5b8fc368853da23d4297e480a4b7f4722fb970d1c7e2b6a5b7faef9cb907",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

http_archive(
@@ -281,8 +295,8 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "56763f6492ee137108271007d62feef60d8e3f1698e53dee4bc4b07e55f7326b",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
    sha256 = "82474f29fff4abd09fb1e71bafa98827e2573cf0ad02cf119610961831dc3bb5",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

http_archive(
@@ -297,8 +311,8 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "bc1cac1a991cdc7426efea14385dcf215df85ed3f0572b824ad6a1d7ca0c89ad",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
    sha256 = "60e4b6eb6c341daab7ee5614a8e3f28567247c504c593b951bfe919622c8ef8f",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

http_archive(
@@ -312,7 +326,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "c5898001aaab2a5bb38a39ff9d17a52f1f9befcc26e63752cbf556040f0c884e",
    sha256 = "fdab9756c93a250219ff6a10d5a9faee1e2e6878a14508410409e307362c6991",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -14,6 +14,7 @@ go_library(
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/rpc/apimiddleware:go_default_library",
        "//beacon-chain/rpc/eth/beacon:go_default_library",
        "//beacon-chain/rpc/eth/config:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//consensus-types/interfaces:go_default_library",
@@ -22,7 +23,6 @@ go_library(
        "//encoding/ssz/detect:go_default_library",
        "//io/file:go_default_library",
        "//network/forks:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
@@ -13,17 +13,16 @@ import (
    "strconv"
    "text/template"

    "github.com/prysmaticlabs/prysm/v4/api/client"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
    "github.com/prysmaticlabs/prysm/v4/network/forks"
    v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/api/client"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v4/network/forks"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    log "github.com/sirupsen/logrus"
)
@@ -179,12 +178,12 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
}

// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
func (c *Client) GetConfigSpec(ctx context.Context) (*config.GetSpecResponse, error) {
    body, err := c.Get(ctx, getConfigSpecPath)
    if err != nil {
        return nil, errors.Wrap(err, "error requesting configSpecPath")
    }
    fsr := &v1.SpecResponse{}
    fsr := &config.GetSpecResponse{}
    err = json.Unmarshal(body, fsr)
    if err != nil {
        return nil, err
@@ -8,7 +8,6 @@ go_library(
    deps = [
        "//api/client:go_default_library",
        "//validator/rpc:go_default_library",
        "//validator/rpc/apimiddleware:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
@@ -9,7 +9,6 @@ import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/api/client"
    "github.com/prysmaticlabs/prysm/v4/validator/rpc"
    "github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
)

const (
@@ -42,14 +41,14 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
    if err != nil {
        return nil, err
    }
    if len(jsonlocal.Keystores) == 0 && len(jsonremote.Data) == 0 {
    if len(jsonlocal.Data) == 0 && len(jsonremote.Data) == 0 {
        return nil, errors.New("there are no local keys or remote keys on the validator")
    }

    hexKeys := make(map[string]bool)

    for index := range jsonlocal.Keystores {
        hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
    for index := range jsonlocal.Data {
        hexKeys[jsonlocal.Data[index].ValidatingPubkey] = true
    }
    for index := range jsonremote.Data {
        hexKeys[jsonremote.Data[index].Pubkey] = true
@@ -62,12 +61,12 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
}

// GetLocalValidatorKeys calls the keymanager APIs for local validator keys
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.ListKeystoresResponseJson, error) {
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*rpc.ListKeystoresResponse, error) {
    localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
    if err != nil {
        return nil, err
    }
    jsonlocal := &apimiddleware.ListKeystoresResponseJson{}
    jsonlocal := &rpc.ListKeystoresResponse{}
    if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
        return nil, errors.Wrap(err, "failed to parse local keystore list")
    }
@@ -24,6 +24,7 @@ go_library(
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//connectivity:go_default_library",
        "@org_golang_google_grpc//credentials:go_default_library",
        "@org_golang_google_grpc//credentials/insecure:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
@@ -19,6 +19,7 @@ import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/connectivity"
    "google.golang.org/grpc/credentials"
    "google.golang.org/grpc/credentials/insecure"
)

var _ runtime.Service = (*Gateway)(nil)
@@ -211,19 +212,21 @@ func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientC
// dialTCP creates a client connection via TCP.
// "addr" must be a valid TCP address with a port number.
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
    security := grpc.WithInsecure()
    var security grpc.DialOption
    if len(g.cfg.remoteCert) > 0 {
        creds, err := credentials.NewClientTLSFromFile(g.cfg.remoteCert, "")
        if err != nil {
            return nil, err
        }
        security = grpc.WithTransportCredentials(creds)
    } else {
        // Use insecure credentials when there's no remote cert provided.
        security = grpc.WithTransportCredentials(insecure.NewCredentials())
    }
    opts := []grpc.DialOption{
        security,
        grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
    }

    return grpc.DialContext(ctx, addr, opts...)
}
@@ -240,7 +243,7 @@ func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn,
        return d(addr, 0)
    }
    opts := []grpc.DialOption{
        grpc.WithInsecure(),
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithContextDialer(f),
        grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
    }
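The two gateway hunks above swap the deprecated `grpc.WithInsecure()` option for explicit transport credentials. A minimal sketch of the same pattern, assuming a recent `google.golang.org/grpc` release; the `remoteCert` parameter here is an illustrative stand-in for the gateway's config field, not Prysm's API:

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// dial picks TLS credentials when a certificate file is configured and
// otherwise falls back to explicit insecure credentials, mirroring the
// updated dialTCP logic in the diff above.
func dial(ctx context.Context, addr, remoteCert string) (*grpc.ClientConn, error) {
	var security grpc.DialOption
	if remoteCert != "" {
		creds, err := credentials.NewClientTLSFromFile(remoteCert, "")
		if err != nil {
			return nil, err
		}
		security = grpc.WithTransportCredentials(creds)
	} else {
		// Replaces the deprecated grpc.WithInsecure().
		security = grpc.WithTransportCredentials(insecure.NewCredentials())
	}
	return grpc.DialContext(ctx, addr, security)
}
```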
@@ -1,19 +0,0 @@
load("//tools:target_migration.bzl", "moved_targets")

moved_targets(
    [
        ":push_images_debug",
        ":push_images_alpine",
        ":push_images",
        ":image_bundle_debug",
        ":image_debug",
        ":image_bundle_alpine",
        ":image_bundle",
        ":image_with_creation_time",
        ":image_alpine",
        ":image",
        ":go_default_test",
        ":beacon-chain",
    ],
    "//cmd/beacon-chain",
)
@@ -50,6 +50,7 @@ go_library(
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
        "//beacon-chain/execution:go_default_library",
@@ -69,6 +70,7 @@ go_library(
        "//config/params:go_default_library",
        "//consensus-types:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/forkchoice:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/payload-attribute:go_default_library",
        "//consensus-types/primitives:go_default_library",
@@ -12,10 +12,10 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/time/slots"
    "go.opencensus.io/trace"
@@ -45,7 +45,7 @@ type ForkchoiceFetcher interface {
    HighestReceivedBlockSlot() primitives.Slot
    ReceivedBlocksLastEpoch() (uint64, error)
    InsertNode(context.Context, state.BeaconState, [32]byte) error
    ForkChoiceDump(context.Context) (*ethpbv1.ForkChoiceDump, error)
    ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
    NewSlot(context.Context, primitives.Slot) error
    ProposerBoost() [32]byte
}
@@ -4,8 +4,8 @@ import (
    "context"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)

// CachedHeadRoot returns the corresponding value from Forkchoice
@@ -51,7 +51,7 @@ func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32
}

// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
func (s *Service) ForkChoiceDump(ctx context.Context) (*forkchoice.Dump, error) {
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)
@@ -157,6 +157,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
    if hasAttr && payloadID != nil {
        var pId [8]byte
        copy(pId[:], payloadID[:])
        log.WithFields(logrus.Fields{
            "blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
            "headSlot": headBlk.Slot(),
            "payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
        }).Info("Forkchoice updated with payload attributes for proposal")
        s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
    } else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
        log.WithFields(logrus.Fields{
File diff suppressed because one or more lines are too long
@@ -12,7 +12,7 @@ import (
// - Expected KZG commitments match the number of blobs in the block
// - That the number of proofs match the number of blobs
// - That the proofs are verified against the KZG commitments
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.BlobSidecar) error {
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.DeprecatedBlobSidecar) error {
    if len(commitments) != len(sidecars) {
        return fmt.Errorf("could not check data availability, expected %d commitments, obtained %d",
            len(commitments), len(sidecars))
@@ -9,7 +9,7 @@ import (
)

func TestIsDataAvailable(t *testing.T) {
    sidecars := make([]*ethpb.BlobSidecar, 0)
    sidecars := make([]*ethpb.DeprecatedBlobSidecar, 0)
    commitments := make([][]byte, 0)
    require.NoError(t, IsDataAvailable(commitments, sidecars))
}
@@ -147,15 +147,3 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
    log.WithFields(fields).Debug("Synced new payload")
    return nil
}

func logBlobSidecar(scs []*ethpb.BlobSidecar, startTime time.Time) {
    if len(scs) == 0 {
        return
    }
    log.WithFields(logrus.Fields{
        "count": len(scs),
        "slot": scs[0].Slot,
        "block": hex.EncodeToString(scs[0].BlockRoot),
        "validationTime": time.Since(startTime),
    }).Debug("Synced new blob sidecars")
}
@@ -5,6 +5,7 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
@@ -164,6 +165,8 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
    }
}

// WithClockSychronizer sets the ClockSetter/ClockWaiter values to be used by services that need to block until
// the genesis timestamp is known (ClockWaiter) or which determine the genesis timestamp (ClockSetter).
func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
    return func(s *Service) error {
        s.clockSetter = gs
@@ -172,9 +175,18 @@ func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
    }
}

// WithSyncComplete sets a channel that is used to notify blockchain service that the node has synced to head.
func WithSyncComplete(c chan struct{}) Option {
    return func(s *Service) error {
        s.syncComplete = c
        return nil
    }
}

// WithBlobStorage sets the blob storage backend for the blockchain service.
func WithBlobStorage(b *filesystem.BlobStorage) Option {
    return func(s *Service) error {
        s.blobStorage = b
        return nil
    }
}
@@ -6,14 +6,13 @@ import (
    "time"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
    forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/config/features"
@@ -353,11 +352,15 @@ func (s *Service) databaseDACheck(ctx context.Context, b consensusblocks.ROBlock
    if len(commitments) == 0 {
        return nil
    }
    sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, b.Root())
    missing, err := missingIndices(s.blobStorage, b.Root(), len(commitments))
    if err != nil {
        return errors.Wrap(err, "could not get blob sidecars")
        return err
    }
    return kzg.IsDataAvailable(commitments, sidecars)
    if len(missing) == 0 {
        return nil
    }
    // TODO: don't worry that this error isn't informative, it will be superceded by a detailed sidecar cache error.
    return errors.New("not all kzg commitments are available")
}

func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
@@ -529,11 +532,24 @@ func (s *Service) runLateBlockTasks() {
    }
}

func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected int) (map[uint64]struct{}, error) {
    indices, err := bs.Indices(root)
    if err != nil {
        return nil, err
    }
    missing := make(map[uint64]struct{}, expected)
    for i := uint64(0); i < uint64(expected); i++ {
        if !indices[i] {
            missing[i] = struct{}{}
        }
    }
    return missing, nil
}

func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
    if signed.Version() < version.Deneb {
        return nil
    }
    t := time.Now()

    block := signed.Block()
    if block == nil {
@@ -556,55 +572,23 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
    if expected == 0 {
        return nil
    }

    // Read first from db in case we have the blobs
    sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
    switch {
    case err == nil:
        if len(sidecars) >= expected {
            s.blobNotifiers.delete(root)
            if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
                log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
                if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
                    log.WithError(err2).Error("could not delete sidecars")
                }
                return err
            }
            logBlobSidecar(sidecars, t)
            return nil
        }
    case errors.Is(err, db.ErrNotFound):
        // If the blob sidecars haven't arrived yet, the subsequent code will wait for them.
        // Note: The system will not exit with an error in this scenario.
    default:
        log.WithError(err).Error("could not get blob sidecars from DB")
    missing, err := missingIndices(s.blobStorage, root, expected)
    if err != nil {
        return err
    }
    if len(missing) == 0 {
        return nil
    }

    found := map[uint64]struct{}{}
    for _, sc := range sidecars {
        found[sc.Index] = struct{}{}
    }
    nc := s.blobNotifiers.forRoot(root)
    for {
        select {
        case idx := <-nc:
            found[idx] = struct{}{}
            if len(found) != expected {
            delete(missing, idx)
            if len(missing) > 0 {
                continue
            }
            s.blobNotifiers.delete(root)
            sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
            if err != nil {
                return errors.Wrap(err, "could not get blob sidecars")
            }
            if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
                log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
                if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
                    log.WithError(err2).Error("could not delete sidecars")
                }
                return err
            }
            logBlobSidecar(sidecars, t)
            return nil
        case <-ctx.Done():
            return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
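The rewritten isDataAvailable above first asks blob storage which indices are already on disk (via missingIndices) and then blocks on a per-root notifier channel, crossing indices off the missing set as sidecars arrive, until nothing is missing or the context expires. A simplified sketch of just that waiting step, under the assumption that the caller supplies the missing set and notification channel (this helper is hypothetical, not Prysm's code):

```go
package blockchain

import "context"

// waitForIndices removes each index received on notify from the missing set
// and returns once the set is empty or the context is cancelled.
func waitForIndices(ctx context.Context, missing map[uint64]struct{}, notify <-chan uint64) error {
	for len(missing) > 0 {
		select {
		case idx := <-notify:
			delete(missing, idx)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}
```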
@@ -3,7 +3,7 @@ package blockchain
import (
    "context"

    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
)

// SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
@@ -13,11 +13,11 @@ func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
}

// ReceiveBlob saves the blob to database and sends the new event
func (s *Service) ReceiveBlob(ctx context.Context, b *ethpb.BlobSidecar) error {
    if err := s.cfg.BeaconDB.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{b}); err != nil {
func (s *Service) ReceiveBlob(ctx context.Context, b blocks.VerifiedROBlob) error {
    if err := s.blobStorage.Save(b); err != nil {
        return err
    }

    s.sendNewBlobEvent([32]byte(b.BlockRoot), b.Index)
    s.sendNewBlobEvent(b.BlockRoot(), b.Index)
    return nil
}
@@ -43,7 +43,7 @@ type BlockReceiver interface {
// BlobReceiver interface defines the methods of chain service for receiving new
// blobs
type BlobReceiver interface {
    ReceiveBlob(context.Context, *ethpb.BlobSidecar) error
    ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}

// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
@@ -20,6 +20,7 @@ import (
    coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
    f "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
    forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
@@ -63,6 +64,7 @@ type Service struct {
    syncComplete chan struct{}
    blobNotifiers *blobNotifierMap
    blockBeingSynced *currentlySyncingBlock
    blobStorage *filesystem.BlobStorage
}

// config options for the service.
@@ -446,11 +446,10 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
    s := &Service{
        cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
    }
    blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
    blk := util.NewBeaconBlock()
    r, err := blk.Block.HashTreeRoot()
    require.NoError(b, err)
    bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
    beaconState, err := state_native.InitializeFromProtoPhase0(bs)
    beaconState, err := util.NewBeaconState()
    require.NoError(b, err)
    require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
@@ -24,11 +24,11 @@ go_library(
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/forkchoice:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
@@ -23,11 +23,11 @@ import (
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/sirupsen/logrus"
)
@@ -72,7 +72,7 @@ type ChainService struct {
    OptimisticRoots map[[32]byte]bool
    BlockSlot primitives.Slot
    SyncingRoot [32]byte
    Blobs []*ethpb.BlobSidecar
    Blobs []blocks.VerifiedROBlob
}

func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -574,7 +574,7 @@ func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, roo
}

// ForkChoiceDump mocks the same method in the chain service
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.ForkChoiceDump(ctx)
    }
@@ -613,7 +613,7 @@ func (c *ChainService) BlockBeingSynced(root [32]byte) bool {
}

// ReceiveBlob implements the same method in the chain service
func (c *ChainService) ReceiveBlob(_ context.Context, b *ethpb.BlobSidecar) error {
func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) error {
    c.Blobs = append(c.Blobs, b)
    return nil
}
@@ -1,6 +1,11 @@
package db

import "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
import (
    "errors"
    "os"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
)

// ErrNotFound can be used to determine if an error from a method in the database package
// represents a "not found" error. These often require different handling than a low-level
@@ -19,3 +24,9 @@ var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot

// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
// as equivalent to db.ErrNotFound.
func IsNotFound(err error) bool {
    return errors.Is(err, ErrNotFound) || os.IsNotExist(err)
}
beacon-chain/db/filesystem/BUILD.bazel (new file, 35 lines)
@@ -0,0 +1,35 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "blob.go",
        "ephemeral.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_spf13_afero//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["blob_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_spf13_afero//:go_default_library",
    ],
)
beacon-chain/db/filesystem/blob.go (new file, 176 lines)
@@ -0,0 +1,176 @@
package filesystem

import (
    "fmt"
    "os"
    "path"
    "strconv"
    "strings"

    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/spf13/afero"
)

var (
    errIndexOutOfBounds = errors.New("blob index in file name > MaxBlobsPerBlock")
)

const (
    sszExt = "ssz"
    partExt = "part"
    blobLockPath = "blob.lock"
)

// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(base string) (*BlobStorage, error) {
    base = path.Clean(base)
    if err := os.MkdirAll(base, 0755); err != nil {
        return nil, err
    }
    fs := afero.NewBasePathFs(afero.NewOsFs(), base)
    return &BlobStorage{fs: fs}, nil
}

// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
type BlobStorage struct {
    fs afero.Fs
}

// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
    fname := namerForSidecar(sidecar)
    sszPath := fname.ssz()
    exists, err := afero.Exists(bs.fs, sszPath)
    if err != nil {
        return err
    }
    if exists {
        // TODO: should it be an error to save a blob that already exists?
        return nil
    }

    // Serialize the ethpb.BlobSidecar to binary data using SSZ.
    sidecarData, err := ssz.MarshalSSZ(sidecar)
    if err != nil {
        return errors.Wrap(err, "failed to serialize sidecar data")
    }
    if err := bs.fs.Mkdir(fname.dir(), 0755); err != nil {
        return err
    }
    partPath := fname.partial()
    // Create a partial file and write the serialized data to it.
    partialFile, err := bs.fs.Create(partPath)
    if err != nil {
        return errors.Wrap(err, "failed to create partial file")
    }

    _, err = partialFile.Write(sidecarData)
    if err != nil {
        closeErr := partialFile.Close()
        if closeErr != nil {
            return closeErr
        }
        return errors.Wrap(err, "failed to write to partial file")
    }
    err = partialFile.Close()
    if err != nil {
        return err
    }

    // Atomically rename the partial file to its final name.
    err = bs.fs.Rename(partPath, sszPath)
    if err != nil {
        return errors.Wrap(err, "failed to rename partial file to final name")
    }
    return nil
}

// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, error) {
    expected := blobNamer{root: root, index: idx}
    encoded, err := afero.ReadFile(bs.fs, expected.ssz())
    var v blocks.VerifiedROBlob
    if err != nil {
        return v, err
    }
    s := &ethpb.BlobSidecar{}
    if err := s.UnmarshalSSZ(encoded); err != nil {
        return v, err
    }
    ro, err := blocks.NewROBlobWithRoot(s, root)
    if err != nil {
        return blocks.VerifiedROBlob{}, err
    }
    return verification.BlobSidecarNoop(ro)
}

// Indices generates a bitmap representing which BlobSidecar.Index values are present on disk for a given root.
// This value can be compared to the commitments observed in a block to determine which indices need to be found
// on the network to confirm data availability.
func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]bool, error) {
    var mask [fieldparams.MaxBlobsPerBlock]bool
    rootDir := blobNamer{root: root}.dir()
    entries, err := afero.ReadDir(bs.fs, rootDir)
    if err != nil {
        if os.IsNotExist(err) {
            return mask, nil
        }
        return mask, err
    }
    for i := range entries {
        if entries[i].IsDir() {
            continue
        }
        name := entries[i].Name()
        if !strings.HasSuffix(name, sszExt) {
            continue
        }
        parts := strings.Split(name, ".")
        if len(parts) != 2 {
            continue
        }
        u, err := strconv.ParseUint(parts[0], 10, 64)
        if err != nil {
            return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
        }
        if u > fieldparams.MaxBlobsPerBlock {
            return mask, errIndexOutOfBounds
        }
        mask[u] = true
    }
    return mask, nil
}

type blobNamer struct {
    root [32]byte
    index uint64
}

func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
    return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}

func (p blobNamer) dir() string {
    return fmt.Sprintf("%#x", p.root)
}

func (p blobNamer) fname(ext string) string {
    return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, ext))
}

func (p blobNamer) partial() string {
    return p.fname(partExt)
}

func (p blobNamer) ssz() string {
    return p.fname(sszExt)
}
beacon-chain/db/filesystem/blob_test.go (new file, 70 lines)
@@ -0,0 +1,70 @@
package filesystem

import (
	"bytes"
	"testing"

	ssz "github.com/prysmaticlabs/fastssz"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/spf13/afero"

	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestBlobStorage_SaveBlobData(t *testing.T) {
	_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock)
	testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
	require.NoError(t, err)

	t.Run("no error for duplicate", func(t *testing.T) {
		fs, bs := NewEphemeralBlobStorageWithFs(t)
		existingSidecar := testSidecars[0]

		blobPath := namerForSidecar(existingSidecar).ssz()
		// Serialize the existing BlobSidecar to binary data.
		existingSidecarData, err := ssz.MarshalSSZ(existingSidecar)
		require.NoError(t, err)

		require.NoError(t, bs.Save(existingSidecar))
		// No error when attempting to write twice.
		require.NoError(t, bs.Save(existingSidecar))

		content, err := afero.ReadFile(fs, blobPath)
		require.NoError(t, err)
		require.Equal(t, true, bytes.Equal(existingSidecarData, content))

		// Deserialize the BlobSidecar from the saved file data.
		savedSidecar := &ethpb.BlobSidecar{}
		err = savedSidecar.UnmarshalSSZ(content)
		require.NoError(t, err)

		// Compare the original Sidecar and the saved Sidecar.
		require.DeepSSZEqual(t, existingSidecar.BlobSidecar, savedSidecar)

	})
	t.Run("indices", func(t *testing.T) {
		bs := NewEphemeralBlobStorage(t)
		sc := testSidecars[2]
		require.NoError(t, bs.Save(sc))
		actualSc, err := bs.Get(sc.BlockRoot(), sc.Index)
		require.NoError(t, err)
		expectedIdx := [fieldparams.MaxBlobsPerBlock]bool{false, false, true}
		actualIdx, err := bs.Indices(actualSc.BlockRoot())
		require.NoError(t, err)
		require.Equal(t, expectedIdx, actualIdx)
	})

	t.Run("round trip write then read", func(t *testing.T) {
		bs := NewEphemeralBlobStorage(t)
		err := bs.Save(testSidecars[0])
		require.NoError(t, err)

		expected := testSidecars[0]
		actual, err := bs.Get(expected.BlockRoot(), expected.Index)
		require.NoError(t, err)
		require.DeepSSZEqual(t, expected, actual)
	})
}
beacon-chain/db/filesystem/ephemeral.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package filesystem

import (
	"testing"

	"github.com/spf13/afero"
)

// NewEphemeralBlobStorage should only be used for tests.
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(_ testing.TB) *BlobStorage {
	return &BlobStorage{fs: afero.NewMemMapFs()}
}

// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(_ testing.TB) (afero.Fs, *BlobStorage) {
	fs := afero.NewMemMapFs()
	return fs, &BlobStorage{fs: fs}
}
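To make the new storage API concrete, here is a minimal usage sketch modelled on the new files above. It is illustrative only and not part of the changeset: the test name and the external test package are invented, and it leans on the same helpers the new blob_test.go uses (util.GenerateTestDenebBlockWithSidecar, verification.BlobSidecarSliceNoop, NewEphemeralBlobStorage).

```go
package filesystem_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

// Hypothetical external test showing the intended flow: save a verified
// sidecar, read it back, and check the on-disk index mask.
func TestBlobStorageUsageSketch(t *testing.T) {
	_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock)
	verified, err := verification.BlobSidecarSliceNoop(sidecars)
	require.NoError(t, err)
	sc := verified[0]

	// In-memory backend for tests; production code would use NewBlobStorage(dir).
	bs := filesystem.NewEphemeralBlobStorage(t)

	// Save writes <blockRoot>/<index>.part and atomically renames it to .ssz.
	require.NoError(t, bs.Save(sc))

	// Get only ever returns sidecars that were verified before being written.
	got, err := bs.Get(sc.BlockRoot(), sc.Index)
	require.NoError(t, err)
	require.Equal(t, sc.Index, got.Index)

	// Indices reports which sidecar indices exist on disk for the root; callers
	// can compare this mask against the block's KZG commitments to spot gaps.
	mask, err := bs.Indices(sc.BlockRoot())
	require.NoError(t, err)
	require.Equal(t, true, mask[sc.Index])
}
```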
@@ -56,8 +56,8 @@ type ReadOnlyDatabase interface {
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
|
||||
// Blob operations.
|
||||
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)
|
||||
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)
|
||||
|
||||
// origin checkpoint sync support
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
@@ -95,7 +95,7 @@ type NoHeadAccessDatabase interface {
|
||||
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
|
||||
|
||||
// Blob operations.
|
||||
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
|
||||
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.DeprecatedBlobSidecar) error
|
||||
DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
|
||||
@@ -124,6 +124,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
|
||||
@@ -55,7 +55,7 @@ func (rk blobRotatingKey) BlockRoot() []byte {
|
||||
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
|
||||
// in the rotating keys buffer, we overwrite all elements for that slot. Otherwise, we merge the blob with an existing one.
|
||||
// Trying to replace a newer blob with an older one is an error.
|
||||
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
|
||||
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.DeprecatedBlobSidecar) error {
|
||||
if len(scs) == 0 {
|
||||
return errEmptySidecar
|
||||
}
|
||||
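The overwrite/merge rule described in the comment above can be summarized with a small sketch. This is illustrative only and not the kv implementation: the helper name is invented, and existingSlot stands for the slot already stored at the rotating-buffer position that the incoming sidecar maps to.

```go
package kv

import (
	"errors"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// rotationAction is a hypothetical helper expressing the rule above:
// a newer slot evicts the old entries, the same slot merges, and a slot
// older than what is already stored is rejected.
func rotationAction(existingSlot, incomingSlot primitives.Slot) (string, error) {
	switch {
	case incomingSlot > existingSlot:
		return "overwrite", nil // rotate: drop everything saved for the older slot
	case incomingSlot == existingSlot:
		return "merge", nil // same slot: merge with the sidecars already saved
	default:
		return "", errors.New("cannot replace a newer blob with an older one")
	}
}
```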
@@ -123,7 +123,7 @@ func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) e
|
||||
|
||||
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and
|
||||
// there are no more than MAX_BLOBS_PER_BLOCK sidecars.
|
||||
func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
|
||||
func validUniqueSidecars(scs []*ethpb.DeprecatedBlobSidecar) ([]*ethpb.DeprecatedBlobSidecar, error) {
|
||||
if len(scs) == 0 {
|
||||
return nil, errEmptySidecar
|
||||
}
|
||||
@@ -167,7 +167,7 @@ func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error)
|
||||
}
|
||||
|
||||
// sortSidecars sorts the sidecars by their index.
|
||||
func sortSidecars(scs []*ethpb.BlobSidecar) {
|
||||
func sortSidecars(scs []*ethpb.DeprecatedBlobSidecar) {
|
||||
sort.Slice(scs, func(i, j int) bool {
|
||||
return scs[i].Index < scs[j].Index
|
||||
})
|
||||
@@ -178,7 +178,7 @@ func sortSidecars(scs []*ethpb.BlobSidecar) {
|
||||
// Otherwise, the result will be filtered to only include the specified indices.
|
||||
// An error will result if an invalid index is specified.
|
||||
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
|
||||
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
|
||||
defer span.End()
|
||||
|
||||
@@ -207,7 +207,7 @@ func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices .
|
||||
return filterForIndices(sc, indices...)
|
||||
}
|
||||
|
||||
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
|
||||
if len(indices) == 0 {
|
||||
return sc.Sidecars, nil
|
||||
}
|
||||
@@ -215,7 +215,7 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
|
||||
// in ascending order from eg 0..3, without gaps. This allows us to assume the indices argument
|
||||
// maps 1:1 with indices in the BlobSidecars storage object.
|
||||
maxIdx := uint64(len(sc.Sidecars)) - 1
|
||||
sidecars := make([]*ethpb.BlobSidecar, len(indices))
|
||||
sidecars := make([]*ethpb.DeprecatedBlobSidecar, len(indices))
|
||||
for i, idx := range indices {
|
||||
if idx > maxIdx {
|
||||
return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
|
||||
@@ -230,7 +230,7 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
|
||||
// Otherwise, the result will be filtered to only include the specified indices.
|
||||
// An error will result if an invalid index is specified.
|
||||
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
|
||||
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
|
||||
defer span.End()
|
||||
|
||||
@@ -281,7 +281,7 @@ func (s *Store) DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte
|
||||
|
||||
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
|
||||
func blobSidecarKey(blob *ethpb.BlobSidecar) blobRotatingKey {
|
||||
func blobSidecarKey(blob *ethpb.DeprecatedBlobSidecar) blobRotatingKey {
|
||||
key := slotKey(blob.Slot)
|
||||
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
|
||||
key = append(key, blob.BlockRoot...)
|
||||
|
||||
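For readers unfamiliar with the key layout described above, here is a standalone sketch of how such a key could be assembled. The function name and explicit parameters are illustrative; the real code uses slotKey and bytesutil.SlotToBytesBigEndian.

```go
package kv

import "encoding/binary"

// sidecarKeySketch is a hypothetical reconstruction of the layout
// bytes(slot % MAX_SLOTS_TO_PERSIST_BLOBS) ++ bytes(slot) ++ block_root.
func sidecarKeySketch(slot uint64, blockRoot []byte, maxSlotsToPersistBlobs uint64) []byte {
	rotating := slot % maxSlotsToPersistBlobs // position in the rotating buffer

	key := make([]byte, 0, 16+len(blockRoot))
	key = binary.BigEndian.AppendUint64(key, rotating) // rotating-buffer prefix
	key = binary.BigEndian.AppendUint64(key, slot)     // full slot, big endian
	return append(key, blockRoot...)                   // block root suffix
}
```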
@@ -22,7 +22,7 @@ import (
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
|
||||
func equalBlobSlices(expect []*ethpb.DeprecatedBlobSidecar, got []*ethpb.DeprecatedBlobSidecar) error {
|
||||
if len(expect) != len(got) {
|
||||
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
@@ -94,7 +94,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
expect := make([]*ethpb.DeprecatedBlobSidecar, 2)
|
||||
expect[0] = scs[0]
|
||||
expect[1] = scs[3]
|
||||
|
||||
@@ -136,7 +136,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
|
||||
@@ -150,7 +150,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
expect := make([]*ethpb.DeprecatedBlobSidecar, 2)
|
||||
expect[0] = scs[0]
|
||||
expect[1] = scs[3]
|
||||
|
||||
@@ -191,11 +191,11 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
for i := 0; i < fieldparams.MaxBlobsPerBlock; i++ {
|
||||
scs[i].Slot = primitives.Slot(i)
|
||||
scs[i].BlockRoot = bytesutil.PadTo([]byte{byte(i)}, 32)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{scs[i]}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{scs[i]}))
|
||||
br := bytesutil.ToBytes32(scs[i].BlockRoot)
|
||||
saved, err := db.BlobSidecarsByRoot(ctx, br)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices([]*ethpb.BlobSidecar{scs[i]}, saved))
|
||||
require.NoError(t, equalBlobSlices([]*ethpb.DeprecatedBlobSidecar{scs[i]}, saved))
|
||||
}
|
||||
})
|
||||
t.Run("saving a new blob for rotation (batch)", func(t *testing.T) {
|
||||
@@ -226,7 +226,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
@@ -238,7 +238,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
sc.Slot = sc.Slot + newRetentionSlot
|
||||
}
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
|
||||
_, err = db.BlobSidecarsBySlot(ctx, 100)
|
||||
@@ -264,7 +264,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
sc.Slot = sc.Slot + newRetentionSlot
|
||||
}
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
|
||||
_, err = db.BlobSidecarsBySlot(ctx, 100)
|
||||
@@ -278,7 +278,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
for _, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
}
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
@@ -304,8 +304,8 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
eScs := generateEquivocatingBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2)
|
||||
|
||||
for i, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{eScs[i]}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{eScs[i]}))
|
||||
}
|
||||
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
@@ -318,15 +318,15 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
|
||||
blobSidecars := make([]*ethpb.BlobSidecar, n)
|
||||
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar {
|
||||
blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
blobSidecars[i] = generateBlobSidecar(t, i)
|
||||
}
|
||||
return blobSidecars
|
||||
}
|
||||
|
||||
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar {
|
||||
blob := make([]byte, 131072)
|
||||
_, err := rand.Read(blob)
|
||||
require.NoError(t, err)
|
||||
@@ -336,7 +336,7 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
kzgProof := make([]byte, 48)
|
||||
_, err = rand.Read(kzgProof)
|
||||
require.NoError(t, err)
|
||||
return &ethpb.BlobSidecar{
|
||||
return &ethpb.DeprecatedBlobSidecar{
|
||||
BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
|
||||
Index: index,
|
||||
Slot: 100,
|
||||
@@ -348,15 +348,15 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
}
|
||||
}
|
||||
|
||||
func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
|
||||
blobSidecars := make([]*ethpb.BlobSidecar, n)
|
||||
func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar {
|
||||
blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
blobSidecars[i] = generateEquivocatingBlobSidecar(t, i)
|
||||
}
|
||||
return blobSidecars
|
||||
}
|
||||
|
||||
func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar {
|
||||
blob := make([]byte, 131072)
|
||||
_, err := rand.Read(blob)
|
||||
require.NoError(t, err)
|
||||
@@ -367,7 +367,7 @@ func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSide
|
||||
_, err = rand.Read(kzgProof)
|
||||
require.NoError(t, err)
|
||||
|
||||
return &ethpb.BlobSidecar{
|
||||
return &ethpb.DeprecatedBlobSidecar{
|
||||
BlockRoot: bytesutil.PadTo([]byte{'c'}, 32),
|
||||
Index: index,
|
||||
Slot: 100,
|
||||
@@ -382,16 +382,16 @@ func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSide
|
||||
func Test_validUniqueSidecars_validation(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
scs []*ethpb.BlobSidecar
|
||||
scs []*ethpb.DeprecatedBlobSidecar
|
||||
err error
|
||||
}{
|
||||
{name: "empty", scs: []*ethpb.BlobSidecar{}, err: errEmptySidecar},
|
||||
{name: "empty", scs: []*ethpb.DeprecatedBlobSidecar{}, err: errEmptySidecar},
|
||||
{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), err: errBlobSidecarLimit},
|
||||
{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch},
|
||||
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch},
|
||||
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch},
|
||||
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch},
|
||||
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}},
|
||||
{name: "invalid slot", scs: []*ethpb.DeprecatedBlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch},
|
||||
{name: "invalid proposer index", scs: []*ethpb.DeprecatedBlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch},
|
||||
{name: "invalid root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch},
|
||||
{name: "invalid parent root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch},
|
||||
{name: "happy path", scs: []*ethpb.DeprecatedBlobSidecar{{Index: 0}, {Index: 1}}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -408,43 +408,43 @@ func Test_validUniqueSidecars_validation(t *testing.T) {
|
||||
func Test_validUniqueSidecars_dedup(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
scs []*ethpb.BlobSidecar
|
||||
expected []*ethpb.BlobSidecar
|
||||
scs []*ethpb.DeprecatedBlobSidecar
|
||||
expected []*ethpb.DeprecatedBlobSidecar
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "duplicate sidecar",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
|
||||
},
|
||||
{
|
||||
name: "single sidecar",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
|
||||
},
|
||||
{
|
||||
name: "multiple duplicates",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
},
|
||||
{
|
||||
name: "ok number after de-dupe, > 6 before",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
},
|
||||
{
|
||||
name: "max unique, no dupes",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
},
|
||||
{
|
||||
name: "too many unique",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
err: errBlobSidecarLimit,
|
||||
},
|
||||
{
|
||||
name: "too many unique with dupes",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
err: errBlobSidecarLimit,
|
||||
},
|
||||
}
|
||||
@@ -462,7 +462,7 @@ func Test_validUniqueSidecars_dedup(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStore_sortSidecars(t *testing.T) {
|
||||
scs := []*ethpb.BlobSidecar{
|
||||
scs := []*ethpb.DeprecatedBlobSidecar{
|
||||
{Index: 6},
|
||||
{Index: 4},
|
||||
{Index: 2},
|
||||
@@ -480,7 +480,7 @@ func TestStore_sortSidecars(t *testing.T) {
|
||||
func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
|
||||
s := setupDB(b)
|
||||
ctx := context.Background()
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{
|
||||
{BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0},
|
||||
}))
|
||||
|
||||
@@ -490,7 +490,7 @@ func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
|
||||
r := make([]byte, 32)
|
||||
_, err := rand.Read(r)
|
||||
require.NoError(b, err)
|
||||
scs := []*ethpb.BlobSidecar{
|
||||
scs := []*ethpb.DeprecatedBlobSidecar{
|
||||
{BlockRoot: r, Slot: primitives.Slot(i)},
|
||||
}
|
||||
k := blobSidecarKey(scs[0])
|
||||
@@ -502,7 +502,7 @@ func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
|
||||
})
|
||||
require.NoError(b, err)
|
||||
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{
|
||||
{BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071},
|
||||
}))
|
||||
|
||||
@@ -529,7 +529,7 @@ func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlobRotatingKey(t *testing.T) {
|
||||
k := blobSidecarKey(&ethpb.BlobSidecar{
|
||||
k := blobSidecarKey(&ethpb.DeprecatedBlobSidecar{
|
||||
Slot: 1,
|
||||
BlockRoot: []byte{2},
|
||||
})
|
||||
|
||||
@@ -35,5 +35,5 @@ func TestConfigureBlobRetentionEpoch(t *testing.T) {
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(minEpochsForSidecarRequest-1, 10)))
|
||||
cliCtx = cli.NewContext(&app, set, nil)
|
||||
err := ConfigureBlobRetentionEpoch(cliCtx)
|
||||
require.ErrorContains(t, "extend-blob-retention-epoch smaller than spec default", err)
|
||||
require.ErrorContains(t, "blob-retention-epochs smaller than spec default", err)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,12 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -10,3 +15,9 @@ func init() {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(io.Discard)
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ go_library(
|
||||
srcs = [
|
||||
"block_cache.go",
|
||||
"block_reader.go",
|
||||
"check_transition_config.go",
|
||||
"deposit.go",
|
||||
"engine_client.go",
|
||||
"errors.go",
|
||||
@@ -82,7 +81,6 @@ go_test(
|
||||
srcs = [
|
||||
"block_cache_test.go",
|
||||
"block_reader_test.go",
|
||||
"check_transition_config_test.go",
|
||||
"deposit_test.go",
|
||||
"engine_client_fuzz_test.go",
|
||||
"engine_client_test.go",
|
||||
@@ -96,7 +94,6 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
@@ -140,6 +137,5 @@ go_test(
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,168 +0,0 @@
|
||||
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
checkTransitionPollingInterval = time.Second * 10
|
||||
logTtdInterval = time.Minute
|
||||
configMismatchLog = "Configuration mismatch between your execution client and Prysm. " +
|
||||
"Please check your execution client and restart it with the proper configuration. If this is not done, " +
|
||||
"your node will not be able to complete the proof-of-stake transition"
|
||||
needsEnginePortLog = "Could not check execution client configuration. " +
|
||||
"You are probably connecting to your execution client on the wrong port. For the Ethereum " +
|
||||
"merge, you will need to connect to your " +
|
||||
"execution client on port 8551 rather than 8545. This is known as the 'engine API' port and needs to be " +
|
||||
"authenticated if connecting via HTTP. See our documentation on how to set up this up here " +
|
||||
"https://docs.prylabs.network/docs/execution-node/authentication"
|
||||
)
|
||||
|
||||
// Checks the transition configuration between Prysm and the connected execution node to ensure
|
||||
// there are no differences in terminal block difficulty and block hash.
|
||||
// If there are any discrepancies, we must log errors to ensure users can resolve
|
||||
// the problem and be ready for the merge transition.
|
||||
func (s *Service) checkTransitionConfiguration(
|
||||
ctx context.Context, blockNotifications chan *feed.Event,
|
||||
) {
|
||||
// If Bellatrix fork epoch is not set, we do not run this check.
|
||||
if params.BeaconConfig().BellatrixForkEpoch == math.MaxUint64 {
|
||||
return
|
||||
}
|
||||
i := new(big.Int)
|
||||
i.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
ttd := new(uint256.Int)
|
||||
ttd.SetFromBig(i)
|
||||
cfg := &pb.TransitionConfiguration{
|
||||
TerminalTotalDifficulty: ttd.Hex(),
|
||||
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
|
||||
TerminalBlockNumber: big.NewInt(0).Bytes(), // A value of 0 is recommended in the request.
|
||||
}
|
||||
err := s.ExchangeTransitionConfiguration(ctx, cfg)
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Is(err, ErrConfigMismatch):
|
||||
log.WithError(err).Fatal(configMismatchLog)
|
||||
case errors.Is(err, ErrMethodNotFound):
|
||||
log.WithError(err).Error(needsEnginePortLog)
|
||||
default:
|
||||
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
|
||||
}
|
||||
}
|
||||
|
||||
// We poll the execution client to see if the transition configuration has changed.
|
||||
// This serves as a heartbeat to ensure the execution client and Prysm are ready for the
|
||||
// Bellatrix hard-fork transition.
|
||||
ticker := time.NewTicker(checkTransitionPollingInterval)
|
||||
logTtdTicker := time.NewTicker(logTtdInterval)
|
||||
hasTtdReached := false
|
||||
defer ticker.Stop()
|
||||
defer logTtdTicker.Stop()
|
||||
sub := s.cfg.stateNotifier.StateFeed().Subscribe(blockNotifications)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-sub.Err():
|
||||
return
|
||||
case ev := <-blockNotifications:
|
||||
data, ok := ev.Data.(*statefeed.BlockProcessedData)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
isExecutionBlock, err := blocks.IsExecutionBlock(data.SignedBlock.Block().Body())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not check whether signed block is execution block")
|
||||
continue
|
||||
}
|
||||
if isExecutionBlock {
|
||||
log.Debug("PoS transition is complete, no longer checking for configuration changes")
|
||||
return
|
||||
}
|
||||
case tm := <-ticker.C:
|
||||
ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
|
||||
err = s.ExchangeTransitionConfiguration(ctx, cfg)
|
||||
s.handleExchangeConfigurationError(err)
|
||||
cancel()
|
||||
case <-logTtdTicker.C:
|
||||
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.chainStartData.GetGenesisTime()))
|
||||
if currentEpoch >= params.BeaconConfig().BellatrixForkEpoch && !hasTtdReached {
|
||||
hasTtdReached, err = s.logTtdStatus(ctx, ttd)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not log ttd status")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We check if there is a configuration mismatch error between the execution client
|
||||
// and the Prysm beacon node. If so, we need to log errors in the node as it cannot successfully
|
||||
// complete the merge transition for the Bellatrix hard fork.
|
||||
func (s *Service) handleExchangeConfigurationError(err error) {
|
||||
if err == nil {
|
||||
// If there is no error in checking the exchange configuration error, we clear
|
||||
// the run error of the service if we had previously set it to ErrConfigMismatch.
|
||||
if errors.Is(s.runError, ErrConfigMismatch) {
|
||||
s.runError = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
// If the error is a configuration mismatch, we set a runtime error in the service.
|
||||
if errors.Is(err, ErrConfigMismatch) {
|
||||
s.runError = err
|
||||
log.WithError(err).Error(configMismatchLog)
|
||||
return
|
||||
} else if errors.Is(err, ErrMethodNotFound) {
|
||||
log.WithError(err).Error(needsEnginePortLog)
|
||||
return
|
||||
}
|
||||
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
|
||||
}
|
||||
|
||||
// Logs the terminal total difficulty status.
|
||||
func (s *Service) logTtdStatus(ctx context.Context, ttd *uint256.Int) (bool, error) {
|
||||
latest, err := s.LatestExecutionBlock(ctx)
|
||||
switch {
|
||||
case errors.Is(err, hexutil.ErrEmptyString):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, err
|
||||
case latest == nil:
|
||||
return false, errors.New("latest block is nil")
|
||||
case latest.TotalDifficulty == "":
|
||||
return false, nil
|
||||
default:
|
||||
}
|
||||
latestTtd, err := hexutil.DecodeBig(latest.TotalDifficulty)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if latestTtd.Cmp(ttd.ToBig()) >= 0 {
|
||||
return true, nil
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"latestDifficulty": latestTtd.String(),
|
||||
"terminalDifficulty": ttd.ToBig().String(),
|
||||
"network": params.BeaconConfig().ConfigName,
|
||||
}).Info("Ready for The Merge")
|
||||
|
||||
totalTerminalDifficulty.Set(float64(latestTtd.Uint64()))
|
||||
return false, nil
|
||||
}
|
||||
@@ -1,260 +0,0 @@
|
||||
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func Test_checkTransitionConfiguration(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
t.Run("context canceled", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := &mocks.EngineClient{}
|
||||
m.Err = errors.New("something went wrong")
|
||||
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.cfg.stateNotifier = &mockChain.MockStateNotifier{}
|
||||
checkTransitionPollingInterval = time.Millisecond
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
go srv.checkTransitionConfiguration(ctx, make(chan *feed.Event, 1))
|
||||
<-time.After(100 * time.Millisecond)
|
||||
cancel()
|
||||
require.LogsContain(t, hook, "Could not check configuration values")
|
||||
})
|
||||
|
||||
t.Run("block containing execution payload exits routine", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := &mocks.EngineClient{}
|
||||
m.Err = errors.New("something went wrong")
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.cfg.stateNotifier = &mockChain.MockStateNotifier{}
|
||||
|
||||
checkTransitionPollingInterval = time.Millisecond
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
exit := make(chan bool)
|
||||
notification := make(chan *feed.Event)
|
||||
go func() {
|
||||
srv.checkTransitionConfiguration(ctx, notification)
|
||||
exit <- true
|
||||
}()
|
||||
payload := emptyPayload()
|
||||
payload.GasUsed = 21000
|
||||
wrappedBlock, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BeaconBlockBellatrix{
|
||||
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: payload,
|
||||
},
|
||||
}},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
notification <- &feed.Event{
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
SignedBlock: wrappedBlock,
|
||||
},
|
||||
Type: statefeed.BlockProcessed,
|
||||
}
|
||||
<-exit
|
||||
cancel()
|
||||
require.LogsContain(t, hook, "PoS transition is complete, no longer checking")
|
||||
})
|
||||
}
|
||||
|
||||
func TestService_handleExchangeConfigurationError(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
t.Run("clears existing service error", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
srv.runError = ErrConfigMismatch
|
||||
srv.handleExchangeConfigurationError(nil)
|
||||
require.Equal(t, true, srv.Status() == nil)
|
||||
})
|
||||
t.Run("does not clear existing service error if wrong kind", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
err := errors.New("something else went wrong")
|
||||
srv.runError = err
|
||||
srv.handleExchangeConfigurationError(nil)
|
||||
require.ErrorIs(t, err, srv.Status())
|
||||
})
|
||||
t.Run("sets service error on config mismatch", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
srv.handleExchangeConfigurationError(ErrConfigMismatch)
|
||||
require.Equal(t, ErrConfigMismatch, srv.Status())
|
||||
require.LogsContain(t, hook, configMismatchLog)
|
||||
})
|
||||
t.Run("does not set service error if unrelated problem", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
srv.handleExchangeConfigurationError(errors.New("foo"))
|
||||
require.Equal(t, true, srv.Status() == nil)
|
||||
require.LogsContain(t, hook, "Could not check configuration values")
|
||||
})
|
||||
}
|
||||
|
||||
func setupTransitionConfigTest(t testing.TB) *Service {
|
||||
fix := fixtures()
|
||||
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
// Change the terminal block hash.
|
||||
h := common.BytesToHash([]byte("foo"))
|
||||
resp.TerminalBlockHash = h[:]
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
return service
|
||||
}
|
||||
|
||||
func TestService_logTtdStatus(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := &pb.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.Hash{},
|
||||
UncleHash: common.Hash{},
|
||||
Coinbase: common.Address{},
|
||||
Root: common.Hash{},
|
||||
TxHash: common.Hash{},
|
||||
ReceiptHash: common.Hash{},
|
||||
Bloom: gethtypes.Bloom{},
|
||||
Difficulty: big.NewInt(1),
|
||||
Number: big.NewInt(2),
|
||||
GasLimit: 3,
|
||||
GasUsed: 4,
|
||||
Time: 5,
|
||||
Extra: nil,
|
||||
MixDigest: common.Hash{},
|
||||
Nonce: gethtypes.BlockNonce{},
|
||||
BaseFee: big.NewInt(6),
|
||||
},
|
||||
TotalDifficulty: "0x12345678",
|
||||
}
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
ttd := new(uint256.Int)
|
||||
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, reached)
|
||||
|
||||
reached, err = service.logTtdStatus(context.Background(), ttd.SetUint64(323423484))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := (*pb.ExecutionBlock)(nil) // Nil response when a client is not synced
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
ttd := new(uint256.Int)
|
||||
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
|
||||
require.ErrorContains(t, "missing required field 'parentHash' for Header", err)
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func emptyPayload() *pb.ExecutionPayload {
|
||||
return &pb.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
}
|
||||
}
|
||||
@@ -39,7 +39,6 @@ var (
|
||||
ForkchoiceUpdatedMethodV2,
|
||||
GetPayloadMethod,
|
||||
GetPayloadMethodV2,
|
||||
ExchangeTransitionConfigurationMethod,
|
||||
GetPayloadBodiesByHashV1,
|
||||
GetPayloadBodiesByRangeV1,
|
||||
}
|
||||
@@ -62,8 +61,6 @@ const (
|
||||
// GetPayloadMethodV2 v2 request string for JSON-RPC.
|
||||
GetPayloadMethodV2 = "engine_getPayloadV2"
|
||||
GetPayloadMethodV3 = "engine_getPayloadV3"
|
||||
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
|
||||
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
|
||||
// BlockByHashMethod request string for JSON-RPC.
|
||||
BlockByHashMethod = "eth_getBlockByHash"
|
||||
// BlockByNumberMethod request string for JSON-RPC.
|
||||
@@ -106,9 +103,6 @@ type EngineCaller interface {
|
||||
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
|
||||
) (*pb.PayloadIDBytes, []byte, error)
|
||||
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error)
|
||||
ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) error
|
||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
|
||||
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
|
||||
}
|
||||
@@ -299,51 +293,6 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
|
||||
return ed, nil, false, nil
|
||||
}
|
||||
|
||||
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
|
||||
func (s *Service) ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeTransitionConfiguration")
|
||||
defer span.End()
|
||||
|
||||
// We set terminal block number to 0 as the parameter is not set on the consensus layer.
|
||||
zeroBigNum := big.NewInt(0)
|
||||
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
|
||||
d := time.Now().Add(defaultEngineTimeout)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
result := &pb.TransitionConfiguration{}
|
||||
if err := s.rpcClient.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
|
||||
return handleRPCError(err)
|
||||
}
|
||||
|
||||
// We surface an error to the user if local configuration settings mismatch
|
||||
// according to the response from the execution node.
|
||||
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
|
||||
if !bytes.Equal(cfgTerminalHash, result.TerminalBlockHash) {
|
||||
return errors.Wrapf(
|
||||
ErrConfigMismatch,
|
||||
"got %#x from execution node, wanted %#x",
|
||||
result.TerminalBlockHash,
|
||||
cfgTerminalHash,
|
||||
)
|
||||
}
|
||||
ttdCfg := params.BeaconConfig().TerminalTotalDifficulty
|
||||
ttdResult, err := hexutil.DecodeBig(result.TerminalTotalDifficulty)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not decode received terminal total difficulty")
|
||||
}
|
||||
if ttdResult.String() != ttdCfg {
|
||||
return errors.Wrapf(
|
||||
ErrConfigMismatch,
|
||||
"got %s from execution node, wanted %s",
|
||||
ttdResult.String(),
|
||||
ttdCfg,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
|
||||
defer span.End()
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/engine"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
@@ -72,47 +71,6 @@ func FuzzForkChoiceResponse(f *testing.F) {
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExchangeTransitionConfiguration(f *testing.F) {
|
||||
valHash := common.Hash([32]byte{0xFF, 0x01})
|
||||
ttd := hexutil.Big(*big.NewInt(math.MaxInt))
|
||||
seed := &engine.TransitionConfigurationV1{
|
||||
TerminalTotalDifficulty: &ttd,
|
||||
TerminalBlockHash: valHash,
|
||||
TerminalBlockNumber: hexutil.Uint64(math.MaxUint64),
|
||||
}
|
||||
|
||||
output, err := json.Marshal(seed)
|
||||
assert.NoError(f, err)
|
||||
f.Add(output)
|
||||
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
|
||||
gethResp := &engine.TransitionConfigurationV1{}
|
||||
prysmResp := &pb.TransitionConfiguration{}
|
||||
gethErr := json.Unmarshal(jsonBlob, gethResp)
|
||||
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
// Nothing to marshal if we have an error.
|
||||
if gethErr != nil {
|
||||
return
|
||||
}
|
||||
gethBlob, gethErr := json.Marshal(gethResp)
|
||||
prysmBlob, prysmErr := json.Marshal(prysmResp)
|
||||
if gethErr != nil {
|
||||
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
|
||||
}
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
if gethErr != nil {
|
||||
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
|
||||
}
|
||||
newGethResp := &engine.TransitionConfigurationV1{}
|
||||
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
newGethResp2 := &engine.TransitionConfigurationV1{}
|
||||
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExecutionPayload(f *testing.F) {
|
||||
logsBloom := [256]byte{'j', 'u', 'n', 'k'}
|
||||
execData := &engine.ExecutionPayloadEnvelope{
|
||||
|
||||
@@ -33,7 +33,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -135,12 +134,6 @@ func TestClient_IPC(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
|
||||
})
|
||||
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
|
||||
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
err := srv.ExchangeTransitionConfiguration(ctx, want)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run(BlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -674,44 +667,6 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
|
||||
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
encodedReq, err := json.Marshal(want)
|
||||
require.NoError(t, err)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(encodedReq),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": want,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
client := &Service{}
|
||||
client.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
err = client.ExchangeTransitionConfiguration(ctx, want)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run(BlockByHashMethod, func(t *testing.T) {
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
@@ -1190,78 +1145,6 @@ func Test_tDStringToUint256(t *testing.T) {
require.ErrorContains(t, "hex number > 256 bits", err)
}

func TestExchangeTransitionConfiguration(t *testing.T) {
fix := fixtures()
ctx := context.Background()
t.Run("wrong terminal block hash", func(t *testing.T) {
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
require.Equal(t, true, ok)

srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()

// Change the terminal block hash.
h := common.BytesToHash([]byte("foo"))
resp.TerminalBlockHash = h[:]
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

service := &Service{}
service.rpcClient = rpcClient

err = service.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
})
t.Run("wrong terminal total difficulty", func(t *testing.T) {
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
require.Equal(t, true, ok)

srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()

// Change the terminal block hash.
resp.TerminalTotalDifficulty = "0x1"
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

service := &Service{}
service.rpcClient = rpcClient

err = service.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
})
}

type customError struct {
code int
timeout bool
@@ -1549,13 +1432,6 @@ func fixtures() map[string]interface{} {
},
PayloadId: &id,
}
b, _ := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
ttd, _ := uint256.FromBig(b)
transitionCfg := &pb.TransitionConfiguration{
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
TerminalTotalDifficulty: ttd.Hex(),
TerminalBlockNumber: big.NewInt(0).Bytes(),
}
validStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_VALID,
LatestValidHash: foo[:],
@@ -1598,7 +1474,6 @@ func fixtures() map[string]interface{} {
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
}
}

@@ -1905,17 +1780,6 @@ func (*testEngineService) GetPayloadV2(
return item
}

func (*testEngineService) ExchangeTransitionConfigurationV1(
_ context.Context, _ *pb.TransitionConfiguration,
) *pb.TransitionConfiguration {
fix := fixtures()
item, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
if !ok {
panic("not found")
}
return item
}

func (*testEngineService) ForkchoiceUpdatedV1(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) *ForkchoiceUpdatedResponse {

@@ -6,10 +6,6 @@ import (
)

var (
totalTerminalDifficulty = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_terminal_difficulty",
Help: "The total terminal difficulty of the execution chain before merge",
})
newPayloadLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "new_payload_v1_latency_milliseconds",

@@ -22,7 +22,6 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
@@ -242,9 +241,6 @@ func (s *Service) Start() {
// Poll the execution client connection and fallback if errors occur.
s.pollConnectionStatus(s.ctx)

// Check transition configuration for the engine API client in the background.
go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))

go s.run(s.ctx.Done())
}

@@ -754,6 +750,10 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
}
}
} else {
if eth1DataInDB.Trie == nil && eth1DataInDB.DepositSnapshot != nil {
return errors.Errorf("trying to use old deposit trie after migration to the new trie. "+
"Run with the --%s flag to resume normal operations.", features.EnableEIP4881.Name)
}
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
}
if err != nil {

@@ -83,11 +83,6 @@ func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slo
return p, nil, e.BuilderOverride, e.ErrGetPayload
}

// ExchangeTransitionConfiguration --
func (e *EngineClient) ExchangeTransitionConfiguration(_ context.Context, _ *pb.TransitionConfiguration) error {
return e.Err
}

// LatestExecutionBlock --
func (e *EngineClient) LatestExecutionBlock(_ context.Context) (*pb.ExecutionBlock, error) {
return e.ExecutionBlock, e.ErrLatestExecBlock

@@ -17,8 +17,8 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/eth/v1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

@@ -31,9 +31,9 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
@@ -69,11 +69,11 @@ go_test(
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

@@ -12,9 +12,9 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -554,24 +554,24 @@ func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
}

// ForkChoiceDump returns a full dump of forkchoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, error) {
jc := &v1.Checkpoint{
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
jc := &ethpb.Checkpoint{
Epoch: f.store.justifiedCheckpoint.Epoch,
Root: f.store.justifiedCheckpoint.Root[:],
}
ujc := &v1.Checkpoint{
ujc := &ethpb.Checkpoint{
Epoch: f.store.unrealizedJustifiedCheckpoint.Epoch,
Root: f.store.unrealizedJustifiedCheckpoint.Root[:],
}
fc := &v1.Checkpoint{
fc := &ethpb.Checkpoint{
Epoch: f.store.finalizedCheckpoint.Epoch,
Root: f.store.finalizedCheckpoint.Root[:],
}
ufc := &v1.Checkpoint{
ufc := &ethpb.Checkpoint{
Epoch: f.store.unrealizedFinalizedCheckpoint.Epoch,
Root: f.store.unrealizedFinalizedCheckpoint.Root[:],
}
nodes := make([]*v1.ForkChoiceNode, 0, f.NodeCount())
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
var err error
if f.store.treeRootNode != nil {
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
@@ -583,7 +583,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, er
if f.store.headNode != nil {
headRoot = f.store.headNode.root
}
resp := &v1.ForkChoiceDump{
resp := &forkchoice2.Dump{
JustifiedCheckpoint: jc,
UnrealizedJustifiedCheckpoint: ujc,
FinalizedCheckpoint: fc,

@@ -6,8 +6,8 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)

@@ -151,7 +151,7 @@ func (n *Node) arrivedAfterOrphanCheck(genesisTime uint64) (bool, error) {
}

// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]*v1.ForkChoiceNode, error) {
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -159,7 +159,7 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
if n.parent != nil {
parentRoot = n.parent.root
}
thisNode := &v1.ForkChoiceNode{
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
@@ -174,9 +174,9 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
Timestamp: n.timestamp,
}
if n.optimistic {
thisNode.Validity = v1.ForkChoiceNodeValidity_OPTIMISTIC
thisNode.Validity = forkchoice2.Optimistic
} else {
thisNode.Validity = v1.ForkChoiceNodeValidity_VALID
thisNode.Validity = forkchoice2.Valid
}

nodes = append(nodes, thisNode)

@@ -5,8 +5,8 @@ import (
"testing"

"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
@@ -237,7 +237,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, err)
require.Equal(t, false, opt)

respNodes := make([]*v1.ForkChoiceNode, 0)
respNodes := make([]*forkchoice.Node, 0)
respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
require.NoError(t, err)
require.Equal(t, len(respNodes), f.NodeCount())

@@ -6,8 +6,8 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)

// BalancesByRooter is a handler to obtain the effective balances of the state
@@ -62,7 +62,7 @@ type Getter interface {
NodeCount() int
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
ForkChoiceDump(context.Context) (*v1.ForkChoiceDump, error)
ForkChoiceDump(context.Context) (*forkchoice2.Dump, error)
Weight(root [32]byte) (uint64, error)
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)

@@ -53,12 +53,8 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
|
||||
if flags.EnableHTTPEthAPI(httpModules) {
|
||||
ethRegistrations := []gateway.PbHandlerRegistration{
|
||||
ethpbservice.RegisterBeaconChainHandler,
|
||||
ethpbservice.RegisterBeaconValidatorHandler,
|
||||
ethpbservice.RegisterEventsHandler,
|
||||
}
|
||||
if enableDebugRPCEndpoints {
|
||||
ethRegistrations = append(ethRegistrations, ethpbservice.RegisterBeaconDebugHandler)
|
||||
}
|
||||
ethMux := gwruntime.NewServeMux(
|
||||
gwruntime.WithMarshalerOption(gwruntime.MIMEWildcard, &gwruntime.HTTPBodyMarshaler{
|
||||
Marshaler: &gwruntime.JSONPb{
|
||||
|
||||
@@ -9,39 +9,12 @@ import (
|
||||
)
|
||||
|
||||
func TestDefaultConfig(t *testing.T) {
|
||||
t.Run("Without debug endpoints", func(t *testing.T) {
|
||||
cfg := DefaultConfig(false, "eth,prysm")
|
||||
assert.NotNil(t, cfg.EthPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
|
||||
assert.Equal(t, 3, len(cfg.EthPbMux.Registrations))
|
||||
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
|
||||
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
|
||||
assert.Equal(t, "/eth/v1alpha2/", cfg.V1AlphaPbMux.Patterns[1])
|
||||
assert.Equal(t, 4, len(cfg.V1AlphaPbMux.Registrations))
|
||||
})
|
||||
|
||||
t.Run("With debug endpoints", func(t *testing.T) {
|
||||
cfg := DefaultConfig(true, "eth,prysm")
|
||||
assert.NotNil(t, cfg.EthPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
|
||||
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
|
||||
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
|
||||
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
|
||||
assert.Equal(t, "/eth/v1alpha2/", cfg.V1AlphaPbMux.Patterns[1])
|
||||
assert.Equal(t, 5, len(cfg.V1AlphaPbMux.Registrations))
|
||||
})
|
||||
t.Run("Without Prysm API", func(t *testing.T) {
|
||||
cfg := DefaultConfig(true, "eth")
|
||||
assert.NotNil(t, cfg.EthPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
|
||||
assert.Equal(t, 2, len(cfg.EthPbMux.Registrations))
|
||||
assert.Equal(t, (*gateway.PbMux)(nil), cfg.V1AlphaPbMux)
|
||||
})
|
||||
t.Run("Without Eth API", func(t *testing.T) {
|
||||
|
||||
@@ -24,6 +24,7 @@ go_library(
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/db/slasherkv:go_default_library",
|
||||
"//beacon-chain/deterministic-genesis:go_default_library",
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/slasherkv"
|
||||
interopcoldstart "github.com/prysmaticlabs/prysm/v4/beacon-chain/deterministic-genesis"
|
||||
@@ -112,6 +113,8 @@ type BeaconNode struct {
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStoragePath string
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -200,6 +203,14 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Starting Blob Storage")
|
||||
blobStorage, err := filesystem.NewBlobStorage(beacon.BlobStoragePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
beacon.BlobStorage = blobStorage
|
||||
|
||||
log.Debugln("Starting DB")
|
||||
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
|
||||
return nil, err
|
||||
@@ -639,6 +650,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithProposerIdsCache(b.proposerIdsCache),
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
blockchain.WithBlobStorage(b.BlobStorage),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -717,6 +729,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) erro
|
||||
regularsync.WithClockWaiter(b.clockWaiter),
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
)
|
||||
return b.services.RegisterService(rs)
|
||||
}
|
||||
|
||||
@@ -32,7 +32,6 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"aggregated_test.go",
|
||||
"benchmark_test.go",
|
||||
"block_test.go",
|
||||
"forkchoice_test.go",
|
||||
"seen_bits_test.go",
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
package kv_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations/kv"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
)
|
||||
|
||||
func BenchmarkAttCaches(b *testing.B) {
|
||||
ac := kv.NewAttCaches()
|
||||
|
||||
att := ðpb.Attestation{}
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
assert.NoError(b, ac.SaveUnaggregatedAttestation(att))
|
||||
assert.NoError(b, ac.DeleteAggregatedAttestation(att))
|
||||
}
|
||||
}
|
||||
@@ -470,7 +470,7 @@ func TestService_BroadcastBlob(t *testing.T) {
|
||||
}
|
||||
|
||||
blobSidecar := ðpb.SignedBlobSidecar{
|
||||
Message: ðpb.BlobSidecar{
|
||||
Message: ðpb.DeprecatedBlobSidecar{
|
||||
BlockRoot: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength),
|
||||
Index: 1,
|
||||
Slot: 2,
|
||||
|
||||
@@ -21,7 +21,7 @@ var gossipTopicMappings = map[string]proto.Message{
|
||||
SyncContributionAndProofSubnetTopicFormat: ðpb.SignedContributionAndProof{},
|
||||
SyncCommitteeSubnetTopicFormat: ðpb.SyncCommitteeMessage{},
|
||||
BlsToExecutionChangeSubnetTopicFormat: ðpb.SignedBLSToExecutionChange{},
|
||||
BlobSubnetTopicFormat: ðpb.SignedBlobSidecar{},
|
||||
BlobSubnetTopicFormat: ðpb.BlobSidecar{},
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
|
||||
@@ -28,6 +28,7 @@ go_library(
|
||||
"//beacon-chain/rpc/eth/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/eth/blob:go_default_library",
|
||||
"//beacon-chain/rpc/eth/builder:go_default_library",
|
||||
"//beacon-chain/rpc/eth/config:go_default_library",
|
||||
"//beacon-chain/rpc/eth/debug:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//beacon-chain/rpc/eth/node:go_default_library",
|
||||
|
||||
@@ -12,14 +12,10 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -37,16 +33,12 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_gogo_protobuf//types:go_default_library",
|
||||
"@com_github_r3labs_sse_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -2,178 +2,17 @@ package apimiddleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
|
||||
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/r3labs/sse/v2"
|
||||
)
|
||||
|
||||
type sszConfig struct {
|
||||
fileName string
|
||||
responseJson SszResponse
|
||||
}
|
||||
|
||||
func handleProduceBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "produce_beacon_block.ssz",
|
||||
responseJson: &VersionedSSZResponseJson{},
|
||||
}
|
||||
return handleGetSSZ(m, endpoint, w, req, config)
|
||||
}
|
||||
|
||||
func handleProduceBlindedBlockSSZ(
|
||||
m *apimiddleware.ApiProxyMiddleware,
|
||||
endpoint apimiddleware.Endpoint,
|
||||
w http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "produce_blinded_beacon_block.ssz",
|
||||
responseJson: &VersionedSSZResponseJson{},
|
||||
}
|
||||
return handleGetSSZ(m, endpoint, w, req, config)
|
||||
}
|
||||
|
||||
func handleGetSSZ(
|
||||
m *apimiddleware.ApiProxyMiddleware,
|
||||
endpoint apimiddleware.Endpoint,
|
||||
w http.ResponseWriter,
|
||||
req *http.Request,
|
||||
config sszConfig,
|
||||
) (handled bool) {
|
||||
ssz := http2.SszRequested(req)
|
||||
if !ssz {
|
||||
return false
|
||||
}
|
||||
|
||||
if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
grpcResponse, errJson := m.ProxyRequest(req)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
grpcResponseBody, errJson := apimiddleware.ReadGrpcResponseBody(grpcResponse.Body)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
if respHasError {
|
||||
return true
|
||||
}
|
||||
if errJson := apimiddleware.DeserializeGrpcResponseBodyIntoContainer(grpcResponseBody, config.responseJson); errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
respVersion, responseSsz, errJson := serializeMiddlewareResponseIntoSSZ(config.responseJson)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
if errJson := writeSSZResponseHeaderAndBody(grpcResponse, w, responseSsz, respVersion, config.fileName); errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
|
||||
req.URL.Scheme = "http"
|
||||
req.URL.Host = m.GatewayAddress
|
||||
req.RequestURI = ""
|
||||
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, endpoint.RequestURLLiterals); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
if errJson := apimiddleware.HandleQueryParameters(req, endpoint.RequestQueryParams); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
// We have to add new segments after handling parameters because it changes URL segment indexing.
|
||||
req.URL.Path = "/internal" + req.URL.Path + "/ssz"
|
||||
return nil
|
||||
}
|
||||
|
||||
func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
|
||||
buf, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
|
||||
}
|
||||
j := SszRequestJson{Data: base64.StdEncoding.EncodeToString(buf)}
|
||||
data, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return apimiddleware.InternalServerErrorWithMessage(err, "could not prepare POST data")
|
||||
}
|
||||
req.Body = io.NopCloser(bytes.NewBuffer(data))
|
||||
req.ContentLength = int64(len(data))
|
||||
req.Header.Set("Content-Type", api.JsonMediaType)
|
||||
return nil
|
||||
}
|
||||
|
||||
func serializeMiddlewareResponseIntoSSZ(respJson SszResponse) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
|
||||
// Serialize the SSZ part of the deserialized value.
|
||||
data, err := base64.StdEncoding.DecodeString(respJson.SSZData())
|
||||
if err != nil {
|
||||
return "", nil, apimiddleware.InternalServerErrorWithMessage(err, "could not decode response body into base64")
|
||||
}
|
||||
return strings.ToLower(respJson.SSZVersion()), data, nil
|
||||
}
|
||||
|
||||
func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWriter, respSsz []byte, respVersion, fileName string) apimiddleware.ErrorJson {
|
||||
var statusCodeHeader string
|
||||
for h, vs := range grpcResp.Header {
|
||||
// We don't want to expose any gRPC metadata in the HTTP response, so we skip forwarding metadata headers.
|
||||
if strings.HasPrefix(h, "Grpc-Metadata") {
|
||||
if h == "Grpc-Metadata-"+grpc.HttpCodeMetadataKey {
|
||||
statusCodeHeader = vs[0]
|
||||
}
|
||||
} else {
|
||||
for _, v := range vs {
|
||||
w.Header().Set(h, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
|
||||
w.Header().Set("Content-Type", api.OctetStreamMediaType)
|
||||
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
|
||||
w.Header().Set(api.VersionHeader, respVersion)
|
||||
if statusCodeHeader != "" {
|
||||
code, err := strconv.Atoi(statusCodeHeader)
|
||||
if err != nil {
|
||||
return apimiddleware.InternalServerErrorWithMessage(err, "could not parse status code")
|
||||
}
|
||||
w.WriteHeader(code)
|
||||
} else {
|
||||
w.WriteHeader(grpcResp.StatusCode)
|
||||
}
|
||||
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
|
||||
return apimiddleware.InternalServerErrorWithMessage(err, "could not write response message")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleEvents(m *apimiddleware.ApiProxyMiddleware, _ apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
sseClient := sse.NewClient("http://" + m.GatewayAddress + "/internal" + req.URL.RequestURI())
|
||||
sseClient.Headers["Grpc-Timeout"] = "0S"
|
||||
|
||||
@@ -4,165 +4,16 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/r3labs/sse/v2"
|
||||
)
|
||||
|
||||
type testSSZResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
func (t testSSZResponseJson) SSZVersion() string {
|
||||
return t.Version
|
||||
}
|
||||
|
||||
func (t testSSZResponseJson) SSZOptimistic() bool {
|
||||
return t.ExecutionOptimistic
|
||||
}
|
||||
|
||||
func (t testSSZResponseJson) SSZData() string {
|
||||
return t.Data
|
||||
}
|
||||
|
||||
func (t testSSZResponseJson) SSZFinalized() bool {
|
||||
return t.Finalized
|
||||
}
|
||||
|
||||
func TestPrepareSSZRequestForProxying(t *testing.T) {
|
||||
middleware := &apimiddleware.ApiProxyMiddleware{
|
||||
GatewayAddress: "http://apimiddleware.example",
|
||||
}
|
||||
endpoint := apimiddleware.Endpoint{
|
||||
Path: "http://foo.example",
|
||||
}
|
||||
var body bytes.Buffer
|
||||
request := httptest.NewRequest("GET", "http://foo.example", &body)
|
||||
|
||||
errJson := prepareSSZRequestForProxying(middleware, endpoint, request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "/internal/ssz", request.URL.Path)
|
||||
}
|
||||
|
||||
func TestPreparePostedSszData(t *testing.T) {
|
||||
var body bytes.Buffer
|
||||
body.Write([]byte("body"))
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
|
||||
preparePostedSSZData(request)
|
||||
assert.Equal(t, int64(19), request.ContentLength)
|
||||
assert.Equal(t, api.JsonMediaType, request.Header.Get("Content-Type"))
|
||||
}
|
||||
|
||||
func TestSerializeMiddlewareResponseIntoSSZ(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
j := testSSZResponseJson{
|
||||
Version: "Version",
|
||||
Data: "Zm9v",
|
||||
}
|
||||
v, ssz, errJson := serializeMiddlewareResponseIntoSSZ(j)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.DeepEqual(t, []byte("foo"), ssz)
|
||||
assert.Equal(t, "version", v)
|
||||
})
|
||||
|
||||
t.Run("invalid_data", func(t *testing.T) {
|
||||
j := testSSZResponseJson{
|
||||
Version: "Version",
|
||||
Data: "invalid",
|
||||
}
|
||||
_, _, errJson := serializeMiddlewareResponseIntoSSZ(j)
|
||||
require.Equal(t, false, errJson == nil)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode response body into base64"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteSSZResponseHeaderAndBody(t *testing.T) {
|
||||
responseSsz := []byte("ssz")
|
||||
version := "version"
|
||||
fileName := "test.ssz"
|
||||
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{
|
||||
"Foo": []string{"foo"},
|
||||
"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"204"},
|
||||
},
|
||||
}
|
||||
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
errJson := writeSSZResponseHeaderAndBody(response, writer, responseSsz, version, fileName)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
v, ok := writer.Header()["Foo"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "foo", v[0])
|
||||
v, ok = writer.Header()["Content-Length"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "3", v[0])
|
||||
v, ok = writer.Header()["Content-Type"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, api.OctetStreamMediaType, v[0])
|
||||
v, ok = writer.Header()["Content-Disposition"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "attachment; filename=test.ssz", v[0])
|
||||
v, ok = writer.Header()[api.VersionHeader]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "version", v[0])
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
})
|
||||
|
||||
t.Run("no_grpc_status_code_header", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
StatusCode: 204,
|
||||
}
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
errJson := writeSSZResponseHeaderAndBody(response, writer, responseSsz, version, fileName)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
})
|
||||
|
||||
t.Run("invalid_status_code", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{
|
||||
"Foo": []string{"foo"},
|
||||
"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"invalid"},
|
||||
},
|
||||
}
|
||||
responseSsz := []byte("ssz")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
errJson := writeSSZResponseHeaderAndBody(response, writer, responseSsz, version, fileName)
|
||||
require.Equal(t, false, errJson == nil)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not parse status code"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestReceiveEvents(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ch := make(chan *sse.Event)
|
||||
|
||||
@@ -3,17 +3,14 @@ package apimiddleware
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
@@ -278,178 +275,3 @@ func preparePublishedBlindedBlock(endpoint *apimiddleware.Endpoint, _ http.Respo
|
||||
}
|
||||
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
|
||||
}
|
||||
|
||||
type phase0ProduceBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BeaconBlockJson `json:"data"`
|
||||
}
|
||||
|
||||
type altairProduceBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BeaconBlockAltairJson `json:"data"`
|
||||
}
|
||||
|
||||
type bellatrixProduceBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BeaconBlockBellatrixJson `json:"data"`
|
||||
}
|
||||
|
||||
type capellaProduceBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BeaconBlockCapellaJson `json:"data"`
|
||||
}
|
||||
|
||||
type denebProduceBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BeaconBlockContentsDenebJson `json:"data"`
|
||||
}
|
||||
|
||||
type bellatrixProduceBlindedBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BlindedBeaconBlockBellatrixJson `json:"data"`
|
||||
}
|
||||
|
||||
type capellaProduceBlindedBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BlindedBeaconBlockCapellaJson `json:"data"`
|
||||
}
|
||||
|
||||
type denebProduceBlindedBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BlindedBeaconBlockContentsDenebJson `json:"data"`
|
||||
}
|
||||
|
||||
func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
respContainer, ok := response.(*ProduceBlockResponseV2Json)
|
||||
if !ok {
|
||||
return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
|
||||
}
|
||||
|
||||
var actualRespContainer interface{}
|
||||
switch {
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
|
||||
actualRespContainer = &phase0ProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.Phase0Block,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
|
||||
actualRespContainer = &altairProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.AltairBlock,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
|
||||
actualRespContainer = &bellatrixProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.BellatrixBlock,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
|
||||
actualRespContainer = &capellaProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.CapellaBlock,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
|
||||
actualRespContainer = &denebProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.DenebContents,
|
||||
}
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||
}
|
||||
|
||||
j, err := json.Marshal(actualRespContainer)
|
||||
if err != nil {
|
||||
return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
|
||||
}
|
||||
return false, j, nil
|
||||
}
|
||||
|
||||
func serializeProducedBlindedBlock(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
respContainer, ok := response.(*ProduceBlindedBlockResponseJson)
|
||||
if !ok {
|
||||
return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
|
||||
}
|
||||
|
||||
var actualRespContainer interface{}
|
||||
switch {
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
|
||||
actualRespContainer = &phase0ProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.Phase0Block,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
|
||||
actualRespContainer = &altairProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.AltairBlock,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
|
||||
actualRespContainer = &bellatrixProduceBlindedBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.BellatrixBlock,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
|
||||
actualRespContainer = &capellaProduceBlindedBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.CapellaBlock,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
|
||||
actualRespContainer = &denebProduceBlindedBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.DenebContents,
|
||||
}
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||
}
|
||||
|
||||
j, err := json.Marshal(actualRespContainer)
|
||||
if err != nil {
|
||||
return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
|
||||
}
|
||||
return false, j, nil
|
||||
}
|
||||
|
||||
func prepareForkChoiceResponse(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
dump, ok := response.(*ForkChoiceDumpJson)
|
||||
if !ok {
|
||||
return false, nil, apimiddleware.InternalServerError(errors.New("response is not of the correct type"))
|
||||
}
|
||||
|
||||
nodes := make([]*ForkChoiceNodeResponseJson, len(dump.ForkChoiceNodes))
|
||||
for i, n := range dump.ForkChoiceNodes {
|
||||
nodes[i] = &ForkChoiceNodeResponseJson{
|
||||
Slot: n.Slot,
|
||||
BlockRoot: n.BlockRoot,
|
||||
ParentRoot: n.ParentRoot,
|
||||
JustifiedEpoch: n.JustifiedEpoch,
|
||||
FinalizedEpoch: n.FinalizedEpoch,
|
||||
Weight: n.Weight,
|
||||
Validity: n.Validity,
|
||||
ExecutionBlockHash: n.ExecutionBlockHash,
|
||||
ExtraData: &ForkChoiceNodeExtraDataJson{
|
||||
UnrealizedJustifiedEpoch: n.UnrealizedJustifiedEpoch,
|
||||
UnrealizedFinalizedEpoch: n.UnrealizedFinalizedEpoch,
|
||||
Balance: n.Balance,
|
||||
ExecutionOptimistic: n.ExecutionOptimistic,
|
||||
TimeStamp: n.TimeStamp,
|
||||
},
|
||||
}
|
||||
}
|
||||
forkChoice := &ForkChoiceResponseJson{
|
||||
JustifiedCheckpoint: dump.JustifiedCheckpoint,
|
||||
FinalizedCheckpoint: dump.FinalizedCheckpoint,
|
||||
ForkChoiceNodes: nodes,
|
||||
ExtraData: &ForkChoiceResponseExtraDataJson{
|
||||
BestJustifiedCheckpoint: dump.BestJustifiedCheckpoint,
|
||||
UnrealizedJustifiedCheckpoint: dump.UnrealizedJustifiedCheckpoint,
|
||||
UnrealizedFinalizedCheckpoint: dump.UnrealizedFinalizedCheckpoint,
|
||||
ProposerBoostRoot: dump.ProposerBoostRoot,
|
||||
PreviousProposerBoostRoot: dump.PreviousProposerBoostRoot,
|
||||
HeadRoot: dump.HeadRoot,
|
||||
},
|
||||
}
|
||||
|
||||
result, err := json.Marshal(forkChoice)
|
||||
if err != nil {
|
||||
return false, nil, apimiddleware.InternalServerError(errors.New("could not marshal fork choice to JSON"))
|
||||
}
|
||||
return false, result, nil
|
||||
}
|
||||
|
||||
@@ -9,10 +9,8 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
@@ -359,437 +357,3 @@ func TestPreparePublishedBlindedBlock(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block type"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestSerializeProducedV2Block(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
response := &ProduceBlockResponseV2Json{
|
||||
Version: ethpbv2.Version_PHASE0.String(),
|
||||
Data: &BeaconBlockContainerV2Json{
|
||||
Phase0Block: &BeaconBlockJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &phase0ProduceBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
response := &ProduceBlockResponseV2Json{
|
||||
Version: ethpbv2.Version_ALTAIR.String(),
|
||||
Data: &BeaconBlockContainerV2Json{
|
||||
AltairBlock: &BeaconBlockAltairJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyAltairJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &altairProduceBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
response := &ProduceBlockResponseV2Json{
|
||||
Version: ethpbv2.Version_BELLATRIX.String(),
|
||||
Data: &BeaconBlockContainerV2Json{
|
||||
BellatrixBlock: &BeaconBlockBellatrixJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyBellatrixJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &capellaProduceBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
response := &ProduceBlockResponseV2Json{
|
||||
Version: ethpbv2.Version_CAPELLA.String(),
|
||||
Data: &BeaconBlockContainerV2Json{
|
||||
CapellaBlock: &BeaconBlockCapellaJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyCapellaJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &capellaProduceBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
response := &ProduceBlockResponseV2Json{
|
||||
Version: ethpbv2.Version_DENEB.String(),
|
||||
Data: &BeaconBlockContainerV2Json{
|
||||
DenebContents: &BeaconBlockContentsDenebJson{
|
||||
Block: &BeaconBlockDenebJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyDenebJson{},
|
||||
},
|
||||
BlobSidecars: []*BlobSidecarJson{{
|
||||
BlockRoot: "root",
|
||||
Index: "1",
|
||||
Slot: "1",
|
||||
BlockParentRoot: "root",
|
||||
ProposerIndex: "1",
|
||||
Blob: "blob",
|
||||
KzgCommitment: "kzgcommitment",
|
||||
KzgProof: "kzgproof",
|
||||
}},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &denebProduceBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data.Block
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
assert.NotNil(t, beaconBlock.Body)
|
||||
require.Equal(t, 1, len(resp.Data.BlobSidecars))
|
||||
sidecar := resp.Data.BlobSidecars[0]
|
||||
assert.Equal(t, "root", sidecar.BlockRoot)
|
||||
assert.Equal(t, "1", sidecar.Index)
|
||||
assert.Equal(t, "1", sidecar.Slot)
|
||||
assert.Equal(t, "root", sidecar.BlockParentRoot)
|
||||
assert.Equal(t, "1", sidecar.ProposerIndex)
|
||||
assert.Equal(t, "blob", sidecar.Blob)
|
||||
assert.Equal(t, "kzgcommitment", sidecar.KzgCommitment)
|
||||
assert.Equal(t, "kzgproof", sidecar.KzgProof)
|
||||
})
|
||||
t.Run("incorrect response type", func(t *testing.T) {
|
||||
response := &types.Empty{}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "container is not of the correct type"))
|
||||
})
|
||||
|
||||
t.Run("unsupported block version", func(t *testing.T) {
|
||||
response := &ProduceBlockResponseV2Json{
|
||||
Version: "unsupported",
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestSerializeProduceBlindedBlock(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
response := &ProduceBlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_PHASE0.String(),
|
||||
Data: &BlindedBeaconBlockContainerJson{
|
||||
Phase0Block: &BeaconBlockJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyJson{},
|
||||
},
|
||||
AltairBlock: nil,
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &phase0ProduceBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
response := &ProduceBlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_ALTAIR.String(),
|
||||
Data: &BlindedBeaconBlockContainerJson{
|
||||
AltairBlock: &BeaconBlockAltairJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyAltairJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &altairProduceBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
response := &ProduceBlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_BELLATRIX.String(),
|
||||
Data: &BlindedBeaconBlockContainerJson{
|
||||
BellatrixBlock: &BlindedBeaconBlockBellatrixJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BlindedBeaconBlockBodyBellatrixJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &bellatrixProduceBlindedBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
response := &ProduceBlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_CAPELLA.String(),
|
||||
Data: &BlindedBeaconBlockContainerJson{
|
||||
CapellaBlock: &BlindedBeaconBlockCapellaJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BlindedBeaconBlockBodyCapellaJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &capellaProduceBlindedBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
|
||||
t.Run("incorrect response type", func(t *testing.T) {
|
||||
response := &types.Empty{}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "container is not of the correct type"))
|
||||
})
|
||||
|
||||
t.Run("unsupported block version", func(t *testing.T) {
|
||||
response := &ProduceBlockResponseV2Json{
|
||||
Version: "unsupported",
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareForkChoiceResponse(t *testing.T) {
|
||||
dump := &ForkChoiceDumpJson{
|
||||
JustifiedCheckpoint: &CheckpointJson{
|
||||
Epoch: "justified",
|
||||
Root: "justified",
|
||||
},
|
||||
FinalizedCheckpoint: &CheckpointJson{
|
||||
Epoch: "finalized",
|
||||
Root: "finalized",
|
||||
},
|
||||
BestJustifiedCheckpoint: &CheckpointJson{
|
||||
Epoch: "best_justified",
|
||||
Root: "best_justified",
|
||||
},
|
||||
UnrealizedJustifiedCheckpoint: &CheckpointJson{
|
||||
Epoch: "unrealized_justified",
|
||||
Root: "unrealized_justified",
|
||||
},
|
||||
UnrealizedFinalizedCheckpoint: &CheckpointJson{
|
||||
Epoch: "unrealized_finalized",
|
||||
Root: "unrealized_finalized",
|
||||
},
|
||||
ProposerBoostRoot: "proposer_boost_root",
|
||||
PreviousProposerBoostRoot: "previous_proposer_boost_root",
|
||||
HeadRoot: "head_root",
|
||||
ForkChoiceNodes: []*ForkChoiceNodeJson{
|
||||
{
|
||||
Slot: "node1_slot",
|
||||
BlockRoot: "node1_block_root",
|
||||
ParentRoot: "node1_parent_root",
|
||||
JustifiedEpoch: "node1_justified_epoch",
|
||||
FinalizedEpoch: "node1_finalized_epoch",
|
||||
UnrealizedJustifiedEpoch: "node1_unrealized_justified_epoch",
|
||||
UnrealizedFinalizedEpoch: "node1_unrealized_finalized_epoch",
|
||||
Balance: "node1_balance",
|
||||
Weight: "node1_weight",
|
||||
ExecutionOptimistic: false,
|
||||
ExecutionBlockHash: "node1_execution_block_hash",
|
||||
TimeStamp: "node1_time_stamp",
|
||||
Validity: "node1_validity",
|
||||
},
|
||||
{
|
||||
Slot: "node2_slot",
|
||||
BlockRoot: "node2_block_root",
|
||||
ParentRoot: "node2_parent_root",
|
||||
JustifiedEpoch: "node2_justified_epoch",
|
||||
FinalizedEpoch: "node2_finalized_epoch",
|
||||
UnrealizedJustifiedEpoch: "node2_unrealized_justified_epoch",
|
||||
UnrealizedFinalizedEpoch: "node2_unrealized_finalized_epoch",
|
||||
Balance: "node2_balance",
|
||||
Weight: "node2_weight",
|
||||
ExecutionOptimistic: true,
|
||||
ExecutionBlockHash: "node2_execution_block_hash",
|
||||
TimeStamp: "node2_time_stamp",
|
||||
Validity: "node2_validity",
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errorJson := prepareForkChoiceResponse(dump)
|
||||
assert.Equal(t, nil, errorJson)
|
||||
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
result := &ForkChoiceResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, result))
|
||||
require.NotNil(t, result)
|
||||
assert.Equal(t, "justified", result.JustifiedCheckpoint.Epoch)
|
||||
assert.Equal(t, "justified", result.JustifiedCheckpoint.Root)
|
||||
assert.Equal(t, "finalized", result.FinalizedCheckpoint.Epoch)
|
||||
assert.Equal(t, "finalized", result.FinalizedCheckpoint.Root)
|
||||
assert.Equal(t, "best_justified", result.ExtraData.BestJustifiedCheckpoint.Epoch)
|
||||
assert.Equal(t, "best_justified", result.ExtraData.BestJustifiedCheckpoint.Root)
|
||||
assert.Equal(t, "unrealized_justified", result.ExtraData.UnrealizedJustifiedCheckpoint.Epoch)
|
||||
assert.Equal(t, "unrealized_justified", result.ExtraData.UnrealizedJustifiedCheckpoint.Root)
|
||||
assert.Equal(t, "unrealized_finalized", result.ExtraData.UnrealizedFinalizedCheckpoint.Epoch)
|
||||
assert.Equal(t, "unrealized_finalized", result.ExtraData.UnrealizedFinalizedCheckpoint.Root)
|
||||
assert.Equal(t, "proposer_boost_root", result.ExtraData.ProposerBoostRoot)
|
||||
assert.Equal(t, "previous_proposer_boost_root", result.ExtraData.PreviousProposerBoostRoot)
|
||||
assert.Equal(t, "head_root", result.ExtraData.HeadRoot)
|
||||
require.Equal(t, 2, len(result.ForkChoiceNodes))
|
||||
node1 := result.ForkChoiceNodes[0]
|
||||
require.NotNil(t, node1)
|
||||
assert.Equal(t, "node1_slot", node1.Slot)
|
||||
assert.Equal(t, "node1_block_root", node1.BlockRoot)
|
||||
assert.Equal(t, "node1_parent_root", node1.ParentRoot)
|
||||
assert.Equal(t, "node1_justified_epoch", node1.JustifiedEpoch)
|
||||
assert.Equal(t, "node1_finalized_epoch", node1.FinalizedEpoch)
|
||||
assert.Equal(t, "node1_unrealized_justified_epoch", node1.ExtraData.UnrealizedJustifiedEpoch)
|
||||
assert.Equal(t, "node1_unrealized_finalized_epoch", node1.ExtraData.UnrealizedFinalizedEpoch)
|
||||
assert.Equal(t, "node1_balance", node1.ExtraData.Balance)
|
||||
assert.Equal(t, "node1_weight", node1.Weight)
|
||||
assert.Equal(t, false, node1.ExtraData.ExecutionOptimistic)
|
||||
assert.Equal(t, "node1_execution_block_hash", node1.ExecutionBlockHash)
|
||||
assert.Equal(t, "node1_time_stamp", node1.ExtraData.TimeStamp)
|
||||
assert.Equal(t, "node1_validity", node1.Validity)
|
||||
node2 := result.ForkChoiceNodes[1]
|
||||
require.NotNil(t, node2)
|
||||
assert.Equal(t, "node2_slot", node2.Slot)
|
||||
assert.Equal(t, "node2_block_root", node2.BlockRoot)
|
||||
assert.Equal(t, "node2_parent_root", node2.ParentRoot)
|
||||
assert.Equal(t, "node2_justified_epoch", node2.JustifiedEpoch)
|
||||
assert.Equal(t, "node2_finalized_epoch", node2.FinalizedEpoch)
|
||||
assert.Equal(t, "node2_unrealized_justified_epoch", node2.ExtraData.UnrealizedJustifiedEpoch)
|
||||
assert.Equal(t, "node2_unrealized_finalized_epoch", node2.ExtraData.UnrealizedFinalizedEpoch)
|
||||
assert.Equal(t, "node2_balance", node2.ExtraData.Balance)
|
||||
assert.Equal(t, "node2_weight", node2.Weight)
|
||||
assert.Equal(t, true, node2.ExtraData.ExecutionOptimistic)
|
||||
assert.Equal(t, "node2_execution_block_hash", node2.ExecutionBlockHash)
|
||||
assert.Equal(t, "node2_time_stamp", node2.ExtraData.TimeStamp)
|
||||
assert.Equal(t, "node2_validity", node2.Validity)
|
||||
}
|
||||
|
||||
@@ -16,18 +16,8 @@ func (f *BeaconEndpointFactory) IsNil() bool {
|
||||
// Paths is a collection of all valid beacon chain API paths.
|
||||
func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
return []string{
|
||||
"/eth/v1/beacon/pool/attester_slashings",
|
||||
"/eth/v1/beacon/pool/proposer_slashings",
|
||||
"/eth/v1/beacon/weak_subjectivity",
|
||||
"/eth/v1/debug/beacon/heads",
|
||||
"/eth/v2/debug/beacon/heads",
|
||||
"/eth/v1/debug/fork_choice",
|
||||
"/eth/v1/config/fork_schedule",
|
||||
"/eth/v1/config/spec",
|
||||
"/eth/v1/events",
|
||||
"/eth/v1/validator/blocks/{slot}",
|
||||
"/eth/v2/validator/blocks/{slot}",
|
||||
"/eth/v1/validator/blinded_blocks/{slot}",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,49 +25,10 @@ func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, error) {
|
||||
endpoint := apimiddleware.DefaultEndpoint()
|
||||
switch path {
|
||||
case "/eth/v1/beacon/pool/attester_slashings":
|
||||
endpoint.PostRequest = &AttesterSlashingJson{}
|
||||
endpoint.GetResponse = &AttesterSlashingsPoolResponseJson{}
|
||||
case "/eth/v1/beacon/pool/proposer_slashings":
|
||||
endpoint.PostRequest = &ProposerSlashingJson{}
|
||||
endpoint.GetResponse = &ProposerSlashingsPoolResponseJson{}
|
||||
case "/eth/v1/beacon/weak_subjectivity":
|
||||
endpoint.GetResponse = &WeakSubjectivityResponse{}
|
||||
case "/eth/v1/debug/beacon/heads":
|
||||
endpoint.GetResponse = &ForkChoiceHeadsResponseJson{}
|
||||
case "/eth/v2/debug/beacon/heads":
|
||||
endpoint.GetResponse = &V2ForkChoiceHeadsResponseJson{}
|
||||
case "/eth/v1/debug/fork_choice":
|
||||
endpoint.GetResponse = &ForkChoiceDumpJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreSerializeMiddlewareResponseIntoJson: prepareForkChoiceResponse,
|
||||
}
|
||||
case "/eth/v1/config/fork_schedule":
|
||||
endpoint.GetResponse = &ForkScheduleResponseJson{}
|
||||
case "/eth/v1/config/spec":
|
||||
endpoint.GetResponse = &SpecResponseJson{}
|
||||
case "/eth/v1/events":
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleEvents}
|
||||
case "/eth/v1/validator/blocks/{slot}":
|
||||
endpoint.GetResponse = &ProduceBlockResponseJson{}
|
||||
endpoint.RequestURLLiterals = []string{"slot"}
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "randao_reveal", Hex: true}, {Name: "graffiti", Hex: true}}
|
||||
case "/eth/v2/validator/blocks/{slot}":
|
||||
endpoint.GetResponse = &ProduceBlockResponseV2Json{}
|
||||
endpoint.RequestURLLiterals = []string{"slot"}
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "randao_reveal", Hex: true}, {Name: "graffiti", Hex: true}}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedV2Block,
|
||||
}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlockSSZ}
|
||||
case "/eth/v1/validator/blinded_blocks/{slot}":
|
||||
endpoint.GetResponse = &ProduceBlindedBlockResponseJson{}
|
||||
endpoint.RequestURLLiterals = []string{"slot"}
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "randao_reveal", Hex: true}, {Name: "graffiti", Hex: true}}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
|
||||
}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
|
||||
default:
|
||||
return nil, errors.New("invalid path")
|
||||
}
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
)
|
||||
|
||||
//----------------
|
||||
@@ -26,48 +23,10 @@ type BlockRootResponseJson struct {
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type AttesterSlashingsPoolResponseJson struct {
|
||||
Data []*AttesterSlashingJson `json:"data"`
|
||||
}
|
||||
|
||||
type ProposerSlashingsPoolResponseJson struct {
|
||||
Data []*ProposerSlashingJson `json:"data"`
|
||||
}
|
||||
|
||||
type ForkChoiceHeadsResponseJson struct {
|
||||
Data []*ForkChoiceHeadJson `json:"data"`
|
||||
}
|
||||
|
||||
type V2ForkChoiceHeadsResponseJson struct {
|
||||
Data []*V2ForkChoiceHeadJson `json:"data"`
|
||||
}
|
||||
|
||||
type ForkScheduleResponseJson struct {
|
||||
Data []*ForkJson `json:"data"`
|
||||
}
|
||||
|
||||
type DepositContractResponseJson struct {
|
||||
Data *DepositContractJson `json:"data"`
|
||||
}
|
||||
|
||||
type SpecResponseJson struct {
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
|
||||
type ProduceBlockResponseJson struct {
|
||||
Data *BeaconBlockJson `json:"data"`
|
||||
}
|
||||
|
||||
type ProduceBlockResponseV2Json struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BeaconBlockContainerV2Json `json:"data"`
|
||||
}
|
||||
|
||||
type ProduceBlindedBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BlindedBeaconBlockContainerJson `json:"data"`
|
||||
}
|
||||
|
||||
type AggregateAttestationResponseJson struct {
|
||||
Data *AttestationJson `json:"data"`
|
||||
}
|
||||
@@ -645,17 +604,6 @@ type PendingAttestationJson struct {
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
}
|
||||
|
||||
type ForkChoiceHeadJson struct {
|
||||
Root string `json:"root" hex:"true"`
|
||||
Slot string `json:"slot"`
|
||||
}
|
||||
|
||||
type V2ForkChoiceHeadJson struct {
|
||||
Root string `json:"root" hex:"true"`
|
||||
Slot string `json:"slot"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
type DepositContractJson struct {
|
||||
ChainId string `json:"chain_id"`
|
||||
Address string `json:"address"`
|
||||
@@ -691,34 +639,6 @@ type SyncCommitteeContributionJson struct {
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type ForkChoiceNodeJson struct {
|
||||
Slot string `json:"slot"`
|
||||
BlockRoot string `json:"block_root" hex:"true"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
JustifiedEpoch string `json:"justified_epoch"`
|
||||
FinalizedEpoch string `json:"finalized_epoch"`
|
||||
UnrealizedJustifiedEpoch string `json:"unrealized_justified_epoch"`
|
||||
UnrealizedFinalizedEpoch string `json:"unrealized_finalized_epoch"`
|
||||
Balance string `json:"balance"`
|
||||
Weight string `json:"weight"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
ExecutionBlockHash string `json:"execution_block_hash" hex:"true"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
Validity string `json:"validity" enum:"true"`
|
||||
}
|
||||
|
||||
type ForkChoiceDumpJson struct {
|
||||
JustifiedCheckpoint *CheckpointJson `json:"justified_checkpoint"`
|
||||
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
|
||||
BestJustifiedCheckpoint *CheckpointJson `json:"best_justified_checkpoint"`
|
||||
UnrealizedJustifiedCheckpoint *CheckpointJson `json:"unrealized_justified_checkpoint"`
|
||||
UnrealizedFinalizedCheckpoint *CheckpointJson `json:"unrealized_finalized_checkpoint"`
|
||||
ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
|
||||
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
|
||||
HeadRoot string `json:"head_root" hex:"true"`
|
||||
ForkChoiceNodes []*ForkChoiceNodeJson `json:"fork_choice_nodes"`
|
||||
}
|
||||
|
||||
type HistoricalSummaryJson struct {
|
||||
BlockSummaryRoot string `json:"block_summary_root" hex:"true"`
|
||||
StateSummaryRoot string `json:"state_summary_root" hex:"true"`
|
||||
@@ -740,49 +660,6 @@ type SszResponse interface {
|
||||
SSZFinalized() bool
|
||||
}
|
||||
|
||||
type SszResponseJson struct {
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
func (ssz *SszResponseJson) SSZData() string {
|
||||
return ssz.Data
|
||||
}
|
||||
|
||||
func (*SszResponseJson) SSZVersion() string {
|
||||
return strings.ToLower(ethpbv2.Version_PHASE0.String())
|
||||
}
|
||||
|
||||
func (*SszResponseJson) SSZOptimistic() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (*SszResponseJson) SSZFinalized() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type VersionedSSZResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
func (ssz *VersionedSSZResponseJson) SSZData() string {
|
||||
return ssz.Data
|
||||
}
|
||||
|
||||
func (ssz *VersionedSSZResponseJson) SSZVersion() string {
|
||||
return ssz.Version
|
||||
}
|
||||
|
||||
func (ssz *VersionedSSZResponseJson) SSZOptimistic() bool {
|
||||
return ssz.ExecutionOptimistic
|
||||
}
|
||||
|
||||
func (ssz *VersionedSSZResponseJson) SSZFinalized() bool {
|
||||
return ssz.Finalized
|
||||
}
|
||||
|
||||
// ---------------
|
||||
// Events.
|
||||
// ---------------
|
||||
|
||||
@@ -4,13 +4,11 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"blocks.go",
|
||||
"config.go",
|
||||
"handlers.go",
|
||||
"handlers_pool.go",
|
||||
"handlers_state.go",
|
||||
"handlers_validator.go",
|
||||
"log.go",
|
||||
"pool.go",
|
||||
"server.go",
|
||||
"structs.go",
|
||||
],
|
||||
@@ -52,10 +50,8 @@ go_library(
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
@@ -67,7 +63,6 @@ go_library(
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -76,13 +71,11 @@ go_test(
|
||||
srcs = [
|
||||
"blinded_blocks_test.go",
|
||||
"blocks_test.go",
|
||||
"config_test.go",
|
||||
"handlers_pool_test.go",
|
||||
"handlers_state_test.go",
|
||||
"handlers_test.go",
|
||||
"handlers_validators_test.go",
|
||||
"init_test.go",
|
||||
"pool_test.go",
|
||||
"server_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
@@ -121,10 +114,8 @@ go_test(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
@@ -133,13 +124,11 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_golang_mock//gomock:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_stretchr_testify//mock:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1688,22 +1688,6 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {
|
||||
http2.WriteJson(w, &GetCommitteesResponse{Data: committees, ExecutionOptimistic: isOptimistic, Finalized: isFinalized})
|
||||
}
|
||||
|
||||
// GetDepositContract retrieves the deposit contract address and chain ID.
|
||||
func (*Server) GetDepositContract(w http.ResponseWriter, r *http.Request) {
|
||||
_, span := trace.StartSpan(r.Context(), "beacon.GetDepositContract")
|
||||
defer span.End()
|
||||
|
||||
http2.WriteJson(w, &DepositContractResponse{
|
||||
Data: &struct {
|
||||
ChainId string `json:"chain_id"`
|
||||
Address string `json:"address"`
|
||||
}{
|
||||
ChainId: strconv.FormatUint(params.BeaconConfig().DepositChainID, 10),
|
||||
Address: params.BeaconConfig().DepositContractAddress,
|
||||
},
|
||||
})
|
||||
}
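// Illustrative usage sketch, not part of the Prysm source: one way a client
// could call the handler above over its REST route. The base URL, port (3500)
// and the /eth/v1/config/deposit_contract path are assumptions about the
// local node's gateway configuration.
//
//	resp, err := http.Get("http://localhost:3500/eth/v1/config/deposit_contract")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer resp.Body.Close()
//	var dc struct {
//		Data struct {
//			ChainId string `json:"chain_id"`
//			Address string `json:"address"`
//		} `json:"data"`
//	}
//	if err := json.NewDecoder(resp.Body).Decode(&dc); err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("deposit contract %s on chain %s\n", dc.Data.Address, dc.Data.ChainId)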
|
||||
|
||||
// GetBlockHeaders retrieves block headers matching the given query. By default it fetches blocks for the current head slot.
|
||||
func (s *Server) GetBlockHeaders(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockHeaders")
|
||||
@@ -1747,6 +1731,11 @@ func (s *Server) GetBlockHeaders(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
if len(blks) == 0 {
|
||||
http2.HandleError(w, "No blocks found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
isOptimistic := false
|
||||
isFinalized := true
|
||||
blkHdrs := make([]*shared.SignedBeaconBlockHeaderContainer, len(blks))
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
@@ -27,6 +28,8 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
const broadcastBLSChangesRateLimit = 128
|
||||
|
||||
// ListAttestations retrieves attestations known by the node but
|
||||
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
|
||||
func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -443,3 +446,138 @@ func (s *Server) ListBLSToExecutionChanges(w http.ResponseWriter, r *http.Reques
|
||||
Data: changes,
|
||||
})
|
||||
}
|
||||
|
||||
// GetAttesterSlashings retrieves attester slashings known by the node but
|
||||
// not necessarily incorporated into any block.
|
||||
func (s *Server) GetAttesterSlashings(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetAttesterSlashings")
|
||||
defer span.End()
|
||||
|
||||
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
sourceSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, headState, true /* return unlimited slashings */)
|
||||
slashings := shared.AttesterSlashingsFromConsensus(sourceSlashings)
|
||||
|
||||
http2.WriteJson(w, &GetAttesterSlashingsResponse{Data: slashings})
|
||||
}
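// Illustrative usage sketch, not part of the Prysm source: reading the pool
// through the "/eth/v1/beacon/pool/attester_slashings" route listed earlier in
// this change. The host and port are placeholders for a locally running node.
//
//	resp, err := http.Get("http://localhost:3500/eth/v1/beacon/pool/attester_slashings")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer resp.Body.Close()
//	var out GetAttesterSlashingsResponse
//	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("pool holds %d attester slashings\n", len(out.Data))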
|
||||
|
||||
// SubmitAttesterSlashing submits an attester slashing object to the node's pool and,
|
||||
// if it passes validation, the node MUST broadcast it to the network.
|
||||
func (s *Server) SubmitAttesterSlashing(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
var req shared.AttesterSlashing
|
||||
err := json.NewDecoder(r.Body).Decode(&req)
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
case err != nil:
|
||||
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
slashing, err := req.ToConsensus()
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not convert request slashing to consensus slashing: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
headState, err := s.ChainInfoFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slashing.Attestation_1.Data.Slot)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not process slots: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
err = blocks.VerifyAttesterSlashing(ctx, headState, slashing)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Invalid attester slashing: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
err = s.SlashingsPool.InsertAttesterSlashing(ctx, headState, slashing)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not insert attester slashing into pool: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if !features.Get().DisableBroadcastSlashings {
|
||||
if err = s.Broadcaster.Broadcast(ctx, slashing); err != nil {
|
||||
http2.HandleError(w, "Could not broadcast slashing object: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
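// Illustrative usage sketch, not part of the Prysm source: submitting a
// slashing is a plain JSON POST against the same route. The payload below is
// a placeholder for a shared.AttesterSlashing document; host and port are
// assumptions about the local node.
//
//	payload := bytes.NewBufferString(`{"attestation_1": {...}, "attestation_2": {...}}`)
//	resp, err := http.Post(
//		"http://localhost:3500/eth/v1/beacon/pool/attester_slashings",
//		"application/json",
//		payload,
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer resp.Body.Close()
//	// Expect 200 on success, or 400 with a DefaultErrorJson body on validation failure.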
|
||||
|
||||
// GetProposerSlashings retrieves proposer slashings known by the node
|
||||
// but not necessarily incorporated into any block.
|
||||
func (s *Server) GetProposerSlashings(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetProposerSlashings")
|
||||
defer span.End()
|
||||
|
||||
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
sourceSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, headState, true /* return unlimited slashings */)
|
||||
slashings := shared.ProposerSlashingsFromConsensus(sourceSlashings)
|
||||
|
||||
http2.WriteJson(w, &GetProposerSlashingsResponse{Data: slashings})
|
||||
}
|
||||
|
||||
// SubmitProposerSlashing submits a proposer slashing object to the node's pool and, if
|
||||
// it passes validation, the node MUST broadcast it to the network.
|
||||
func (s *Server) SubmitProposerSlashing(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitProposerSlashing")
|
||||
defer span.End()
|
||||
|
||||
var req shared.ProposerSlashing
|
||||
err := json.NewDecoder(r.Body).Decode(&req)
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
case err != nil:
|
||||
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
slashing, err := req.ToConsensus()
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not convert request slashing to consensus slashing: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
headState, err := s.ChainInfoFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slashing.Header_1.Header.Slot)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not process slots: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
err = blocks.VerifyProposerSlashing(headState, slashing)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Invalid proposer slashing: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
err = s.SlashingsPool.InsertProposerSlashing(ctx, headState, slashing)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not insert proposer slashing into pool: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if !features.Get().DisableBroadcastSlashings {
|
||||
if err = s.Broadcaster.Broadcast(ctx, slashing); err != nil {
|
||||
http2.HandleError(w, "Could not broadcast slashing object: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package beacon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
|
||||
blstoexecmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec/mock"
|
||||
slashingsmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits/mock"
|
||||
p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
@@ -997,6 +999,563 @@ func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAttesterSlashings(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
slashing1 := &ethpbv1alpha1.AttesterSlashing{
|
||||
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1, 10},
|
||||
Data: &ethpbv1alpha1.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
|
||||
Source: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
},
|
||||
Target: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature1"), 96),
|
||||
},
|
||||
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{2, 20},
|
||||
Data: &ethpbv1alpha1.AttestationData{
|
||||
Slot: 2,
|
||||
CommitteeIndex: 2,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
|
||||
Source: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 2,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
|
||||
},
|
||||
Target: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 20,
|
||||
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature2"), 96),
|
||||
},
|
||||
}
|
||||
slashing2 := &ethpbv1alpha1.AttesterSlashing{
|
||||
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{3, 30},
|
||||
Data: &ethpbv1alpha1.AttestationData{
|
||||
Slot: 3,
|
||||
CommitteeIndex: 3,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot3"), 32),
|
||||
Source: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 3,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot3"), 32),
|
||||
},
|
||||
Target: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 30,
|
||||
Root: bytesutil.PadTo([]byte("targetroot3"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature3"), 96),
|
||||
},
|
||||
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{4, 40},
|
||||
Data: &ethpbv1alpha1.AttestationData{
|
||||
Slot: 4,
|
||||
CommitteeIndex: 4,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
|
||||
Source: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 4,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
|
||||
},
|
||||
Target: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 40,
|
||||
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature4"), 96),
|
||||
},
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []*ethpbv1alpha1.AttesterSlashing{slashing1, slashing2}},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/beacon/pool/attester_slashings", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterSlashings(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetAttesterSlashingsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.NotNil(t, resp)
|
||||
require.NotNil(t, resp.Data)
|
||||
assert.Equal(t, 2, len(resp.Data))
|
||||
}
|
||||
|
||||
func TestGetProposerSlashings(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
slashing1 := &ethpbv1alpha1.ProposerSlashing{
|
||||
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: &ethpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature1"), 96),
|
||||
},
|
||||
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: &ethpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 2,
|
||||
ProposerIndex: 2,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature2"), 96),
|
||||
},
|
||||
}
|
||||
slashing2 := &ethpbv1alpha1.ProposerSlashing{
|
||||
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: &ethpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 3,
|
||||
ProposerIndex: 3,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot3"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot3"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot3"), 32),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature3"), 96),
|
||||
},
|
||||
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: &ethpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 4,
|
||||
ProposerIndex: 4,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot4"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot4"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot4"), 32),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature4"), 96),
|
||||
},
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingPropSlashings: []*ethpbv1alpha1.ProposerSlashing{slashing1, slashing2}},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/beacon/pool/proposer_slashings", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerSlashings(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetProposerSlashingsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.NotNil(t, resp)
|
||||
require.NotNil(t, resp.Data)
|
||||
assert.Equal(t, 2, len(resp.Data))
|
||||
}
|
||||
|
||||
func TestSubmitAttesterSlashing_Ok(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
validator := &ethpbv1alpha1.Validator{
|
||||
PublicKey: keys[0].PublicKey().Marshal(),
|
||||
}
|
||||
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
|
||||
state.Validators = []*ethpbv1alpha1.Validator{validator}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
slashing := &ethpbv1alpha1.AttesterSlashing{
|
||||
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: &ethpbv1alpha1.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
|
||||
Source: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
},
|
||||
Target: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: &ethpbv1alpha1.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
|
||||
Source: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
|
||||
},
|
||||
Target: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
|
||||
for _, att := range []*ethpbv1alpha1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
|
||||
sb, err := signing.ComputeDomainAndSign(bs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
att.Signature = sig.Marshal()
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
|
||||
b, err := json.Marshal(toSubmit[0])
|
||||
require.NoError(t, err)
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(b)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttesterSlashing(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
|
||||
require.Equal(t, 1, len(pendingSlashings))
|
||||
assert.DeepEqual(t, slashing, pendingSlashings[0])
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
require.Equal(t, 1, broadcaster.NumMessages())
|
||||
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
bs, keys := util.DeterministicGenesisState(t, 1)
|
||||
|
||||
slashing := &ethpbv1alpha1.AttesterSlashing{
|
||||
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: &ethpbv1alpha1.AttestationData{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
|
||||
Source: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
},
|
||||
Target: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: &ethpbv1alpha1.AttestationData{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
|
||||
Source: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
|
||||
},
|
||||
Target: &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
|
||||
newBs := bs.Copy()
|
||||
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, att := range []*ethpbv1alpha1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
|
||||
sb, err := signing.ComputeDomainAndSign(newBs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
att.Signature = sig.Marshal()
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
|
||||
b, err := json.Marshal(toSubmit[0])
|
||||
require.NoError(t, err)
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(b)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttesterSlashing(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
|
||||
require.Equal(t, 1, len(pendingSlashings))
|
||||
assert.DeepEqual(t, slashing, pendingSlashings[0])
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
require.Equal(t, 1, broadcaster.NumMessages())
|
||||
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString(invalidAttesterSlashing)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttesterSlashing(writer, request)
|
||||
require.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Invalid attester slashing", e.Message)
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_Ok(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
validator := &ethpbv1alpha1.Validator{
|
||||
PublicKey: keys[0].PublicKey().Marshal(),
|
||||
WithdrawableEpoch: primitives.Epoch(1),
|
||||
}
|
||||
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
|
||||
state.Validators = []*ethpbv1alpha1.Validator{validator}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
slashing := &ethpbv1alpha1.ProposerSlashing{
|
||||
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: &ethpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: &ethpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
|
||||
for _, h := range []*ethpbv1alpha1.SignedBeaconBlockHeader{slashing.Header_1, slashing.Header_2} {
|
||||
sb, err := signing.ComputeDomainAndSign(
|
||||
bs,
|
||||
slots.ToEpoch(h.Header.Slot),
|
||||
h.Header,
|
||||
params.BeaconConfig().DomainBeaconProposer,
|
||||
keys[0],
|
||||
)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
h.Signature = sig.Marshal()
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})
|
||||
b, err := json.Marshal(toSubmit[0])
|
||||
require.NoError(t, err)
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(b)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/proposer_slashings", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitProposerSlashing(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
pendingSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, bs, true)
|
||||
require.Equal(t, 1, len(pendingSlashings))
|
||||
assert.DeepEqual(t, slashing, pendingSlashings[0])
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
require.Equal(t, 1, broadcaster.NumMessages())
|
||||
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
bs, keys := util.DeterministicGenesisState(t, 1)
|
||||
|
||||
slashing := &ethpbv1alpha1.ProposerSlashing{
|
||||
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: &ethpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: &ethpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
|
||||
newBs := bs.Copy()
|
||||
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, h := range []*ethpbv1alpha1.SignedBeaconBlockHeader{slashing.Header_1, slashing.Header_2} {
|
||||
sb, err := signing.ComputeDomainAndSign(
|
||||
newBs,
|
||||
slots.ToEpoch(h.Header.Slot),
|
||||
h.Header,
|
||||
params.BeaconConfig().DomainBeaconProposer,
|
||||
keys[0],
|
||||
)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
h.Signature = sig.Marshal()
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})
|
||||
b, err := json.Marshal(toSubmit[0])
|
||||
require.NoError(t, err)
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(b)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/proposer_slashings", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitProposerSlashing(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
pendingSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, bs, true)
|
||||
require.Equal(t, 1, len(pendingSlashings))
|
||||
assert.DeepEqual(t, slashing, pendingSlashings[0])
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
require.Equal(t, 1, broadcaster.NumMessages())
|
||||
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString(invalidProposerSlashing)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/proposer_slashings", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitProposerSlashing(writer, request)
|
||||
require.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Invalid proposer slashing", e.Message)
|
||||
}
|
||||
|
||||
var (
|
||||
singleAtt = `[
|
||||
{
|
||||
@@ -1142,4 +1701,68 @@ var (
|
||||
"signature": "foo"
|
||||
}
|
||||
]`
|
||||
// signatures are invalid
|
||||
invalidAttesterSlashing = `{
|
||||
"attestation_1": {
|
||||
"attesting_indices": [
|
||||
"1"
|
||||
],
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
|
||||
"data": {
|
||||
"slot": "1",
|
||||
"index": "1",
|
||||
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"source": {
|
||||
"epoch": "1",
|
||||
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
},
|
||||
"target": {
|
||||
"epoch": "1",
|
||||
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
}
|
||||
}
|
||||
},
|
||||
"attestation_2": {
|
||||
"attesting_indices": [
|
||||
"1"
|
||||
],
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
|
||||
"data": {
|
||||
"slot": "1",
|
||||
"index": "1",
|
||||
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"source": {
|
||||
"epoch": "1",
|
||||
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
},
|
||||
"target": {
|
||||
"epoch": "1",
|
||||
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
// signatures are invalid
|
||||
invalidProposerSlashing = `{
|
||||
"signed_header_1": {
|
||||
"message": {
|
||||
"slot": "1",
|
||||
"proposer_index": "1",
|
||||
"parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
},
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
},
|
||||
"signed_header_2": {
|
||||
"message": {
|
||||
"slot": "1",
|
||||
"proposer_index": "1",
|
||||
"parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
},
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
}
|
||||
}`
|
||||
)
|
||||
|
||||
@@ -2670,6 +2670,7 @@ func TestGetBlockHeaders(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
bs.GetBlockHeaders(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetBlockHeadersResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
|
||||
@@ -2721,6 +2722,7 @@ func TestGetBlockHeaders(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
bs.GetBlockHeaders(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetBlockHeadersResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
@@ -2764,6 +2766,7 @@ func TestGetBlockHeaders(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
bs.GetBlockHeaders(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetBlockHeadersResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, true, resp.Finalized)
|
||||
@@ -2777,6 +2780,7 @@ func TestGetBlockHeaders(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
bs.GetBlockHeaders(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetBlockHeadersResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
@@ -2789,10 +2793,25 @@ func TestGetBlockHeaders(t *testing.T) {
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
bs.GetBlockHeaders(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetBlockHeadersResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("no blocks found", func(t *testing.T) {
|
||||
urlWithParams := fmt.Sprintf("%s?parent_root=%s", url, hexutil.Encode(bytes.Repeat([]byte{1}, 32)))
|
||||
request := httptest.NewRequest(http.MethodGet, urlWithParams, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
bs.GetBlockHeaders(writer, request)
|
||||
require.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "No blocks found", e.Message)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -3128,23 +3147,3 @@ func TestGetGenesis(t *testing.T) {
|
||||
assert.StringContains(t, "Chain genesis info is not yet known", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetDepositContract(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.DepositChainID = uint64(10)
|
||||
config.DepositContractAddress = "0x4242424242424242424242424242424242424242"
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "/eth/v1/config/deposit_contract", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s := &Server{}
|
||||
s.GetDepositContract(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
response := DepositContractResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &response))
|
||||
assert.Equal(t, "10", response.Data.ChainId)
|
||||
assert.Equal(t, "0x4242424242424242424242424242424242424242", response.Data.Address)
|
||||
}
|
||||
|
||||
@@ -1,129 +0,0 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
const broadcastBLSChangesRateLimit = 128
|
||||
|
||||
// ListPoolAttesterSlashings retrieves attester slashings known by the node but
|
||||
// not necessarily incorporated into any block.
|
||||
func (bs *Server) ListPoolAttesterSlashings(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.AttesterSlashingsPoolResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolAttesterSlashings")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
|
||||
}
|
||||
sourceSlashings := bs.SlashingsPool.PendingAttesterSlashings(ctx, headState, true /* return unlimited slashings */)
|
||||
|
||||
slashings := make([]*ethpbv1.AttesterSlashing, len(sourceSlashings))
|
||||
for i, s := range sourceSlashings {
|
||||
slashings[i] = migration.V1Alpha1AttSlashingToV1(s)
|
||||
}
|
||||
|
||||
return &ethpbv1.AttesterSlashingsPoolResponse{
|
||||
Data: slashings,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SubmitAttesterSlashing submits AttesterSlashing object to node's pool and
|
||||
// if passes validation node MUST broadcast it to network.
|
||||
func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpbv1.AttesterSlashing) (*emptypb.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
|
||||
}
|
||||
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.Attestation_1.Data.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
|
||||
}
|
||||
|
||||
alphaSlashing := migration.V1AttSlashingToV1Alpha1(req)
|
||||
err = blocks.VerifyAttesterSlashing(ctx, headState, alphaSlashing)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Invalid attester slashing: %v", err)
|
||||
}
|
||||
|
||||
err = bs.SlashingsPool.InsertAttesterSlashing(ctx, headState, alphaSlashing)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not insert attester slashing into pool: %v", err)
|
||||
}
|
||||
if !features.Get().DisableBroadcastSlashings {
|
||||
if err := bs.Broadcaster.Broadcast(ctx, alphaSlashing); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast slashing object: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
|
||||
// ListPoolProposerSlashings retrieves proposer slashings known by the node
|
||||
// but not necessarily incorporated into any block.
|
||||
func (bs *Server) ListPoolProposerSlashings(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ProposerSlashingPoolResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolProposerSlashings")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
|
||||
}
|
||||
sourceSlashings := bs.SlashingsPool.PendingProposerSlashings(ctx, headState, true /* return unlimited slashings */)
|
||||
|
||||
slashings := make([]*ethpbv1.ProposerSlashing, len(sourceSlashings))
|
||||
for i, s := range sourceSlashings {
|
||||
slashings[i] = migration.V1Alpha1ProposerSlashingToV1(s)
|
||||
}
|
||||
|
||||
return ðpbv1.ProposerSlashingPoolResponse{
|
||||
Data: slashings,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SubmitProposerSlashing submits a ProposerSlashing object to the node's pool and,
// if it passes validation, the node MUST broadcast it to the network.
|
||||
func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpbv1.ProposerSlashing) (*emptypb.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitProposerSlashing")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
|
||||
}
|
||||
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.SignedHeader_1.Message.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
|
||||
}
|
||||
|
||||
alphaSlashing := migration.V1ProposerSlashingToV1Alpha1(req)
|
||||
err = blocks.VerifyProposerSlashing(headState, alphaSlashing)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Invalid proposer slashing: %v", err)
|
||||
}
|
||||
|
||||
err = bs.SlashingsPool.InsertProposerSlashing(ctx, headState, alphaSlashing)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not insert proposer slashing into pool: %v", err)
|
||||
}
|
||||
if !features.Get().DisableBroadcastSlashings {
|
||||
if err := bs.Broadcaster.Broadcast(ctx, alphaSlashing); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast slashing object: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
@@ -1,558 +0,0 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
slashingsmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock"
|
||||
p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpbv1alpha1 "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
func TestListPoolAttesterSlashings(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
slashing1 := ðpbv1alpha1.AttesterSlashing{
|
||||
Attestation_1: ðpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1, 10},
|
||||
Data: ðpbv1alpha1.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
|
||||
Source: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
},
|
||||
Target: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature1"), 96),
|
||||
},
|
||||
Attestation_2: ðpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{2, 20},
|
||||
Data: ðpbv1alpha1.AttestationData{
|
||||
Slot: 2,
|
||||
CommitteeIndex: 2,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
|
||||
Source: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 2,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
|
||||
},
|
||||
Target: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 20,
|
||||
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature2"), 96),
|
||||
},
|
||||
}
|
||||
slashing2 := ðpbv1alpha1.AttesterSlashing{
|
||||
Attestation_1: ðpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{3, 30},
|
||||
Data: ðpbv1alpha1.AttestationData{
|
||||
Slot: 3,
|
||||
CommitteeIndex: 3,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot3"), 32),
|
||||
Source: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 3,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot3"), 32),
|
||||
},
|
||||
Target: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 30,
|
||||
Root: bytesutil.PadTo([]byte("targetroot3"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature3"), 96),
|
||||
},
|
||||
Attestation_2: ðpbv1alpha1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{4, 40},
|
||||
Data: ðpbv1alpha1.AttestationData{
|
||||
Slot: 4,
|
||||
CommitteeIndex: 4,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
|
||||
Source: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 4,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
|
||||
},
|
||||
Target: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 40,
|
||||
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature4"), 96),
|
||||
},
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []*ethpbv1alpha1.AttesterSlashing{slashing1, slashing2}},
|
||||
}
|
||||
|
||||
resp, err := s.ListPoolAttesterSlashings(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.DeepEqual(t, migration.V1Alpha1AttSlashingToV1(slashing1), resp.Data[0])
|
||||
assert.DeepEqual(t, migration.V1Alpha1AttSlashingToV1(slashing2), resp.Data[1])
|
||||
}
|
||||
|
||||
func TestListPoolProposerSlashings(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
slashing1 := ðpbv1alpha1.ProposerSlashing{
|
||||
Header_1: ðpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: ðpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature1"), 96),
|
||||
},
|
||||
Header_2: ðpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: ðpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 2,
|
||||
ProposerIndex: 2,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature2"), 96),
|
||||
},
|
||||
}
|
||||
slashing2 := ðpbv1alpha1.ProposerSlashing{
|
||||
Header_1: ðpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: ðpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 3,
|
||||
ProposerIndex: 3,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot3"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot3"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot3"), 32),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature3"), 96),
|
||||
},
|
||||
Header_2: ðpbv1alpha1.SignedBeaconBlockHeader{
|
||||
Header: ðpbv1alpha1.BeaconBlockHeader{
|
||||
Slot: 4,
|
||||
ProposerIndex: 4,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot4"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot4"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot4"), 32),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature4"), 96),
|
||||
},
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingPropSlashings: []*ethpbv1alpha1.ProposerSlashing{slashing1, slashing2}},
|
||||
}
|
||||
|
||||
resp, err := s.ListPoolProposerSlashings(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.DeepEqual(t, migration.V1Alpha1ProposerSlashingToV1(slashing1), resp.Data[0])
|
||||
assert.DeepEqual(t, migration.V1Alpha1ProposerSlashingToV1(slashing2), resp.Data[1])
|
||||
}
|
||||
|
||||
func TestSubmitAttesterSlashing_Ok(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
validator := ðpbv1alpha1.Validator{
|
||||
PublicKey: keys[0].PublicKey().Marshal(),
|
||||
}
|
||||
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
|
||||
state.Validators = []*ethpbv1alpha1.Validator{validator}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
slashing := ðpbv1.AttesterSlashing{
|
||||
Attestation_1: ðpbv1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: ðpbv1.AttestationData{
|
||||
Slot: 1,
|
||||
Index: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
|
||||
Source: ðpbv1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
},
|
||||
Target: ðpbv1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Attestation_2: ðpbv1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: ðpbv1.AttestationData{
|
||||
Slot: 1,
|
||||
Index: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
|
||||
Source: ðpbv1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
|
||||
},
|
||||
Target: ðpbv1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
|
||||
for _, att := range []*ethpbv1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
|
||||
sb, err := signing.ComputeDomainAndSign(bs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
att.Signature = sig.Marshal()
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
_, err = s.SubmitAttesterSlashing(ctx, slashing)
|
||||
require.NoError(t, err)
|
||||
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
|
||||
require.Equal(t, 1, len(pendingSlashings))
|
||||
assert.DeepEqual(t, migration.V1AttSlashingToV1Alpha1(slashing), pendingSlashings[0])
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
require.Equal(t, 1, broadcaster.NumMessages())
|
||||
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
bs, keys := util.DeterministicGenesisState(t, 1)
|
||||
|
||||
slashing := ðpbv1.AttesterSlashing{
|
||||
Attestation_1: ðpbv1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: ðpbv1.AttestationData{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
Index: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
|
||||
Source: ðpbv1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
},
|
||||
Target: ðpbv1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Attestation_2: ðpbv1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: ðpbv1.AttestationData{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
Index: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
|
||||
Source: ðpbv1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
|
||||
},
|
||||
Target: ðpbv1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
|
||||
newBs := bs.Copy()
|
||||
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, att := range []*ethpbv1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
|
||||
sb, err := signing.ComputeDomainAndSign(newBs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
att.Signature = sig.Marshal()
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
_, err = s.SubmitAttesterSlashing(ctx, slashing)
|
||||
require.NoError(t, err)
|
||||
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
|
||||
require.Equal(t, 1, len(pendingSlashings))
|
||||
assert.DeepEqual(t, migration.V1AttSlashingToV1Alpha1(slashing), pendingSlashings[0])
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
require.Equal(t, 1, broadcaster.NumMessages())
|
||||
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
attestation := ðpbv1.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: ðpbv1.AttestationData{
|
||||
Slot: 1,
|
||||
Index: 1,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
|
||||
Source: ðpbv1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
},
|
||||
Target: ðpbv1.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
|
||||
slashing := ðpbv1.AttesterSlashing{
|
||||
Attestation_1: attestation,
|
||||
Attestation_2: attestation,
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
_, err = s.SubmitAttesterSlashing(ctx, slashing)
|
||||
require.ErrorContains(t, "Invalid attester slashing", err)
|
||||
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_Ok(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
validator := ðpbv1alpha1.Validator{
|
||||
PublicKey: keys[0].PublicKey().Marshal(),
|
||||
WithdrawableEpoch: primitives.Epoch(1),
|
||||
}
|
||||
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
|
||||
state.Validators = []*ethpbv1alpha1.Validator{validator}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
slashing := ðpbv1.ProposerSlashing{
|
||||
SignedHeader_1: ðpbv1.SignedBeaconBlockHeader{
|
||||
Message: ðpbv1.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
SignedHeader_2: ðpbv1.SignedBeaconBlockHeader{
|
||||
Message: ðpbv1.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
|
||||
for _, h := range []*ethpbv1.SignedBeaconBlockHeader{slashing.SignedHeader_1, slashing.SignedHeader_2} {
|
||||
sb, err := signing.ComputeDomainAndSign(
|
||||
bs,
|
||||
slots.ToEpoch(h.Message.Slot),
|
||||
h.Message,
|
||||
params.BeaconConfig().DomainBeaconProposer,
|
||||
keys[0],
|
||||
)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
h.Signature = sig.Marshal()
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
_, err = s.SubmitProposerSlashing(ctx, slashing)
|
||||
require.NoError(t, err)
|
||||
pendingSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, bs, true)
|
||||
require.Equal(t, 1, len(pendingSlashings))
|
||||
assert.DeepEqual(t, migration.V1ProposerSlashingToV1Alpha1(slashing), pendingSlashings[0])
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
require.Equal(t, 1, broadcaster.NumMessages())
|
||||
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
bs, keys := util.DeterministicGenesisState(t, 1)
|
||||
|
||||
slashing := ðpbv1.ProposerSlashing{
|
||||
SignedHeader_1: ðpbv1.SignedBeaconBlockHeader{
|
||||
Message: ðpbv1.BeaconBlockHeader{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
SignedHeader_2: ðpbv1.SignedBeaconBlockHeader{
|
||||
Message: ðpbv1.BeaconBlockHeader{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
|
||||
newBs := bs.Copy()
|
||||
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, h := range []*ethpbv1.SignedBeaconBlockHeader{slashing.SignedHeader_1, slashing.SignedHeader_2} {
|
||||
sb, err := signing.ComputeDomainAndSign(
|
||||
newBs,
|
||||
slots.ToEpoch(h.Message.Slot),
|
||||
h.Message,
|
||||
params.BeaconConfig().DomainBeaconProposer,
|
||||
keys[0],
|
||||
)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
h.Signature = sig.Marshal()
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
_, err = s.SubmitProposerSlashing(ctx, slashing)
|
||||
require.NoError(t, err)
|
||||
pendingSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, bs, true)
|
||||
require.Equal(t, 1, len(pendingSlashings))
|
||||
assert.DeepEqual(t, migration.V1ProposerSlashingToV1Alpha1(slashing), pendingSlashings[0])
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
require.Equal(t, 1, broadcaster.NumMessages())
|
||||
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
header := ðpbv1.SignedBeaconBlockHeader{
|
||||
Message: ðpbv1.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
|
||||
slashing := ðpbv1.ProposerSlashing{
|
||||
SignedHeader_1: header,
|
||||
SignedHeader_2: header,
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
_, err = s.SubmitProposerSlashing(ctx, slashing)
|
||||
require.ErrorContains(t, "Invalid proposer slashing", err)
|
||||
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
|
||||
}
|
||||
@@ -20,13 +20,6 @@ type GetCommitteesResponse struct {
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type DepositContractResponse struct {
|
||||
Data *struct {
|
||||
ChainId string `json:"chain_id"`
|
||||
Address string `json:"address"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type ListAttestationsResponse struct {
|
||||
Data []*shared.Attestation `json:"data"`
|
||||
}
|
||||
@@ -180,3 +173,11 @@ type SyncCommitteeValidators struct {
|
||||
type BLSToExecutionChangesPoolResponse struct {
|
||||
Data []*shared.SignedBLSToExecutionChange `json:"data"`
|
||||
}
|
||||
|
||||
type GetAttesterSlashingsResponse struct {
|
||||
Data []*shared.AttesterSlashing `json:"data"`
|
||||
}
|
||||
|
||||
type GetProposerSlashingsResponse struct {
|
||||
Data []*shared.ProposerSlashing `json:"data"`
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
|
||||
_, span := trace.StartSpan(r.Context(), "beacon.Blobs")
|
||||
defer span.End()
|
||||
|
||||
var sidecars []*eth.BlobSidecar
|
||||
var sidecars []*eth.DeprecatedBlobSidecar
|
||||
var root []byte
|
||||
|
||||
indices := parseIndices(r.URL)
|
||||
@@ -142,7 +142,7 @@ loop:
|
||||
return indices
|
||||
}
|
||||
|
||||
func buildSidecarsResponse(sidecars []*eth.BlobSidecar) *SidecarsResponse {
|
||||
func buildSidecarsResponse(sidecars []*eth.DeprecatedBlobSidecar) *SidecarsResponse {
|
||||
resp := &SidecarsResponse{Data: make([]*Sidecar, len(sidecars))}
|
||||
for i, sc := range sidecars {
|
||||
resp.Data[i] = &Sidecar{
|
||||
|
||||
@@ -34,7 +34,7 @@ func TestBlobs(t *testing.T) {
|
||||
|
||||
db := testDB.SetupDB(t)
|
||||
blockroot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
require.NoError(t, db.SaveBlobSidecar(context.Background(), []*eth.BlobSidecar{
|
||||
require.NoError(t, db.SaveBlobSidecar(context.Background(), []*eth.DeprecatedBlobSidecar{
|
||||
{
|
||||
BlockRoot: blockroot,
|
||||
Index: 0,
|
||||
@@ -272,7 +272,7 @@ func TestBlobs(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "could not parse block ID"))
|
||||
})
|
||||
t.Run("ssz", func(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlobSidecar(context.Background(), []*eth.BlobSidecar{
|
||||
require.NoError(t, db.SaveBlobSidecar(context.Background(), []*eth.DeprecatedBlobSidecar{
|
||||
{
|
||||
BlockRoot: blockroot,
|
||||
Index: 0,
|
||||
|
||||
35
beacon-chain/rpc/eth/config/BUILD.bazel
Normal file
@@ -0,0 +1,35 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"structs.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["handlers_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,36 +1,48 @@
|
||||
package beacon
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// GetDepositContract retrieves the deposit contract address and chain ID.
|
||||
func GetDepositContract(w http.ResponseWriter, r *http.Request) {
|
||||
_, span := trace.StartSpan(r.Context(), "config.GetDepositContract")
|
||||
defer span.End()
|
||||
|
||||
http2.WriteJson(w, &GetDepositContractResponse{
|
||||
Data: &DepositContractData{
|
||||
ChainId: strconv.FormatUint(params.BeaconConfig().DepositChainID, 10),
|
||||
Address: params.BeaconConfig().DepositContractAddress,
|
||||
},
|
||||
})
|
||||
}
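
Once the route is wired up, the migrated endpoint can be exercised with a plain HTTP client. A minimal sketch, assuming a local beacon node serves the REST API at localhost:3500 (the address and route registration are outside this diff):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
)

func main() {
	// The address is an assumption about the local node's REST gateway.
	resp, err := http.Get("http://localhost:3500/eth/v1/config/deposit_contract")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	dc := &config.GetDepositContractResponse{}
	if err := json.NewDecoder(resp.Body).Decode(dc); err != nil {
		log.Fatal(err)
	}
	fmt.Println("chain id:", dc.Data.ChainId, "address:", dc.Data.Address)
}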
|
||||
|
||||
// GetForkSchedule retrieves all scheduled upcoming forks this node is aware of.
|
||||
func (_ *Server) GetForkSchedule(ctx context.Context, _ *emptypb.Empty) (*ethpb.ForkScheduleResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetForkSchedule")
|
||||
func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
|
||||
_, span := trace.StartSpan(r.Context(), "config.GetForkSchedule")
|
||||
defer span.End()
|
||||
|
||||
schedule := params.BeaconConfig().ForkVersionSchedule
|
||||
if len(schedule) == 0 {
|
||||
return ðpb.ForkScheduleResponse{
|
||||
Data: make([]*ethpb.Fork, 0),
|
||||
}, nil
|
||||
http2.WriteJson(w, &GetForkScheduleResponse{
|
||||
Data: make([]*shared.Fork, 0),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
versions := forks.SortedForkVersions(schedule)
|
||||
chainForks := make([]*ethpb.Fork, len(schedule))
|
||||
chainForks := make([]*shared.Fork, len(schedule))
|
||||
var previous, current []byte
|
||||
for i, v := range versions {
|
||||
if i == 0 {
|
||||
@@ -40,31 +52,32 @@ func (_ *Server) GetForkSchedule(ctx context.Context, _ *emptypb.Empty) (*ethpb.
|
||||
}
|
||||
copyV := v
|
||||
current = copyV[:]
|
||||
chainForks[i] = ðpb.Fork{
|
||||
PreviousVersion: previous,
|
||||
CurrentVersion: current,
|
||||
Epoch: schedule[v],
|
||||
chainForks[i] = &shared.Fork{
|
||||
PreviousVersion: hexutil.Encode(previous),
|
||||
CurrentVersion: hexutil.Encode(current),
|
||||
Epoch: fmt.Sprintf("%d", schedule[v]),
|
||||
}
|
||||
}
|
||||
|
||||
return ðpb.ForkScheduleResponse{
|
||||
http2.WriteJson(w, &GetForkScheduleResponse{
|
||||
Data: chainForks,
|
||||
}, nil
|
||||
})
|
||||
}
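
The pairing logic above walks the schedule in epoch order, using the genesis fork version as the first entry's previous version and hex-encoding each version for the JSON response. A standalone sketch of that pairing with an invented schedule, not Prysm code:

package main

import (
	"fmt"
	"sort"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// Hypothetical schedule: fork version -> activation epoch (values invented for illustration).
	genesis := [4]byte{0x00, 0x00, 0x00, 0x00}
	schedule := map[[4]byte]uint64{
		genesis:                  0,
		{0x01, 0x00, 0x00, 0x00}: 100,
		{0x02, 0x00, 0x00, 0x00}: 200,
	}

	// Sort versions by activation epoch, mirroring what forks.SortedForkVersions provides.
	versions := make([][4]byte, 0, len(schedule))
	for v := range schedule {
		versions = append(versions, v)
	}
	sort.Slice(versions, func(i, j int) bool { return schedule[versions[i]] < schedule[versions[j]] })

	previous := genesis[:]
	for _, v := range versions {
		cv := v // copy, so the slice below does not alias the loop variable
		current := cv[:]
		fmt.Printf("previous=%s current=%s epoch=%d\n",
			hexutil.Encode(previous), hexutil.Encode(current), schedule[v])
		previous = current
	}
}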
|
||||
|
||||
// GetSpec retrieves the specification configuration (without Phase 1 params) used on this node.
// Values are returned in the following format:
// - any value starting with 0x in the spec is returned as a hex string.
// - all other values are returned as a number.
|
||||
func (_ *Server) GetSpec(ctx context.Context, _ *emptypb.Empty) (*ethpb.SpecResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetSpec")
|
||||
func GetSpec(w http.ResponseWriter, r *http.Request) {
|
||||
_, span := trace.StartSpan(r.Context(), "config.GetSpec")
|
||||
defer span.End()
|
||||
|
||||
data, err := prepareConfigSpec()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Failed to prepare spec data: %v", err)
|
||||
http2.HandleError(w, "Could not prepare config spec: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
return ðpb.SpecResponse{Data: data}, nil
|
||||
http2.WriteJson(w, &GetSpecResponse{Data: data})
|
||||
}
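
Because GetSpecResponse.Data is an interface{}, a client that reuses the struct gets a map[string]interface{} of string values after JSON decoding, as the updated test later in this diff also asserts. A minimal sketch (the node address and the presence of CONFIG_NAME are assumptions):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
)

func main() {
	resp, err := http.Get("http://localhost:3500/eth/v1/config/spec")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	spec := &config.GetSpecResponse{}
	if err := json.NewDecoder(resp.Body).Decode(spec); err != nil {
		log.Fatal(err)
	}
	// Data is interface{}; after JSON decoding it holds a map[string]interface{} of string values.
	values, ok := spec.Data.(map[string]interface{})
	if !ok {
		log.Fatal("unexpected spec payload shape")
	}
	fmt.Println("CONFIG_NAME:", values["CONFIG_NAME"])
}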
|
||||
|
||||
func prepareConfigSpec() (map[string]string, error) {
|
||||
@@ -1,20 +1,43 @@
|
||||
package beacon
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
func TestGetDepositContract(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.DepositChainID = uint64(10)
|
||||
config.DepositContractAddress = "0x4242424242424242424242424242424242424242"
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/deposit_contract", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
GetDepositContract(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
response := GetDepositContractResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &response))
|
||||
assert.Equal(t, "10", response.Data.ChainId)
|
||||
assert.Equal(t, "0x4242424242424242424242424242424242424242", response.Data.Address)
|
||||
}
|
||||
|
||||
func TestGetSpec(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
@@ -137,12 +160,19 @@ func TestGetSpec(t *testing.T) {
|
||||
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
server := &Server{}
|
||||
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/spec", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
assert.Equal(t, 112, len(resp.Data))
|
||||
for k, v := range resp.Data {
|
||||
GetSpec(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := GetSpecResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||
data, ok := resp.Data.(map[string]interface{})
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, 112, len(data))
|
||||
for k, v := range data {
|
||||
switch k {
|
||||
case "CONFIG_NAME":
|
||||
assert.Equal(t, "ConfigName", v)
|
||||
@@ -353,7 +383,9 @@ func TestGetSpec(t *testing.T) {
|
||||
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
|
||||
assert.Equal(t, "72", v)
|
||||
case "TERMINAL_BLOCK_HASH":
|
||||
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(v))
|
||||
s, ok := v.(string)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(s))
|
||||
case "TERMINAL_TOTAL_DIFFICULTY":
|
||||
assert.Equal(t, "73", v)
|
||||
case "DefaultFeeRecipient":
|
||||
@@ -390,44 +422,55 @@ func TestGetSpec(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestForkSchedule_Ok(t *testing.T) {
|
||||
genesisForkVersion := []byte("Genesis")
|
||||
firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
|
||||
secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
|
||||
thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
genesisForkVersion := []byte("Genesis")
|
||||
firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
|
||||
secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
|
||||
thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisForkVersion = genesisForkVersion
|
||||
// Create fork schedule adding keys in non-sorted order.
|
||||
schedule := make(map[[4]byte]primitives.Epoch, 3)
|
||||
schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
|
||||
schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
|
||||
schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
|
||||
config.ForkVersionSchedule = schedule
|
||||
params.OverrideBeaconConfig(config)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisForkVersion = genesisForkVersion
|
||||
// Create fork schedule adding keys in non-sorted order.
|
||||
schedule := make(map[[4]byte]primitives.Epoch, 3)
|
||||
schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
|
||||
schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
|
||||
schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
|
||||
config.ForkVersionSchedule = schedule
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
s := &Server{}
|
||||
resp, err := s.GetForkSchedule(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(resp.Data))
|
||||
fork := resp.Data[0]
|
||||
assert.DeepEqual(t, genesisForkVersion, fork.PreviousVersion)
|
||||
assert.DeepEqual(t, string(firstForkVersion), string(fork.CurrentVersion))
|
||||
assert.Equal(t, firstForkEpoch, fork.Epoch)
|
||||
fork = resp.Data[1]
|
||||
assert.DeepEqual(t, firstForkVersion, fork.PreviousVersion)
|
||||
assert.DeepEqual(t, secondForkVersion, fork.CurrentVersion)
|
||||
assert.Equal(t, secondForkEpoch, fork.Epoch)
|
||||
fork = resp.Data[2]
|
||||
assert.DeepEqual(t, secondForkVersion, fork.PreviousVersion)
|
||||
assert.DeepEqual(t, thirdForkVersion, fork.CurrentVersion)
|
||||
assert.Equal(t, thirdForkEpoch, fork.Epoch)
|
||||
}
|
||||
|
||||
func TestForkSchedule_CorrectNumberOfForks(t *testing.T) {
|
||||
s := &Server{}
|
||||
resp, err := s.GetForkSchedule(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
os := forks.NewOrderedSchedule(params.BeaconConfig())
|
||||
assert.Equal(t, os.Len(), len(resp.Data))
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
GetForkSchedule(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetForkScheduleResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 3, len(resp.Data))
|
||||
fork := resp.Data[0]
|
||||
assert.DeepEqual(t, hexutil.Encode(genesisForkVersion), fork.PreviousVersion)
|
||||
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.CurrentVersion)
|
||||
assert.Equal(t, fmt.Sprintf("%d", firstForkEpoch), fork.Epoch)
|
||||
fork = resp.Data[1]
|
||||
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.PreviousVersion)
|
||||
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.CurrentVersion)
|
||||
assert.Equal(t, fmt.Sprintf("%d", secondForkEpoch), fork.Epoch)
|
||||
fork = resp.Data[2]
|
||||
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.PreviousVersion)
|
||||
assert.DeepEqual(t, hexutil.Encode(thirdForkVersion), fork.CurrentVersion)
|
||||
assert.Equal(t, fmt.Sprintf("%d", thirdForkEpoch), fork.Epoch)
|
||||
})
|
||||
t.Run("correct number of forks", func(t *testing.T) {
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
GetForkSchedule(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetForkScheduleResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
os := forks.NewOrderedSchedule(params.BeaconConfig())
|
||||
assert.Equal(t, os.Len(), len(resp.Data))
|
||||
})
|
||||
}
|
||||
20
beacon-chain/rpc/eth/config/structs.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package config
|
||||
|
||||
import "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
|
||||
type GetDepositContractResponse struct {
|
||||
Data *DepositContractData `json:"data"`
|
||||
}
|
||||
|
||||
type DepositContractData struct {
|
||||
ChainId string `json:"chain_id"`
|
||||
Address string `json:"address"`
|
||||
}
|
||||
|
||||
type GetForkScheduleResponse struct {
|
||||
Data []*shared.Fork `json:"data"`
|
||||
}
|
||||
|
||||
type GetSpecResponse struct {
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"debug.go",
|
||||
"handlers.go",
|
||||
"server.go",
|
||||
"structs.go",
|
||||
@@ -20,26 +19,18 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"debug_test.go",
|
||||
"handlers_test.go",
|
||||
],
|
||||
srcs = ["handlers_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
@@ -48,14 +39,12 @@ go_test(
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_golang_protobuf//ptypes/empty",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
package debug
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// ListForkChoiceHeadsV2 retrieves the leaves of the current fork choice tree.
|
||||
func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (*ethpbv2.ForkChoiceHeadsResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeadsV2")
|
||||
defer span.End()
|
||||
|
||||
headRoots, headSlots := ds.HeadFetcher.ChainHeads()
|
||||
resp := ðpbv2.ForkChoiceHeadsResponse{
|
||||
Data: make([]*ethpbv2.ForkChoiceHead, len(headRoots)),
|
||||
}
|
||||
for i := range headRoots {
|
||||
isOptimistic, err := ds.OptimisticModeFetcher.IsOptimisticForRoot(ctx, headRoots[i])
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if head is optimistic: %v", err)
|
||||
}
|
||||
resp.Data[i] = ðpbv2.ForkChoiceHead{
|
||||
Root: headRoots[i][:],
|
||||
Slot: headSlots[i],
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetForkChoice returns a dump of the fork choice store.
|
||||
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceDump, error) {
|
||||
return ds.ForkchoiceFetcher.ForkChoiceDump(ctx)
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
package debug
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
func TestListForkChoiceHeadsV2(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
expectedSlotsAndRoots := []struct {
|
||||
Slot primitives.Slot
|
||||
Root [32]byte
|
||||
}{{
|
||||
Slot: 0,
|
||||
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("foo"), 32)),
|
||||
}, {
|
||||
Slot: 1,
|
||||
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
|
||||
}}
|
||||
|
||||
chainService := &blockchainmock.ChainService{}
|
||||
server := &Server{
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(resp.Data))
|
||||
for _, sr := range expectedSlotsAndRoots {
|
||||
found := false
|
||||
for _, h := range resp.Data {
|
||||
if h.Slot == sr.Slot {
|
||||
found = true
|
||||
assert.DeepEqual(t, sr.Root[:], h.Root)
|
||||
}
|
||||
assert.Equal(t, false, h.ExecutionOptimistic)
|
||||
}
|
||||
assert.Equal(t, true, found, "Expected head not found")
|
||||
}
|
||||
|
||||
t.Run("optimistic head", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{
|
||||
Optimistic: true,
|
||||
OptimisticRoots: make(map[[32]byte]bool),
|
||||
}
|
||||
for _, sr := range expectedSlotsAndRoots {
|
||||
chainService.OptimisticRoots[sr.Root] = true
|
||||
}
|
||||
server := &Server{
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(resp.Data))
|
||||
for _, sr := range expectedSlotsAndRoots {
|
||||
found := false
|
||||
for _, h := range resp.Data {
|
||||
if h.Slot == sr.Slot {
|
||||
found = true
|
||||
assert.DeepEqual(t, sr.Root[:], h.Root)
|
||||
}
|
||||
assert.Equal(t, true, h.ExecutionOptimistic)
|
||||
}
|
||||
assert.Equal(t, true, found, "Expected head not found")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_GetForkChoice(t *testing.T) {
|
||||
store := doublylinkedtree.New()
|
||||
fRoot := [32]byte{'a'}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: fRoot}
|
||||
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
|
||||
bs := &Server{ForkchoiceFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
|
||||
res, err := bs.GetForkChoice(context.Background(), &empty.Empty{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(2), res.FinalizedCheckpoint.Epoch, "Did not get wanted finalized epoch")
|
||||
}
|
||||
@@ -3,8 +3,10 @@ package debug
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
@@ -147,3 +149,74 @@ func (s *Server) getBeaconStateSSZV2(ctx context.Context, w http.ResponseWriter,
|
||||
w.Header().Set(api.VersionHeader, version.String(st.Version()))
|
||||
http2.WriteSsz(w, sszState, "beacon_state.ssz")
|
||||
}
|
||||
|
||||
// GetForkChoiceHeadsV2 retrieves the leaves of the current fork choice tree.
|
||||
func (s *Server) GetForkChoiceHeadsV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "debug.GetForkChoiceHeadsV2")
|
||||
defer span.End()
|
||||
|
||||
headRoots, headSlots := s.HeadFetcher.ChainHeads()
|
||||
resp := &GetForkChoiceHeadsV2Response{
|
||||
Data: make([]*ForkChoiceHead, len(headRoots)),
|
||||
}
|
||||
for i := range headRoots {
|
||||
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, headRoots[i])
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not check if head is optimistic: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp.Data[i] = &ForkChoiceHead{
|
||||
Root: hexutil.Encode(headRoots[i][:]),
|
||||
Slot: fmt.Sprintf("%d", headSlots[i]),
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}
|
||||
}
|
||||
|
||||
http2.WriteJson(w, resp)
|
||||
}
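
Compared with the removed gRPC method, the HTTP response encodes roots as hex strings and slots as decimal strings. A small client sketch that converts them back to native types (the node address is an assumption; the local structs only mirror the JSON shape so the example stays self-contained):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strconv"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)

type forkChoiceHead struct {
	Root                string `json:"root"`
	Slot                string `json:"slot"`
	ExecutionOptimistic bool   `json:"execution_optimistic"`
}

type forkChoiceHeadsResponse struct {
	Data []*forkChoiceHead `json:"data"`
}

func main() {
	resp, err := http.Get("http://localhost:3500/eth/v2/debug/beacon/heads")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	heads := &forkChoiceHeadsResponse{}
	if err := json.NewDecoder(resp.Body).Decode(heads); err != nil {
		log.Fatal(err)
	}
	for _, h := range heads.Data {
		// Roots are hex strings and slots decimal strings in the HTTP API.
		rootBytes, err := hexutil.Decode(h.Root)
		if err != nil {
			log.Fatal(err)
		}
		root := bytesutil.ToBytes32(rootBytes)
		slot, err := strconv.ParseUint(h.Slot, 10, 64)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("head %#x at slot %d (optimistic=%t)\n", root, slot, h.ExecutionOptimistic)
	}
}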
|
||||
|
||||
// GetForkChoice returns a dump of the fork choice store.
|
||||
func (s *Server) GetForkChoice(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "debug.GetForkChoice")
|
||||
defer span.End()
|
||||
|
||||
dump, err := s.ForkchoiceFetcher.ForkChoiceDump(ctx)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get forkchoice dump: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
nodes := make([]*ForkChoiceNode, len(dump.ForkChoiceNodes))
|
||||
for i, n := range dump.ForkChoiceNodes {
|
||||
nodes[i] = &ForkChoiceNode{
|
||||
Slot: fmt.Sprintf("%d", n.Slot),
|
||||
BlockRoot: hexutil.Encode(n.BlockRoot),
|
||||
ParentRoot: hexutil.Encode(n.ParentRoot),
|
||||
JustifiedEpoch: fmt.Sprintf("%d", n.JustifiedEpoch),
|
||||
FinalizedEpoch: fmt.Sprintf("%d", n.FinalizedEpoch),
|
||||
Weight: fmt.Sprintf("%d", n.Weight),
|
||||
ExecutionBlockHash: hexutil.Encode(n.ExecutionBlockHash),
|
||||
Validity: n.Validity.String(),
|
||||
ExtraData: &ForkChoiceNodeExtraData{
|
||||
UnrealizedJustifiedEpoch: fmt.Sprintf("%d", n.UnrealizedJustifiedEpoch),
|
||||
UnrealizedFinalizedEpoch: fmt.Sprintf("%d", n.UnrealizedFinalizedEpoch),
|
||||
Balance: fmt.Sprintf("%d", n.Balance),
|
||||
ExecutionOptimistic: n.ExecutionOptimistic,
|
||||
TimeStamp: fmt.Sprintf("%d", n.Timestamp),
|
||||
},
|
||||
}
|
||||
}
|
||||
resp := &GetForkChoiceDumpResponse{
|
||||
JustifiedCheckpoint: shared.CheckpointFromConsensus(dump.JustifiedCheckpoint),
|
||||
FinalizedCheckpoint: shared.CheckpointFromConsensus(dump.FinalizedCheckpoint),
|
||||
ForkChoiceNodes: nodes,
|
||||
ExtraData: &ForkChoiceDumpExtraData{
|
||||
UnrealizedJustifiedCheckpoint: shared.CheckpointFromConsensus(dump.UnrealizedJustifiedCheckpoint),
|
||||
UnrealizedFinalizedCheckpoint: shared.CheckpointFromConsensus(dump.UnrealizedFinalizedCheckpoint),
|
||||
ProposerBoostRoot: hexutil.Encode(dump.ProposerBoostRoot),
|
||||
PreviousProposerBoostRoot: hexutil.Encode(dump.PreviousProposerBoostRoot),
|
||||
HeadRoot: hexutil.Encode(dump.HeadRoot),
|
||||
},
|
||||
}
|
||||
http2.WriteJson(w, resp)
|
||||
}
|
||||
|
||||
@@ -8,11 +8,15 @@ import (
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
@@ -381,3 +385,98 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetForkChoiceHeadsV2(t *testing.T) {
|
||||
expectedSlotsAndRoots := []struct {
|
||||
Slot string
|
||||
Root string
|
||||
}{{
|
||||
Slot: "0",
|
||||
Root: hexutil.Encode(bytesutil.PadTo([]byte("foo"), 32)),
|
||||
}, {
|
||||
Slot: "1",
|
||||
Root: hexutil.Encode(bytesutil.PadTo([]byte("bar"), 32)),
|
||||
}}
|
||||
|
||||
chainService := &blockchainmock.ChainService{}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/heads", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetForkChoiceHeadsV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetForkChoiceHeadsV2Response{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, 2, len(resp.Data))
|
||||
for _, sr := range expectedSlotsAndRoots {
|
||||
found := false
|
||||
for _, h := range resp.Data {
|
||||
if h.Slot == sr.Slot {
|
||||
found = true
|
||||
assert.Equal(t, sr.Root, h.Root)
|
||||
}
|
||||
assert.Equal(t, false, h.ExecutionOptimistic)
|
||||
}
|
||||
assert.Equal(t, true, found, "Expected head not found")
|
||||
}
|
||||
|
||||
t.Run("optimistic head", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{
|
||||
Optimistic: true,
|
||||
OptimisticRoots: make(map[[32]byte]bool),
|
||||
}
|
||||
for _, sr := range expectedSlotsAndRoots {
|
||||
b, err := hexutil.Decode(sr.Root)
|
||||
require.NoError(t, err)
|
||||
chainService.OptimisticRoots[bytesutil.ToBytes32(b)] = true
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/heads", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetForkChoiceHeadsV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetForkChoiceHeadsV2Response{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, 2, len(resp.Data))
|
||||
for _, sr := range expectedSlotsAndRoots {
|
||||
found := false
|
||||
for _, h := range resp.Data {
|
||||
if h.Slot == sr.Slot {
|
||||
found = true
|
||||
assert.Equal(t, sr.Root, h.Root)
|
||||
}
|
||||
assert.Equal(t, true, h.ExecutionOptimistic)
|
||||
}
|
||||
assert.Equal(t, true, found, "Expected head not found")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetForkChoice(t *testing.T) {
|
||||
store := doublylinkedtree.New()
|
||||
fRoot := [32]byte{'a'}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: fRoot}
|
||||
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
|
||||
s := &Server{ForkchoiceFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/fork_choice", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetForkChoice(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetForkChoiceDumpResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, "2", resp.FinalizedCheckpoint.Epoch)
|
||||
}
|
||||
|
@@ -963,3 +963,48 @@ func HistoricalSummaryFromConsensus(summary *eth.HistoricalSummary) (*Historical
		StateSummaryRoot: hexutil.Encode(summary.StateSummaryRoot),
	}, nil
}

type GetForkChoiceHeadsV2Response struct {
	Data []*ForkChoiceHead `json:"data"`
}

type ForkChoiceHead struct {
	Root                string `json:"root"`
	Slot                string `json:"slot"`
	ExecutionOptimistic bool   `json:"execution_optimistic"`
}

type GetForkChoiceDumpResponse struct {
	JustifiedCheckpoint *shared.Checkpoint       `json:"justified_checkpoint"`
	FinalizedCheckpoint *shared.Checkpoint       `json:"finalized_checkpoint"`
	ForkChoiceNodes     []*ForkChoiceNode        `json:"fork_choice_nodes"`
	ExtraData           *ForkChoiceDumpExtraData `json:"extra_data"`
}

type ForkChoiceDumpExtraData struct {
	UnrealizedJustifiedCheckpoint *shared.Checkpoint `json:"unrealized_justified_checkpoint"`
	UnrealizedFinalizedCheckpoint *shared.Checkpoint `json:"unrealized_finalized_checkpoint"`
	ProposerBoostRoot             string             `json:"proposer_boost_root"`
	PreviousProposerBoostRoot     string             `json:"previous_proposer_boost_root"`
	HeadRoot                      string             `json:"head_root"`
}

type ForkChoiceNode struct {
	Slot               string                   `json:"slot"`
	BlockRoot          string                   `json:"block_root"`
	ParentRoot         string                   `json:"parent_root"`
	JustifiedEpoch     string                   `json:"justified_epoch"`
	FinalizedEpoch     string                   `json:"finalized_epoch"`
	Weight             string                   `json:"weight"`
	Validity           string                   `json:"validity"`
	ExecutionBlockHash string                   `json:"execution_block_hash"`
	ExtraData          *ForkChoiceNodeExtraData `json:"extra_data"`
}

type ForkChoiceNodeExtraData struct {
	UnrealizedJustifiedEpoch string `json:"unrealized_justified_epoch"`
	UnrealizedFinalizedEpoch string `json:"unrealized_finalized_epoch"`
	Balance                  string `json:"balance"`
	ExecutionOptimistic      bool   `json:"execution_optimistic"`
	TimeStamp                string `json:"timestamp"`
}

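For orientation, here is a minimal client-side sketch (not part of the diff) of consuming the heads endpoint exercised by the tests above. It uses local copies of the response structs so it is self-contained; the host and port are placeholders, and all numeric fields arrive as strings per the JSON tags shown above.

```go
// Minimal sketch: fetch fork choice heads and decode them into local copies of the
// response structs defined above. Host and port are placeholders, not taken from the diff.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type forkChoiceHead struct {
	Root                string `json:"root"`
	Slot                string `json:"slot"`
	ExecutionOptimistic bool   `json:"execution_optimistic"`
}

type headsResponse struct {
	Data []*forkChoiceHead `json:"data"`
}

func main() {
	resp, err := http.Get("http://localhost:3500/eth/v2/debug/beacon/heads") // placeholder base URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var heads headsResponse
	if err := json.NewDecoder(resp.Body).Decode(&heads); err != nil {
		panic(err)
	}
	for _, h := range heads.Data {
		fmt.Printf("slot=%s root=%s optimistic=%t\n", h.Slot, h.Root, h.ExecutionOptimistic)
	}
}
```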
@@ -242,7 +242,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
	commitment, err := hexutil.Decode("0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8000")
	require.NoError(t, err)
	wantedBlobV1alpha1 := &eth.SignedBlobSidecar{
-		Message: &eth.BlobSidecar{
+		Message: &eth.DeprecatedBlobSidecar{
			BlockRoot: make([]byte, fieldparams.RootLength),
			Index:     1,
			Slot:      3,

@@ -2,9 +2,12 @@ package shared

import (
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

@@ -314,11 +317,39 @@ type ProposerSlashing struct {
	SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2" validate:"required"`
}

func (s *ProposerSlashing) ToConsensus() (*eth.ProposerSlashing, error) {
	h1, err := s.SignedHeader1.ToConsensus()
	if err != nil {
		return nil, NewDecodeError(err, "SignedHeader1")
	}
	h2, err := s.SignedHeader2.ToConsensus()
	if err != nil {
		return nil, NewDecodeError(err, "SignedHeader2")
	}

	return &eth.ProposerSlashing{
		Header_1: h1,
		Header_2: h2,
	}, nil
}

type AttesterSlashing struct {
	Attestation1 *IndexedAttestation `json:"attestation_1" validate:"required"`
	Attestation2 *IndexedAttestation `json:"attestation_2" validate:"required"`
}

func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
	att1, err := s.Attestation1.ToConsensus()
	if err != nil {
		return nil, NewDecodeError(err, "Attestation1")
	}
	att2, err := s.Attestation2.ToConsensus()
	if err != nil {
		return nil, NewDecodeError(err, "Attestation2")
	}
	return &eth.AttesterSlashing{Attestation_1: att1, Attestation_2: att2}, nil
}

type Deposit struct {
	Proof []string     `json:"proof" validate:"required,dive,hexadecimal"`
	Data  *DepositData `json:"data" validate:"required"`
@@ -342,6 +373,22 @@ type SignedBeaconBlockHeader struct {
	Signature string `json:"signature" validate:"required"`
}

func (h *SignedBeaconBlockHeader) ToConsensus() (*eth.SignedBeaconBlockHeader, error) {
	msg, err := h.Message.ToConsensus()
	if err != nil {
		return nil, NewDecodeError(err, "Message")
	}
	sig, err := DecodeHexWithLength(h.Signature, fieldparams.BLSSignatureLength)
	if err != nil {
		return nil, NewDecodeError(err, "Signature")
	}

	return &eth.SignedBeaconBlockHeader{
		Header:    msg,
		Signature: sig,
	}, nil
}

type BeaconBlockHeader struct {
	Slot          string `json:"slot" validate:"required"`
	ProposerIndex string `json:"proposer_index" validate:"required"`
@@ -350,12 +397,68 @@ type BeaconBlockHeader struct {
	BodyRoot string `json:"body_root" validate:"required"`
}

func (h *BeaconBlockHeader) ToConsensus() (*eth.BeaconBlockHeader, error) {
	s, err := strconv.ParseUint(h.Slot, 10, 64)
	if err != nil {
		return nil, NewDecodeError(err, "Slot")
	}
	pi, err := strconv.ParseUint(h.ProposerIndex, 10, 64)
	if err != nil {
		return nil, NewDecodeError(err, "ProposerIndex")
	}
	pr, err := DecodeHexWithLength(h.ParentRoot, fieldparams.RootLength)
	if err != nil {
		return nil, NewDecodeError(err, "ParentRoot")
	}
	sr, err := DecodeHexWithLength(h.StateRoot, fieldparams.RootLength)
	if err != nil {
		return nil, NewDecodeError(err, "StateRoot")
	}
	br, err := DecodeHexWithLength(h.BodyRoot, fieldparams.RootLength)
	if err != nil {
		return nil, NewDecodeError(err, "BodyRoot")
	}

	return &eth.BeaconBlockHeader{
		Slot:          primitives.Slot(s),
		ProposerIndex: primitives.ValidatorIndex(pi),
		ParentRoot:    pr,
		StateRoot:     sr,
		BodyRoot:      br,
	}, nil
}

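As a usage illustration (not part of the diff), the sketch below unmarshals a JSON signed header and runs it through the ToConsensus conversion defined above. The `shared` import path is taken from the test files elsewhere in this comparison; the `message`/`signature` JSON field names follow the Beacon API convention assumed by these structs, and every value is a dummy of the expected length.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
)

func main() {
	// Illustrative input only: zero-filled 32-byte roots and a zero-filled 96-byte signature.
	raw := `{
		"message": {
			"slot": "5",
			"proposer_index": "7",
			"parent_root": "0x` + strings.Repeat("00", 32) + `",
			"state_root": "0x` + strings.Repeat("00", 32) + `",
			"body_root": "0x` + strings.Repeat("00", 32) + `"
		},
		"signature": "0x` + strings.Repeat("00", 96) + `"
	}`

	var h shared.SignedBeaconBlockHeader
	if err := json.Unmarshal([]byte(raw), &h); err != nil {
		panic(err)
	}
	// ToConsensus parses the decimal strings and length-checks the hex fields,
	// wrapping any failure in a decode error that names the offending field.
	consensusHeader, err := h.ToConsensus()
	if err != nil {
		panic(err)
	}
	fmt.Println(consensusHeader.Header.Slot) // 5
}
```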
type IndexedAttestation struct {
	AttestingIndices []string         `json:"attesting_indices" validate:"required,dive"`
	Data             *AttestationData `json:"data" validate:"required"`
	Signature        string           `json:"signature" validate:"required"`
}

func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
	indices := make([]uint64, len(a.AttestingIndices))
	var err error
	for i, ix := range a.AttestingIndices {
		indices[i], err = strconv.ParseUint(ix, 10, 64)
		if err != nil {
			return nil, NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
		}
	}
	data, err := a.Data.ToConsensus()
	if err != nil {
		return nil, NewDecodeError(err, "Data")
	}
	sig, err := DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
	if err != nil {
		return nil, NewDecodeError(err, "Signature")
	}

	return &eth.IndexedAttestation{
		AttestingIndices: indices,
		Data:             data,
		Signature:        sig,
	}, nil
}

type SyncAggregate struct {
	SyncCommitteeBits      string `json:"sync_committee_bits" validate:"required"`
	SyncCommitteeSignature string `json:"sync_committee_signature" validate:"required"`

@@ -1218,13 +1218,13 @@ func (b *BeaconBlockContentsDeneb) ToConsensus() (*eth.BeaconBlockAndBlobsDeneb,
		return nil, errNilValue
	}

-	var blobSidecars []*eth.BlobSidecar
+	var blobSidecars []*eth.DeprecatedBlobSidecar
	if len(b.BlobSidecars) != 0 {
		err := VerifyMaxLength(b.BlobSidecars, fieldparams.MaxBlobsPerBlock)
		if err != nil {
			return nil, NewDecodeError(err, "BlobSidecars")
		}
-		blobSidecars = make([]*eth.BlobSidecar, len(b.BlobSidecars))
+		blobSidecars = make([]*eth.DeprecatedBlobSidecar, len(b.BlobSidecars))
		for i := range b.BlobSidecars {
			blob, err := b.BlobSidecars[i].ToConsensus()
			if err != nil {
||||
@@ -1564,7 +1564,7 @@ func (b *BeaconBlockDeneb) ToConsensus() (*eth.BeaconBlockDeneb, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *BlobSidecar) ToConsensus() (*eth.BlobSidecar, error) {
|
||||
func (s *BlobSidecar) ToConsensus() (*eth.DeprecatedBlobSidecar, error) {
|
||||
if s == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
@@ -1600,7 +1600,7 @@ func (s *BlobSidecar) ToConsensus() (*eth.BlobSidecar, error) {
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "KzgProof")
|
||||
}
|
||||
bsc := ð.BlobSidecar{
|
||||
bsc := ð.DeprecatedBlobSidecar{
|
||||
BlockRoot: blockRoot,
|
||||
Index: index,
|
||||
Slot: primitives.Slot(slot),
|
||||
@@ -1676,7 +1676,7 @@ func (s *SignedBlobSidecar) ToConsensus() (*eth.SignedBlobSidecar, error) {
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "Message.KzgProof")
|
||||
}
|
||||
bsc := ð.BlobSidecar{
|
||||
bsc := ð.DeprecatedBlobSidecar{
|
||||
BlockRoot: blockRoot,
|
||||
Index: index,
|
||||
Slot: primitives.Slot(slot),
|
||||
@@ -2005,14 +2005,8 @@ func BeaconBlockHeaderFromConsensus(h *eth.BeaconBlockHeader) *BeaconBlockHeader
|
||||
}
|
||||
|
||||
func BeaconBlockFromConsensus(b *eth.BeaconBlock) (*BeaconBlock, error) {
|
||||
proposerSlashings, err := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerSlashings := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
attesterSlashings := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
atts, err := AttsFromConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2060,14 +2054,8 @@ func SignedBeaconBlockFromConsensus(b *eth.SignedBeaconBlock) (*SignedBeaconBloc
|
||||
}
|
||||
|
||||
func BeaconBlockAltairFromConsensus(b *eth.BeaconBlockAltair) (*BeaconBlockAltair, error) {
|
||||
proposerSlashings, err := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerSlashings := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
attesterSlashings := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
atts, err := AttsFromConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2119,14 +2107,8 @@ func SignedBeaconBlockAltairFromConsensus(b *eth.SignedBeaconBlockAltair) (*Sign
|
||||
}
|
||||
|
||||
func BlindedBeaconBlockBellatrixFromConsensus(b *eth.BlindedBeaconBlockBellatrix) (*BlindedBeaconBlockBellatrix, error) {
|
||||
proposerSlashings, err := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerSlashings := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
attesterSlashings := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
atts, err := AttsFromConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2183,14 +2165,8 @@ func SignedBlindedBeaconBlockBellatrixFromConsensus(b *eth.SignedBlindedBeaconBl
|
||||
}
|
||||
|
||||
func BeaconBlockBellatrixFromConsensus(b *eth.BeaconBlockBellatrix) (*BeaconBlockBellatrix, error) {
|
||||
proposerSlashings, err := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerSlashings := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
attesterSlashings := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
atts, err := AttsFromConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2266,14 +2242,8 @@ func SignedBeaconBlockBellatrixFromConsensus(b *eth.SignedBeaconBlockBellatrix)
|
||||
}
|
||||
|
||||
func BlindedBeaconBlockCapellaFromConsensus(b *eth.BlindedBeaconBlockCapella) (*BlindedBeaconBlockCapella, error) {
|
||||
proposerSlashings, err := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerSlashings := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
attesterSlashings := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
atts, err := AttsFromConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2335,14 +2305,8 @@ func SignedBlindedBeaconBlockCapellaFromConsensus(b *eth.SignedBlindedBeaconBloc
|
||||
}
|
||||
|
||||
func BeaconBlockCapellaFromConsensus(b *eth.BeaconBlockCapella) (*BeaconBlockCapella, error) {
|
||||
proposerSlashings, err := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerSlashings := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
attesterSlashings := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
atts, err := AttsFromConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2521,14 +2485,8 @@ func SignedBeaconBlockContentsDenebFromConsensus(b *eth.SignedBeaconBlockAndBlob
|
||||
}
|
||||
|
||||
func BlindedBeaconBlockDenebFromConsensus(b *eth.BlindedBeaconBlockDeneb) (*BlindedBeaconBlockDeneb, error) {
|
||||
proposerSlashings, err := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerSlashings := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
attesterSlashings := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
atts, err := AttsFromConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2595,14 +2553,8 @@ func SignedBlindedBeaconBlockDenebFromConsensus(b *eth.SignedBlindedBeaconBlockD
|
||||
}
|
||||
|
||||
func BeaconBlockDenebFromConsensus(b *eth.BeaconBlockDeneb) (*BeaconBlockDeneb, error) {
|
||||
proposerSlashings, err := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerSlashings := ProposerSlashingsFromConsensus(b.Body.ProposerSlashings)
|
||||
attesterSlashings := AttesterSlashingsFromConsensus(b.Body.AttesterSlashings)
|
||||
atts, err := AttsFromConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2723,7 +2675,7 @@ func SignedBlindedBlobSidecarFromConsensus(b *eth.SignedBlindedBlobSidecar) (*Si
|
||||
}, nil
|
||||
}
|
||||
|
||||
func BlobSidecarFromConsensus(b *eth.BlobSidecar) (*BlobSidecar, error) {
|
||||
func BlobSidecarFromConsensus(b *eth.DeprecatedBlobSidecar) (*BlobSidecar, error) {
|
||||
return &BlobSidecar{
|
||||
BlockRoot: hexutil.Encode(b.BlockRoot),
|
||||
Index: fmt.Sprintf("%d", b.Index),
|
||||
@@ -2847,7 +2799,7 @@ func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlash
|
||||
return proposerSlashings, nil
|
||||
}
|
||||
|
||||
func ProposerSlashingsFromConsensus(src []*eth.ProposerSlashing) ([]*ProposerSlashing, error) {
|
||||
func ProposerSlashingsFromConsensus(src []*eth.ProposerSlashing) []*ProposerSlashing {
|
||||
proposerSlashings := make([]*ProposerSlashing, len(src))
|
||||
for i, s := range src {
|
||||
proposerSlashings[i] = &ProposerSlashing{
|
||||
@@ -2873,7 +2825,7 @@ func ProposerSlashingsFromConsensus(src []*eth.ProposerSlashing) ([]*ProposerSla
|
||||
},
|
||||
}
|
||||
}
|
||||
return proposerSlashings, nil
|
||||
return proposerSlashings
|
||||
}
|
||||
|
||||
func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlashing, error) {
|
||||
@@ -2953,7 +2905,7 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
|
||||
return attesterSlashings, nil
|
||||
}
|
||||
|
||||
func AttesterSlashingsFromConsensus(src []*eth.AttesterSlashing) ([]*AttesterSlashing, error) {
|
||||
func AttesterSlashingsFromConsensus(src []*eth.AttesterSlashing) []*AttesterSlashing {
|
||||
attesterSlashings := make([]*AttesterSlashing, len(src))
|
||||
for i, s := range src {
|
||||
a1AttestingIndices := make([]string, len(s.Attestation_1.AttestingIndices))
|
||||
@@ -3001,7 +2953,7 @@ func AttesterSlashingsFromConsensus(src []*eth.AttesterSlashing) ([]*AttesterSla
|
||||
},
|
||||
}
|
||||
}
|
||||
return attesterSlashings, nil
|
||||
return attesterSlashings
|
||||
}
|
||||
|
||||
func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
|
||||
|
||||
@@ -7,7 +7,6 @@ go_library(
|
||||
"handlers_block.go",
|
||||
"server.go",
|
||||
"structs.go",
|
||||
"validator.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/validator",
|
||||
visibility = ["//visibility:public"],
|
||||
@@ -40,9 +39,6 @@ go_library(
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
@@ -62,7 +58,6 @@ go_test(
|
||||
srcs = [
|
||||
"handlers_block_test.go",
|
||||
"handlers_test.go",
|
||||
"validator_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
@@ -93,9 +88,6 @@ go_test(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/mock:go_default_library",
|
||||
|
||||
@@ -21,21 +21,26 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProduceBlockV3 Requests a beacon node to produce a valid block, which can then be signed by a validator. The
|
||||
// returned block may be blinded or unblinded, depending on the current state of the network as
|
||||
// decided by the execution and beacon nodes.
|
||||
// The beacon node must return an unblinded block if it obtains the execution payload from its
|
||||
// paired execution node. It must only return a blinded block if it obtains the execution payload
|
||||
// header from an MEV relay.
|
||||
// Metadata in the response indicates the type of block produced, and the supported types of block
|
||||
// will be added to as forks progress.
|
||||
func (s *Server) ProduceBlockV3(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.ProduceBlockV3")
|
||||
type blockType uint8
|
||||
|
||||
const (
|
||||
any blockType = iota
|
||||
full
|
||||
blinded
|
||||
)
|
||||
|
||||
// DEPRECATED: Please use ProduceBlockV3 instead.
|
||||
//
|
||||
// ProduceBlockV2 requests the beacon node to produce a valid unsigned beacon block,
|
||||
// which can then be signed by a proposer and submitted.
|
||||
func (s *Server) ProduceBlockV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.ProduceBlockV2")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
rawSlot := segments[len(segments)-1]
|
||||
rawRandaoReveal := r.URL.Query().Get("randao_reveal")
|
||||
@@ -53,7 +58,7 @@ func (s *Server) ProduceBlockV3(w http.ResponseWriter, r *http.Request) {
|
||||
} else {
|
||||
rr, err := shared.DecodeHexWithLength(rawRandaoReveal, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
http2.HandleError(w, errors.Wrap(err, "unable to decode randao reveal").Error(), http.StatusBadRequest)
|
||||
http2.HandleError(w, errors.Wrap(err, "Unable to decode randao reveal").Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
randaoReveal = rr
|
||||
@@ -62,7 +67,7 @@ func (s *Server) ProduceBlockV3(w http.ResponseWriter, r *http.Request) {
|
||||
if rawGraffiti != "" {
|
||||
g, err := shared.DecodeHexWithLength(rawGraffiti, 32)
|
||||
if err != nil {
|
||||
http2.HandleError(w, errors.Wrap(err, "unable to decode graffiti").Error(), http.StatusBadRequest)
|
||||
http2.HandleError(w, errors.Wrap(err, "Unable to decode graffiti").Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
graffiti = g
|
||||
@@ -73,10 +78,118 @@ func (s *Server) ProduceBlockV3(w http.ResponseWriter, r *http.Request) {
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti,
|
||||
SkipMevBoost: false,
|
||||
})
|
||||
}, full)
|
||||
}
|
||||
|
||||
func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *http.Request, v1alpha1req *eth.BlockRequest) {
|
||||
// DEPRECATED: Please use ProduceBlockV3 instead.
|
||||
//
|
||||
// ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
|
||||
// which can then be signed by a proposer and submitted.
|
||||
func (s *Server) ProduceBlindedBlock(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.ProduceBlindedBlock")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
rawSlot := segments[len(segments)-1]
|
||||
rawRandaoReveal := r.URL.Query().Get("randao_reveal")
|
||||
rawGraffiti := r.URL.Query().Get("graffiti")
|
||||
rawSkipRandaoVerification := r.URL.Query().Get("skip_randao_verification")
|
||||
|
||||
slot, valid := shared.ValidateUint(w, "slot", rawSlot)
|
||||
if !valid {
|
||||
return
|
||||
}
|
||||
|
||||
var randaoReveal []byte
|
||||
if rawSkipRandaoVerification == "true" {
|
||||
randaoReveal = primitives.PointAtInfinity
|
||||
} else {
|
||||
rr, err := shared.DecodeHexWithLength(rawRandaoReveal, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
http2.HandleError(w, errors.Wrap(err, "Unable to decode randao reveal").Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
randaoReveal = rr
|
||||
}
|
||||
var graffiti []byte
|
||||
if rawGraffiti != "" {
|
||||
g, err := shared.DecodeHexWithLength(rawGraffiti, 32)
|
||||
if err != nil {
|
||||
http2.HandleError(w, errors.Wrap(err, "Unable to decode graffiti").Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
graffiti = g
|
||||
}
|
||||
|
||||
s.produceBlockV3(ctx, w, r, ð.BlockRequest{
|
||||
Slot: primitives.Slot(slot),
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti,
|
||||
SkipMevBoost: false,
|
||||
}, blinded)
|
||||
}
|
||||
|
||||
// ProduceBlockV3 requests a beacon node to produce a valid block, which can then be signed by a validator. The
|
||||
// returned block may be blinded or unblinded, depending on the current state of the network as
|
||||
// decided by the execution and beacon nodes.
|
||||
// The beacon node must return an unblinded block if it obtains the execution payload from its
|
||||
// paired execution node. It must only return a blinded block if it obtains the execution payload
|
||||
// header from an MEV relay.
|
||||
// Metadata in the response indicates the type of block produced, and the supported types of block
|
||||
// will be added to as forks progress.
|
||||
func (s *Server) ProduceBlockV3(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.ProduceBlockV3")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
rawSlot := segments[len(segments)-1]
|
||||
rawRandaoReveal := r.URL.Query().Get("randao_reveal")
|
||||
rawGraffiti := r.URL.Query().Get("graffiti")
|
||||
rawSkipRandaoVerification := r.URL.Query().Get("skip_randao_verification")
|
||||
|
||||
slot, valid := shared.ValidateUint(w, "slot", rawSlot)
|
||||
if !valid {
|
||||
return
|
||||
}
|
||||
|
||||
var randaoReveal []byte
|
||||
if rawSkipRandaoVerification == "true" {
|
||||
randaoReveal = primitives.PointAtInfinity
|
||||
} else {
|
||||
rr, err := shared.DecodeHexWithLength(rawRandaoReveal, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
http2.HandleError(w, errors.Wrap(err, "Unable to decode randao reveal").Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
randaoReveal = rr
|
||||
}
|
||||
var graffiti []byte
|
||||
if rawGraffiti != "" {
|
||||
g, err := shared.DecodeHexWithLength(rawGraffiti, 32)
|
||||
if err != nil {
|
||||
http2.HandleError(w, errors.Wrap(err, "Unable to decode graffiti").Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
graffiti = g
|
||||
}
|
||||
|
||||
s.produceBlockV3(ctx, w, r, ð.BlockRequest{
|
||||
Slot: primitives.Slot(slot),
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti,
|
||||
SkipMevBoost: false,
|
||||
}, any)
|
||||
}
|
||||
|
||||
func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *http.Request, v1alpha1req *eth.BlockRequest, requiredType blockType) {
|
||||
isSSZ := http2.SszRequested(r)
|
||||
v1alpha1resp, err := s.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
|
||||
if err != nil {
|
||||
@@ -84,6 +197,14 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
|
||||
return
|
||||
}
|
||||
|
||||
if requiredType == blinded && !v1alpha1resp.IsBlinded {
|
||||
http2.HandleError(w, "Prepared block is not blinded", http.StatusInternalServerError)
|
||||
return
|
||||
} else if requiredType == full && v1alpha1resp.IsBlinded {
|
||||
http2.HandleError(w, "Prepared block is blinded", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
consensusBlockValue, httpError := getConsensusBlockValue(ctx, s.BlockRewardFetcher, v1alpha1resp.Block)
|
||||
if httpError != nil {
|
||||
http2.WriteError(w, httpError)
|
||||
@@ -98,13 +219,13 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
|
||||
if ok {
|
||||
w.Header().Set(api.VersionHeader, version.String(version.Phase0))
|
||||
// rewards aren't used in phase 0
|
||||
handleProducePhase0V3(ctx, w, isSSZ, phase0Block, v1alpha1resp.PayloadValue)
|
||||
handleProducePhase0V3(w, isSSZ, phase0Block, v1alpha1resp.PayloadValue)
|
||||
return
|
||||
}
|
||||
altairBlock, ok := v1alpha1resp.Block.(*eth.GenericBeaconBlock_Altair)
|
||||
if ok {
|
||||
w.Header().Set(api.VersionHeader, version.String(version.Altair))
|
||||
handleProduceAltairV3(ctx, w, isSSZ, altairBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
handleProduceAltairV3(w, isSSZ, altairBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
return
|
||||
}
|
||||
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
@@ -119,37 +240,37 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
|
||||
blindedBellatrixBlock, ok := v1alpha1resp.Block.(*eth.GenericBeaconBlock_BlindedBellatrix)
|
||||
if ok {
|
||||
w.Header().Set(api.VersionHeader, version.String(version.Bellatrix))
|
||||
handleProduceBlindedBellatrixV3(ctx, w, isSSZ, blindedBellatrixBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
handleProduceBlindedBellatrixV3(w, isSSZ, blindedBellatrixBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
return
|
||||
}
|
||||
bellatrixBlock, ok := v1alpha1resp.Block.(*eth.GenericBeaconBlock_Bellatrix)
|
||||
if ok {
|
||||
w.Header().Set(api.VersionHeader, version.String(version.Bellatrix))
|
||||
handleProduceBellatrixV3(ctx, w, isSSZ, bellatrixBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
handleProduceBellatrixV3(w, isSSZ, bellatrixBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
return
|
||||
}
|
||||
blindedCapellaBlock, ok := v1alpha1resp.Block.(*eth.GenericBeaconBlock_BlindedCapella)
|
||||
if ok {
|
||||
w.Header().Set(api.VersionHeader, version.String(version.Capella))
|
||||
handleProduceBlindedCapellaV3(ctx, w, isSSZ, blindedCapellaBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
handleProduceBlindedCapellaV3(w, isSSZ, blindedCapellaBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
return
|
||||
}
|
||||
capellaBlock, ok := v1alpha1resp.Block.(*eth.GenericBeaconBlock_Capella)
|
||||
if ok {
|
||||
w.Header().Set(api.VersionHeader, version.String(version.Capella))
|
||||
handleProduceCapellaV3(ctx, w, isSSZ, capellaBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
handleProduceCapellaV3(w, isSSZ, capellaBlock, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
return
|
||||
}
|
||||
blindedDenebBlockContents, ok := v1alpha1resp.Block.(*eth.GenericBeaconBlock_BlindedDeneb)
|
||||
if ok {
|
||||
w.Header().Set(api.VersionHeader, version.String(version.Deneb))
|
||||
handleProduceBlindedDenebV3(ctx, w, isSSZ, blindedDenebBlockContents, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
handleProduceBlindedDenebV3(w, isSSZ, blindedDenebBlockContents, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
return
|
||||
}
|
||||
denebBlockContents, ok := v1alpha1resp.Block.(*eth.GenericBeaconBlock_Deneb)
|
||||
if ok {
|
||||
w.Header().Set(api.VersionHeader, version.String(version.Deneb))
|
||||
handleProduceDenebV3(ctx, w, isSSZ, denebBlockContents, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
handleProduceDenebV3(w, isSSZ, denebBlockContents, v1alpha1resp.PayloadValue, consensusBlockValue)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -201,14 +322,11 @@ func getConsensusBlockValue(ctx context.Context, blockRewardsFetcher rewards.Blo
|
||||
}
|
||||
|
||||
func handleProducePhase0V3(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Phase0,
|
||||
payloadValue uint64,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV3.internal.handleProducePhase0V3")
|
||||
defer span.End()
|
||||
if isSSZ {
|
||||
sszResp, err := blk.Phase0.MarshalSSZ()
|
||||
if err != nil {
|
||||
@@ -238,16 +356,12 @@ func handleProducePhase0V3(
|
||||
}
|
||||
|
||||
func handleProduceAltairV3(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Altair,
|
||||
executionPayloadValue uint64,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV3.internal.handleProduceAltairV3")
|
||||
defer span.End()
|
||||
|
||||
if isSSZ {
|
||||
sszResp, err := blk.Altair.MarshalSSZ()
|
||||
if err != nil {
|
||||
@@ -277,15 +391,12 @@ func handleProduceAltairV3(
|
||||
}
|
||||
|
||||
func handleProduceBellatrixV3(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Bellatrix,
|
||||
executionPayloadValue uint64,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV3.internal.handleProduceBellatrixV3")
|
||||
defer span.End()
|
||||
if isSSZ {
|
||||
sszResp, err := blk.Bellatrix.MarshalSSZ()
|
||||
if err != nil {
|
||||
@@ -315,15 +426,12 @@ func handleProduceBellatrixV3(
|
||||
}
|
||||
|
||||
func handleProduceBlindedBellatrixV3(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_BlindedBellatrix,
|
||||
executionPayloadValue uint64,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV3.internal.handleProduceBlindedBellatrixV3")
|
||||
defer span.End()
|
||||
if isSSZ {
|
||||
sszResp, err := blk.BlindedBellatrix.MarshalSSZ()
|
||||
if err != nil {
|
||||
@@ -353,15 +461,12 @@ func handleProduceBlindedBellatrixV3(
|
||||
}
|
||||
|
||||
func handleProduceBlindedCapellaV3(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_BlindedCapella,
|
||||
executionPayloadValue uint64,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV3.internal.handleProduceBlindedCapellaV3")
|
||||
defer span.End()
|
||||
if isSSZ {
|
||||
sszResp, err := blk.BlindedCapella.MarshalSSZ()
|
||||
if err != nil {
|
||||
@@ -391,15 +496,12 @@ func handleProduceBlindedCapellaV3(
|
||||
}
|
||||
|
||||
func handleProduceCapellaV3(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Capella,
|
||||
executionPayloadValue uint64,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV3.internal.handleProduceCapellaV3")
|
||||
defer span.End()
|
||||
if isSSZ {
|
||||
sszResp, err := blk.Capella.MarshalSSZ()
|
||||
if err != nil {
|
||||
@@ -429,15 +531,12 @@ func handleProduceCapellaV3(
|
||||
}
|
||||
|
||||
func handleProduceBlindedDenebV3(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_BlindedDeneb,
|
||||
executionPayloadValue uint64,
|
||||
consensusPayloadValue string,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV3.internal.handleProduceBlindedDenebV3")
|
||||
defer span.End()
|
||||
if isSSZ {
|
||||
sszResp, err := blk.BlindedDeneb.MarshalSSZ()
|
||||
if err != nil {
|
||||
@@ -467,15 +566,12 @@ func handleProduceBlindedDenebV3(
|
||||
}
|
||||
|
||||
func handleProduceDenebV3(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
isSSZ bool,
|
||||
blk *eth.GenericBeaconBlock_Deneb,
|
||||
executionPayloadValue uint64,
|
||||
consensusBlockValue string,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV3.internal.handleProduceDenebV3")
|
||||
defer span.End()
|
||||
if isSSZ {
|
||||
sszResp, err := blk.Deneb.MarshalSSZ()
|
||||
if err != nil {
|
||||
|
||||
@@ -17,12 +17,569 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
rpctesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared/testing"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestProduceBlockV2(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlock
|
||||
err := json.Unmarshal([]byte(rpctesting.Phase0Block), &block)
|
||||
require.NoError(t, err)
|
||||
jsonBytes, err := json.Marshal(block.Message)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"phase0","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "phase0", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlockAltair
|
||||
err := json.Unmarshal([]byte(rpctesting.AltairBlock), &block)
|
||||
|
||||
require.NoError(t, err)
|
||||
jsonBytes, err := json.Marshal(block.Message)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
mockRewards := &rewards.BlockRewards{Total: "10"}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"altair","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "altair", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlockBellatrix
|
||||
err := json.Unmarshal([]byte(rpctesting.BellatrixBlock), &block)
|
||||
require.NoError(t, err)
|
||||
jsonBytes, err := json.Marshal(block.Message)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
mockRewards := &rewards.BlockRewards{Total: "10"}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("BlindedBellatrix", func(t *testing.T) {
|
||||
var block *shared.SignedBlindedBeaconBlockBellatrix
|
||||
err := json.Unmarshal([]byte(rpctesting.BlindedBellatrixBlock), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
mockRewards := &rewards.BlockRewards{Total: "10"}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
assert.StringContains(t, "Prepared block is blinded", e.Message)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlockCapella
|
||||
err := json.Unmarshal([]byte(rpctesting.CapellaBlock), &block)
|
||||
require.NoError(t, err)
|
||||
jsonBytes, err := json.Marshal(block.Message)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
mockRewards := &rewards.BlockRewards{Total: "10"}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"capella","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "capella", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("Blinded Capella", func(t *testing.T) {
|
||||
var block *shared.SignedBlindedBeaconBlockCapella
|
||||
err := json.Unmarshal([]byte(rpctesting.BlindedCapellaBlock), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
g, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
g.PayloadValue = 2000 //some fake value
|
||||
return g, err
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
mockRewards := &rewards.BlockRewards{Total: "10"}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
assert.StringContains(t, "Prepared block is blinded", e.Message)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlockContentsDeneb
|
||||
err := json.Unmarshal([]byte(rpctesting.DenebBlockContents), &block)
|
||||
require.NoError(t, err)
|
||||
jsonBytes, err := json.Marshal(block.ToUnsigned())
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.ToUnsigned().ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
mockRewards := &rewards.BlockRewards{Total: "10"}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
|
||||
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
|
||||
require.Equal(t, want, body)
|
||||
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("Blinded Deneb", func(t *testing.T) {
|
||||
var block *shared.SignedBlindedBeaconBlockContentsDeneb
|
||||
err := json.Unmarshal([]byte(rpctesting.BlindedDenebBlockContents), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.ToUnsigned().ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
mockRewards := &rewards.BlockRewards{Total: "10"}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
assert.StringContains(t, "Prepared block is blinded", e.Message)
|
||||
})
|
||||
t.Run("invalid query parameter slot empty", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v3/validator/blocks/", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "slot is required"))
|
||||
})
|
||||
t.Run("invalid query parameter slot invalid", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v3/validator/blocks/asdfsad", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "slot is invalid"))
|
||||
})
|
||||
t.Run("invalid query parameter randao_reveal invalid", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v3/validator/blocks/1?randao_reveal=0x213123", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainTesting.ChainService{}
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing and not serving request on that endpoint"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestProduceBlockV2SSZ(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
mockRewards := &rewards.BlockRewards{Total: "10"}
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlock
|
||||
err := json.Unmarshal([]byte(rpctesting.Phase0Block), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
g, err := block.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
bl, ok := g.Block.(*eth.GenericSignedBeaconBlock_Phase0)
|
||||
require.Equal(t, true, ok)
|
||||
ssz, err := bl.Phase0.Block.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "phase0", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlockAltair
|
||||
err := json.Unmarshal([]byte(rpctesting.AltairBlock), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
g, err := block.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
bl, ok := g.Block.(*eth.GenericSignedBeaconBlock_Altair)
|
||||
require.Equal(t, true, ok)
|
||||
ssz, err := bl.Altair.Block.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "altair", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlockBellatrix
|
||||
err := json.Unmarshal([]byte(rpctesting.BellatrixBlock), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
g, err := block.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
bl, ok := g.Block.(*eth.GenericSignedBeaconBlock_Bellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
ssz, err := bl.Bellatrix.Block.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("BlindedBellatrix", func(t *testing.T) {
|
||||
var block *shared.SignedBlindedBeaconBlockBellatrix
|
||||
err := json.Unmarshal([]byte(rpctesting.BlindedBellatrixBlock), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
assert.StringContains(t, "Prepared block is blinded", e.Message)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlockCapella
|
||||
err := json.Unmarshal([]byte(rpctesting.CapellaBlock), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.Message.ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
g, err := block.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
bl, ok := g.Block.(*eth.GenericSignedBeaconBlock_Capella)
|
||||
require.Equal(t, true, ok)
|
||||
ssz, err := bl.Capella.Block.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "capella", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("Blinded Capella", func(t *testing.T) {
|
||||
var block *shared.SignedBlindedBeaconBlockCapella
|
||||
err := json.Unmarshal([]byte(rpctesting.BlindedCapellaBlock), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
g, err := block.Message.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
g.PayloadValue = 2000 // some fake value
|
||||
return g, err
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
assert.StringContains(t, "Prepared block is blinded", e.Message)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
var block *shared.SignedBeaconBlockContentsDeneb
|
||||
err := json.Unmarshal([]byte(rpctesting.DenebBlockContents), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.ToUnsigned().ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
g, err := block.ToUnsigned().ToGeneric()
|
||||
require.NoError(t, err)
|
||||
bl, ok := g.Block.(*eth.GenericBeaconBlock_Deneb)
|
||||
require.Equal(t, true, ok)
|
||||
ssz, err := bl.Deneb.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(ssz), writer.Body.String())
|
||||
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
|
||||
})
|
||||
t.Run("Blinded Deneb", func(t *testing.T) {
|
||||
var block *shared.SignedBlindedBeaconBlockContentsDeneb
|
||||
err := json.Unmarshal([]byte(rpctesting.BlindedDenebBlockContents), &block)
|
||||
require.NoError(t, err)
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(
|
||||
func() (*eth.GenericBeaconBlock, error) {
|
||||
return block.ToUnsigned().ToGeneric()
|
||||
}())
|
||||
mockChainService := &blockchainTesting.ChainService{}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
BlockRewardFetcher: &rewardtesting.MockBlockRewardFetcher{Rewards: mockRewards},
|
||||
}
|
||||
rr := "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" +
|
||||
"&graffiti=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://foo.example/eth/v3/validator/blocks/1?randao_reveal=%s", rr), nil)
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.ProduceBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
assert.StringContains(t, "Prepared block is blinded", e.Message)
|
||||
})
|
||||
}
func TestProduceBlockV3(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
@@ -1,509 +0,0 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ProduceBlockV2 requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
// By definition, `/eth/v2/validator/blocks/{slot}` does not produce a block using the mev-boost and relayer network.
// The endpoint specification states that the returned object is a BeaconBlock, not a BlindedBeaconBlock, so the block must contain a full ExecutionPayload:
// https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.3.0#/Validator/produceBlockV2
//
// To use the mev-boost and relayer network, it is recommended to use the following endpoint instead:
// https://github.com/ethereum/beacon-APIs/blob/master/apis/validator/blinded_block.yaml
func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlockResponseV2, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v1alpha1req := ðpbalpha.BlockRequest{
|
||||
Slot: req.Slot,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
Graffiti: req.Graffiti,
|
||||
SkipMevBoost: true, // Skip mev-boost and relayer network
|
||||
}
|
||||
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
|
||||
if err != nil {
|
||||
// We simply return err because it's already of a gRPC error type.
|
||||
return nil, err
|
||||
}
|
||||
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlockResponseV2{
|
||||
Version: ethpbv2.Version_PHASE0,
|
||||
Data: ðpbv2.BeaconBlockContainerV2{
|
||||
Block: ðpbv2.BeaconBlockContainerV2_Phase0Block{Phase0Block: block},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlockResponseV2{
|
||||
Version: ethpbv2.Version_ALTAIR,
|
||||
Data: ðpbv2.BeaconBlockContainerV2{
|
||||
Block: ðpbv2.BeaconBlockContainerV2_AltairBlock{AltairBlock: block},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if the node is a optimistic node: %v", err)
|
||||
}
|
||||
if optimistic {
|
||||
return nil, status.Errorf(codes.Unavailable, "The node is currently optimistic and cannot serve validators")
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedBellatrix)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Bellatrix beacon block is blinded")
|
||||
}
|
||||
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Bellatrix)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlockResponseV2{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: ðpbv2.BeaconBlockContainerV2{
|
||||
Block: ðpbv2.BeaconBlockContainerV2_BellatrixBlock{BellatrixBlock: block},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedCapella)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Capella beacon block is blinded")
|
||||
}
|
||||
capellaBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Capella)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockCapellaToV2(capellaBlock.Capella)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlockResponseV2{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
Data: ðpbv2.BeaconBlockContainerV2{
|
||||
Block: ðpbv2.BeaconBlockContainerV2_CapellaBlock{CapellaBlock: block},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedDeneb)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Deneb beacon block contents are blinded")
|
||||
}
|
||||
denebBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Deneb)
|
||||
if ok {
|
||||
blockAndBlobs, err := migration.V1Alpha1BeaconBlockDenebAndBlobsToV2(denebBlock.Deneb)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block contents: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlockResponseV2{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
Data: ðpbv2.BeaconBlockContainerV2{
|
||||
Block: ðpbv2.BeaconBlockContainerV2_DenebContents{
|
||||
DenebContents: ðpbv2.BeaconBlockContentsDeneb{
|
||||
Block: blockAndBlobs.Block,
|
||||
BlobSidecars: blockAndBlobs.BlobSidecars,
|
||||
}},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
|
||||
}
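// Editor's note: the following example is not part of this diff. It is a minimal
// client-side sketch of calling the REST route that backs ProduceBlockV2,
// GET /eth/v2/validator/blocks/{slot}?randao_reveal=..., using only the standard
// library. The base URL (localhost:3500) and the helper name are assumptions for
// illustration; the Eth-Consensus-Version response header is the one the tests
// above check via api.VersionHeader.
//
// Assumed imports: "fmt", "io", "net/http".
func exampleFetchUnsignedBlock(slot uint64, randaoReveal string) ([]byte, string, error) {
	url := fmt.Sprintf("http://localhost:3500/eth/v2/validator/blocks/%d?randao_reveal=%s", slot, randaoReveal)
	resp, err := http.Get(url)
	if err != nil {
		return nil, "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, "", fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, "", err
	}
	// The fork of the returned block is reported in a response header.
	return body, resp.Header.Get("Eth-Consensus-Version"), nil
}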
|
||||
|
||||
// ProduceBlockV2SSZ requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
//
// The produced block is returned in SSZ form.
// By definition, `/eth/v2/validator/blocks/{slot}/ssz` does not produce a block using the mev-boost and relayer network.
// The endpoint specification states that the returned object is a BeaconBlock, not a BlindedBeaconBlock, so the block must contain a full ExecutionPayload:
// https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.3.0#/Validator/produceBlockV2
//
// To use the mev-boost and relayer network, it is recommended to use the following endpoint instead:
// https://github.com/ethereum/beacon-APIs/blob/master/apis/validator/blinded_block.yaml
func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v1alpha1req := ðpbalpha.BlockRequest{
|
||||
Slot: req.Slot,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
Graffiti: req.Graffiti,
|
||||
SkipMevBoost: true, // Skip mev-boost and relayer network
|
||||
}
|
||||
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
|
||||
if err != nil {
|
||||
// We simply return err because it's already of a gRPC error type.
|
||||
return nil, err
|
||||
}
|
||||
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_PHASE0,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_ALTAIR,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if the node is a optimistic node: %v", err)
|
||||
}
|
||||
if optimistic {
|
||||
return nil, status.Errorf(codes.Unavailable, "The node is currently optimistic and cannot serve validators")
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedBellatrix)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Bellatrix beacon block is blinded")
|
||||
}
|
||||
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Bellatrix)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedCapella)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Capella beacon block is blinded")
|
||||
}
|
||||
capellaBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Capella)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockCapellaToV2(capellaBlock.Capella)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedDeneb)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Deneb beacon blockcontent is blinded")
|
||||
}
|
||||
denebBlockcontent, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Deneb)
|
||||
if ok {
|
||||
blockContent, err := migration.V1Alpha1BeaconBlockDenebAndBlobsToV2(denebBlockcontent.Deneb)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := blockContent.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
|
||||
}
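// Editor's note: illustrative only, not part of this diff. ProduceBlockV2SSZ backs
// the /eth/v2/validator/blocks/{slot}/ssz route, and the REST tests above select the
// SSZ path by sending "Accept: application/octet-stream". This sketch shows that
// client-side request; the base URL and helper name are assumptions.
//
// Assumed imports: "fmt", "io", "net/http".
func exampleFetchBlockSSZ(slot uint64, randaoReveal string) ([]byte, error) {
	url := fmt.Sprintf("http://localhost:3500/eth/v2/validator/blocks/%d/ssz?randao_reveal=%s", slot, randaoReveal)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	// Ask for raw SSZ bytes instead of the JSON envelope.
	req.Header.Set("Accept", "application/octet-stream")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return io.ReadAll(resp.Body)
}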
|
||||
|
||||
// ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
// Under the following conditions, this endpoint will return an error:
// - The node is syncing or in optimistic mode (after bellatrix).
// - The builder is not configured (after bellatrix).
// - The relayer circuit breaker is activated (after bellatrix).
// - The relayer responded with an error (after bellatrix).
func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlindedBlockResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlock")
|
||||
defer span.End()
|
||||
|
||||
if !vs.BlockBuilder.Configured() {
|
||||
return nil, status.Error(codes.Internal, "Block builder not configured")
|
||||
}
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v1alpha1req := ðpbalpha.BlockRequest{
|
||||
Slot: req.Slot,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
Graffiti: req.Graffiti,
|
||||
}
|
||||
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
|
||||
if err != nil {
|
||||
// We simply return err because it's already of a gRPC error type.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlindedBlockResponse{
|
||||
Version: ethpbv2.Version_PHASE0,
|
||||
Data: ðpbv2.BlindedBeaconBlockContainer{
|
||||
Block: ðpbv2.BlindedBeaconBlockContainer_Phase0Block{Phase0Block: block},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlindedBlockResponse{
|
||||
Version: ethpbv2.Version_ALTAIR,
|
||||
Data: ðpbv2.BlindedBeaconBlockContainer{
|
||||
Block: ðpbv2.BlindedBeaconBlockContainer_AltairBlock{AltairBlock: block},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if the node is a optimistic node: %v", err)
|
||||
}
|
||||
if optimistic {
|
||||
return nil, status.Errorf(codes.Unavailable, "The node is currently optimistic and cannot serve validators")
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared beacon block is not blinded")
|
||||
}
|
||||
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedBellatrix)
|
||||
if ok {
|
||||
blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(bellatrixBlock.BlindedBellatrix)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlindedBlockResponse{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: ðpbv2.BlindedBeaconBlockContainer{
|
||||
Block: ðpbv2.BlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blk},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Capella)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared beacon block is not blinded")
|
||||
}
|
||||
capellaBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedCapella)
|
||||
if ok {
|
||||
blk, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(capellaBlock.BlindedCapella)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlindedBlockResponse{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
Data: ðpbv2.BlindedBeaconBlockContainer{
|
||||
Block: ðpbv2.BlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: blk},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Deneb)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Deneb beacon block contents are not blinded")
|
||||
}
|
||||
denebBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedDeneb)
|
||||
if ok {
|
||||
blockAndBlobs, err := migration.V1Alpha1BlindedBlockAndBlobsDenebToV2Blinded(denebBlock.BlindedDeneb)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block contents: %v", err)
|
||||
}
|
||||
return ðpbv2.ProduceBlindedBlockResponse{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
Data: ðpbv2.BlindedBeaconBlockContainer{
|
||||
Block: ðpbv2.BlindedBeaconBlockContainer_DenebContents{
|
||||
DenebContents: ðpbv2.BlindedBeaconBlockContentsDeneb{
|
||||
BlindedBlock: blockAndBlobs.BlindedBlock,
|
||||
BlindedBlobSidecars: blockAndBlobs.BlindedBlobSidecars,
|
||||
}},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("block was not a supported blinded block type, validator may not be registered if using a relay. received: %T", v1alpha1resp.Block))
|
||||
}
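// Editor's note: not part of this diff. The handler above dispatches on the concrete
// type of the generic block through a chain of type assertions; the same dispatch can
// be written as a single type switch. This is only a readability sketch of that
// alternative, using the generated types already referenced in this file; the
// function name is hypothetical.
func classifyGenericBlock(blk *ethpbalpha.GenericBeaconBlock) (blinded bool, fork string) {
	switch blk.Block.(type) {
	case *ethpbalpha.GenericBeaconBlock_Phase0:
		return false, "phase0"
	case *ethpbalpha.GenericBeaconBlock_Altair:
		return false, "altair"
	case *ethpbalpha.GenericBeaconBlock_Bellatrix:
		return false, "bellatrix"
	case *ethpbalpha.GenericBeaconBlock_BlindedBellatrix:
		return true, "bellatrix"
	case *ethpbalpha.GenericBeaconBlock_Capella:
		return false, "capella"
	case *ethpbalpha.GenericBeaconBlock_BlindedCapella:
		return true, "capella"
	case *ethpbalpha.GenericBeaconBlock_Deneb:
		return false, "deneb"
	case *ethpbalpha.GenericBeaconBlock_BlindedDeneb:
		return true, "deneb"
	default:
		return false, "unsupported"
	}
}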
|
||||
|
||||
// ProduceBlindedBlockSSZ requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
// The produced block is returned in SSZ form.
//
// Pre-Bellatrix, this endpoint will return a regular (non-blinded) block.
func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlockSSZ")
|
||||
defer span.End()
|
||||
|
||||
if !vs.BlockBuilder.Configured() {
|
||||
return nil, status.Error(codes.Internal, "Block builder not configured")
|
||||
}
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v1alpha1req := ðpbalpha.BlockRequest{
|
||||
Slot: req.Slot,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
Graffiti: req.Graffiti,
|
||||
}
|
||||
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
|
||||
if err != nil {
|
||||
// We simply return err because it's already of a gRPC error type.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_PHASE0,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_ALTAIR,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if the node is a optimistic node: %v", err)
|
||||
}
|
||||
if optimistic {
|
||||
return nil, status.Errorf(codes.Unavailable, "The node is currently optimistic and cannot serve validators")
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Bellatrix beacon block is not blinded")
|
||||
}
|
||||
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedBellatrix)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(bellatrixBlock.BlindedBellatrix)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Capella)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Capella beacon block is not blinded")
|
||||
}
|
||||
capellaBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedCapella)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(capellaBlock.BlindedCapella)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
_, ok = v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Deneb)
|
||||
if ok {
|
||||
return nil, status.Error(codes.Internal, "Prepared Deneb beacon block content is not blinded")
|
||||
}
|
||||
denebBlockcontent, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_BlindedDeneb)
|
||||
if ok {
|
||||
blockContent, err := migration.V1Alpha1BlindedBlockAndBlobsDenebToV2Blinded(denebBlockcontent.BlindedDeneb)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := blockContent.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
|
||||
}
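// Editor's note: not part of this diff. A gRPC caller of ProduceBlindedBlockSSZ
// receives an SSZContainer whose Data field is the SSZ encoding of a fork-specific
// blinded block and whose Version field identifies that fork. The sketch below shows
// the decoding step for two forks; the v2 type names (BlindedBeaconBlockBellatrix,
// BlindedBeaconBlockCapella) are assumed from the container fields used above, and the
// function name is hypothetical. Assumed import: "fmt".
func exampleDecodeBlindedSSZ(c *ethpbv2.SSZContainer) (interface{}, error) {
	switch c.Version {
	case ethpbv2.Version_BELLATRIX:
		blk := &ethpbv2.BlindedBeaconBlockBellatrix{} // type name assumed
		if err := blk.UnmarshalSSZ(c.Data); err != nil {
			return nil, err
		}
		return blk, nil
	case ethpbv2.Version_CAPELLA:
		blk := &ethpbv2.BlindedBeaconBlockCapella{} // type name assumed
		if err := blk.UnmarshalSSZ(c.Data); err != nil {
			return nil, err
		}
		return blk, nil
	default:
		return nil, fmt.Errorf("unsupported fork version %s", c.Version.String())
	}
}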
|
||||
@@ -1,805 +0,0 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
builderTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/builder/testing"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/mock"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
)
|
||||
|
||||
func TestProduceBlockV2(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Phase0{Phase0: ðpbalpha.BeaconBlock{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BeaconBlockContainerV2_Phase0Block)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.Phase0Block.Slot)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Altair{Altair: ðpbalpha.BeaconBlockAltair{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BeaconBlockContainerV2_AltairBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.AltairBlock.Slot)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Bellatrix{Bellatrix: ðpbalpha.BeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BeaconBlockContainerV2_BellatrixBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.BellatrixBlock.Slot)
|
||||
})
|
||||
t.Run("Bellatrix blinded", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: ðpbalpha.BlindedBeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlockV2(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Bellatrix beacon block is blinded", err)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Capella{Capella: ðpbalpha.BeaconBlockCapella{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BeaconBlockContainerV2_CapellaBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.CapellaBlock.Slot)
|
||||
})
|
||||
t.Run("Capella blinded", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedCapella{BlindedCapella: ðpbalpha.BlindedBeaconBlockCapella{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlockV2(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Capella beacon block is blinded", err)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{
|
||||
Block: ðpbalpha.GenericBeaconBlock_Deneb{
|
||||
Deneb: ðpbalpha.BeaconBlockAndBlobsDeneb{
|
||||
Block: ðpbalpha.BeaconBlockDeneb{Slot: 123},
|
||||
Blobs: []*ethpbalpha.BlobSidecar{{Slot: 123}},
|
||||
}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BeaconBlockContainerV2_DenebContents)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.DenebContents.Block.Slot)
|
||||
require.Equal(t, 1, len(containerBlock.DenebContents.BlobSidecars))
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.DenebContents.BlobSidecars[0].Slot)
|
||||
})
|
||||
t.Run("Deneb blinded", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{
|
||||
Block: ðpbalpha.GenericBeaconBlock_BlindedDeneb{
|
||||
BlindedDeneb: ðpbalpha.BlindedBeaconBlockAndBlobsDeneb{
|
||||
Block: ðpbalpha.BlindedBeaconBlockDeneb{Slot: 123},
|
||||
Blobs: []*ethpbalpha.BlindedBlobSidecar{{Slot: 123}},
|
||||
}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlockV2(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Deneb beacon block contents are blinded", err)
|
||||
})
|
||||
t.Run("optimistic", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Bellatrix{Bellatrix: ðpbalpha.BeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlockV2(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.ErrorContains(t, "The node is currently optimistic and cannot serve validators", err)
|
||||
})
|
||||
t.Run("sync not ready", func(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := v1Server.ProduceBlockV2(context.Background(), nil)
|
||||
require.ErrorContains(t, "Syncing to latest head", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProduceBlockV2SSZ(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
b := util.HydrateBeaconBlock(ðpbalpha.BeaconBlock{})
|
||||
b.Slot = 123
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Phase0{Phase0: b}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2SSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
expectedData, err := b.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedData, resp.Data)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
b := util.HydrateBeaconBlockAltair(ðpbalpha.BeaconBlockAltair{})
|
||||
b.Slot = 123
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Altair{Altair: b}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2SSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
expectedData, err := b.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedData, resp.Data)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
b := util.HydrateBeaconBlockBellatrix(ðpbalpha.BeaconBlockBellatrix{})
|
||||
b.Slot = 123
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Bellatrix{Bellatrix: b}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2SSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
expectedData, err := b.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedData, resp.Data)
|
||||
})
|
||||
t.Run("Bellatrix blinded", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: ðpbalpha.BlindedBeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlockV2SSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Bellatrix beacon block is blinded", err)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
b := util.HydrateBeaconBlockCapella(ðpbalpha.BeaconBlockCapella{})
|
||||
b.Slot = 123
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Capella{Capella: b}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2SSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
expectedData, err := b.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedData, resp.Data)
|
||||
})
|
||||
t.Run("Capella blinded", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedCapella{BlindedCapella: ðpbalpha.BlindedBeaconBlockCapella{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlockV2SSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Capella beacon block is blinded", err)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
b, err := util.NewBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
b.SignedBlock.Message.Slot = 123
|
||||
blk, err := migration.V2BeaconBlockDenebToV1Alpha1(b.SignedBlock.Message)
|
||||
require.NoError(t, err)
|
||||
signedBlobs := migration.SignedBlobsToV1Alpha1SignedBlobs(b.SignedBlobSidecars)
|
||||
blobs := make([]*ethpbalpha.BlobSidecar, len(signedBlobs))
|
||||
v2blobs := make([]*ethpbv2.BlobSidecar, len(signedBlobs))
|
||||
for i := range signedBlobs {
|
||||
blobs[i] = signedBlobs[i].Message
|
||||
v2blobs[i] = b.SignedBlobSidecars[i].Message
|
||||
}
|
||||
|
||||
blkContents := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Deneb{
|
||||
Deneb: ðpbalpha.BeaconBlockAndBlobsDeneb{
|
||||
Block: blk,
|
||||
Blobs: blobs,
|
||||
},
|
||||
}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blkContents, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlockV2SSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
expectedObject := ðpbv2.BeaconBlockContentsDeneb{
|
||||
Block: b.SignedBlock.Message,
|
||||
BlobSidecars: v2blobs,
|
||||
}
|
||||
expectedData, err := expectedObject.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedData, resp.Data)
|
||||
})
|
||||
t.Run("Deneb blinded", func(t *testing.T) {
|
||||
b, err := util.NewBlindedBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
blk, err := migration.BlindedDenebToV1Alpha1SignedBlock(b.SignedBlindedBlock)
|
||||
require.NoError(t, err)
|
||||
signedBlobs := migration.SignedBlindedBlobsToV1Alpha1SignedBlindedBlobs(b.SignedBlindedBlobSidecars)
|
||||
blobs := make([]*ethpbalpha.BlindedBlobSidecar, len(signedBlobs))
|
||||
v2blobs := make([]*ethpbv2.BlindedBlobSidecar, len(signedBlobs))
|
||||
for i := range signedBlobs {
|
||||
blobs[i] = signedBlobs[i].Message
|
||||
v2blobs[i] = b.SignedBlindedBlobSidecars[i].Message
|
||||
}
|
||||
genericBlock := ðpbalpha.GenericBeaconBlock{
|
||||
Block: ðpbalpha.GenericBeaconBlock_BlindedDeneb{
|
||||
BlindedDeneb: ðpbalpha.BlindedBeaconBlockAndBlobsDeneb{
|
||||
Block: blk.Message,
|
||||
Blobs: blobs,
|
||||
},
|
||||
},
|
||||
}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(genericBlock, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err = server.ProduceBlockV2SSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Deneb beacon blockcontent is blinded", err)
|
||||
})
|
||||
t.Run("optimistic", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Bellatrix{Bellatrix: ðpbalpha.BeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlockV2SSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.ErrorContains(t, "The node is currently optimistic and cannot serve validators", err)
|
||||
})
|
||||
t.Run("sync not ready", func(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := v1Server.ProduceBlockV2SSZ(context.Background(), nil)
|
||||
require.ErrorContains(t, "Syncing to latest head", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProduceBlindedBlock(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Phase0{Phase0: ðpbalpha.BeaconBlock{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlindedBlock(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_Phase0Block)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.Phase0Block.Slot)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Altair{Altair: ðpbalpha.BeaconBlockAltair{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlindedBlock(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_AltairBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.AltairBlock.Slot)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: ðpbalpha.BlindedBeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlindedBlock(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.BellatrixBlock.Slot)
|
||||
})
|
||||
t.Run("Bellatrix full", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Bellatrix{Bellatrix: ðpbalpha.BeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlindedBlock(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared beacon block is not blinded", err)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedCapella{BlindedCapella: ðpbalpha.BlindedBeaconBlockCapella{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlindedBlock(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_CapellaBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.CapellaBlock.Slot)
|
||||
})
|
||||
t.Run("Capella full", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Capella{Capella: ðpbalpha.BeaconBlockCapella{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlindedBlock(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared beacon block is not blinded", err)
|
||||
})
|
||||
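    // Deneb blocks are returned together with their blinded blob sidecars, so the fixture builds both before converting them to the v1alpha1 types.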
t.Run("Deneb", func(t *testing.T) {
|
||||
b, err := util.NewBlindedBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
b.SignedBlindedBlock.Message.Slot = 123
|
||||
blk, err := migration.BlindedDenebToV1Alpha1SignedBlock(b.SignedBlindedBlock)
|
||||
require.NoError(t, err)
|
||||
signedBlobs := migration.SignedBlindedBlobsToV1Alpha1SignedBlindedBlobs(b.SignedBlindedBlobSidecars)
|
||||
blobs := make([]*ethpbalpha.BlindedBlobSidecar, len(signedBlobs))
|
||||
for i := range signedBlobs {
|
||||
blobs[i] = signedBlobs[i].Message
|
||||
}
|
||||
genericBlock := ðpbalpha.GenericBeaconBlock{
|
||||
Block: ðpbalpha.GenericBeaconBlock_BlindedDeneb{
|
||||
BlindedDeneb: ðpbalpha.BlindedBeaconBlockAndBlobsDeneb{
|
||||
Block: blk.Message,
|
||||
Blobs: blobs,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(genericBlock, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlindedBlock(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_DenebContents)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), containerBlock.DenebContents.BlindedBlock.Slot)
|
||||
assert.Equal(t, fieldparams.MaxBlobsPerBlock, len(containerBlock.DenebContents.BlindedBlobSidecars))
|
||||
})
|
||||
t.Run("Deneb full", func(t *testing.T) {
|
||||
b, err := util.NewBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
b.SignedBlock.Message.Slot = 123
|
||||
blk, err := migration.V2BeaconBlockDenebToV1Alpha1(b.SignedBlock.Message)
|
||||
require.NoError(t, err)
|
||||
signedBlobs := migration.SignedBlobsToV1Alpha1SignedBlobs(b.SignedBlobSidecars)
|
||||
blobs := make([]*ethpbalpha.BlobSidecar, len(signedBlobs))
|
||||
for i := range signedBlobs {
|
||||
blobs[i] = signedBlobs[i].Message
|
||||
}
|
||||
blkContents := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Deneb{
|
||||
Deneb: ðpbalpha.BeaconBlockAndBlobsDeneb{
|
||||
Block: blk,
|
||||
Blobs: blobs,
|
||||
},
|
||||
}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blkContents, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err = server.ProduceBlindedBlock(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Deneb beacon block contents are not blinded", err)
|
||||
})
|
||||
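    // The remaining cases cover the error paths: an optimistic node, a missing builder configuration, and a node that is still syncing.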
t.Run("optimistic", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: ðpbalpha.BlindedBeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlindedBlock(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.ErrorContains(t, "The node is currently optimistic and cannot serve validators", err)
|
||||
})
|
||||
t.Run("builder not configured", func(t *testing.T) {
|
||||
v1Server := &Server{
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: false},
|
||||
}
|
||||
_, err := v1Server.ProduceBlindedBlock(context.Background(), nil)
|
||||
require.ErrorContains(t, "Block builder not configured", err)
|
||||
})
|
||||
t.Run("sync not ready", func(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
}
|
||||
_, err := v1Server.ProduceBlindedBlock(context.Background(), nil)
|
||||
require.ErrorContains(t, "Syncing to latest head", err)
|
||||
})
|
||||
}
|
||||
|
||||
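// TestProduceBlindedBlockSSZ exercises the same fork cases as the test above, but asserts the SSZ-encoded response payload instead of the protobuf container.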
func TestProduceBlindedBlockSSZ(t *testing.T) {
    ctrl := gomock.NewController(t)
    ctx := context.Background()

    t.Run("Phase 0", func(t *testing.T) {
        b := util.HydrateBeaconBlock(&ethpbalpha.BeaconBlock{})
        b.Slot = 123
        blk := &ethpbalpha.GenericBeaconBlock{Block: &ethpbalpha.GenericBeaconBlock_Phase0{Phase0: b}}
        v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
        v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
        server := &Server{
            V1Alpha1Server: v1alpha1Server,
            SyncChecker:    &mockSync.Sync{IsSyncing: false},
            BlockBuilder:   &builderTest.MockBuilderService{HasConfigured: true},
        }

        resp, err := server.ProduceBlindedBlockSSZ(ctx, &ethpbv1.ProduceBlockRequest{})
        require.NoError(t, err)
        expectedData, err := b.MarshalSSZ()
        assert.NoError(t, err)
        assert.DeepEqual(t, expectedData, resp.Data)
    })
    t.Run("Altair", func(t *testing.T) {
        b := util.HydrateBeaconBlockAltair(&ethpbalpha.BeaconBlockAltair{})
        b.Slot = 123
        blk := &ethpbalpha.GenericBeaconBlock{Block: &ethpbalpha.GenericBeaconBlock_Altair{Altair: b}}
        v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
        v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
        server := &Server{
            V1Alpha1Server: v1alpha1Server,
            SyncChecker:    &mockSync.Sync{IsSyncing: false},
            BlockBuilder:   &builderTest.MockBuilderService{HasConfigured: true},
        }

        resp, err := server.ProduceBlindedBlockSSZ(ctx, &ethpbv1.ProduceBlockRequest{})
        require.NoError(t, err)
        expectedData, err := b.MarshalSSZ()
        assert.NoError(t, err)
        assert.DeepEqual(t, expectedData, resp.Data)
    })
t.Run("Bellatrix", func(t *testing.T) {
|
||||
b := util.HydrateBlindedBeaconBlockBellatrix(ðpbalpha.BlindedBeaconBlockBellatrix{})
|
||||
b.Slot = 123
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: b}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlindedBlockSSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
expectedData, err := b.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedData, resp.Data)
|
||||
})
|
||||
t.Run("Bellatrix full", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Bellatrix{Bellatrix: ðpbalpha.BeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlindedBlockSSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Bellatrix beacon block is not blinded", err)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
b := util.HydrateBlindedBeaconBlockCapella(ðpbalpha.BlindedBeaconBlockCapella{})
|
||||
b.Slot = 123
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedCapella{BlindedCapella: b}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlindedBlockSSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
expectedData, err := b.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedData, resp.Data)
|
||||
})
|
||||
t.Run("Capella full", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Capella{Capella: ðpbalpha.BeaconBlockCapella{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlindedBlockSSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Capella beacon block is not blinded", err)
|
||||
})
|
||||
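    // For Deneb the SSZ response encodes the full BlindedBeaconBlockContentsDeneb, i.e. the blinded block together with its blinded blob sidecars.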
t.Run("Deneb", func(t *testing.T) {
|
||||
b, err := util.NewBlindedBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
b.SignedBlindedBlock.Message.Slot = 123
|
||||
blk, err := migration.BlindedDenebToV1Alpha1SignedBlock(b.SignedBlindedBlock)
|
||||
require.NoError(t, err)
|
||||
signedBlobs := migration.SignedBlindedBlobsToV1Alpha1SignedBlindedBlobs(b.SignedBlindedBlobSidecars)
|
||||
blobs := make([]*ethpbalpha.BlindedBlobSidecar, len(signedBlobs))
|
||||
v2blobs := make([]*ethpbv2.BlindedBlobSidecar, len(signedBlobs))
|
||||
for i := range signedBlobs {
|
||||
blobs[i] = signedBlobs[i].Message
|
||||
v2blobs[i] = b.SignedBlindedBlobSidecars[i].Message
|
||||
}
|
||||
genericBlock := ðpbalpha.GenericBeaconBlock{
|
||||
Block: ðpbalpha.GenericBeaconBlock_BlindedDeneb{
|
||||
BlindedDeneb: ðpbalpha.BlindedBeaconBlockAndBlobsDeneb{
|
||||
Block: blk.Message,
|
||||
Blobs: blobs,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(genericBlock, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
resp, err := server.ProduceBlindedBlockSSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.NoError(t, err)
|
||||
expectedObject := ðpbv2.BlindedBeaconBlockContentsDeneb{
|
||||
BlindedBlock: b.SignedBlindedBlock.Message,
|
||||
BlindedBlobSidecars: v2blobs,
|
||||
}
|
||||
expectedData, err := expectedObject.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedData, resp.Data)
|
||||
})
|
||||
t.Run("Deneb full", func(t *testing.T) {
|
||||
b, err := util.NewBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
b.SignedBlock.Message.Slot = 123
|
||||
blk, err := migration.V2BeaconBlockDenebToV1Alpha1(b.SignedBlock.Message)
|
||||
require.NoError(t, err)
|
||||
signedBlobs := migration.SignedBlobsToV1Alpha1SignedBlobs(b.SignedBlobSidecars)
|
||||
blobs := make([]*ethpbalpha.BlobSidecar, len(signedBlobs))
|
||||
for i := range signedBlobs {
|
||||
blobs[i] = signedBlobs[i].Message
|
||||
}
|
||||
blkContents := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_Deneb{
|
||||
Deneb: ðpbalpha.BeaconBlockAndBlobsDeneb{
|
||||
Block: blk,
|
||||
Blobs: blobs,
|
||||
},
|
||||
}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blkContents, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false},
|
||||
}
|
||||
|
||||
_, err = server.ProduceBlindedBlockSSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Prepared Deneb beacon block content is not blinded", err)
|
||||
})
|
||||
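    // Error paths mirror the non-SSZ endpoint: optimistic node, missing builder configuration, and a syncing node.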
t.Run("optimistic", func(t *testing.T) {
|
||||
blk := ðpbalpha.GenericBeaconBlock{Block: ðpbalpha.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: ðpbalpha.BlindedBeaconBlockBellatrix{Slot: 123}}}
|
||||
v1alpha1Server := mock.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().GetBeaconBlock(gomock.Any(), gomock.Any()).Return(blk, nil)
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true},
|
||||
}
|
||||
|
||||
_, err := server.ProduceBlindedBlockSSZ(ctx, ðpbv1.ProduceBlockRequest{})
|
||||
require.ErrorContains(t, "The node is currently optimistic and cannot serve validators", err)
|
||||
})
|
||||
t.Run("builder not configured", func(t *testing.T) {
|
||||
v1Server := &Server{
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: false},
|
||||
}
|
||||
_, err := v1Server.ProduceBlindedBlockSSZ(context.Background(), nil)
|
||||
require.ErrorContains(t, "Block builder not configured", err)
|
||||
})
|
||||
t.Run("sync not ready", func(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true},
|
||||
}
|
||||
_, err := v1Server.ProduceBlindedBlockSSZ(context.Background(), nil)
|
||||
require.ErrorContains(t, "Syncing to latest head", err)
|
||||
})
|
||||
}
|
||||
@@ -2239,28 +2239,6 @@ func TestGetValidatorPerformanceCapella_OK(t *testing.T) {
    }
}

func BenchmarkListValidatorBalances(b *testing.B) {
    b.StopTimer()
    beaconDB := dbTest.SetupDB(b)
    ctx := context.Background()

    count := 1000
    _, _, headState := setupValidators(b, beaconDB, count)
    bs := &Server{
        HeadFetcher: &mock.ChainService{
            State: headState,
        },
    }
    addDefaultReplayerBuilder(bs, beaconDB)

    req := &ethpb.ListValidatorBalancesRequest{PageSize: 100}
    b.StartTimer()
    for i := 0; i < b.N; i++ {
        _, err := bs.ListValidatorBalances(ctx, req)
        require.NoError(b, err)
    }
}

func setupValidators(t testing.TB, _ db.Database, count int) ([]*ethpb.Validator, []uint64, state.BeaconState) {
    balances := make([]uint64, count)
    validators := make([]*ethpb.Validator, 0, count)

@@ -214,9 +214,12 @@ go_test(
    timeout = "moderate",
    srcs = [
        "attester_mainnet_test.go",
        "proposer_utils_bench_test.go",
        "server_mainnet_test.go",
        "status_mainnet_test.go",
    ],
    embed = [":go_default_library"],
    deps = common_deps,
    deps = common_deps + [
        "//proto/prysm/v1alpha1/attestation/aggregation/testing:go_default_library",
    ],
)

@@ -639,9 +639,12 @@ func BenchmarkCommitteeAssignment(b *testing.B) {
        indices[i] = uint64(i)
    }

    chain := &mockChain.ChainService{State: bs, Root: genesisRoot[:]}
    vs := &Server{
        HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        HeadFetcher: chain,
        TimeFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

    // Create request for all validators in the system.

Some files were not shown because too many files have changed in this diff.